octavia-6.2.2/.coveragerc
=========================

[run]
branch = True
source = octavia
omit = octavia/tests/*

[report]
ignore_errors = True
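The ``[run]`` section enables branch coverage, restricts measurement to the ``octavia`` package, and omits the test tree itself; ``[report]`` suppresses errors for files that cannot be analyzed at report time. As a hedged illustration (not shipped in the tarball), the same settings can be exercised through the coverage.py API, which picks up ``.coveragerc`` from the working directory:

.. code-block:: python

    # Minimal sketch, assuming coverage.py and octavia are installed.
    # "octavia.common.constants" is just an example module under the
    # configured "source" package; any octavia import would be measured.
    import coverage

    cov = coverage.Coverage(config_file=".coveragerc")  # loads [run]/[report]
    cov.start()

    import octavia.common.constants  # noqa: E402  (code under test runs here)

    cov.stop()
    cov.save()
    cov.report(show_missing=True)

In practice the project drives this through the tox cover environment (see the ChangeLog entries about the tox cover job) rather than calling the API directly.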
octavia-6.2.2/.pylintrc
=======================

# The format of this file isn't really documented; just use --generate-rcfile

[MASTER]
# Add to the blacklist. It should be a base name, not a
# path. You may set this option multiple times.
ignore=.git,tests

[MESSAGES CONTROL]
# NOTE: The options which do not need to be suppressed can be removed.
disable=
    # "F" Fatal errors that prevent further processing
    # "I" Informational noise
    c-extension-no-member,
    locally-disabled,
    # "E" Error for important programming issues (likely bugs)
    import-error,
    not-callable,
    no-member,
    # "W" Warnings for stylistic problems or minor programming issues
    abstract-method,
    anomalous-backslash-in-string,
    arguments-differ,
    attribute-defined-outside-init,
    bad-builtin,
    broad-except,
    fixme,
    global-statement,
    no-init,
    pointless-string-statement,
    protected-access,
    redefined-builtin,
    redefined-outer-name,
    signature-differs,
    unidiomatic-typecheck,
    unused-argument,
    unused-variable,
    useless-super-delegation,
    # "C" Coding convention violations
    bad-continuation,
    invalid-name,
    line-too-long,
    missing-docstring,
    # "R" Refactor recommendations
    duplicate-code,
    interface-not-implemented,
    no-self-use,
    too-few-public-methods,
    too-many-ancestors,
    too-many-arguments,
    too-many-branches,
    too-many-instance-attributes,
    too-many-lines,
    too-many-locals,
    too-many-public-methods,
    too-many-return-statements,
    too-many-statements,
    multiple-statements,
    duplicate-except,
    keyword-arg-before-vararg,
    useless-object-inheritance

[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$

# Module names matching
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79

[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=

[CLASSES]

[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=

[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems

[REPORTS]
# Tells whether to display a full report or only the messages
reports=no
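The naming rules under ``[BASIC]`` are ordinary Python regular expressions that pylint matches from the start of each symbol name; the trailing ``$`` in the configured patterns supplies the end anchor. A hypothetical sanity check (the sample identifiers are invented for illustration) of how ``variable-rgx`` and ``method-rgx`` classify names:

.. code-block:: python

    # Hypothetical demo of the [BASIC] naming patterns above.
    import re

    VARIABLE_RGX = r"[a-z_][a-z0-9_]{0,30}$"
    METHOD_RGX = r"([a-z_][a-z0-9_]{2,}|setUp|tearDown)$"

    for name in ("vip_address", "lb", "VipAddress", "x" * 32):
        # re.match anchors at the start, mirroring pylint's behavior.
        ok = re.match(VARIABLE_RGX, name) is not None
        print(f"variable {name!r}: {'ok' if ok else 'rejected'}")

    for name in ("plug_vip", "do", "setUp"):
        ok = re.match(METHOD_RGX, name) is not None
        print(f"method {name!r}: {'ok' if ok else 'rejected'}")

Note that a rejected name would normally raise the ``invalid-name`` message, but that message is disabled in ``[MESSAGES CONTROL]`` above, so these patterns effectively document the intended style rather than enforce it here.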
octavia-6.2.2/.stestr.conf
==========================

[DEFAULT]
test_path=${OS_TEST_PATH:-./octavia/tests/unit}
top_dir=./
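The ``${OS_TEST_PATH:-./octavia/tests/unit}`` value uses shell-style default substitution, which stestr expands when it reads the config: the unit test tree is discovered unless the ``OS_TEST_PATH`` environment variable points somewhere else (for example, at a functional test tree). A minimal Python sketch of the same fallback logic, assuming the variable and default shown above:

.. code-block:: python

    # Sketch of the ${OS_TEST_PATH:-default} fallback applied to test_path.
    # Like the shell form, an unset *or empty* variable falls back to the
    # default, hence "or" rather than a plain dict .get() default.
    import os

    test_path = os.environ.get("OS_TEST_PATH") or "./octavia/tests/unit"
    print(f"tests will be discovered under: {test_path}")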
octavia-6.2.2/AUTHORS
=====================

Adam Harwell Adit Sarfaty Adolfo Duarte Aishwarya Thangappa Aishwarya
Thangappa Ajay Kumar AjayKumar333 Al Miller Al Miller Alex Barclay Alexander
Amerine Andreas Jaeger Andrew Karpow Anh Tran Ankur Gupta Ann Kamyshnikova
Ann Taraday Arnaud Morin Austin Russell Aviv AvnishPal BANASHANKAR
KALEBELAGUNDI VEERA Banashankar K Veerad Bar RH Ben Carr Ben Nemec Bernard
Cafarelli Bertrand Lallau Bertrand Lallau Bharath M Bo Chi Bo Wang Bodo
Petermann Brandon Logan Brian Haley Brian Rosmaita CALIN Cristian Andrei Cao
Xuan Hoang Carlos D. Garza Carlos Goncalves Cedric Brandily Chandan Kumar
Chandan Kumar Chaozhe.Chen Christian Berendt Chuck Wilson Claudiu Belu Colin
Gibbons Corey Bryant Daniel 'f0o' Preussker Daniel Mellado Dao Cong Tien
Dawson Coleman Deepak Dong Jun Doug Fish Doug Hellmann Doug Wiegley Dustin
Lundquist Elena Ezhova EranRaichstein Erik Olof Gunnar Andersson Evan Gray
Evgeny Fedoruk Fei Long Wang Flavio Percoco Franklin Naval Frode Nordahl
Ganpat Agarwal Gary Kotton Gerard Braad German Eichberger German Eichberger
German Eichberger Grant Murphy Gregory Haynes Gregory Thiemonge Guilherme
Steinmüller Guoqiang Ding Ha Van Tu Hang Yang He Qing Hemanth Nakkina Henry
Gessau Henry Gessau Hidekazu Nakamura Hongbin Lu Ian Wienand Ihar Hrachyshka
Ildar Iskhakov Itzik Brown Jacky Hu James Arendt James E. Blair James Page
Jamie Lennox Jason Niesz Jeffrey Longstaff Jens Harbott Jeremy Liu Jeremy
Stanley Jiahao Liang Jim Rollenhagen JingLiu Jorge Miramontes Joshua Harlow
Joshua Harlow Jude Cross Kamil Sambor Ken'ichi Ohmichi Kenichi Omichi Kenji
Yasui Kenneth Giusti Kiall Mac Innes Kim Bao Long Kobi Samoray Kyle Mestery
Lingxian Kong LiuNanke Lubosz "diltram" Kosnik Lubosz Kosnik Luis Tomas
Bolivar Luong Anh Tuan M V P Nitesh Maciej Józefczyk MaoyangLiu Margarita
Shakhova Mark Vanderwiel Martin Chlumsky Masayuki Igawa Matt Alline Michael
Johnson Michael Johnson Michal Arbet Michal Rostecki Michał Dulko Miguel
Angel Ajo Miguel Angel Ajo Mohammed Naser Monty Taylor Nakul Dahiwade Nam
Nguyen Hoai Ngo Quoc Cuong Nguyen Hai Nguyen Van Trung Nir Magnezi Noboru
Iwamatsu Omer Anson OpenStack Release Bot Paul Glass Paul Peereboom Pavel
Abalikhin Peng Zhi Xiong Pradeep Kumar Singh Qian Min Chen Rafal Pietrzak
Raimund Hook Rene Luria Roman Goncharov Sahid Orentino Ferdjaoui Sam Morrison
Santhosh Fernandes Sean McGinnis Selvakumar S Sergey Belous ShangXiao
Shashank Kumar Shankar Sherif Abdelwahab Shuquan Huang Sindhu Devale
Stanislav Dmitriev Stefan Nica Stephen Balukoff Stephen Balukoff Susanne
Balle Sven Wegener Tatsuma Matsuki Thomas Bechtold Thomas Goirand Tin Lam
Tony Breeds Trevor Vardeman Trevor Vardeman Tuan Do Anh Vadim Ponomarev
Valeria Perelman Van Hung Pham Vieri <15050873171@163.com> Vlad Gusev Vu Cong
Tuan WangBinbin XieYingYun Xing Zhang Xing Zhang YAMAMOTO Takashi Yang
JianFeng Yang Li Yatin Kumbhare Yoshihide Matsumoto Zhao Lei ZhaoBo ZhiQiang
Fan ajmiller akhiljain23 archiephan bharath caoyuan caoyue chen-li cheng
chengebj5238 chenghang chenghuiyu chenxiangui chimeng ckji da52700 daz dean
deepakmourya e gaobin gecong1973 ghanshyam guotao.bj huangshan johnsom lidong
lingyongxu liyang lungc madhusudhan-kandadai maoshuai melissaml minwang
nmagnezi pandatt pengyuesheng ptoohill ptoohill1 qinchunhua rajat29 ramboman
root root sapd shangxiaobj tonybrad venkatamahesh wanghui wangxiyuan
wangxiyuan wei weiyj weizhijun yatin karel zhang.lei zhangdebo zhangdetong
zhangyanxian zhangyanxian zhouxinyong zhouyunfeng zhufl zhulingjie zoushilin

octavia-6.2.2/CONSTITUTION.rst
==============================

====================
Octavia Constitution
====================

This document defines the guiding principles that project leadership will be
following in creating, improving and maintaining the Octavia project.

Octavia is an OpenStack project
-------------------------------

This means we try to run things the same way other "canonized" OpenStack
projects operate from a procedural perspective. This is because we hope that
Octavia will eventually become a standard part of any OpenStack deployment.

Octavia is as open as OpenStack
-------------------------------

Octavia tries to follow the same standards for openness that the OpenStack
project also strives to follow: https://wiki.openstack.org/wiki/Open
We are committed to open design, development, and community.

Octavia is "free"
-----------------

We mean that both in the "beer" and in the "speech" sense. That is to say,
the reference implementation for Octavia should be made up only of open
source components that share the same kind of unencumbered licensing that
OpenStack uses.

Note that this does not mean we are against having vendors develop products
which can replace some of the components within Octavia. (For example, the
Octavia VM images might be replaced by a vendor's proprietary VM image.)
Rather, it means that:

* The reference implementation should always be open source and unencumbered.
* We are typically not interested in making design compromises in order to
  work with a vendor's proprietary product. If a vendor wants to develop a
  component for Octavia, then the vendor should bend to Octavia's needs, not
  the other way around.

Octavia is a load balancer for large operators
----------------------------------------------

That's not to say that small operators can't use it. (In fact, we expect it
to work well for small deployments, too.) But what we mean here is that if in
creating, improving or maintaining Octavia we somehow make it unable to meet
the needs of a typical large operator (or that operator's users), then we
have failed.

Octavia follows the best coding and design conventions
------------------------------------------------------

For the most part, Octavia tries to follow the coding standards set forth for
the OpenStack project in general: https://docs.openstack.org/hacking/latest
More specific additional standards can be found in the HACKING.rst file in
the same directory as this constitution. Any exceptions should be well
justified and documented. (Comments in or near the breach in coding standards
are usually sufficient documentation.)
octavia-6.2.2/CONTRIBUTING.rst
==============================

The source repository for this project can be found at:

   https://opendev.org/openstack/octavia

Pull requests submitted through GitHub are not monitored.

To start contributing to OpenStack, follow the steps in the contribution
guide to set up and use Gerrit:

   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html

Bugs should be filed on Storyboard:

   https://storyboard.openstack.org/#!/project/openstack/octavia

For more specific information about contributing to this repository, see the
Octavia contributor guide:

   https://docs.openstack.org/octavia/latest/contributor/contributing.html

octavia-6.2.2/ChangeLog
=======================

CHANGES
=======

6.2.2
-----

* Fix failover of az-specific loadbalancers
* Fix management network selection when calculating deltas
* Fix duplicate SG creation for listener peer port
* Fix MAX_TIMEOUT value for listener
* Fix periodic image builder jobs
* Fix amphora image build jobs
* Fix PlugVIPAmphora revert function in amphorav2
* Update nr_open limit value in the amphora
* Fix using host_routes in VIP subnet with amphorav2
* Fix race conditions between API and worker DB calls

6.2.1
-----

* Enable lo interface in the amphora-haproxy netns
* Add amphora_id in store params for failover_amphora
* Fix comment for the ca_certificates_file opt
* Optimize CountPoolChildrenForQuota task in amphorav2
* Fix task_flow.max_workers with persistence in amphorav2
* Fix rsyslog configuration when disabling logs
* Fix devstack cleanup when using amphorav2
* Validate user access to vip_subnet_id when creating a LB
* Fix using subnets with host_routes in amphorav2 driver
* Make /healthcheck cache results
* Explicitly set nodeset to Bionic-based
* Cap hacking
* Fix empty Batch Member Update to unlock objects
* Fix weighted round-robin about UDP listener
* Fix LB failover for amphorav2: set security group
* Fix incorrect ERROR status with IPv6 UDP members
* Fix provider driver quota handling

6.2.0
-----

* Fix LB failover when IP addresses exhausted
* Drop lower constraints testing (stable Ussuri)
* Fix amphora image build
* Followup Remove haproxy_check_script for UDP-only
* Fix backend certificate file paths
* Fix overriding default listener timeout values in config file
* Add validation for VIP network parameters in amphora driver
* Configure rsyslog on Octavia service nodes in devstack
* Fix pools going into ERROR when updating the pool
* Disable auto configuration of new interfaces in NetworkManager
* Support deletion of all tags
* Fix nf_conntrack_buckets sysctl in Amphora
* Fix AttributeError on TLS-enabled pool provisioning
* Fix Ussuri requirements for Victoria grenade
* Fix amphorav2 bytes error
* Fix operating status for empty UDP pools
* Use 'bash' in the keepalived check script
* Fix lower-constraints
* Handle listener not found during loadbalancer status update
* Fix load balancers with failed amphora failover
* Map cloud-guest-utils to cloud-utils-growpart for Red Hat distros
* Fix missing cronie package in RHEL-based image builds
* Fix load balancers with failed amphora failover
* Fix pep8 failures in gate
* Remove unnecessary joinedload
* Fix pool delete race on load balancer cascade delete
* [Amphorav2] Healthmonitor operation minor fixes

6.1.0
-----

* Ignore DELETED amphorae when performing certificate rotation
* Fixes API list handling of unscoped tokens
* Fix amphora failover when VRRP port is missing
* Fix invalid DOWN status when updating a UDP pool
* Fix operational status for disabled UDP listeners
* Add some details on enable_anti_affinity option
* Fix user permission for WSGI configuration (Task: 35692, Story: 2006172)
* Add missing log line for finishing amp operations
* Update devstack plugin
* Set Grub timeout to 0 for fast boot times
* Update amphora v2 for the failover refactor
* Fix memory consumption issues with default connection_limit
* Remove haproxy_check_script for UDP-only LBs
* Update grenade job to run one smoke test
* Fix missing dependency in amphora create flow
* Fix missing params in amphora base and noop driver
* Add missing reload method in amphora noop driver
* Refactor the failover flows
* Fix API sort key for complex columns
* Pin pylint for stable branches
* Increase the devstack security group rules quota
* Switch back to using git for the agent ref
* Set amphora-agent build-only packages
* Fix accepting 'insert_headers' when unsupported
* Fix haproxy user flow log format substitution
* Prioritize policy validation
* Fix listener update with SNI certificates
* Migrate grenade job to native Zuul v3
* add the verify for the session
* Fix neutron subnet lookup ignoring endpoint_type
* Fix netcat option in udp_check.sh for CentOS/RHEL
* Fix batch member create for v1 amphora driver

6.0.1
-----

* Workaround peer name starting with hyphen
* Fixed a bug: replace 'startwith' with 'startswith'
* Fix the grenade plugin to also upgrade octavia-lib
* Fix getting user neutron client
* Remove deprecated exception classes
* Fix healthmanager not updating amphora health when LB is disabled
* Update TOX_CONSTRAINTS_FILE for stable/ussuri
* Update .gitreview for stable/ussuri

6.0.0
-----

* Small fix of wording in Ubuntu install doc
* Validate resource access when creating loadbalancer or member
* Healthmanager opts aren't CLI-related
* Disable two tests due to sqlalchemy/sqlite bug
* Support HTTP and TCP checks in UDP healthmonitor
* Run taskflow jobboard conductor conditionally
* Don't inherit enforcing bash errexit in devstack plugin
* Fix py3 amphora-agent cert-rotation type bug
* Imported Translations from Zanata
* Correct delay between UDP healthchecks
* Add availability-zone to loadbalancer object docs
* Jobboard based controller
* Add ability to specify TLS cipher list for pools
* Don't register cli opts on import
* Add ability to set TLS cipher list for listeners
* Add F5 Networks provider driver by SAP SE
* Update hacking for Python3
* Ussuri contributor docs community goal
* Fix amphora image build jobs
* Exclude acpid on Red Hat family DIB builds
* Fix padding logic for UDP health daemon
* Add oslo middleware healthcheck to Octavia API
* Remove all usage of six library
* Remove the barbican "Grant access" from cookbook
* Stop "dnf makecache" in the amphora images
* [Amphorav2] Fix noop driver case
* Use sqlalchemy isnot(None)
* Support hacking 2.0.0
* Pick stale amphora randomly
* Update the available provider drivers list
* Fix load balancer update with provider filtered params
* Allow AZ to override valid_vip_networks config
* Network Delta calculations should respect AZs
* Select the right lb_network_ip interface using AZ
* Fix uncaught DB exception when trying to get a spare amphora
* Fix TESTING.rst example
* Remove the dependency on the "mock" package
* Add docs warning for PING health monitors
* Add UDP LBs to the basic cookbook
* Add a periodic image build job
* Add diskimage-create arg for Octavia Git branch
* Update the lb_id on an amp earlier if we know it
* Support haproxy development snapshot version parsing
* Add listener allowed_cidrs to the feature matrix
* Transition l7rule flows to dicts
* Transition l7policy flows to dicts
* Convert health_monitor flows to use provider models
* Fix jobs not running and add new ones to the gate
* Use LUKS1 for certfs-ramfs for all distributions
* Convert Lb flows to use provider dicts
* Transition amphora flows to dicts
* Complete dropping py27 support goal
* Add logging filter for AmpConnectionRetry exception
* Fix the interface filenames for Red Hat amphora images
* Add listener and pool protocol validation
* Allow the Octavia wsgi to accept argv parameters
* Stop supporting CentOS 7 controllers and images
* Remove test calls to reset_mock()
* Make octavia reproducible
* Fix diskimage-create.sh for Debian
* Add install guide for Ubuntu
* Support creating an LB in a specified AZ
* Fix tests to correctly call reset_mock()
* Fix house keeping graceful shutdown
* Update policy docs to reflect json format
* Update ListenersUpdate for lb/listener dicts
* Fix multi-listener LB client auth/re-encryption
* Fix multi-listener LB with missing certificate
* Fix unit test when run on CentOS 7
* Stop supporting xenial amphora images
* Gate on octavia-v2-dsvm-tls-barbican
* Add support for CentOS 8 amphora images
* Accept oslopolicy-policy-generator path arguments
* Adjust for new octavia-lib LB signature
* Transition member flows to use dicts
* Convert pool flows to use dicts
* Convert listener flows to use provider models
* Stop testing python 2
* Use retry for AmphoraComputeConnectivityWait
* Fix update API when barbican secret is missing
* Use 2048-bits keys for devstack certificates
* Make octavia-grenade job non-voting
* Availability Zone admin API
* Remove unnecessary interface configurations
* Fix filtering with API tags
* Stop allowing the deletion of an in-use flavor
* Fix name of VRRP configuration option
* Fix filtering for provider capabilities list API
* Update flavor guide to be cut/paste friendly
* Fix a potential race condition with certs-ramfs
* ipvsadm '--exact' arg to ensure outputs are ints
* Fix listeners with SNI certificates
* Fix controller worker graceful shutdown
* Fix typo in doc agent.py->agent
* [Trivial]Add missing ws between words
* Fix batch member update error on empty change list
* Fix some plug.py unit tests that broke on OSX
* Allow IPv6 health network in devstack
* Fix issues with unavailable secrets
* Revert "Clean up requirements.txt from tox.ini"
* Add backend re-encryption to the LB cookbook
* Add client authentication to the LB cookbook
* Switch to openstack-python3-ussuri-jobs
* Clean up requirements.txt from tox.ini
* Use bandit block in tox.ini
* Add bash script style checker to pep8 check
* Fix log offload file permissions in CentOS devstack
* Do not set log format when 'no log' is set
* Remove duplicate keys in sample config files
* Add the Amphora image building guide to the docs
* Fix urgent amphora two-way auth security bug
* Add Gentoo platform detection in diskimage-create/diskimage-create.sh
* Fix certificate directory creation
* Update master for stable/train

5.0.0.0rc1
----------

* Validate supported LB algorithm in Amphora provider drivers
* Fix new pep8/pylint errors
* Delete the periodic Fedora 28 amphora image job
* Improve the error message for bad pkcs12 bundles
* loadbalancer vip-network-id IP availability check
* Generate PDF documentation
* Fix the diskimage-create tox "build" environment
* Validate server_certs_key_passphrase is 32 chars
* Fix 'additive_only' parameter api-ref
* Fix healthmonitor message v2 for UDP listeners
* Fix the amphora no-op driver
* Fix openstack port show content handling
* Set neutron client logging to INFO
* Fix the tips job for octavia-lib
* Add new algorithm SOURCE_IP_PORT
* Add VIP access control list
* Fix a few nits from the main volume-based patch
* Add long-running provider agent support
* Fix cleanup of expired load balancer entries
* Fix building configs for multiple listeners
* Add `additive_only` parameter to Batch Member call
* Support create amphora instance from volume based
* Bump diskimage-builder minimum to 2.24.0
* Move to using octavia-lib constants
* Add get method support to the driver-agent
* Fix base (VRRP) port abandoned on revert
* Update the load balancing cookbook
* Use dual intermediate CAs for devstack
* Fix template that generates vrrp check script
* worker: Re-add FailoverPreparationForAmphora
* Standardizes terminology in Listener documentation
* Switch TaskFlow engine to parallel
* Set Libvirt hw_rng for devstack
* Work around strptime threading issue
* Fix provider driver utils
* Fix L7 repository create methods
* Lookup interfaces by MAC directly
* Allow listeners for different protocols on the same port
* Use the infra pypi mirror for DIB
* Remove amphora-agent build deps
* Change amphora logging to dedicated socket
* Fixed typos and bad markup style in maintenance guide
* Add unit test for failover of spare amphorae
* Re-enable the py36 tox environment
* Clarify that an example is not an actual list
* Add missing lib dependencies to requirements.txt
* Fix wrong package names or versions for centos-minimal images
* Install missing packages in nodepool instance
* Fix listener deletion in ACTIVE/STANDBY topology
* Add Octavia tox "tips" jobs
* Add the DIB_REPO* variables to the README.rst
* Fix a unit test for new octavia-lib
* Add support for oslo_middleware http_proxy_to_wsgi
* Force DIB Python version for py2 in diskimage-create
* Fixed down server issue after reloading keepalived
* Fixed pool and members status with UDP loadbalancers
* Add support for monitor_{address,port} in UDP members
* Fix flavor profile API handling of None updates
* Fix l7rule API handling of None updates
* Correcting typo in healthmonitors-list-response.json - http_vesion to http_version
* Add failover logging to show the amphora details
* Build amphora image using centos-minimal element
* Remove octavia-v2-dsvm-scenario-ubuntu-bionic job
* Make Ubuntu bionic the default for amphora images
* Fix multi-listener load balancers
* Add missing test requirement "fixtures"
* Add project_id to all of the provider objects
* Update api-ref location
* Bump the openstackdocstheme extension to 1.20
* Fix cryptsetup --pbkdf-memory failures
* Add cloud-init-datasources DIB element
* Blacklist sphinx 2.1.0 (autodoc bug)
* elements: add arch property for ``open-vm-tools``
* Add active-standby scenario jobs to check queue
* Add warning log if auth_strategy is not keystone
* Fix dhclient enter hook path on RHEL-based amps
* Switch the default Ubuntu kernel to -virtual
* Prevent UDP LBs to use different IP protocol versions in amphora driver
* Add Python 3 Train unit tests
* Update tox.ini for new upper constraints strategy
* only rollback DB when we have a connection to the DB
* Specify the linux-image-kvm kernel for ubuntu
* Add bindep.txt for Octavia
* Switch python3 versions of test jobs to match Train PTI
* Workaround pyroute2 platform-dependent imports
* Add a note about nova hardware architectures
* Fix a python3 issue in the amphora-agent
* Fix TCP listener logging bug
* Fix allocate_and_associate DB deadlock
* Amphora logging
* Align logging in the amphora
* Add RHEL 8 amphora support
* Limit cryptsetup key RAM usage
* Create Amphora V2 provider driver
* Remove references to OpenStack Anchor
* Clarify that the certificate guide should be used
* Fix health monitor API handling of None updates
* Fix member API handling of None/null updates
* Rename review.openstack.org to review.opendev.org
* Fix catching driver exceptions
* Document health monitor UDP-CONNECT type
* Fix tox for functional py36 and py37
* Correct OVN driver feature matrix
* Delete unused files in tests/contrib
* Enable fail-fast on the gate queue
* db: add missing primary key in spares_pool table
* Remove tempest.test_plugins from setup.cfg
* Add provider feature support matrix
* Remove tests/tempest path from devstack and tox
* Fix pool API handling of None/null updates
* Update hacking version to latest
* Force amp-agent communication to TLSv1.2
* Add Python 3.7 support
* Cap to sphinx 2.0.0 on Python 2.7
* Replace git.openstack.org URLs with opendev.org URLs
* Remove v1 API and associated code
* Performance improvement for non-udp health checks
* Bandit test exclusions syntax change
* Ignore .vscode directory
* Make sure amphora logging works the same on py2 and py3
* Fix missing INCLUDEPKGS var in haproxy element
* Make amphora cert validity time configurable
* Update some octavia documentation
* Removing an erroneous comment
* Update operator maintenance doc
* Fix listener API handling of None/null updates
* Remove python3.5 jobs for Train
* OpenDev Migration Patch
* Adding changes to support ppc64le amphora builds with CentOS
* Fix spare amphora failover
* Update documentation about rotating amphorae
* Fix missing REDIRECT_PREFIX in the api-ref
* Constraint networkx to <2.3 for Python 2
* Fix the amphora base port coming up
* Fix IPv6 in Active/Standby topology on CentOS
* Fix spare amphora check and creation
* Fix invalid query selector with list_ports
* Fix setting of VIP QoS policy
* Fix VIP plugging on CentOS-based amphorae
* Add support to the Python Visual Studio Debugger
* Fix ifup failures on member interfaces with IPv6
* Adds server_certs_key_passphrase to octavia.conf
* Replace openstack.org git:// URLs with https://
* Make sure the expected codes list is sorted
* Update master for stable/stein

4.0.0.0rc1
----------

* Fix a lifecycle bug with child objects
* Fix initialization of Barbican client
* Refactors LB delete flow and removes listener delete
* Add a bionic test image periodic job
* Add error-handling for permission of /etc/octavia
* Limit spares pool to the spare_amphora_pool_size
* Fix typo and remove useless lines in user guide
* Remove unnecessary \n from create_certificates.sh
* Add a prelude to the Stein release notes
* Updates Octavia to support octavia-lib
* Fix diskimage-create tox, add ``build`` and ``test`` targets
* Remove outdated/incorrect certificate advice
* Fix updates to the provider driver guide
* Support Host header inject for healthmonitor HTTP 1.1 health check
* Trivial: Remove unused OCTAVIA_AMP_SUBNET_ID
* Fix health monitor exception
* Support L7policy redirect http code
* Fix an amphora driver bug for TLS client auth
* Amp driver support sni option to send the hostname to backend
* Add boolean tls_enabled option into Pool
* Add 2 new fields into Pool API for support re-encryption
* Pool support sni cert for backend re-encryption
* Fix parallel plug vip
* Set octavia available in tempest config
* L7rule support client certificate cases
* Add new ssl header into Listener for client certificate
* Add crl-file option for certification
* Fix the loss of access to barbican secrets
* Encrypt certs and keys
* Add an option to the Octavia V2 listener API for client cert
* Add client_ca_tls_container_ref to listener API
* Enable debug for Octavia services in grenade job
* Fix LB failover when in ERROR
* Update json module to jsonutils
* Update requirements for ubuntu
* Fix performance of housekeeping DB clean up
* Fix oslo messaging connection leakage
* Speed up pylint by using multiple cores
* Set the default retries down to 120
* Add amphora agent configuration update admin API
* Refactor the plugging of the VIP
* Install DIB binary dependencies from bindep.txt
* Resolve amphora agent read timeout issue
* Fix functional tests under Python >= 3.6
* Fix check redirect pool for creating a fully populated load balancer
* Cleaning up logging
* Fix flavors support when using spares pool
* Fix the amphora noop driver
* Add missing import octavia/opts.py
* Fix VIP address DB storage size to support IPv6
* Fix a topology bug in flavors support
* Add compute flavor support to the amphora driver
* Update the amphora driver for flavor support
* Add flavor docs and release notes
* Add provider driver capabilities API
* Adds flavor support to the amphora driver
* Add flavors/flavor_profile api-ref
* Add flavor, flavor_profile table and their APIs
* Update api-ref for tag filtering
* Support to filter resources by tags
* Add octavia-v2-dsvm-tls-barbican check job
* Clarify the amp_ssh_key_name configuration setting
* Fix typo in doc
* Improve local hacking checks test coverage
* Fix missing print format error
* Adds a simple golang udp server
* Fix prefix for vip_ipv6
* Fix a typo in the certificates admin guide
* Fix grenade job to clone Octavia from base branch
* Workaround for a dnf bug
* diskimage-create: Support testing other image types
* fix the spelling mistake word
* Exit with error if DIB output dir doesn't exist
* Add amphora statistics to the admin API
* Fixing data model to_dict() recursive function
* Set same debug log level as per DevStack local.conf
* Add compute_flavor field for amphora api
* Change openstack-dev to openstack-discuss
* Adds a certificates configuration guide
* Fix v1 scenario tests gates
* Tags support for lb resources
* fix typo mistakes
* Support remote debugging with PyDev
* Stop Logging Amphora Cert
* demystify the calculate_network_delta unit test
* Enable non-voting two node gates
* Transition to python 3.x primary gates
* Prepare for new multi-node jobs
* Bring up secondary IPs on member networks
* Add auditing support to the Octavia API
* Make the CentOS 7 scenario gate non-voting
* Allow release id to be specified with fedora
* Modify the doc word error
* Fix IPv6 in Active/Standby topology
* Fix the grenade gate
* Correct the base class for v2 l7rules type tests
* Fix devstack plugin for /var/log/dib-build exists
* Add missing ws separator between words
* Fix a release note to call out a fix instead
* Treat null admin_state_up as False
* Delete duplicate word in component-design.rst
* Update the HTTP links to HTTPS
* Add framework for octavia-status upgrade check
* Fix VIP plug failure if netns directory exists
* Log the compute ID when nova fails
* Fix possible state machine hole in failover
* Update min tox version to 2.0
* Update README by adding Mailing List and Wiki URL
* Redirect disk-image-builder logs, make verbose
* Fix diskimage-create README.rst
* Don't quote {posargs} in tox.ini
* Remove unused methods
* Add possibilities to set default timeouts
* Update docs conf.py for openstackdocstheme change
* Remove deprecated API settings
* Ensure pool object contains the listener_id if passed
* Refactor the AAP driver to not depend on nova
* Increment versioning with pbr instruction
* Fix logging error in get_current_loadbalancer_from_db
* Delete zombie amphorae when detected
* Add certificate configs to config reference
* Remove deprecated parameters
* Add API versioning to the api-ref
* Add a periodic job to build + publish diskimage for fedora
* HTTPS HMs need the same validation path as HTTP++
* Fix for utils LB DM transformation function
* Fix auto setup Barbican's ACL in the legacy driver
* Make disk image buildable for fedora
* Fix an upgrade issue for CentOS 7 amphora
* Support REDIRECT_PREFIX action for L7Policy
* Pass through DIB_LOCAL_ELEMENTS from localrc
* HTTPS HMs need the same validation path as HTTP
* Remove duplicate check gate jobs
* Fix the API list performance regression
* Updates the operator docs with the new lb failover command
* Simplify keepalived lvsquery parsing for UDP
* Updates README-Vagrant.md to use OSC commands
* Gate on octavia-dsvm-base based jobs and housekeeping
* Make health checks resilient to DB outages
* Validate member address for batch update members
* Fix batch update members
* Support DNF package manager in diskimage-create
* Disable KVM at limestone (again)
* Fix health manager performance regression
* Raise minimum coverage to 90%
* Use openstack-tox-cover template
* Update zuul/projects.yaml
* Validate member address for lb graph creation
* devstack: Define packages for (open)SUSE
* Update amphora-agent to report UDP listener health
* Set some amphora driver optimizations
* Re-enable flow diagrams
* Fix passphrase None errors
* add python 3.6 unit test job
* switch documentation job to new PTI
* Fix the amphora noop driver
* import zuul job settings from project-config
* Separate the thread pool for health and stats update
* Temporarily remove octavia-v2-dsvm-scenario-ubuntu.bionic
* Disable KVM at OVH
* Fix compat with Python >= 3.6
* Allow blocking IPs from member addresses
* "Resolve" bandit issue with sha1 hashes
* Fix neutron "tenat_id" compatibility
* Fix Octavia for host host routes
* Imported Translations from Zanata
* Update reno for stable/rocky

3.0.0.0rc1
----------

* Remove user_group option
* Fix UDP release note for CentOS
* [UDP] Fix failed member always in DRAIN status
* Followup patch for UDP support
* UDP for [3][5][6]
* UDP for [2]
* Clarify that the driver support library is interim
* UDP jinja template
* Delete amphora regardless of status
* Fix the bionic gate to actually run Ubuntu bionic

3.0.0.0b3
---------

* Updates the amphora driver for new commit model
* Automatically set Barbican ACLs
* Add listener_id to the pool provider object
* Correct naming for quota resources
* Add baseline object in the drivers update callbacks
* Init provider drivers at app setup
* Fix DIB_REPOREF_amphora_agent not set on Git !=1.8.5
* Fixing Octavia generating json named file containing yaml
* Fix failover when multiple amphora have failed
* Temporarily disable flow diagramming
* Add the missing markup for the hyperlink title
* Correct the usage on listing neutron ports
* Update pypi url to new url
* Fixes unlimited listener connection limit
* Use object instead of object id in the drivers delete callbacks
* Cleanup Octavia create VIP ports on LB delete
* Add lb_id comment to amp haproxy listener config
* Translate expected package names to installed ones
* Fix package version querying on non-dpkg distros
* Add release note for HMAC python3 fix
* Add a config to suppress amphora logging
* Add the missing markup for the hyperlink title
* Use openstack client command to replace nova client
* Allow setting load balancer topology in Devstack
* Implement provider drivers - Driver Library
* [doc] Add the missing markup for the hyperlink title
* Follow the new PTI for document build
* Fix version discovery for the Octavia API
* [doc]Use openstack client commands to replace nova client
* Add the missing markup for the hyperlink title
* Improve resource quota response message
* Introduce ipvsadm to the amphora image
* Ignore a port not found when deleting an LB
* [amphora-agent] add local net to routing table 1
* Fix fields translation on filtering
* Enable oslo_config mutable configurations
* Move from platform.linux_distribution to distro.id
* Re-enable KVM
* fix tox python3 overrides
* Add exception handling for housekeeping service
* Add upgrade guide
* Clarify IPv6 support for the lb-mgmt-net
* Exclude amphora instances already DELETED when failover loadbalancer
* Fix configured haproxy restarts
* Providers: propose changes to create pool API
* Change tox.ini coverage to always generate html
* Make octavia-grenade job voting
* Add error logging for amphora agent exceptions
* Fix health manager to be spare amphora aware
* Adding support for the octavia listener X-Forwarded-Proto header insertion
* Add health monitor API validation
* Exclude limestone from running with kvm
* Stop logging API not found as ERROR
* Align logging on oslo_log
* Add grenade support
* Allow using spaces for primary common name in SSL certificates
* Improve the error logging for zombie amphora
* When SG delete fails on vip deallocate, try harder
* Amend the spelling error of a word
* Remove a duplicated key in unit test dict

3.0.0.0b2
---------

* Fix amp failover where failover already failed
* Providers: propose changes to API
* Implement provider drivers - Cleanup
* Implement provider drivers - L7 Rules
* Fix listener update race with default pool removal
* Implement provider drivers - L7 Policy
* Implement provider drivers - Health Monitor
* Allow DB retries on controller_worker creates
* Use HMAC.hexdigest to avoid non-ascii characters for package data
* Fix requirements gate + add local tox profile
* Octavia devstack plugin API mode
* Implement provider drivers - Members
* Add coverage.xml output for PTI compliance
* Implement provider drivers - Pool
* Implement provider drivers - Listener
* Implement provider drivers - Load Balancer
* Add release notes link to README
* Fix amphora failover API to work with spares
* Mark LBs in failover PENDING_UPDATE
* Add config for disabling creation of PING type HMs
* Fix releasenotes build in tox
* Fix typo
* Add tenant_id back to objects for backwards compat
* Replace port 35357 with 5000 for "auth_url"
* Increase devstack neutron secgroups quota to 100
* Trivial: Update pypi url to new url
* Let healthmanager process shutdown cleanly (again)
* Fix sphinx-docs job for sphinx >1.7
* Create disabled members in haproxy
* Add raw format support to image creator script
* Use openstack client commands to replace neutron client
* Devstack plugin: Check for IPv6 support
* Healthmanager shouldn't update NO_MONITOR members
* Slightly reorder member flows
* Fix periodic job
* Updates the docs with new admin tips
* Allow fields filter on single object GETs
* Update HAProxy version for CentOS
* Correct field filtering for member/l7rule/amphora
* Improve Health Manager error handling
* Create noop provider driver and data model
* Creates provider driver base class and exceptions
* Remove PluggedVIPNotFound references
* Fix keepalived vrrp check script to be in PATH
* Fix lower-constraints to actually match our reqs
* Health Monitor url_path requires leading slash
* Pool PUT should validate name/description length
* Correct flavor to flavor_id in LB API
* Fix calls to "DELETED" items
* Add sos element to Red Hat family images

3.0.0.0b1
---------

* Adding Gophercloud to the SDKs list
* Trivial: fix syntax error in command examples
* Add Octavia v2 tempest plugin jobs to Octavia
* Switch to ubuntu-minimal for default amphora image
* Fix statistics update typo
* Install client from pip if not in LIBS_FROM_GIT
* Add pool session persistence validation
* Update auth_uri option to www_authenticate_uri
* Improve some log messages in health_monitor.py
* Add API alias for '/statuses'
* Move o-hm0 dhcp config under /etc/dhcp
* Update introduction documentation page
* Add debug timing logging to amphora health update
* Make keepalived initialization more predictable
* Minor refactor of health_sender.py
* Defend against neutron error response missing keys
* Expose timeout options
* Fix revert method for batch member update
* Correctly validate member subnet_id in batches
* Allow members to be set as "backup"
* Update API-REF for x-forwarded-port is string
* Fix a no-op network driver bug on plug_port
* Set lower-constraint to run unit and functional
* Add deadlock retry wrapper for initial quota create
* Fix logging level for keystone auth bypass
* Fixes the directory we copy the image to
* Don't failover amphora with LB in PENDING_*
* The MarkL7PolicyActiveInDB is not necessary when l7policy deleted
* add lower-constraints job
* Fix health manager edge case with zombie amphora
* Switch multinode tests to non-voting
* Change used keystone token provider in devstack
* Rename python-openstacksdk to openstacksdk
* Updated from global requirements
* fix a typo in documentation
* Updated from global requirements
* Add a devstack variable for legacy RBAC
* Periodic job to build + publish diskimage
* Updated from global requirements
* Log health manager exceptions
* Log output of subprocess check_output() errors
* Reduce runtime for l7rule too_many_rules test
* Updated from global requirements
* Properly test access to tls_refs in the API layer
* Devstack plugin image build option quoting fix
* Fix functional jobs
* Change import order
* Reduce stestr concurrency to 1 for tempest jobs
* Add license for empty __init__.py
* Rename q- to neutron- services
* Move scenario-lxd over to experimental
* Split up extra init steps and start processes
* Updated from global requirements
* Fix kvm-centos.7 gate
* Migrate to stestr
* Updated from global requirements
* Imported Translations from Zanata
* Add image_id to amphora table
* Add timestamps to amphora table
* Update configuration samples (QoS)
* Fix network no-op driver get_network_configs
* Overhaul HealthManager update threading
* Updated from global requirements
* Fix filtering by admin_state_up
* Fix load balancers using IPv6 for the VIP subnet
* Updates API for deleting "DELETED" objects
* Fix missing test-requirement and cleanup docs
* Update reno for stable/queens

2.0.0.0rc1
----------

* Imported Translations from Zanata
* Fix release notes job
* Enable hacking-extensions H204, H205
* Deallocate the VIP prior to deleting the amphorae
* Re-enable "Page Contents" and update user docs
* Replace Chinese quotes with English quotes
* Update api-ref for uuid type and fix tox
* Do not install pip in amphora when using distribution packages
* Zuul: Remove project name
* Update docs config for storyboard bugs
* Handle missing security groups on listener delete
* Fix session persistence update
* Updated from global requirements
* Add SDK list to octavia user docs

2.0.0.0b3
---------

* Updated from global requirements
* Allow setting full mandatory access control in amphora
* Updates the Octavia cookbook to use OSC commands
* Switch to using PKCS12 for TLS Term certs
* Rework amphora agent installation element
* Fix processing pool statuses for LBs with multiple listeners
* Updated the function name with "get_remove_vip_subflow" to fix the typo
* Don't run functional jobs for docs changes
* Fix a typo of "octavia" in comment
* Provider driver spec
* Healthmanager health_check timer config fix
* Add unit tests for neutron utils, add model/util for floating_ip
* Add api-ref for amphora failover
* Fix a typo in the test HTTP server
* ignore api-ref/build directory
* Check if it is used when creating a load balancer using vip_port_id
* Updated from global requirements
* TrivialFix: fix a typo in comment
* Minimize the effect of overloaded Health Manager processes
* Fix exception when querying for non-existing items
* Add VIP qos into our cookbook
* Remove the redundant mock patches in tests
* TrivialFix: remove redundant import alias
* Amphora API Failover call
* Producer/endpoint code to allow for amphora failovers
* Change how path munging works for sphinx docs
* Support UDP load balancing
* Improve user error messages for duplicate objects
* Remove reliance on NeutronException message field
* Extend api to accept qos_policy_id
* Updated from global requirements
* Use common functional tox jobs
* Update gitignore
* Fix PING health monitor for old amphora
* Improve Neutron driver _get_resource()
* Move loading the network driver into the flows
* Force SELinux context for amphora keepalived process
* Reorder ComputeCreate arguments
* Have doc8 ignore autodoc content
* Update compute noop driver/tests (many were disabled/useless)
* Updated from global requirements

2.0.0.0b2
---------

* Fix filtering in list API calls
* Updated from global requirements
* Clean up test_update_db.py a little bit
* Optimize update_health process
* Fix the failover API to not fail with immutable LB
* Fix health monitor type "PING" to use icmp ping
* Adds the user's project id to the VIP port creation
* Updated from global requirements
* Fix functional tests for MacOS
* Split amphora haproxy systemd unit
* Rename 'remove' to 'reset' in Quota API
* Update devstack plugin and examples
* Enable some off-by-default checks
* ACTIVE-ACTIVE: Initial distributor driver
* Handle race condition deleting security group rule
* Refine api-ref regarding loadbalancer creation
* Make the allowed_address_pairs driver better
* Remove setting of version/release from releasenotes
* Fix keepalived check script
* Updated from global requirements
* Fix extracting nova fault string on build error
* Updated from global requirements
* Fix gate breakage: unit tests
* Updated from global requirements
* Fix TaskFlow loader to honor engine configuration
* Fix member operating status when add/remove HM
* Fix possible DB deadlock scenario
* Updated from global requirements
* Fix health_manager to exit without waiting
* Updated from global requirements
* Fix body error of update loadbalancer
* Add cached_zone to the amphora record
* Zuul: add file extension to playbook path
* Disable kvm on OVH infra instances
* Update Octavia zuulv3 for newton EOL
* Make the event streamer transport URL configurable
* Stop child objects changing status when LB locked
* Fix non-cascade LB delete with children
* Update tests to not use deprecated test.services()
* Move Octavia zuulv3 configuration in repo

2.0.0.0b1
---------

* Support pylint
* Updated from global requirements
* Trivial change to adapt pylint rules
* Valuable change reported by pylint
* import _ from octavia.i18n
* Update cookbooks for transition to OSC
* L7 policy API-REF update for result codes
* Updated from global requirements
* Adds automatic ERD generation for the Octavia DB
* Add dependent packages to config reference
* Remove SCREEN_LOGDIR from devstack
* Fix KeyError in failover error log
* Correctly unlock amphora health on failover revert
* Force PCI bus rescan if interface is not found
* Updated from global requirements
* Shrink Tempest scenario manager copy
* Add admin endpoint for amphora info
* Update the documentation link for doc migration
* blueprint: l3-active-active
* Allow PUT to /pools//members to batch update members
* Update links in CONTRIBUTING.rst
* Ensure DIB_REPOREF_amphora_agent is set
* Use flake8-import-order plugin
* add page_reverse to GET API
* Updated from global requirements
* Fix inappropriate provisioning_status of l7 policy
* fix “_send_member_to_handler” debug log (Story: 2001194, Task: 5696)
* Updated from global requirements
* Gatefix: Migrate from testr to stestr
* Make housekeeping build spares with an executor
* Stop masking VIP allocate neutron errors
* Allow members to use the VIP address
* Add 'futures' to requirements, as we use it multiple places
* Updated from global requirements
* Updated from global requirements
* [Trivialfix]Fix typos in octavia
* Update release notes theme to openstackdocs
* Adds the ability to failover a load balancer
* Add ability to build amp image on SUSE
* Updated from global requirements
* devstack: Allow 3rd party neutron backends deployment
* Add a CLI section to the Octavia docs
* Whitespace fixes and simplification of keepalived template
* Updated from global requirements
* Correct diskimage-create sshd removal bug
* Move doc-requirements to test-requirements
* Change 14.04 to 16.04 in devstack setup guide
* Add flag to disable SSHD on the amphora image
* Fix health monitor DB locking
* Fix a bad revert method and add hacking check
* Fix octavia logging to be more friendly
* Update reno for stable/pike

1.0.0.0rc1
----------

* Fix LB creation with VIP port
* Fix some typos
* Update devstack readme.md
* Fix sg_rule didn't set protocol field
* Properly store VIP data on LB Create
* Updated from global requirements
* Update links in README
* Remove usage of credentials_factory.AdminManager
* Fix DB update reverts for provisioning status
* Add allocate vip port when create loadbalancer in server side
* LB Admin down should show operating_status OFFLINE
* Updated from global requirements
* Remove WebTest from test requirements
* Ignore 404 amphora error when deleting resources
* Correct status for disabled members (honest abe edition)
* Stop using deprecated CORS.set_latent()
* Properly handle more states from HAProxy health messages
* Barbicanclient is refactoring, this fixes one of our bad tests
* Updated from global requirements

1.0.0.0b3
---------

* Updated from global requirements
requirements * Fix haproxy\_check\_script for delete listener * Updated from global requirements * Spec detailing Octavia service flavors support * Update release notes for work done in Pike * Fix url\_path valid check * Fixing typo in controller/worker/tasks/database\_tasks.py * Allow special paths to not require keystone auth * diskimage-builder: Acceptable RHEL based Amphora disk size =< 3GB * Updated from global requirements * Remove key\_path from devstack plugin * Fix invalid escape sequence in certs-ramfs.service * Option to enable provisioning status sync with neutron db * Updated from global requirements * DIB: drop custom mirror elements * Force get\_all to only make one DB call * Updated from global requirements * Remove unused client directory * Enable H904 check * Update some links to the new docs locations * Install amphora agent from distribution package on RHEL * Add placement service to new-octavia-devstack.sh * Add statistics to V2 API * Add status tree to V2 API * Update Octavia docs for documentation migration * Fix error 500 when using SOURCE\_IP and APP\_COOKIE * Failover should cause LB to be Immutable * Don't reload the policy for every API request * Update api-ref for openstackdocstheme 1.11 * Update our docs to point to the new api-ref * Correct RBAC Not Authorized status code * Enable uwsgi support for octavia devstack * Event Streamer for StatsDb should respect config value * Make developer debugging easier * Add v2 Quota API section * Fix nova secgroup-add-rule which is unavailable * Change default vagrant box to xenial64(16.04) * Add v2 L7 Rule API section * Allow operators to disallow creation of TLS Termination listeners * DELETE operations don't actually return data (204) * Allow operators to disable v1 or v2.0 api endpoints * Add v2 L7 Policy API section * Replace the usage of 'admin\_manager' with 'os\_admin' * No need to do a shutdown(wait=True) when in a context-manager * Add filtering and field selection to API * Stop provider and flavor API stubs failing calls * Provide better ComputeBuildExcpetion messages * SSL Health Monitors didn't actually ... 
check very much * Refactor the RBAC auth enforcement a bit * Use 'get\_rpc\_transport' for RPC clients and servers * Add RBAC enforcement to quotas v2 API * Add RBAC enforcement to l7rules v2 API * Add RBAC enforcement to L7 policies v2 API * Add RBAC enforcement to health monitors v2 API * Add RBAC enforcement to members v2 API * HM Update for url\_path uses incorrect validation * Add RBAC enforcement to pools v2 API * Add RBAC enforcement to Octavia v2 API * Updated from global requirements * Agent: swap flask responses to webob, handle 404 retries better * Allow operators to tune VIP creation parameters * Enable DIB trace logging * Small refactor for load\_balancer v2 vip validation * Updated from global requirements * Also remove amphora\_health entry during house\_keeping amps * Remove deprecated signing\_dir config setting * Updated from global requirements * Fix an issue with failover on VIP net without DHCP 1.0.0.0b2 --------- * Handle log message interpolation by the logger * Add check O346 to disallow backslash line continuation * Remove deprecated oslo\_messaging.get\_transport * Minor code cleanup in l7policy controller * Updated from global requirements * Add v2 health monitor API section * Add v2 member API section * Replace deprecated oslo.messaging.get\_transport with get\_notification\_transport * Remove hardcoded amphorae namespace name * Updated from global requirements * Fix HM flows to set Pool back to active * Check keepalived health in the amphora * Fix keepalived systemd race with haproxy namespace * VRRP amphora\_driver functions weren't handled by noop driver * Add v2 pool API section * Updated from global requirements * Pool name/desc needs to be "" when empty, not null * Devstack plugin should mark the HM ovs port for cleanup skip * Optional L7Policies param was marked as required * Fix pool response to fill healthmonitor\_id properly * Remove lb\_network\_name from config (it was bogus) * Don't leave LBs in PENDING\_DELETE after refusing to cascade * Updated from global requirements * Remove \_LI, \_LW, \_LE, \_LC from i18n * Remove nova cert from devstack examples * Fix broken database migration * Replace assertRaisesRegexp with assertRaisesRegex * Update example config to have more correct keystone\_authtoken example * Remove invalid url link * CalculateAmphoraDelta networks based on vrrp\_port * Allow specifying empty amp\_boot\_network\_list * Additional logging in udp\_heartbeat and post\_plug\_vip * Reintroducing local certificate manager, as it is useful for testing * Two quick fixes (one log message, one cert create script thing) * Trivial fix typos in documents * Add cascade delete for APIv2 * Add monitor address and port to pool member * Implement sorting and pagination for octavia * Add support for single-create for APIv2 * Don't show deleted objects in v2 API * Noop network driver should provide a more hydrated network * Purge more tenant\_id references that I missed * Add some hacking checks borrowed from neutron * Remove log translations from octavia * project\_id should remain in the response for v2 objects * Updated from global requirements * Fix devstack plugin to stop octavia processes * Fix an failure if the vip network has no gateway * Shutdown a endpoint workers executor on termination * Fix a bug where ports may not be deleted * Change auth\_strategy default to keystone * Run Octavia API in a WSGI server * Set up octaviaclient as part of devstack * Add new PROXY protocol to lbaas pool * Fix set cookie\_name to None if not in PUT content * 
Initial version of the operator guide * Add v2 listener API section * Fixes mysterious gdbm error * Fixes Octavia not working with Barbican * Fixes unicode problems in p35 * Install DIB from source so depends-on will work * Fix the coverage HTML report * Auto-detect haproxy user\_group * Correct some v2 Listener response entries * Fix import in v2 pool types * Add quotas to Octavia v2 API * Removing dependency on eventlet and oslo.service * Introduce Octavia v2 API for Health Monitor * Remove singleton pattern from API controllers * Fix doc generation for Python3 * Adjust tox cover to check for 80% coverage * Update keystone URL to be consistent * Fix some configuration type issues * Scenario base uses the wrong exception lib * Fix dumb error message typo 1.0.0.0b1 --------- * Updated from global requirements * Fix house\_keeping daemon to use Event.wait() * Remove duplicate oslo\_messaging config line * Fix imports in v2 listener types * Loadbalancer v2 needs to accept tenant\_id on POST * Fixed Octavia tempest plugin * Add Apache License Content in index.rst * Introduce TESTING.rst for quick developer reference * Explicitly list neutron api extensions for tests * Updated from global requirements * Optimize the link address * blueprint LBaaS Alternative Monitoring IP/Port * Support QoS policy apply on vip port * Updated from global requirements * Adds v2 load balancer API section * Align Octavia API to n-lbaasv2 for L7Rules * Align Octavia API to n-lbaasv2 for L7Policy * Fix pool validation exception typo * Introduce Octavia v2 API for Members * Updated from global requirements * Adds a new config for soft-anti-affinity * Failover logic doesn't handle anti-affinity * Tests helper function \_send\_requests should catch socket.error * Introduce Octavia v2 API for pools * Updated from global requirements * Changing the service-type to load-balancer * Fix some reST field lists in docstrings * change genconfig output-file path * Fix get\_all method for v2 LB controller * Octavia v2 API for listeners * DIB: drop some dependencies * Remove unused logging import * Assorted trivial fixes for v2 loadbalancer tests * Fix v2 LB API return for null description and name * Change v2 API LB create return status code * Move API v1, v2 hooks to API Common * Use HostAddressOpt for opts that accept IP and hostnames * Remove local DIB elements dir * DB migration to add ID column to HM * Adds a new feature to limit the amphora build rate * Look for all qemu-img/kvm package names in rhel-based distros * Update Octavia v2 API for vip\_network\_id * Fix load balancer project\_id handling on POST * Add v2 load balancer type and controllers * fix py3x test bug so functional tests will pass * devstack: install qemu-kvm on RHEL-based distros * Updated from global requirements * Devstack plugin: API only mode * Fix typo * Updated from global requirements * Replace six.iteritems() with .items() * Fix quota lock on single\_create with auth enabled * Update for new pep8 rules E402 and W503 * Updated from global requirements * Fix cascade delete flow (pool quota requirements missing) * Allow to create vip in lb-mgmt-net * Look for diskimage-builder not dib-utils when building amps * Switch to use stable data\_utils * Switch to use exec\_command instead of mount/umount * Adds v2 general API information section * Add network\_id to VIP to match neutron-lbaas * Recurse dictionary generation in model to\_dict() * Updated from global requirements * Adds devstack plugin support for all amp base OS * Updated from global 
requirements * tempest: Switch to local copy of tempest/scenario/manager.py * Updated from global requirements * Don't use Tempest internal methods * Work around devstack selecting incorrect image * gate\_hook: Stop writing local.conf directly * Set project\_id on sub-objects during single-create * Remove unused logging import * Fix the service type for octavia * Updated from global requirements * create\_certificates.sh needs to be executable * Add common api handlers for v1 and v2 * Use cryptsetup/LUKS for encrypted ramfs * Remove v1 base type * Create api-ref base for Octavia * Update test-requirements hacking * Add option to choose an availability\_zone for amps * Remove mention of deprecated verbose option * Updated from global requirements * Remove config option "amp\_network" * Fix Octavia v1 API project\_id for POST * Updated from global requirements * Switch to use test\_utils.call\_until\_true * Allow a configuration option for random Amphora name * Fix the amphora failover flow docs diagram * Fix py3x gates and functional * Remove white space between print () * Update reno for stable/ocata * Fix the PKG-INFO to 5 - Production/Stable * Fix "P2" note references in act/act specs 0.10.0 ------ * Fix unit tests for OSX * Improve the failover handling for spares-pool amps * Fixes misspelled amp\_ssh\_access\_allowed config * Fix failover for unallocated amps (spares-pool) * Remove outdated kernel tuning for haproxy * Add placement services to devstack example files * Fix a typo in the release notes * Fix the amphora image support for RH Linux flavors * Add common base type for v1 and v2 * Remove python3.4 from tox * Fix active/standby under python3 * Updated from global requirements * Updated from global requirements * Fix the docs page title * Fix connection timeout spam in subunit logs * Cleanup noop network driver to fix py3x * Clarify that octavia-db-manage should be used * Archive the testrepository file before processing * Remove duplicated config option 'cert\_generator' * Archive the raw testrepository log * Remove an erroneous MarkHealthMonitorActiveInDB task * Updated from global requirements * Fix octavia multinode setup for flavor and image * Add quota support to Octavia * Updated from global requirements * Correct the parameters of l7 policy creation * Introduce Handlers for V2 API * Updated from global requirements * Introduce Types Directory * Update Python classifiers * Add check when plug vrrp port in LB creation * Adds user\_group option under haproxy\_amphora * Add support for policy.json * Updated from global requirements * Fix file mode * Fix tenant\_id reference * Updated from global requirements * Correcting error message for CA Key validation failure * Fix multi-typo error in Octavia * Set access\_policy for messaging's dispatcher * Adds diskimage-create scripts to pypi package * Fix a typo * Remove MANIFEST.in from repo * Fix typo in doc/source/api/octaviaapi.rst * Add context to unit tests * Remove OSC deprecation warnings * Do not use log hints for exceptions * Save neutron calls if plugin does not support dns-integration * Updated from global requirements * Fix missing NovaServerGroupDelete * Introduce Test Base classes for V2 API * Remove an erroneous MarkMemberActiveInDB task * add CONTRIBUTING.rst * Fix typo * Updated from global requirements * oslo\_messaging\_rabbit settings are deprecated * Adding name to health monitor and member * Add build-essential package dependency to amphora-agent element * Amphora-agent should log to a distinct location *
Change bind\_host IP address * Fix some python3 string issues in amphora agent * Updated from global requirements * Fix the agent install to use pip -U * The amphora agent plug\_vip has an indentation issue * Stop depending on tripleo-image-elements * Setup translation for octavia * Add support for diskimage-builder tracing * Introduce API Decorators * Use pip install rather than setup.py * Create Octavia api V2 directory * Fix a unit test that was attempting real requests * Updated from global requirements * Updated from global requirements * Spec detailing capability gaps with n-lbaas API * Properly format IPv6 bind address strings * Only set default DIB\_RELEASE for ubuntu * Remove unused method - get\_random\_string * Fix devstack plugin for KEYSTONE\_CATALOG\_BACKEND * Show team and repo badges on README * Enable IPv6 load balancer networks * Add keystone authentication of token * Remove CONF.import\_group * Simplify cert-ramfs-ecrypt package dependencies * Adds distribution ID option to diskimage-create.sh * Backend Keystone authentication * Updated from global requirements * Adds support for systemd amphora images * Enable EPEL repository for CentOS amphorae * diskimage-create check for qemu-kvm-ev/qemu-img-ev for CentOS * Updated from global requirements * Modify variable's using method in Log Messages * Updated from global requirements * Remove last vestiges of oslo-incubator * Basic listener scenario test * Use test assertEqual instead of assert() * Updated from global requirements * Run amphora agent with gunicorn * UDPStatusGetter socket bind fix for ipv6 * Fix health\_sender test to use a list instead of tuple * Updated from global requirements * C10k load testing environment script * Scenario: test with concurrent clients * Increase the verbosity of haproxy.rest\_api\_driver failures * Update .coveragerc after the removal of openstack directory * Enable support for multinode in the gate hook * Add a new API to list loadbalancer statistics * Add class 'StatsMixin' * Updated from global requirements * Update haproxy logging configuration * Load system sysctl in amphora network namespace * Fixes pregenerated certificates validity to 50y * Updated from global requirements * Drop MANIFEST.in - it's not needed by pbr * Updated from global requirements * Add linuxbridge support to devstack plugin * Scenario tests: improve test http server * Update devstack plugin to use openstack client * Fix to run functional tests * Fix functional tests after HM change * Fixes unit test config override * Add MTU configuration when plugging vip or network * Update listener stats * Fix revert on 404 from amphora agent startup * Enable release notes translation * Updated from global requirements * Add support for PKCS7 bundles and encrypted keys * Add defaults for health monitor object * Fix nova image-list which is unavailable * Switch HAProxy health check timeout to seconds * Use get-pip.py from cache if it exists * Use a cached get-pip.py if it is available * Fix typo in active-active-distributor.rst * Remove pypi download shield from Readme * Updated from global requirements * Update reno for stable/newton 0.9.0 ----- * Fix a typo in README.rst * Add a cookbook for basic load balancing * Adding introduction and developer quick start docs * Updated from global requirements * Terminated HTTPS certs and keys in encrypted ramfs * Fix orphan port/security group on revert * Octavia: Basic LoadBalancer Scenario Test * Fix typos in amphora\_flows.py & octavia.conf * Drop the log level for "neutron extension
found" * Adds tasks so that reverts set objects to ERROR * Updated from global requirements * Fixes a test order issue with image tag owner * Amphora agent refactor to classes * Setup gate test scripts for Octavia * Fixes the revert methods with unhandled exceptions * Option to restrict amp glance image owner * Fixes a revert failure in AllocateVIP task * Add L7 user guides * \_extract\_amp\_image\_id\_by\_tag should return last created image * Adding a unit test for graph create failure case * Set fullconn parameter in haproxy backends * Set haproxy global connection limit * Fix admin\_state\_up for loadbalancer and listener * Add tox 'specs' test to pep8 environment * Updated from global requirements * Stop using bandit-baseline * Update tox 'docs' environment to test specs * Fix active-active specs tox test * Fix No sql\_connection parameter is established * Remove deprecated (and unused) 'sqlite\_db' config option * Add \_\_ne\_\_ built-in function * Move bandit to pep8 * Subnet host routes support for plug\_network * Updated from global requirements * Update readme file * Rewrite create\_load\_balancer flow * Corrected test\_dir path in TempestPlugin * Update config reference to use auto generate * Updated from global requirements * Octavia: Implement Tempest Plugin * Deperecate apt-mirror element * Add spec for active-active * Add spec for adding statistics gathering API for loadbalancer * Updated from global requirements * Fixes amphora error of multiple IP addresses * Use upper constraints for all jobs in tox.ini * Make pyroute2 a requirement of octavia * Fix failover * Updated from global requirements * Fixes failover flow with namespace driver * Remove oslo.rootwrap from requirements.txt * Updated from global requirements * Updating network base utils for more functionality * DIB: relax check on pip executable * Condense amphora-agent-ubuntu in to amphora-agent * Remove unnecessary if and exception * Updated from global requirements * Allow IPv6 VIPs * DIB output filename and requirements fixes * Adds a process to generate key flow documentation * Updated from global requirements * Remove datetime\_to\_str * Fix some typos in the haproxy-amphora-api.rst * Enable DeprecationWarning in test environments * Updated from global requirements * Mock time.sleep in tests to reduce test time * Fixed UnmappedInstanceError thrown when LB delete * Revise pagination\_max\_limit description * Skip removing sec\_group if none * Updated from global requirements * Fixes Octavia handling of subnets without DHCP * fix a misspelling * Updated from global requirements * Allow deletion of ERRORed load balancers * Replace 'c = a if a else b' with 'c = a or b' * Use the from\_environ features of oslo\_context * We should set status to be 1 if get nothing from socket * Fix a small bug when list opts * Remove unnecessary check to get all listener ids * Update the home-page info with the developer documentation * Add debug information when skipped some errors * Instead name, use uuid to get security group instance * Instead of info, use exception for logging an exception * No need to use getattr func to get a value * Py3: Don't use dict.values()[0] * DIB image tests speedups * Fix a typo and replace unused variable with '\_' * Use seperate security group for o-hm0 port in devstack * Updated from global requirements * Disbale DNS update for port o-hm0 in devstack * Adding improved logging to amphora rest driver * Correct reraising of exception * Fix typo in the files * Cache neutron extension lookup state * 
Removing FK relationships from ListenerStatistics * Revert "Condense amphora-agent-ubuntu in to amphora-agent" * Fixing bug in single-create jinja config generation * Allow to run DIB when installed system-wide * Add Python 3.5 classifier and venv * Support RHEL base image in DIB * Fixed running tox while stacked * Add \_get\_resource func to get any type resource in neutron * Updated from global requirements * Insert haproxy in front of all the o-api endpoints * Allow deploying secondary worker and API nodes * Fix small typo in log message * Fix Octavia release notes header * Fix docstrings for database tasks * Decrease default resource expiry age in DevStack setup * Cleanup deleted load balancers in housekeeper's db\_cleanup * Fixes failover issue with neutron dns integration * Use correct code version for amphora agent image * Updated from global requirements * Add revert method in database\_tasks MapLoadbalancerToAmphora * Condense amphora-agent-ubuntu in to amphora-agent * Use git.o.o rather than review.o.o for cloning * Modernize amphora-agent element * Remove the conditional check for topology * Remove untrue comment message * Updated from global requirements * Updates test with overridden timeout values * Updated from global requirements * Updated from global requirements * Fixed unit test for amphorae backend utils * Repaired spacing mistake in info message * Fix alembic migration on MySQL 5.7 * Set device owner for health manager port * Use transport\_url instead of oslo\_messaging\_rabbit * Killed existing downgrade rules in migration scripts * Updated from global requirements * Drop support for SQL Schema Downgrades * Use RABBIT\_USERID instead of 'guest' * Disable dhclient requests the default route info * Fix docstrings for AmphoraLoadBalancerDriver and ControllerWorker * Adds methods in version.py for proper oslo\_reports setup * Tox: Remove exclude directories that no longer exist * Updated from global requirements * health-manager listen port security group * Fix unreachable TimeOutException bug in amphora driver * Fix Mac OS X socket.AF\_INET6 code * Fixes small typos in comments in source files of api * Fix vrrp\_success\_count help string * [docs] Fixes to octavia-api docs * Using utils function to get the network driver * Update nova api version to 2.1 * validate the subnet id for create loadbalancer and member * Use devstack RABBIT\_HOST to point to rabbit, instead of localhost * Allow deployment with pre-generated ssh keys and certificates * Use upper-constraints.txt file for all tox targets * Updated from global requirements * Make registering error handlers compliant with Flask 0.11 * Use glance image-list command in devstack/plugin.sh * Add timestamp to octavia resources * Updated from global requirements * Allowing for a host amphora configuration * Network driver should generate network configs * Small cleanup of plugin.sh * Remove oslo.serialization module requirement * Add env variables for memory and vcpu in the Vagrantfile * Add libvirt provider support * Updated from global requirements * Make sure ~/.ssh/known\_hosts exists before calling ssh-keygen -R * Use the local /vagrant dir for copying the config and support scripts * Implement custom header support for Octavia * [Trivial] Remove unnecessary executable privilege * Wait for loadbalancer to be ready instead of sleeping * Fix keystone auth url in devstack plugin * Add WSGI support for octavia api * Updated from global requirements * Updated from global requirements * Added amphora\_id to 
listener\_statistics table to stop data loss * Whitespace bug in sysvinit jinja template * Taskflow update exposed inconsistencies in tasks * Support db-manage command for db migration * Updated from global requirements * Updated from global requirements * Fixed duplicate constants declarations * Attach port to amphora when adding member to shared pool * Updated from global requirements * Amphora fails to build * Updated from global requirements * Run amphora haproxy in a network namespace * Update amp\_network to allow multiple networks * Updated from global requirements * Defer updating role and vrrp\_priority for failover * Add missing unit test for cascade lb delete * TLS/SNI Listener creation fails when using intermediate certificates * Updated from global requirements * Updated from global requirements * Replace mock open with namespace builtins with OpenFixture * Replace the os.open method with safer way * Remove the default anchor username and password value * Removes port\_detach code that was mistakenly added * Fixes the SNI issues in master(mitaka) for octavia * Add neutron-lbaas-dashboard to local.conf sample * Fixes failover when using a spares pool * Devstack: Use OVS\_BRIDGE when value is set * Make GIT\_REPO configurable * Fix local.sh script for Keystone authentication * Exit local.sh immediately when error occurs * Adds documentation for the Octavia configuration * Fix devstack plugin clean of o-hm0 * Update release notes for Mitaka release 0.8.0 ----- * Fix admin-state-up * Add VHD support in DIB * Updating unit testing * Add hacking check to ensure not use xrange() * Single-Create Documentation * Change render\_unsets default to False * Add unit tests for L7 data model additions * Fix L7Policy update regression * Fix session\_persistence deletion bug * Use consistent type checking of project\_id in API * Use rfc3986 module for URL validations * Add CA Cert file config option to validate against SSL endpoints * Remove an unused configuration option * Replace deprecated LOG.warn with LOG.warning * Add missing unit tests for L7 repo classes * Add unit tests for L7Policy API * Add missing unit tests for L7Rule API * Fix default\_pool\_id type in l7policy API * Add maximum number of l7rules per l7policy * Adds release notes for Active/Standby * Updated from global requirements * Update doc to make it consistent with the actual api * Add pre\_test\_hook to run gate jobs * Make SSH bind to management net only * Better L7Policy API validations * Remove swift related content in the sample local.conf * Use "--pub-key" instead of "--pub\_key" * Updated from global requirements * Add bandit baseline to tox * Adds the Cascade Delete REST API doc * Updated from global requirements * glance: support relying on tags to extract image id * Add release notes for L7 and shared pools * Get Me A Load Balancer Controller * Get me a Load Balancer API * Cascading LB delete * Barbican Cert Manager fails to correctly parse intermediates * Block deletion of pool in use by L7Policies * Updated from global requirements * Improved LoadBalancer Delete * Use unique SNI identifier when building data model * Fix LB/Listener status updates for HM/Member * Implements: blueprint anti-affinity server group * Remove old SSH specific config options from sample config * Updated from global requirements * Fix health monitor URL in API documentation * Add a request timeout to the REST API driver * Change HMAC compare to use constant\_time\_compare * Add listener stats API * Add L7 documentation * Add L7 jinja
template updates * Add L7 controller worker flows and tasks * Add L7 api - rules * Add L7 api - policies * Update repos for L7 rules / validations * Update repos for L7 policy / methods * Add L7 database structures * Assign peer\_port on listener creation * Fix model update flows * Trivial: cleanup unused conf and log variables * Reduce devstack build time by properly using pip caches * Remove unused \`paramiko\` * Update flake8 exclude * Fix improper egress security rule deletion * Fixed make sure to get IPv4 value by awk in sample local.sh file * project\_id should not be UUIDType in API validation * Stop using non-existent method of Mock * Update data model conversion to be more complete * Fix LB status updates * Improve test case test\_upload\_certificate\_md5 * Fix amphora failover after health monitor start * Fix deleted amphora expiration check * Delete SSH amphora driver * Fix hacking rule of assert\_equal\_or\_not\_none * Stop logging amphora cert generation in debug * Fixes an intermittent load balancer delete failure * Octavia: API docs Entity Status Fix * Allow user-data on amphora creation * Reverts a workaround that is impacting amp updates * Adding "region and endpoint\_type" parameters to barbican\_acl.py * Do not use \_\_builtin\_\_ in python3 * Adds a parameter to specify endpoint type * Fix / and /v1 API uris * Use netifaces constants correctly in testing * Remove dead code around certificate handling * Revert "Fix for ssl context wrapper spam", add warning filter * Fixes Octavia flows that are not reverting * Remove out-dated README file * Default api\_handler to queue\_producer * Shared pools support * Updates the failover flow for active/standby * Fix for ssl context wrapper spam * Fix event\_queue entry in setup.cfg from EventStreamer break * Add SubjectAltName and KeyUsage for localcertgen * Fix misspellings * Use uppercase 'S' in word "OpenStack" * Replace deprecated Nova networks with Nova interfaces * Add endpoint override conf option for neutron and nova * Fixes Octavia Amphora RHEL based images * Amphora Create now fails when amphora goes into ERROR status * Fix minor typo in warning message * Optimizations for tempest tests * Fixes intermittent udp heartbeat test failure * Add service name conf option for neutron and nova * Let drivers decide to use region or not * Use signing digest defined in config to sign CSR * Updated from global requirements * Make sample web server close connections * Updated from global requirements * Add APP\_COOKIE session persistence type * Change the length of name in the algorithm * Fix typo in migration README.rst * Implementing EventStreamer * Updated from global requirements * Fix a typo in the comment * Updated from global requirements * Disable amphora health monitoring on amp delete * Remove unused logging import * Fix database\_tasks log formatting * Make Barbican the default certificate manager * Make pool.lb\_algorithm field larger * Case-sensitive string checking for Enum fields * Fix updating session persistence of a pool in DB * Disable default haproxy process in amphora image * Fix a problem of members not deleted from pool * Add debug testenv in tox * Fixed revert of update LB * Remove OpenSSL from cert\_parser * Replace deprecated library function os.popen() with subprocess * Fix some inconsistency in docstrings * Setup health manager networking for devstack * Updated from global requirements * Fix minor inconsistency in models code * Move docstrings to the first line of the function definition * Assign
load\_balancer in \_port\_to\_vip() * Test: make enforce\_type=True in CONF.set\_override * Updated from global requirements * Make Consumer an oslo\_service * Fixes gate failure 'update\_stats' called 0 times * Fix sysctl settings for amphora image * Use keystoneauth to prevent logging sensitive data * Updated from global requirements * Database repository delete to use one() * Updated from global requirements * Make Octavia devstack pass env settings to sudo pip install * Updated from global requirements * Expose project\_id in octavia api * Rename tenant\_id in the network models to project\_id * Updated from global requirements * Optimize Taskflow storage (remove some requires parameter) * Refactor BarbicanAuth to allow for configurable auth method * Allow choice of upstart or sysvinit * Rename tenant\_id to project\_id * Fix db error when running python34 Unit tests * Add missing unit tests for cw/tasks/network\_tasks * Add nopreempt support for active/standby * Add reno for release notes management * Add backport keepalived * Un-pin the amphora-agent version * Amphora Flows and Drivers for Active Standby * Anchor support to Octavia * Updated from global requirements * Add cert tracking and rotating in Housekeeping * Fix api\_server agent test that broke due to a dep change * Updated from global requirements * Unify constants usage in flows tests * Fix a test order issue with oslo\_config fixture * Stop nova driver delete failure on already deleted * Fixes interface\_file jinja misconfiguration * Add unit tests for cmd/house\_keeping * REST Amp Agent: Handle interfaces file too * Updated from global requirements * Add Guru Meditation Report feature * Get rid of unused dependencies in requirements.txt * Get rid of unused dependencies in test-requirements.txt * pyasn1 and pyasn1-modules modules no more needed * Updated from global requirements * Make amphora ssh access configurable * Fix a problem building spare amphora * New local certificate generator uses local time * Use assertIs(Not)None instead of assert(Not)Equal * Updated from global requirements * Removes the use of mutables as default args * Update HACKING.rst to reflect real syntax checks * Fix log message formatting that causes log errors * Consumer start log appears at appropriate time * Update Octavia’s Operator API Docs * chmod +x on local.sh to allow it to be executed by stack.sh * Launch consumer without calling oslo messaging wait * Swap out pyOpenSSL for cryptography in LocalCertGenerator * Use oslo\_config PortOpt type for port options * Set Hacking rules to check some tests assert * Stop checking if listener exists when uploading cert * Updated from global requirements * Change assertTrue(isinstance()) by optimal assert * Use oslo\_config IPOpt support * Revert "Add Pillow to docs-requirements.txt" * Updated from global requirements * Revert "Bypass listener check on cert deploy during create" * Bypass listener check on cert deploy during create * REST Amp Agent: Compare lower case MAC Addresses * Fixes TLS processing in the rest driver * Updated from global requirements * Adds cert\_manager option to octavia.conf * Fix argument order for assertEqual * Use assertTrue instead of assertEqual(True, \*\*\*) * Use assertIn and assertNotIn * Mock module import standardisation * Updated from global requirements * Add environment variable to skip amp image build * Updated from global requirements * Fixing help string for octavia.conf option * Fixes missing testresource dependency * Updated from global requirements * Minor
restructuring of documentation tree * Add .eggs/ to .gitignore * Adding init.d script * Change amphora\_id to compute\_id in compute drivers * Unify constants usage in taskflow flows * Improve logging in octavia.controller * Improve logging in octavia.common * Improve logging in octavia.amphorae * Improve logging in octavia.api * Improve logging in octavia.network * Improve logging in octavia.cmd * Improve logging in octavia.certificates * Replace wrong Nova references to Compute ones * Add Pillow to docs-requirements.txt * Update some release related texts * Remove oslo-incubator files * Unify constants usage final instalment * Allow drivers to be overridden for devstack 0.5.2 ----- * Unify constants usage in taskflow flows * Add a missing tox environment "venv" * Get noop drivers to function within flows * Fix a typo in the .coveragerc file * Remove quotes from subshell call in bash script 0.5.1 ----- * Increase the number of network call retries * Update controller logging * Don't build amphora image if one is available * Make keystone verify configurable * Updated from global requirements * Remove SQL GET request from UpdateAmphoraComputeId * Move roadmap to wiki * Fix issues uncovered by neutron lbaas tempest tests * Add health\_manager config options * Correct status\_update\_threads configuration help * Remove bad INFO log "Starting Octavia API server" * Default controller\_ip\_port\_list option to empty * Update the cirros version in devstack samples * Adds the NO\_MONITOR operational status for members * Updated from global requirements * Increase connection\_max\_retries to be 300 * Fix the shebang line and permission of webserver.sh * Implement UDP heartbeat sender and receiver * health manager service * Adding amphora failover flows * Plug vip and networks by port mac address * Updated from global requirements * Switch to OSC for devstack (keystone client deprecated) * Rest API plug network interface handling * Update Active/Standby blueprint * Fix allowed address pairs driver to use region * Adding sni\_containers to Octavia API * Updated from global requirements * Set the Amphora REST driver as devstack default * Removes unused posix-ipc requirement * Fix docstrings typo in delete\_member * Updated from global requirements * Do not remove egress sec group rules on plug vip * Updated from global requirements * Increase the amp active retries to be 500 times * Added support of multi-region environment * Adding the line to delete amphora * Removing unused dependency: discover * Clean NovaAuth.\_nova\_client during test cleanup * Enable automatic sample config generation * Updated from global requirements * Fixes for WSME updates * Updated from global requirements * Correct rabbit messaging config set in devstack * Ensure mocks are stopped during test cleanup * Updated from global requirements * Updated from global requirements * test\_plug\_vip not testing properly in py34 * Fixes missed refactoring of barbican client * Check for port ownership on vip deallocation * Setup base neutron network driver * Updated from global requirements * Add Housekeeping to manage spare amphora and DB cleanup * Correct shebang in create\_certificates.sh * Updating cert\_parser for cert loading * Updated from global requirements * Updating ssh driver with root user check * Updated from global requirements * Correct usage and configuration of CertManager/Generator with Stevedore * Fix a typo in the devstack plugin.sh which caused a truncated error * Fixes REST driver json encoding * Updated from
global requirements * Updated from global requirements * Adds the reload on cert change * Adds policy based routing for the amphora * Better parsing for HAProxy stats\_socket data * Updated from global requirements * Updated from global requirements * Fix member creation when retrieving network * Exclude virtual interfaces for collecting stats * Fixing a couple minor terminology errors * Hooking TLS together * Updated from global requirements * Decouple amp driver from network using new models * Adds method to update server.pem to Amphora REST spec * Pluggable controller worker * Adding model changes to support active/standby * Adding methods to network driver to get more info * Blueprint: Active/Standby Amphora Setup using VRRP * Updated from global requirements * Adding vrrp\_port\_id and ha\_port\_id to amphora * Temporarily disable send\_hmac test as it fails intermittently in py34 * Use 2 years for validity period for generated amphora certs * Updated from global requirements * Updated from global requirements * Updated from global requirements * Keepalived supporting amphorae image * Update unit tests for mock changes * Updated from global requirements * Update VIP to subnet id to match neutron-lbaas API * Fix the delete health monitor flow * Install etc/octavia/\* files * Drop use of 'oslo' namespace package * Various py34 fixes * Updated from global requirements * Updated from global requirements * haproxy reference amphora REST API client * Add content for octavia in plugin.sh and settings * Add webserver in octavia/devstack/samples * Adding Python 3.4 Compatibility and Tox Check * Updated from global requirements * Updated from global requirements * Fixes missing admin/user for devstack plugin * Fix the delete member flow * Update version for Liberty * Drop use of 'oslo' namespace package * Update PUT methods to send update through queue * Fix the delete listener flow * Fix the devstack octavia plugin for non ubuntu distributions * Fix the delete pool flow * Do not delete vip port if octavia didn't create it * AAP Driver deletes sec grp rule on listener delete * Updates for repo rename stackforge->openstack * Update .gitreview file for project rename * Fix the delete loadbalancer flow * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Implements the haproxy amphora agent api server * Updated from global requirements * Fix the devstack settings admin account handling * Fixed failing unit tests due to wsme change * Adding sudo permissions to SSH Driver commands * Fixes health monitor create issues * Setup stubs for health manager and housekeeping * Fixes issue with non-unicode ip addresses in wsme * Fixed post\_network\_plug issues * Updating member flow * Updates for calculate delta task * Updates devstack plugin to add service endpoint * Added update\_vip method to network driver * Fixes issue where loadbalancer is None in flow * Fixed a parameter issue for config drive files * Updated from global requirements * Fix a bug in the amphora create revert * Edit the plugin to add alembic information * Fixed various issues with create lb and amphora flow * Fix queue producer handler to send ids * Add devstack plugin for octavia * Updated from global requirements * Fixing inconsistencies and applying sane defaults * Update vip and network plug methods * Fixes load balancer create flow issue * Implemented Queue Consumer * Update nova\_driver to use amp\_network * Remove extra requirements * 
Implements Octavia Controller Worker * Amphora SSH Driver * Fixes a config load bug for keystone v3 domains * Removes include\_members from jinja\_cfg * Fix Octavia complexity issues * Fix ups so pbr and setup works * Fix "Revises" comment to be consistent with reality * Use stevedore to load API handler * Adds plug VIP and plug Port to spec * Fix the common/keystone.py for identity v3 * Allow id to be added in POSTs for all entities * Updates service and config for Octavia API * Switched from sha265 to sha256 in octavia.conf * Corrected some \_\_init\_\_() parameters * Add compute noop driver and test case * Add network noop driver * Fix arg order for Update functions in Controller-Worker * Added post\_vip\_plug method to the amphora driver * Fixed sessionpersistence and healthmonitor refs * Updating the disk image builder code * Added neutron allowed address pairs network driver * Added vrrp\_ip and ha\_ip to amphora models * Removing a log parameter from the noop driver * Add health check amphora-driver-interface * Implementation for bp/api-to-oslo-messing-handler * Adding post\_network\_plug to the noop amphora driver * Updated from global requirements * Adds rise threshold to configuration generation * Updates Jinja templater with additional optional params * Updated from global requirements * Fix stale pyc issue for unit tests * DRY'ed keystone session creation and retrieval * Sync with oslo-incubator, tweak as needed * Add tox -ecover environment * Create database models for healthmanager * Fixes some inconsistencies * Add the config file for nova compute driver * Modified amphora driver spec to include a new, optional method * Jinja Haproxy templates * Updated from global requirements * change specs for amphora manager interface * Updated from global requirements * Remove the 2.6 classifier * Moving to novaclient v2 * Updated from global requirements * Updated from global requirements * removing unused import on novaclient * Fixes import error in novaclient test * Fix over-zealous oslo incubator namespace fixing * Updated from global requirements * Specification of reference haproxy amphora API * Tweak requirements to be compatible with global-requirements * Octavia Controller Worker specification * Interface specification for housekeeping manager * Queue Consumer Specification * Patches nova client to work with HP Cloud * Update certificate generator implementations * Nova driver implementation * Fixed my alembic version fail * Updates the controller spec to clarify API Manager * Common TLS utilities * Removes flows from the amphora-driver-interface * Adding network driver interface * Change back the amphora driver interface * Spec defining the networking driver interface * Renaming amphora.host\_id to amphora.compute\_id * Creation of Octavia API Documentation * Add nsCertType and ExtendedKey usage extensions to CertGenerator * Removing byte order marker that caused issues * Update TLS Specification * updates the amphora driver interface spec * Octavia Controller specification * Initial amphora status daemon * Add Cert+PK generation to Certificate Interface * Added pymysql as default DBAPI * Implementing simple operator API * Add Amphora base image creation scripts for Octavia * Added versioning and migration mandates * Oslo incubator updates * Remove python 2.6 from tox env list * Nova virtual machine driver spec * Defining interface for compute drivers * Barbican implementation for Certificates * Defining interface for amphora base driver * Local development implementation for
Certificates * Remove VM management calls * Support for Certificate data handling * KeystoneAuth global configuration data * TLS Data Security Overview * Initial draft of compute driver spec * Allow .diag file extensions in spec reviews * PEP8 fixes, spelling fixes * Updated the Octavia 0.5 spec * Initial creation of repository classes and tests * Spec for the amphora driver interface * Stop using intersphinx * Adding initial glossary of terms * Initial creation of db models, modules, and tests * Added Operator API spec for Octavia v0.5 * Adding Octavia Amphora base image specification for Octavia v0.5 * Initial migration for database structure * Initial directory skeleton * Octavia v0.5 component design * Hacking fixes in CONSTITUTION, ROADMAP, & HACKING * Documenting project direction and design * Adding a couple files to .gitignore * Fix dependency on googlechart for graphviz * Populate repository with common initial files * Added .gitreview ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/HACKING.rst0000664000175000017500000001641500000000000014777 0ustar00zuulzuul00000000000000.. _octavia-style-commandments: Octavia Style Commandments ========================== This project was ultimately spawned from work done on the Neutron project. As such, we tend to follow Neutron conventions regarding coding style. - We follow the OpenStack Style Commandments: https://docs.openstack.org/hacking/latest Octavia Specific Commandments ----------------------------- - [O316] Change assertTrue(isinstance(A, B)) by optimal assert like assertIsInstance(A, B). - [O318] Change assert(Not)Equal(A, None) or assert(Not)Equal(None, A) by optimal assert like assertIs(Not)None(A). - [O319] Validate that debug level logs are not translated. - [O321] Validate that jsonutils module is used instead of json - [O322] Don't use author tags - [O323] Change assertEqual(True, A) or assertEqual(False, A) to the more specific assertTrue(A) or assertFalse(A) - [O324] Method's default argument shouldn't be mutable - [O338] Change assertEqual(A in B, True), assertEqual(True, A in B), assertEqual(A in B, False) or assertEqual(False, A in B) to the more specific assertIn/NotIn(A, B) - [O339] LOG.warn() is not allowed. Use LOG.warning() - [O340] Don't use xrange() - [O341] Don't translate logs. - [O342] Exception messages should be translated - [O343] Python 3: do not use basestring. - [O344] Python 3: do not use dict.iteritems. - [O345] Usage of Python eventlet module not allowed - [O346] Don't use backslashes for line continuation. - [O347] Taskflow revert methods must have \*\*kwargs. Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. Everything is python -------------------- Although OpenStack apparently allows either python or C++ code, at this time we don't envision needing anything other than python (and standard, supported open source modules) for anything we intend to do in Octavia. Idempotency ----------- With as much as is going on inside Octavia, it's likely that certain messages and commands will be repeatedly processed. It's important that this doesn't break the functionality of the load balancing service.
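For example (a minimal sketch with hypothetical names, not Octavia's actual task code), a delete operation can treat "already gone" as success, so that replaying the same command converges on the same end state:

.. code-block:: python

    class NotFound(Exception):
        """Raised by a backend driver when a resource does not exist."""


    def delete_instance(compute_driver, compute_id):
        # A controller may retry or replay this command, so a second
        # delete of the same id must not fail: the desired end state
        # (the instance is absent) is already satisfied.
        try:
            compute_driver.delete(compute_id)
        except NotFound:
            pass  # Already deleted by an earlier attempt; nothing to do.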
Therefore, as much as possible, algorithms and interfaces should be made as idempotent as possible. Centralize intelligence, de-centralize workload ----------------------------------------------- This means that tasks which need to be done relatively infrequently but require either additional knowledge about the state of other components in the Octavia system, advanced logic behind decisions, or otherwise a high degree of intelligence should be done by centralized components (ex. controllers) within the Octavia system. Examples of this might include: * Generating haproxy configuration files * Managing the lifecycle of Octavia amphorae * Moving a loadbalancer instance from one Octavia amphora to another. On the other hand, tasks done extremely often, or which entail a significant load on the system should be pushed as far out to the most horizontally scalable components as possible. Examples of this might include: * Serving actual client requests to end-users (ie. running haproxy) * Monitoring pool members for failure and sending notifications about this * Processing log files There will often be a balance that needs to be struck between these two design considerations for any given task for which an algorithm needs to be designed. In considering how to strike this balance, always consider the conditions that will be present in a large operator environment. Also, as a secondary benefit of centralizing intelligence, minor feature additions and bugfixes can often be accomplished in a large operator environment without having to touch every Octavia amphora running in said environment. All APIs are versioned ---------------------- This includes "internal" APIs between Octavia components. Experience coding in the Neutron LBaaS project has taught us that in a large project with many heterogeneous parts, throughout the lifecycle of this project, different parts will evolve at different rates. It is important that these components are allowed to do so without hindering or being hindered by parallel development in other components. It is also likely that in very large deployments, there might be tens- or hundreds-of-thousands of individual instances of a given component deployed (most likely, the Octavia amphorae). It is unreasonable to expect a large operator to update all of these components at once. Therefore it is likely that for a significant amount of time during a roll-out of a new version, both the old and new versions of a given component must be able to be controlled or otherwise interfaced with by the new components. Both of the above considerations can be allowed for if we use versioning of APIs where components interact with each other. Octavia must also keep in mind Neutron LBaaS API versions. Octavia must have the ability to support multiple simultaneous Neutron LBaaS API versions in an effort to allow for Neutron LBaaS API deprecation of URIs. The rationale is that Neutron LBaaS API users should have the ability to transition from one version to the next easily. Scalability and resilience are as important as functionality ------------------------------------------------------------ Octavia is meant to be an *operator scale* load balancer. As such, it's usually not enough just to get something working: It also needs to be scalable. For most components, "scalable" implies horizontally scalable. In any large operational environment, resilience to failures is a necessity. 
Practically speaking, this means that all components of the system that make up Octavia should be monitored in one way or another, and that, where possible, automatic recovery from the most common kinds of failures should become a standard feature. Where automatic recovery is not an option, some form of notification about the failure should be implemented. Avoid premature optimization ---------------------------- Understand that being "high performance" is often not the same thing as being "scalable." First get the thing to work in an intelligent way. Only worry about making it fast if speed becomes an issue. Don't repeat yourself --------------------- Octavia strives to follow DRY principles. There should be one source of truth, and repetition of code should be avoided. Security is not an afterthought ------------------------------- The load balancer is often the most visible public interface to a given user application, and load balancers themselves often have direct access to sensitive components and data within the application environment. Security bugs will happen, but in general we should not approve designs which have known significant security problems, or which could be made more secure by better design. Octavia should follow industry standards ---------------------------------------- By "industry standards" we either mean RFCs or well-established best practices. We are generally not interested in defining new standards if a prior open standard already exists. We should also avoid doing things which directly or indirectly contradict established standards. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/LICENSE0000664000175000017500000002363700000000000014202 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4582167 octavia-6.2.2/PKG-INFO0000664000175000017500000000640300000000000014272 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: octavia Version: 6.2.2 Summary: OpenStack Octavia Scalable Load Balancer as a Service Home-page: https://docs.openstack.org/octavia/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/octavia.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ======= Octavia ======= .. image:: https://img.shields.io/pypi/v/octavia.svg :target: https://pypi.org/project/octavia/ :alt: Latest Version Octavia is an operator-grade open source scalable load balancer for use in large OpenStack deployments. It delivers load balancing services on `amphorae `_ and provides centralized command and control. Octavia is currently the reference backend for Neutron LBaaS. In the near future, Octavia is likely to become the standard OpenStack LBaaS API endpoint. Octavia is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. Project resources ~~~~~~~~~~~~~~~~~ Developer documentation for the Octavia project is available at https://docs.openstack.org/octavia/latest/ Release notes for the Octavia project are available at https://docs.openstack.org/releasenotes/octavia/ The project source code repository is located at https://opendev.org/openstack/octavia Project status, bugs, and requests for feature enhancements are tracked on https://storyboard.openstack.org/#!/project/openstack/octavia For more information on project direction and guiding principles for contributors, please see the CONSTITUTION.rst file in this directory, or specifications in the specs/ sub-directory. 
The project roadmap is available at https://wiki.openstack.org/wiki/Octavia/Roadmap External Resources ~~~~~~~~~~~~~~~~~~ * Octavia Wiki: https://wiki.openstack.org/wiki/Octavia * For help on usage and hacking of Octavia, please send an email to OpenStack-dev Mailing List with **[Octavia]** tag. Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/README.rst0000664000175000017500000000371700000000000014667 0ustar00zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/octavia.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ======= Octavia ======= .. image:: https://img.shields.io/pypi/v/octavia.svg :target: https://pypi.org/project/octavia/ :alt: Latest Version Octavia is an operator-grade open source scalable load balancer for use in large OpenStack deployments. It delivers load balancing services on `amphorae `_ and provides centralized command and control. Octavia is currently the reference backend for Neutron LBaaS. In the near future, Octavia is likely to become the standard OpenStack LBaaS API endpoint. Octavia is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. Project resources ~~~~~~~~~~~~~~~~~ Developer documentation for the Octavia project is available at https://docs.openstack.org/octavia/latest/ Release notes for the Octavia project are available at https://docs.openstack.org/releasenotes/octavia/ The project source code repository is located at https://opendev.org/openstack/octavia Project status, bugs, and requests for feature enhancements are tracked on https://storyboard.openstack.org/#!/project/openstack/octavia For more information on project direction and guiding principles for contributors, please see the CONSTITUTION.rst file in this directory, or specifications in the specs/ sub-directory. The project roadmap is available at https://wiki.openstack.org/wiki/Octavia/Roadmap External Resources ~~~~~~~~~~~~~~~~~~ * Octavia Wiki: https://wiki.openstack.org/wiki/Octavia * For help on usage and hacking of Octavia, please send an email to OpenStack-dev Mailing List with **[Octavia]** tag. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/TESTING.rst0000664000175000017500000000404500000000000015044 0ustar00zuulzuul00000000000000==================== Testing with Octavia ==================== Unit Testing ------------ Octavia uses tox to manage the virtual environments for running test cases. Install python-tox: .. code-block:: bash $ pip install tox To run the full suite of tests maintained within Octavia: .. code-block:: bash $ tox
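To run only part of the unit test suite, a test path (or any test-matching regular expression) can be passed through ``tox`` to the test runner. This is a sketch assuming the unit test environment forwards positional arguments to the runner in the same way the functional examples below do; the module path shown is illustrative:

.. code-block:: bash

    $ tox -e py37 octavia.tests.unit.common.test_config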
NOTE:: The first time you run ``tox``, it will take additional time to build virtualenvs. You can later use the ``-r`` option with ``tox`` to force your virtualenvs to be rebuilt. To run tests for one or more specific test environments (for example, the most common configuration of Python 3.7 and PEP-8), list the environments with the ``-e`` option, separated by commas: .. code-block:: bash $ tox -e py37,pep8 See ``tox -l`` for the full list of available test environments. Functional Testing ------------------ Octavia creates a simulated API and handler for its functional tests. The tests then run requests against the mocked-up API. To run the entire suite of functional tests: .. code-block:: bash $ tox -e functional To run a specific functional test: .. code-block:: bash $ tox -e functional octavia.tests.functional.api.v2.test_load_balancer Tests can also be run using partial matching, to run all API tests for v2: .. code-block:: bash $ tox -e functional api.v2 Additional options can be used while running tests. Two useful options are ``-- --until-failure``, which runs the tests in a loop until the first failure is hit, and ``-- --failing``, which, if used after an initial run, will only run the tests that failed in the previous run. Scenario Testing ---------------- Octavia uses Tempest to cover the scenario tests for the project. These tests are run against actual cloud deployments. To run the entire suite of scenario tests: .. code-block:: bash $ tox -e scenario .. NOTE:: When running the Tempest scenario tests for the first time, export the Tempest configuration directory (for example, ``TEMPEST_CONFIG_DIR=/opt/stack/tempest/etc``). ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3182166 octavia-6.2.2/api-ref/0000775000175000017500000000000000000000000014515 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3382165 octavia-6.2.2/api-ref/source/0000775000175000017500000000000000000000000016015 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/conf.py0000664000175000017500000001613700000000000017324 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # nova documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default.
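# NOTE: As a quick orientation, the following is a minimal sketch of how this # configuration is typically exercised when building the API reference # locally. The ``api-ref`` tox environment name and the output path are # assumptions, not defined in this file: # # $ tox -e api-ref # # or, invoking sphinx-build directly: # # $ sphinx-build -b html api-ref/source api-ref/build/html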
import os import sys import openstackdocstheme from octavia.version import version_info extensions = [ 'os_api_ref', 'openstackdocstheme' ] html_theme = 'openstackdocs' html_theme_options = { "sidebar_dropdown": "api_ref", "sidebar_mode": "toc" } repository_name = 'openstack/octavia' bug_project = '908' bug_tag = 'api-ref' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'2017-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = version_info.release_string() # The short X.Y version. version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'octaviadoc' # -- Options for LaTeX output ------------------------------------------------- pdf_theme_path = openstackdocstheme.get_pdf_theme_path('openstackdocs') openstack_logo = openstackdocstheme.get_theme_logo_path('openstackdocs') latex_custom_template = r""" \newcommand{\openstacklogo}{%s} \usepackage{%s} """ % (openstack_logo, pdf_theme_path) # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Octavia.tex', u'OpenStack Octavia API Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. 
# latex_use_modindex = True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3382165 octavia-6.2.2/api-ref/source/examples/0000775000175000017500000000000000000000000017633 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/examples/versions-get-resp.json0000664000175000017500000000060700000000000024125 0ustar00zuulzuul00000000000000{ "versions": [{ "status": "SUPPORTED", "updated": "2016-12-11T00:00:00Z", "id": "v2.0", "links": [{ "href": "http://10.21.21.53/load-balancer/v2", "rel": "self" }] }, { "status": "CURRENT", "updated": "2018-04-20T00:00:00Z", "id": "v2.1", "links": [{ "href": "http://10.21.21.53/load-balancer/v2", "rel": "self" }] }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/http-status.yaml0000664000175000017500000000250700000000000021205 0ustar00zuulzuul00000000000000200: default: | Request was successful. 201: default: | Request has been fulfilled and new resource created. 202: default: | Request is accepted, but processing may take some time. 203: default: | Returned information is not full set, but a subset. 204: default: | Request fulfilled but service does not return anything. 300: default: | The resource corresponds to more than one representation. 400: default: | Some content in the request was invalid. 401: default: | Access is denied due to invalid credentials. 403: default: | Policy does not allow current user to do this operation. 404: default: | The requested resource could not be found. 405: default: | Method is not valid for this endpoint and resource. 409: default: | This resource has an action in progress that would conflict with this request. 413: default: | This operation cannot be completed. 415: default: | The entity of the request is in a format not supported by the requested resource for the method. 500: default: | Something went wrong with the service which prevents it from fulfilling the request. 501: default: | The service does not have the functionality required to fulfill this request. 503: default: | The service cannot handle the request right now. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/index.rst0000664000175000017500000000414700000000000017664 0ustar00zuulzuul00000000000000:tocdepth: 2 ============= Octavia API ============= This is a reference for the OpenStack Load Balancing API which is provided by the Octavia project. Current API version :doc:`Octavia API v2` Supported API version None .. toctree:: :hidden: v2/index Octavia API minor releases are additive to the API major revision and share the same URL path. Minor revision changes to the API are called out in the API reference in the section the change occurred in. Subsequent minor versions are a superset of the previous versions of the same major revision. The API status reflects the state of the endpoint on the service. * Current indicates a stable version that is up-to-date, recent, and might receive future versions. This endpoint should be prioritized over all others. * Supported is a stable version that is available on the server. However, it is not likely the most recent available and might not be updated or might be deprecated at some time in the future. 
* Deprecated is a stable version that is still available but is being deprecated and might be removed in the future. * Experimental is not a stable version. This version is under development or contains features that are otherwise subject to change. For more information about API status values and version information, see `Version Discovery `__. .. rest_expand_all:: ------------- API Discovery ------------- List All Versions ======================= .. rest_method:: GET / This fetches all the information about all known API versions in the deployment. Response codes -------------- .. rest_status_code:: success http-status.yaml - 200 .. rest_status_code:: error http-status.yaml - 500 Response -------- .. rest_parameters:: parameters.yaml - id: api_version_id - links: links - status: api_version_status - updated_at: updated_at Response Example ---------------- .. literalinclude:: examples/versions-get-resp.json :language: javascript .. note:: This is just an example output and does not represent the current API versions available. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/parameters.yaml0000664000175000017500000013003000000000000021041 0ustar00zuulzuul00000000000000############################################################################### # Path fields ############################################################################### path-amphora-id: description: | The ID of the amphora to query. in: path required: true type: uuid path-availability-zone-name: description: | The name of the availability zone to query. in: path required: true type: string path-availability-zone-profile-id: description: | The ID of the availability zone profile to query. in: path required: true type: uuid path-flavor-id: description: | The ID of the flavor to query. in: path required: true type: uuid path-flavorprofile-id: description: | The ID of the flavor profile to query. in: path required: true type: uuid path-healthmonitor-id: description: | The ID of the health monitor to query. in: path required: true type: uuid path-l7policy-id: description: | The ID of the L7 policy to query. in: path required: true type: uuid path-l7rule-id: description: | The ID of the L7 rule to query. in: path required: true type: uuid path-listener-id: description: | The ID of the listener to query. in: path required: true type: uuid path-loadbalancer-id: description: | The ID of the load balancer to query. in: path required: true type: uuid path-member-id: description: | The ID of the member to query. in: path required: true type: uuid path-pool-id: description: | The ID of the pool to query. in: path required: true type: uuid path-project-id: description: | The ID of the project to query. in: path required: true type: string path-provider: description: | The provider to query. in: path required: true type: string ############################################################################### # Query fields ############################################################################### additive-only: description: | If ``true`` no members will be deleted during the batch operation. in: query min_version: 2.11 required: false type: boolean cascade-delete: description: | If ``true`` will delete all child objects of the load balancer. in: query required: false type: boolean fields: description: | The fields that you want the server to return. 
If no ``fields`` query parameter is specified, the Octavia API returns all attributes allowed by the policy settings. By using the ``fields`` parameter, the API returns only the requested set of attributes. The ``fields`` parameter can be specified multiple times. For example, if you specify ``fields=id&fields=name`` in the request URL, only the ``id`` and ``name`` attributes will be returned. in: query required: false type: string project_id_query: description: | The ID of the project to query. in: query required: false type: string ############################################################################### # Body fields ############################################################################### action: description: | The action associated with the resource. in: body required: true type: string active_connections: description: | The currently active connections. in: body required: true type: integer address: description: | The IP address of the resource. in: body required: true type: string address-member: description: | The IP address of the backend member server. in: body required: true type: string admin_state_up: description: | The administrative state of the resource, which is up (``true``) or down (``false``). in: body required: true type: boolean admin_state_up-default-optional: description: | The administrative state of the resource, which is up (``true``) or down (``false``). Default is ``true``. in: body required: false type: boolean admin_state_up-optional: description: | The administrative state of the resource, which is up (``true``) or down (``false``). in: body required: false type: boolean allowed_cidrs: description: | A list of IPv4 CIDRs, IPv6 CIDRs, or a mix of both. in: body min_version: 2.12 required: true type: array allowed_cidrs-optional: description: | A list of IPv4 CIDRs, IPv6 CIDRs, or a mix of both. The default is all allowed. When a list of CIDRs is provided, the default switches to deny all. in: body min_version: 2.12 required: false type: array amphora-id: description: | The associated amphora ID. in: body required: true type: uuid amphora-role: description: | The role of the amphora. One of ``STANDALONE``, ``MASTER``, ``BACKUP``. in: body required: true type: string amphora-stats: description: | A list of amphora statistics objects, one per listener. in: body min_version: 2.3 required: true type: array amphora-status: description: | The status of the amphora. One of: ``BOOTING``, ``ALLOCATED``, ``READY``, ``PENDING_CREATE``, ``PENDING_DELETE``, ``DELETED``, ``ERROR``. in: body required: true type: string api_links: description: | Links to the resources in question. in: body required: true type: array api_version_id: description: | A common name for the version. in: body required: true type: string api_version_status: description: | The status of this API version. This can be one of: - ``CURRENT``: This is the preferred version of the API to use. - ``SUPPORTED``: This is an older, but still supported version of the API. - ``DEPRECATED``: A deprecated version of the API that is slated for removal. in: body required: true type: string availability-zone-capabilities: description: | The provider availability zone capabilities dictionary object. in: body required: true type: object availability-zone-capability-description: description: | The provider availability zone capability description. in: body required: true type: string availability-zone-capability-name: description: | The provider availability zone capability name.
in: body required: true type: string availability-zone-data: description: | The JSON string containing the availability zone metadata. in: body required: true type: string availability-zone-data-optional: description: | The JSON string containing the availability zone metadata. in: body required: false type: string availability-zone-name: description: | An availability zone name. in: body required: true type: object availability-zone-name-optional: description: | An availability zone name. in: body required: false type: object availability-zone-profile: description: | An ``availability zone profile`` object. in: body required: true type: object availability-zone-profile-id: description: | The ID of the availability zone profile. in: body required: true type: uuid availability-zone-profiles: description: | A list of ``availability zone profile`` objects. in: body required: true type: array availability-zones: description: | A list of ``availability zone`` objects. in: body required: true type: array availability_zone: description: | An availability zone object. in: body required: true type: object backup: description: | Is the member a backup? Backup members only receive traffic when all non-backup members are down. in: body min_version: 2.1 required: true type: boolean backup-optional: description: | Is the member a backup? Backup members only receive traffic when all non-backup members are down. in: body min_version: 2.1 required: false type: boolean bytes_in: description: | The total bytes received. in: body required: true type: integer bytes_out: description: | The total bytes sent. in: body required: true type: integer ca_tls_container_ref: description: | The reference of the `key manager service `__ secret containing a PEM format CA certificate bundle for ``tls_enabled`` pools. in: body min_version: 2.8 required: true type: string ca_tls_container_ref-optional: description: | The reference of the `key manager service `__ secret containing a PEM format CA certificate bundle for ``tls_enabled`` pools. in: body min_version: 2.8 required: false type: string cached-zone: description: | The availability zone of a compute instance, cached at create time. This is not guaranteed to be current. May be an empty-string if the compute service does not use zones. in: body required: true type: string cert-busy: description: | Whether the certificate is in the process of being replaced. in: body required: true type: string cert-expiration: description: | The date the certificate for the amphora expires. in: body required: true type: string client_authentication: description: | The TLS client authentication mode. One of the options ``NONE``, ``OPTIONAL`` or ``MANDATORY``. in: body min_version: 2.8 required: true type: string client_authentication-optional: description: | The TLS client authentication mode. One of the options ``NONE``, ``OPTIONAL`` or ``MANDATORY``. in: body min_version: 2.8 required: false type: string client_ca_tls_container_ref: description: | The ref of the `key manager service `__ secret containing a PEM format client CA certificate bundle for ``TERMINATED_HTTPS`` listeners. in: body min_version: 2.8 required: true type: string client_ca_tls_container_ref-optional: description: | The ref of the `key manager service `__ secret containing a PEM format client CA certificate bundle for ``TERMINATED_HTTPS`` listeners. 
in: body min_version: 2.8 required: false type: string client_crl_container_ref: description: | The URI of the `key manager service `__ secret containing a PEM format CA revocation list file for ``TERMINATED_HTTPS`` listeners. in: body min_version: 2.8 required: true type: string client_crl_container_ref-optional: description: | The URI of the `key manager service `__ secret containing a PEM format CA revocation list file for ``TERMINATED_HTTPS`` listeners. in: body min_version: 2.8 required: false type: string compute-flavor: description: | The ID of the compute flavor used for the amphora. in: body min_version: 2.3 required: true type: string compute-id: description: | The ID of the amphora resource in the compute system. in: body required: true type: uuid connection_limit: description: | The maximum number of connections permitted for this listener. Default value is -1, which represents infinite connections or a default value defined by the provider driver. in: body required: true type: integer connection_limit-optional: description: | The maximum number of connections permitted for this listener. Default value is -1, which represents infinite connections or a default value defined by the provider driver. in: body required: false type: integer created_at: description: | The UTC date and timestamp when the resource was created. in: body required: true type: string crl_container_ref: description: | The reference of the `key manager service `__ secret containing a PEM format CA revocation list file for ``tls_enabled`` pools. in: body required: true type: string crl_container_ref-optional: description: | The reference of the `key manager service `__ secret containing a PEM format CA revocation list file for ``tls_enabled`` pools. in: body required: false type: string default_pool_id: description: | The ID of the pool used by the listener if no L7 policies match. The pool has some restrictions. See :ref:`valid_protocol`. in: body required: true type: uuid default_pool_id-optional: description: | The ID of the pool used by the listener if no L7 policies match. The pool has some restrictions. See :ref:`valid_protocol`. in: body required: false type: uuid default_tls_container_ref: description: | The URI of the `key manager service `__ secret containing a PKCS12 format certificate/key bundle for ``TERMINATED_HTTPS`` listeners. DEPRECATED: A secret container of type "certificate" containing the certificate and key for ``TERMINATED_HTTPS`` listeners. in: body required: true type: string default_tls_container_ref-optional: description: | The URI of the `key manager service `__ secret containing a PKCS12 format certificate/key bundle for ``TERMINATED_HTTPS`` listeners. DEPRECATED: A secret container of type "certificate" containing the certificate and key for ``TERMINATED_HTTPS`` listeners. in: body required: false type: string description: description: | A human-readable description for the resource. in: body required: true type: string description-optional: description: | A human-readable description for the resource. in: body required: false type: string enabled: description: | If the resource is available for use. in: body required: true type: boolean enabled-optional: description: | If the resource is available for use. The default is ``true``. in: body required: false type: boolean flavor: description: | A flavor object. in: body required: true type: object flavor-capabilities: description: | The provider flavor capabilities dictionary object.
in: body required: true type: object flavor-capability-description: description: | The provider flavor capability description. in: body required: true type: string flavor-capability-name: description: | The provider flavor capability name. in: body required: true type: string flavor-data: description: | The JSON string containing the flavor metadata. in: body required: true type: string flavor-data-optional: description: | The JSON string containing the flavor metadata. in: body required: false type: string flavor-id: description: | The ID of the flavor. in: body required: true type: uuid flavor-id-optional: description: | The ID of the flavor. in: body required: false type: uuid flavor-profile-id: description: | The ID of the flavor profile. in: body required: true type: uuid flavorprofile: description: | A ``flavorprofile`` object. in: body required: true type: object flavorprofiles: description: | A list of ``flavorprofile`` objects. in: body required: true type: array flavors: description: | A list of ``flavor`` objects. in: body required: true type: array healthmonitor-delay: description: | The time, in seconds, between sending probes to members. in: body required: true type: integer healthmonitor-delay-optional: description: | The time, in seconds, between sending probes to members. in: body required: false type: integer healthmonitor-domain_name: description: | The domain name, which will be injected into the HTTP Host header sent to the backend server for HTTP health checks. in: body min_version: 2.10 required: true type: string healthmonitor-domain_name-optional: description: | The domain name, which will be injected into the HTTP Host header sent to the backend server for HTTP health checks. in: body min_version: 2.10 required: false type: string healthmonitor-expected_codes: description: | The list of HTTP status codes expected in response from the member to declare it healthy. Specify one of the following values: - A single value, such as ``200`` - A list, such as ``200, 202`` - A range, such as ``200-204`` in: body required: true type: string healthmonitor-expected_codes-optional: description: | The list of HTTP status codes expected in response from the member to declare it healthy. Specify one of the following values: - A single value, such as ``200`` - A list, such as ``200, 202`` - A range, such as ``200-204`` The default is 200. in: body required: false type: string healthmonitor-http_method: description: | The HTTP method that the health monitor uses for requests. One of ``CONNECT``, ``DELETE``, ``GET``, ``HEAD``, ``OPTIONS``, ``PATCH``, ``POST``, ``PUT``, or ``TRACE``. in: body required: true type: string healthmonitor-http_method-optional: description: | The HTTP method that the health monitor uses for requests. One of ``CONNECT``, ``DELETE``, ``GET``, ``HEAD``, ``OPTIONS``, ``PATCH``, ``POST``, ``PUT``, or ``TRACE``. The default is ``GET``. in: body required: false type: string healthmonitor-http_version: description: | The HTTP version. One of ``1.0`` or ``1.1``. The default is ``1.0``. in: body min_version: 2.10 required: true type: float healthmonitor-http_version-optional: description: | The HTTP version. One of ``1.0`` or ``1.1``. The default is ``1.0``. in: body min_version: 2.10 required: false type: float healthmonitor-id: description: | The associated health monitor ID. in: body required: true type: uuid healthmonitor-max-retries: description: | The number of successful checks before changing the ``operating status`` of the member to ``ONLINE``.
A valid value is from ``1`` to ``10``. in: body required: true type: integer healthmonitor-max-retries-down: description: | The number of allowed check failures before changing the ``operating status`` of the member to ``ERROR``. A valid value is from ``1`` to ``10``. in: body required: true type: integer healthmonitor-max-retries-down-optional: description: | The number of allowed check failures before changing the ``operating status`` of the member to ``ERROR``. A valid value is from ``1`` to ``10``. The default is ``3``. in: body required: false type: integer healthmonitor-max-retries-optional: description: | The number of successful checks before changing the ``operating status`` of the member to ``ONLINE``. A valid value is from ``1`` to ``10``. in: body required: false type: integer healthmonitor-status: description: | The associated health monitor status object. in: body required: true type: object healthmonitor-timeout: description: | The maximum time, in seconds, that a monitor waits to connect before it times out. This value must be less than the delay value. in: body required: true type: integer healthmonitor-timeout-optional: description: | The maximum time, in seconds, that a monitor waits to connect before it times out. This value must be less than the delay value. in: body required: false type: integer healthmonitor-type: description: | The type of health monitor. One of ``HTTP``, ``HTTPS``, ``PING``, ``TCP``, ``TLS-HELLO``, or ``UDP-CONNECT``. in: body required: true type: string healthmonitor-url_path: description: | The HTTP URL path of the request sent by the monitor to test the health of a backend member. Must be a string that begins with a forward slash (``/``). in: body required: true type: string healthmonitor-url_path-optional: description: | The HTTP URL path of the request sent by the monitor to test the health of a backend member. Must be a string that begins with a forward slash (``/``). The default URL path is ``/``. in: body required: false type: string id: description: | The ID of the resource. in: body required: true type: uuid image-id: description: | The ID of the glance image used for the amphora. in: body min_version: 2.1 required: true type: uuid insert_headers: description: | A dictionary of optional headers to insert into the request before it is sent to the backend ``member``. See :ref:`header_insertions`. Both keys and values are always specified as strings. in: body required: true type: object insert_headers-optional: description: | A dictionary of optional headers to insert into the request before it is sent to the backend ``member``. See :ref:`header_insertions`. Both keys and values are always specified as strings. in: body required: false type: object l7policies-optional: description: | A list of L7 policy objects. in: body required: false type: array l7policies-status-object-list: description: | A list of L7 policy status objects. in: body required: true type: array l7policy-action: description: | The L7 policy action. One of ``REDIRECT_PREFIX``, ``REDIRECT_TO_POOL``, ``REDIRECT_TO_URL``, or ``REJECT``. in: body required: true type: string l7policy-action-optional: description: | The L7 policy action. One of ``REDIRECT_PREFIX``, ``REDIRECT_TO_POOL``, ``REDIRECT_TO_URL``, or ``REJECT``. in: body required: false type: string l7policy-id: description: | The ID of the L7 policy. in: body required: true type: uuid l7policy-ids: description: | A list of L7 policy IDs. 
in: body required: true type: array l7policy-position: description: | The position of this policy on the listener. Positions start at 1. in: body required: true type: integer l7policy-position-optional: description: | The position of this policy on the listener. Positions start at 1. in: body required: false type: integer l7policy-redirect-http-code: description: | Requests matching this policy will be redirected to the specified URL or Prefix URL with the HTTP response code. Valid if ``action`` is ``REDIRECT_TO_URL`` or ``REDIRECT_PREFIX``. Valid options are: 301, 302, 303, 307, or 308. Default is 302. in: body min_version: 2.9 required: true type: integer l7policy-redirect-http-code-optional: description: | Requests matching this policy will be redirected to the specified URL or Prefix URL with the HTTP response code. Valid if ``action`` is ``REDIRECT_TO_URL`` or ``REDIRECT_PREFIX``. Valid options are: 301, 302, 303, 307, or 308. Default is 302. in: body min_version: 2.9 required: false type: integer l7policy-redirect-pool_id: description: | Requests matching this policy will be redirected to the pool with this ID. Only valid if ``action`` is ``REDIRECT_TO_POOL``. The pool has some restrictions. See :ref:`valid_protocol`. in: body required: true type: uuid l7policy-redirect-pool_id-optional: description: | Requests matching this policy will be redirected to the pool with this ID. Only valid if ``action`` is ``REDIRECT_TO_POOL``. The pool has some restrictions. See :ref:`valid_protocol`. in: body required: false type: uuid l7policy-redirect-prefix: description: | Requests matching this policy will be redirected to this Prefix URL. Only valid if ``action`` is ``REDIRECT_PREFIX``. in: body required: true type: string l7policy-redirect-prefix-optional: description: | Requests matching this policy will be redirected to this Prefix URL. Only valid if ``action`` is ``REDIRECT_PREFIX``. in: body required: false type: string l7policy-redirect-url: description: | Requests matching this policy will be redirected to this URL. Only valid if ``action`` is ``REDIRECT_TO_URL``. in: body required: true type: string l7policy-redirect-url-optional: description: | Requests matching this policy will be redirected to this URL. Only valid if ``action`` is ``REDIRECT_TO_URL``. in: body required: false type: string l7policy-rule-ids: description: | List of associated L7 rule IDs. in: body required: true type: array l7rule-compare_type: description: | The comparison type for the L7 rule. One of ``CONTAINS``, ``ENDS_WITH``, ``EQUAL_TO``, ``REGEX``, or ``STARTS_WITH``. in: body required: true type: string l7rule-compare_type-optional: description: | The comparison type for the L7 rule. One of ``CONTAINS``, ``ENDS_WITH``, ``EQUAL_TO``, ``REGEX``, or ``STARTS_WITH``. in: body required: false type: string l7rule-id: description: | The ID of the L7 rule. in: body required: true type: uuid l7rule-invert: description: | When ``true``, the logic of the rule is inverted. For example, with invert ``true``, `equal to` would become `not equal to`. in: body required: true type: boolean l7rule-invert-optional: description: | When ``true``, the logic of the rule is inverted. For example, with invert ``true``, `equal to` would become `not equal to`. Default is ``false``. in: body required: false type: boolean l7rule-key: description: | The key to use for the comparison. For example, the name of the cookie to evaluate. in: body required: true type: string l7rule-key-optional: description: | The key to use for the comparison.
For example, the name of the cookie to evaluate. in: body required: false type: string l7rule-type: description: | The L7 rule type. One of ``COOKIE``, ``FILE_TYPE``, ``HEADER``, ``HOST_NAME``, ``PATH``, ``SSL_CONN_HAS_CERT``, ``SSL_VERIFY_RESULT``, or ``SSL_DN_FIELD``. in: body required: true type: string l7rule-type-optional: description: | The L7 rule type. One of ``COOKIE``, ``FILE_TYPE``, ``HEADER``, ``HOST_NAME``, ``PATH``, ``SSL_CONN_HAS_CERT``, ``SSL_VERIFY_RESULT``, or ``SSL_DN_FIELD``. in: body required: false type: string l7rule-value: description: | The value to use for the comparison. For example, the file type to compare. in: body required: true type: string l7rule-value-optional: description: | The value to use for the comparison. For example, the file type to compare. in: body required: false type: string l7rules-status-object-list: description: | A list of L7 rule status objects. in: body required: true type: array lb-algorithm: description: | The load balancing algorithm for the pool. One of ``LEAST_CONNECTIONS``, ``ROUND_ROBIN``, ``SOURCE_IP``, or ``SOURCE_IP_PORT``. in: body required: true type: string lb-algorithm-optional: description: | The load balancing algorithm for the pool. One of ``LEAST_CONNECTIONS``, ``ROUND_ROBIN``, ``SOURCE_IP``, or ``SOURCE_IP_PORT``. in: body required: false type: string lb-network-ip: description: | The management IP of the amphora. in: body required: true type: string links: description: | A list of relative links. Includes the self link for the API. in: body required: true type: array listener: description: | A listener object. in: body required: true type: object listener-id: description: | The ID of the listener. in: body required: true type: uuid listener-id-pool-optional: description: | The ID of the listener for the pool. Either ``listener_id`` or ``loadbalancer_id`` must be specified. The listener has some restrictions. See :ref:`valid_protocol`. in: body required: false type: uuid listener-ids: description: | A list of listener IDs. in: body required: true type: array listeners: description: | The associated listener IDs, if any. in: body required: true type: array listeners-optional: description: | The associated listener IDs, if any. in: body required: false type: array listeners-status-object-list: description: | A list of listener status objects. in: body required: true type: array loadbalancer: description: | A load balancer object. in: body required: true type: object loadbalancer-id: description: | The ID of the load balancer. in: body required: true type: uuid loadbalancer-id-pool-optional: description: | The ID of the load balancer for the pool. Either ``listener_id`` or ``loadbalancer_id`` must be specified. in: body required: false type: uuid loadbalancer-ids: description: | A list of load balancer IDs. in: body required: true type: array loadbalancer-status: description: | A load balancer status object. in: body required: true type: object loadbalancers: description: | A list of ``loadbalancer`` objects. in: body required: true type: array member-id: description: | The ID of the member. in: body required: true type: uuid member-ids: description: | A list of member IDs. in: body required: true type: array members-status-object-list: description: | A list of member status objects. in: body required: true type: array monitor_address: description: | An alternate IP address used for health monitoring a backend member. Default is ``null`` which monitors the member ``address``.
in: body required: true type: string monitor_address-optional: description: | An alternate IP address used for health monitoring a backend member. Default is ``null`` which monitors the member ``address``. in: body required: false type: string monitor_port: description: | An alternate protocol port used for health monitoring a backend member. Default is ``null`` which monitors the member ``protocol_port``. in: body required: true type: integer monitor_port-optional: description: | An alternate protocol port used for health monitoring a backend member. Default is ``null`` which monitors the member ``protocol_port``. in: body required: false type: integer name: description: | Human-readable name of the resource. in: body required: true type: string name-optional: description: | Human-readable name of the resource. in: body required: false type: string operating_status: description: | The operating status of the resource. See :ref:`op_status`. in: body required: true type: string pool-id: description: | The ID of the pool. in: body required: true type: uuid pool-optional: description: | A pool object. in: body required: false type: object pools-status-list: description: | The list of pools status objects. in: body required: true type: array pools_ids: description: | The associated pool IDs, if any. in: body required: true type: array project_id: description: | The ID of the project owning this resource. in: body required: true type: string project_id-optional: description: | The ID of the project owning this resource. in: body required: false type: string project_id-optional-deprecated: description: | The ID of the project owning this resource. (deprecated) in: body required: false type: string protocol: description: | The protocol for the resource. One of ``HTTP``, ``HTTPS``, ``TCP``, ``TERMINATED_HTTPS``, or ``UDP``. in: body required: true type: string protocol-pools: description: | The protocol for the resource. One of ``HTTP``, ``HTTPS``, ``PROXY``, ``TCP``, or ``UDP``. in: body required: true type: string protocol_port: description: | The protocol port number for the resource. in: body required: true type: integer protocol_port-member: description: | The protocol port number the backend member server is listening on. in: body required: true type: integer provider: description: | Provider name for the load balancer. in: body required: true type: string provider-description: description: | Provider description. in: body required: true type: string provider-name: description: | Provider name. in: body required: true type: string provider-name-optional: description: | Provider name. in: body required: false type: string provider-optional: description: | Provider name for the load balancer. Default is ``octavia``. in: body required: false type: string provisioning_status: description: | The provisioning status of the resource. See :ref:`prov_status`. in: body required: true type: string quota-health_monitor: description: | The configured health monitor quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: true type: integer quota-health_monitor-optional: description: | The configured health monitor quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: false type: integer quota-listener: description: | The configured listener quota limit. A setting of ``null`` means it is using the deployment default quota. 
A setting of ``-1`` means unlimited. in: body required: true type: integer quota-listener-optional: description: | The configured listener quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: false type: integer quota-load_balancer: description: | The configured load balancer quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: true type: integer quota-load_balancer-optional: description: | The configured load balancer quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: false type: integer quota-member: description: | The configured member quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: true type: integer quota-member-optional: description: | The configured member quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: false type: integer quota-pool: description: | The configured pool quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: true type: integer quota-pool-optional: description: | The configured pool quota limit. A setting of ``null`` means it is using the deployment default quota. A setting of ``-1`` means unlimited. in: body required: false type: integer request_errors: description: | The total number of requests that could not be fulfilled. in: body required: true type: integer session_persistence: description: | A JSON object specifying the session persistence for the pool or ``null`` for no session persistence. See :ref:`session_persistence`. Default is ``null``. in: body required: true type: object session_persistence-optional: description: | A JSON object specifying the session persistence for the pool or ``null`` for no session persistence. See :ref:`session_persistence`. Default is ``null``. in: body required: false type: object session_persistence_cookie: description: | The name of the cookie to use for session persistence. Only applicable to the ``APP_COOKIE`` session persistence type where it is required. in: body required: false type: string session_persistence_granularity: description: | The netmask used to determine UDP session persistence. Currently only valid for UDP pools with session persistence of SOURCE_IP. Default netmask is 255.255.255.255, meaning persistence is applied per full client IP address. in: body min_version: 2.2 required: false type: string session_persistence_timeout: description: | The timeout, in seconds, after which a UDP flow may be rescheduled to a different member. Currently only applies to UDP pools with session persistence of SOURCE_IP. Default is 360. in: body min_version: 2.2 required: false type: integer session_persistence_type: description: | Session persistence type for the pool. One of ``APP_COOKIE``, ``HTTP_COOKIE``, or ``SOURCE_IP``. in: body required: true type: string sni_container_refs: description: | A list of URIs to the `key manager service `__ secrets containing PKCS12 format certificate/key bundles for ``TERMINATED_HTTPS`` listeners. (DEPRECATED) Secret containers of type "certificate" containing the certificates and keys for ``TERMINATED_HTTPS`` listeners.
in: body required: true type: array sni_container_refs-optional: description: | A list of URIs to the `key manager service `__ secrets containing PKCS12 format certificate/key bundles for ``TERMINATED_HTTPS`` listeners. (DEPRECATED) Secret containers of type "certificate" containing the certificates and keys for ``TERMINATED_HTTPS`` listeners. in: body required: false type: array stats: description: | A statistics object. in: body required: true type: object statuses: description: | The status tree of a load balancer object contains all provisioning and operating statuses for its children. in: body required: true type: object subnet_id: description: | The subnet ID the member service is accessible from. in: body required: true type: uuid subnet_id-optional: description: | The subnet ID the member service is accessible from. in: body required: false type: uuid tags: description: | A list of simple strings assigned to the resource. in: body min_version: 2.5 required: true type: list tags-optional: description: | A list of simple strings assigned to the resource. in: body min_version: 2.5 required: false type: list timeout_client_data: description: | Frontend client inactivity timeout in milliseconds. Default: 50000. in: body min_version: 2.1 required: true type: integer timeout_client_data-optional: description: | Frontend client inactivity timeout in milliseconds. Default: 50000. in: body min_version: 2.1 required: false type: integer timeout_member_connect: description: | Backend member connection timeout in milliseconds. Default: 5000. in: body min_version: 2.1 required: true type: integer timeout_member_connect-optional: description: | Backend member connection timeout in milliseconds. Default: 5000. in: body min_version: 2.1 required: false type: integer timeout_member_data: description: | Backend member inactivity timeout in milliseconds. Default: 50000. in: body min_version: 2.1 required: true type: integer timeout_member_data-optional: description: | Backend member inactivity timeout in milliseconds. Default: 50000. in: body min_version: 2.1 required: false type: integer timeout_tcp_inspect: description: | Time, in milliseconds, to wait for additional TCP packets for content inspection. Default: 0. in: body min_version: 2.1 required: true type: integer timeout_tcp_inspect-optional: description: | Time, in milliseconds, to wait for additional TCP packets for content inspection. Default: 0. in: body min_version: 2.1 required: false type: integer tls_ciphers: description: | List of ciphers in OpenSSL format (colon-separated). See https://www.openssl.org/docs/man1.1.1/man1/ciphers.html in: body min_version: 2.15 required: true type: string tls_ciphers-optional: description: | List of ciphers in OpenSSL format (colon-separated). See https://www.openssl.org/docs/man1.1.1/man1/ciphers.html in: body min_version: 2.15 required: false type: string tls_container_ref: description: | The reference to the `key manager service `__ secret containing a PKCS12 format certificate/key bundle for ``tls_enabled`` pools for TLS client authentication to the member servers. in: body min_version: 2.8 required: true type: string tls_container_ref-optional: description: | The reference to the `key manager service `__ secret containing a PKCS12 format certificate/key bundle for ``tls_enabled`` pools for TLS client authentication to the member servers. in: body min_version: 2.8 required: false type: string tls_enabled: description: | When ``true`` connections to backend member servers will use TLS encryption. 
Default is ``false``. in: body min_version: 2.8 required: true type: boolean tls_enabled-optional: description: | When ``true`` connections to backend member servers will use TLS encryption. Default is ``false``. in: body min_version: 2.8 required: false type: boolean total_connections: description: | The total connections handled. in: body required: true type: integer type: description: | The type associated with the resource. in: body required: true type: string updated_at: description: | The UTC date and timestamp when the resource was last updated. in: body required: true type: string vip_address: description: | The IP address of the Virtual IP (VIP). in: body required: true type: string vip_address-optional: description: | The IP address of the Virtual IP (VIP). in: body required: false type: string vip_network_id: description: | The ID of the network for the Virtual IP (VIP). in: body required: true type: uuid vip_network_id-optional: description: | The ID of the network for the Virtual IP (VIP). One of ``vip_network_id``, ``vip_port_id``, or ``vip_subnet_id`` must be specified. in: body required: false type: uuid vip_port_id: description: | The ID of the Virtual IP (VIP) port. in: body required: true type: uuid vip_port_id-optional: description: | The ID of the Virtual IP (VIP) port. One of ``vip_network_id``, ``vip_port_id``, or ``vip_subnet_id`` must be specified. in: body required: false type: uuid vip_qos_policy_id: description: | The ID of the QoS Policy which will apply to the Virtual IP (VIP). in: body required: true type: uuid vip_qos_policy_id-optional: description: | The ID of the QoS Policy which will apply to the Virtual IP (VIP). in: body required: false type: uuid vip_subnet_id: description: | The ID of the subnet for the Virtual IP (VIP). in: body required: true type: uuid vip_subnet_id-optional: description: | The ID of the subnet for the Virtual IP (VIP). One of ``vip_network_id``, ``vip_port_id``, or ``vip_subnet_id`` must be specified. in: body required: false type: uuid vrrp-id: description: | The vrrp group's ID for the amphora. in: body required: true type: string vrrp-interface: description: | The bound interface name of the vrrp port on the amphora. in: body required: true type: string vrrp-ip: description: | The address of the vrrp port on the amphora. in: body required: true type: string vrrp-port-id: description: | The vrrp port's ID in the networking system. in: body required: true type: uuid vrrp-priority: description: | The priority of the amphora in the vrrp group. in: body required: true type: string weight: description: | The weight of a member determines the portion of requests or connections it services compared to the other members of the pool. For example, a member with a weight of 10 receives five times as many requests as a member with a weight of 2. A value of 0 means the member does not receive new connections but continues to service existing connections. A valid value is from ``0`` to ``256``. Default is ``1``. in: body required: true type: integer weight-optional: description: | The weight of a member determines the portion of requests or connections it services compared to the other members of the pool. For example, a member with a weight of 10 receives five times as many requests as a member with a weight of 2. A value of 0 means the member does not receive new connections but continues to service existing connections. A valid value is from ``0`` to ``256``. Default is ``1``. 
in: body required: false type: integer ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3382165 octavia-6.2.2/api-ref/source/v2/0000775000175000017500000000000000000000000016344 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/amphora.inc0000664000175000017500000001416400000000000020474 0ustar00zuulzuul00000000000000.. -*- rst -*- List Amphora ============ .. rest_method:: GET /v2/octavia/amphorae Lists all amphorae for the project. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. The list might be empty. .. NOTE:: The field `cached_zone` should be used for quick filtering and reference only, as it may be out of date. If an up-to-date zone is vital, we recommend retrieving details directly from the compute service. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields Curl Example ------------ .. literalinclude:: examples/amphora-list-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - id: amphora-id - loadbalancer_id: loadbalancer-id - compute_id: compute-id - lb_network_ip: lb-network-ip - vrrp_ip: vrrp-ip - ha_ip: vip_address - vrrp_port_id: vrrp-port-id - ha_port_id: vip_port_id - cert_expiration: cert-expiration - cert_busy: cert-busy - role: amphora-role - status: amphora-status - vrrp_interface: vrrp-interface - vrrp_id: vrrp-id - vrrp_priority: vrrp-priority - cached_zone: cached-zone - created_at: created_at - updated_at: updated_at - image_id: image-id - compute_flavor: compute-flavor Response Example ---------------- .. literalinclude:: examples/amphora-list-response.json :language: javascript Show Amphora Details ==================== .. rest_method:: GET /v2/octavia/amphorae/{amphora_id} Shows the details of an amphora. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. This operation does not require a request body. .. NOTE:: The field `cached_zone` should be used for quick filtering and reference only, as it may be out of date. If an up-to-date zone is vital, we recommend retrieving details directly from the compute service. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - amphora_id: path-amphora-id Curl Example ------------ .. literalinclude:: examples/amphora-show-curl :language: bash Response Parameters ------------------- ..
rest_parameters:: ../parameters.yaml - id: amphora-id - loadbalancer_id: loadbalancer-id - compute_id: compute-id - lb_network_ip: lb-network-ip - vrrp_ip: vrrp-ip - ha_ip: vip_address - vrrp_port_id: vrrp-port-id - ha_port_id: vip_port_id - cert_expiration: cert-expiration - cert_busy: cert-busy - role: amphora-role - status: amphora-status - vrrp_interface: vrrp-interface - vrrp_id: vrrp-id - vrrp_priority: vrrp-priority - cached_zone: cached-zone - created_at: created_at - updated_at: updated_at - image_id: image-id - compute_flavor: compute-flavor Response Example ---------------- .. literalinclude:: examples/amphora-show-response.json :language: javascript Show Amphora Statistics ======================= .. rest_method:: GET /v2/octavia/amphorae/{amphora_id}/stats Show the statistics for an amphora. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. Use the ``fields`` query parameter to control which fields are returned in the response body. **New in version 2.3** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - amphora_id: path-amphora-id - fields: fields Curl Example ------------ .. literalinclude:: examples/amphora-show-stats-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - active_connections: active_connections - amphora_stats: amphora-stats - bytes_in: bytes_in - bytes_out: bytes_out - id: amphora-id - listener_id: listener-id - loadbalancer_id: loadbalancer-id - request_errors: request_errors - total_connections: total_connections Response Example ---------------- .. literalinclude:: examples/amphora-show-stats-response.json :language: javascript Configure Amphora ================= .. rest_method:: PUT /v2/octavia/amphorae/{amphora_id}/config Update the amphora agent configuration. This will push the new configuration to the amphora agent and will update the configuration options that are mutable. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. This operation does not require a request body. **New in version 2.7** .. rest_status_code:: success ../http-status.yaml - 202 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - amphora_id: path-amphora-id Curl Example ------------ .. literalinclude:: examples/amphora-config-curl :language: bash Response -------- There is no body content for the response of a successful PUT request. Failover Amphora ================ .. rest_method:: PUT /v2/octavia/amphorae/{amphora_id}/failover Force an amphora to fail over. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. This operation does not require a request body. .. rest_status_code:: success ../http-status.yaml - 202 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - amphora_id: path-amphora-id Curl Example ------------ .. literalinclude:: examples/amphora-failover-curl :language: bash Response -------- There is no body content for the response of a successful PUT request.
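The failover call is typically chained with the list call above to find the target amphora first. The following is a minimal sketch of that workflow, not one of the packaged examples; it assumes a valid token is exported in ``TOKEN``, reuses the endpoint and load balancer ID from the examples in this chapter, and uses ``jq`` to extract the first amphora ID.

.. code-block:: bash

   # List the amphorae of one load balancer and capture the first ID.
   AMP_ID=$(curl -s -X GET -H "X-Auth-Token: $TOKEN" \
     "http://198.51.100.10:9876/v2/octavia/amphorae?loadbalancer_id=09eedfc6-2c55-41a8-a75c-2cd4e95212ca" \
     | jq -r '.amphorae[0].id')

   # Force that amphora to fail over; success returns HTTP 202 with no body.
   curl -X PUT -H "X-Auth-Token: $TOKEN" \
     "http://198.51.100.10:9876/v2/octavia/amphorae/${AMP_ID}/failover"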
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/availabilityzone.inc0000664000175000017500000001375200000000000022415 0ustar00zuulzuul00000000000000.. -*- rst -*- List Availability Zones ======================= .. rest_method:: GET /v2.0/lbaas/availabilityzones List all available availability zones. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. The list might be empty. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields Curl Example ------------ .. literalinclude:: examples/availabilityzone-list-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - description: description - enabled: enabled - availability_zone_profile_id: availability-zone-profile-id - availability_zones: availability-zones - name: name Response Example ---------------- .. literalinclude:: examples/availabilityzone-list-response.json :language: javascript Create Availability Zone ======================== .. rest_method:: POST /v2.0/lbaas/availabilityzones Creates an availability zone. If the API cannot fulfill the request due to insufficient data or data that is not valid, the service returns the HTTP ``Bad Request (400)`` response code with information about the failure in the response body. Validation errors require that you correct the error and submit the request again. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 201 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - description: description-optional - enabled: enabled-optional - availability_zone: availability_zone - availability_zone_profile_id: availability-zone-profile-id - name: name Request Example --------------- .. literalinclude:: examples/availabilityzone-create-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/availabilityzone-create-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - description: description - enabled: enabled - availability_zone_profile_id: availability-zone-profile-id - availability_zone: availability_zone - name: name Response Example ---------------- .. literalinclude:: examples/availabilityzone-create-response.json :language: javascript Show Availability Zone Details ============================== .. rest_method:: GET /v2.0/lbaas/availabilityzones/{availability_zone_name} Shows the details of an availability zone. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. This operation does not require a request body. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 401 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - availability_zone_name: path-availability-zone-name Curl Example ------------ ..
literalinclude:: examples/availabilityzone-show-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - description: description - enabled: enabled - availability_zone_profile_id: availability-zone-profile-id - availability_zone: availability_zone - name: name Response Example ---------------- .. literalinclude:: examples/availabilityzone-show-response.json :language: javascript Update an Availability Zone =========================== .. rest_method:: PUT /v2.0/lbaas/availabilityzones/{availability_zone_name} Update an availability zone. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - description: description-optional - enabled: enabled-optional - availability_zone: availability_zone - availability_zone_name: path-availability-zone-name Request Example --------------- .. literalinclude:: examples/availabilityzone-update-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/availabilityzone-update-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - description: description - enabled: enabled - availability_zone_profile_id: availability-zone-profile-id - availability_zone: availability_zone - name: name Response Example ---------------- .. literalinclude:: examples/availabilityzone-update-response.json :language: javascript Remove an Availability Zone =========================== .. rest_method:: DELETE /v2.0/lbaas/availabilityzones/{availability_zone_name} Remove an availability zone and its associated configuration. If any load balancers are using this availability zone, the service returns the HTTP ``Conflict (409)`` response code. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 204 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - availability_zone_name: path-availability-zone-name Curl Example ------------ .. literalinclude:: examples/availabilityzone-delete-curl :language: bash Response -------- There is no body content for the response of a successful DELETE request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/availabilityzoneprofile.inc0000664000175000017500000001516400000000000023775 0ustar00zuulzuul00000000000000.. -*- rst -*- List Availability Zone Profiles =============================== .. rest_method:: GET /v2.0/lbaas/availabilityzoneprofiles List all available Availability Zone Profiles. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. The list might be empty. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields Curl Example ------------ ..
literalinclude:: examples/availabilityzoneprofile-list-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - availability_zone_data: availability-zone-data - availability_zone_profiles: availability-zone-profiles - id: availability-zone-profile-id - name: name - provider_name: provider-name Response Example ---------------- .. literalinclude:: examples/availabilityzoneprofile-list-response.json :language: javascript Create Availability Zone Profile ================================ .. rest_method:: POST /v2.0/lbaas/availabilityzoneprofiles Creates an Availability Zone Profile. If the API cannot fulfill the request due to insufficient data or data that is not valid, the service returns the HTTP ``Bad Request (400)`` response code with information about the failure in the response body. Validation errors require that you correct the error and submit the request again. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 201 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - availability_zone_data: availability-zone-data - availability_zone_profile: availability-zone-profile - name: name - provider_name: provider-name Request Example --------------- .. literalinclude:: examples/availabilityzoneprofile-create-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/availabilityzoneprofile-create-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - availability_zone_data: availability-zone-data - availability_zone_profile: availability-zone-profile - id: availability-zone-profile-id - name: name - provider_name: provider-name Response Example ---------------- .. literalinclude:: examples/availabilityzoneprofile-create-response.json :language: javascript Show Availability Zone Profile Details ====================================== .. rest_method:: GET /v2.0/lbaas/availabilityzoneprofiles/{availability_zone_profile_id} Shows the details of an Availability Zone Profile. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. This operation does not require a request body. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - availability_zone_profile_id: path-availability-zone-profile-id Curl Example ------------ .. literalinclude:: examples/availabilityzoneprofile-show-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - availability_zone_data: availability-zone-data - availability_zone_profile: availability-zone-profile - id: availability-zone-profile-id - name: name - provider_name: provider-name Response Example ---------------- .. literalinclude:: examples/availabilityzoneprofile-show-response.json :language: javascript Update an Availability Zone Profile =================================== ..
rest_method:: PUT /v2.0/lbaas/availabilityzoneprofiles/{availability_zone_profile_id} Update an Availability Zone Profile. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - availability_zone_data: availability-zone-data-optional - availability_zone_profile: availability-zone-profile - availability_zone_profile_id: path-availability-zone-profile-id - name: name-optional - provider_name: provider-name-optional Request Example --------------- .. literalinclude:: examples/availabilityzoneprofile-update-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/availabilityzoneprofile-update-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - availability_zone_data: availability-zone-data - availability_zone_profile: availability-zone-profile - id: availability-zone-profile-id - name: name - provider_name: provider-name Response Example ---------------- .. literalinclude:: examples/availabilityzoneprofile-update-response.json :language: javascript Remove an Availability Zone Profile =================================== .. rest_method:: DELETE /v2.0/lbaas/availabilityzoneprofiles/{availability_zone_profile_id} Remove an Availability Zone Profile and its associated configuration. If any availability zone is using this Availability Zone Profile, the service returns the HTTP ``Conflict (409)`` response code. If you are not an administrative user, the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 204 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - availability_zone_profile_id: path-availability-zone-profile-id Curl Example ------------ .. literalinclude:: examples/availabilityzoneprofile-delete-curl :language: bash Response -------- There is no body content for the response of a successful DELETE request.
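An availability zone can only be created against an existing Availability Zone Profile, so the two POST calls in these chapters are usually chained. The following is a minimal sketch of that workflow, not one of the packaged examples; it assumes a valid token in ``TOKEN`` and ``jq`` for ID extraction, and the names and zone data are illustrative only.

.. code-block:: bash

   # Create the profile first; note that availability_zone_data is a JSON
   # document passed as an escaped string.
   PROFILE_ID=$(curl -s -X POST -H "Content-Type: application/json" \
     -H "X-Auth-Token: $TOKEN" \
     -d '{"availability_zone_profile": {"name": "some_az", "provider_name": "amphora", "availability_zone_data": "{\"compute_zone\": \"az1\"}"}}' \
     http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles \
     | jq -r '.availability_zone_profile.id')

   # Then create an availability zone that references the new profile.
   curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" \
     -d "{\"availability_zone\": {\"name\": \"my_az\", \"enabled\": true, \"availability_zone_profile_id\": \"$PROFILE_ID\"}}" \
     http://198.51.100.10:9876/v2.0/lbaas/availabilityzones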
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/api-ref/source/v2/examples/0000775000175000017500000000000000000000000020162 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/amphora-config-curl0000664000175000017500000000020100000000000023733 0ustar00zuulzuul00000000000000curl -X PUT -H "X-Auth-Token: " http://198.51.100.10:9876/v2/octavia/amphorae/6bd55cd3-802e-447e-a518-1e74e23bb106/config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/amphora-failover-curl0000664000175000017500000000020300000000000024277 0ustar00zuulzuul00000000000000curl -X PUT -H "X-Auth-Token: " http://198.51.100.10:9876/v2/octavia/amphorae/6bd55cd3-802e-447e-a518-1e74e23bb106/failover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/amphora-list-curl0000664000175000017500000000021200000000000023445 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/octavia/amphorae?loadbalancer_id=09eedfc6-2c55-41a8-a75c-2cd4e95212ca ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/amphora-list-response.json0000664000175000017500000000370700000000000025320 0ustar00zuulzuul00000000000000{ "amphorae": [ { "id": "6bd55cd3-802e-447e-a518-1e74e23bb106", "load_balancer_id": "09eedfc6-2c55-41a8-a75c-2cd4e95212ca", "compute_id": "f0f79f90-733d-417a-8d70-cc6be62cd54d", "lb_network_ip": "192.168.1.2", "vrrp_ip": "192.168.1.5", "ha_ip": "192.168.1.10", "vrrp_port_id": "ab2a8add-76a9-44bb-89f8-88430193cc83", "ha_port_id": "19561fd3-5da5-46cc-bdd3-99bbdf7246e6", "cert_expiration": "2019-09-19 00:34:51", "cert_busy": 0, "role": "MASTER", "status": "ALLOCATED", "vrrp_interface": "eth1", "vrrp_id": 1, "vrrp_priority": 100, "cached_zone": "zone1", "created_at": "2017-05-10T18:14:44", "updated_at": "2017-05-10T23:08:12", "image_id": "c1c2ad6f-1c1e-4744-8d1a-d0ef36289e74", "compute_flavor": "5446a14a-abec-4455-bc0e-a34e5ff001a3" }, { "id": "89c186a3-cb16-497b-b099-c4bd40316642", "load_balancer_id": "09eedfc6-2c55-41a8-a75c-2cd4e95212ca", "compute_id": "24b1cb54-122d-4960-9035-083642f5c2bb", "lb_network_ip": "192.168.1.3", "vrrp_ip": "192.168.1.6", "ha_ip": "192.168.1.10", "vrrp_port_id": "cae421f6-dcf0-4866-9438-d0c682645799", "ha_port_id": "19561fd3-5da5-46cc-bdd3-99bbdf7246e6", "cert_expiration": "2019-09-19 00:34:51", "cert_busy": 0, "role": "BACKUP", "status": "ALLOCATED", "vrrp_interface": "eth1", "vrrp_id": 1, "vrrp_priority": 200, "cached_zone": "zone2", "created_at": "2017-06-11T19:15:45", "updated_at": "2017-06-12T00:09:13", "image_id": "1014292d-cbaa-4ad6-b38b-2e138389f87f", "compute_flavor": "5446a14a-abec-4455-bc0e-a34e5ff001a3" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/amphora-show-curl0000664000175000017500000000017200000000000023455 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/octavia/amphorae/6bd55cd3-802e-447e-a518-1e74e23bb106 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/api-ref/source/v2/examples/amphora-show-response.json0000664000175000017500000000161400000000000025320 0ustar00zuulzuul00000000000000{ "amphora": { "id": "6bd55cd3-802e-447e-a518-1e74e23bb106", "load_balancer_id": "09eedfc6-2c55-41a8-a75c-2cd4e95212ca", "compute_id": "f0f79f90-733d-417a-8d70-cc6be62cd54d", "lb_network_ip": "192.168.1.2", "vrrp_ip": "192.168.1.5", "ha_ip": "192.168.1.10", "vrrp_port_id": "ab2a8add-76a9-44bb-89f8-88430193cc83", "ha_port_id": "19561fd3-5da5-46cc-bdd3-99bbdf7246e6", "cert_expiration": "2019-09-19 00:34:51", "cert_busy": 0, "role": "MASTER", "status": "ALLOCATED", "vrrp_interface": "eth1", "vrrp_id": 1, "vrrp_priority": 100, "cached_zone": "zone1", "created_at": "2017-05-10T18:14:44", "updated_at": "2017-05-10T23:08:12", "image_id": "c1c2ad6f-1c1e-4744-8d1a-d0ef36289e74", "compute_flavor": "5446a14a-abec-4455-bc0e-a34e5ff001a3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/amphora-show-stats-curl0000664000175000017500000000020000000000000024601 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/octavia/amphorae/63d8349e-c4d7-4156-bc94-29260607b04f/stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/amphora-show-stats-response.json0000664000175000017500000000145600000000000026460 0ustar00zuulzuul00000000000000{ "amphora_stats": [ { "active_connections": 48629, "bytes_in": 65671420, "bytes_out": 774771186, "id": "63d8349e-c4d7-4156-bc94-29260607b04f", "listener_id": "bbe44114-cda2-4fe0-b192-d9e24ce661db", "loadbalancer_id": "65b5a7c3-1437-4909-84cf-cec9f7e371ea", "request_errors": 0, "total_connections": 26189172 }, { "active_connections": 0, "bytes_in": 5, "bytes_out": 100, "id": "63d8349e-c4d7-4156-bc94-29260607b04f", "listener_id": "af45a658-4eeb-4ce9-8b7e-16b0e5676f87", "loadbalancer_id": "65b5a7c3-1437-4909-84cf-cec9f7e371ea", "request_errors": 0, "total_connections": 1 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-create-curl0000664000175000017500000000045200000000000025660 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"availability_zone":{"name":"my_az","description":"My availability zone.","enabled":true,"availability_zone_profile_id":"5712097e-0092-45dc-bff0-ab68b61ad51a"}}' http://198.51.100.10:9876/v2.0/lbaas/availabilityzones ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-create-request.json0000664000175000017500000000032600000000000027353 0ustar00zuulzuul00000000000000{ "availability_zone": { "name": "my_az", "description": "My availability zone.", "enabled": true, "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-create-response.json0000664000175000017500000000032600000000000027521 0ustar00zuulzuul00000000000000{ "availability_zone": { "name": "my_az", "description": "My availability zone.", "enabled": true, "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } } 
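As the create response above shows, an availability zone has no separate ``id`` field; its ``name`` is the identifier, and the show, update, and delete calls that follow all address the resource by name. A minimal sketch of re-reading the zone created above, assuming a valid token in ``TOKEN`` and ``jq``:

.. code-block:: bash

   # The name is the key, so no UUID needs to be captured from the
   # create response before making follow-up calls.
   curl -s -X GET -H "X-Auth-Token: $TOKEN" \
     http://198.51.100.10:9876/v2.0/lbaas/availabilityzones/my_az \
     | jq '.availability_zone.enabled'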
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-delete-curl0000664000175000017500000000014700000000000025660 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzones/my_az ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-list-curl0000664000175000017500000000013600000000000025367 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzones ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-list-response.json0000664000175000017500000000037300000000000027233 0ustar00zuulzuul00000000000000{ "availability_zones": [ { "name": "my_az", "description": "My availability zone.", "enabled": true, "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-show-curl0000664000175000017500000000014400000000000025373 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzones/my_az ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-show-response.json0000664000175000017500000000032600000000000027236 0ustar00zuulzuul00000000000000{ "availability_zone": { "name": "my_az", "description": "My availability zone.", "enabled": true, "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-update-curl0000664000175000017500000000033300000000000025675 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"availability_zone":{"description":"My availability zone.","enabled":false}}' http://198.51.100.10:9876/v2.0/lbaas/availabilityzones/my_az ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-update-request.json0000664000175000017500000000015600000000000027373 0ustar00zuulzuul00000000000000{ "availability_zone": { "description": "My availability zone.", "enabled": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzone-update-response.json0000664000175000017500000000032700000000000027541 0ustar00zuulzuul00000000000000{ "availability_zone": { "name": "my_az", "description": "My availability zone.", "enabled": false, "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-create-curl0000664000175000017500000000042100000000000027235 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d 
'{"availability_zone_profile":{"name":"some_az","provider_name":"amphora","availability_zone_data":"{\"compute_zone\": \"az1\"}"}}' http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-create-request.json0000664000175000017500000000030400000000000030730 0ustar00zuulzuul00000000000000{ "availability_zone_profile": { "name": "some_az", "provider_name": "amphora", "availability_zone_data": "{\"compute_zone\": \"az1\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-create-response.json0000664000175000017500000000037600000000000031107 0ustar00zuulzuul00000000000000{ "availability_zone_profile": { "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", "name": "some_az", "provider_name": "amphora", "availability_zone_data": "{\"compute_zone\": \"az1\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-delete-curl0000664000175000017500000000021500000000000027235 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-list-curl0000664000175000017500000000014500000000000026750 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-list-response.json0000664000175000017500000000040700000000000030612 0ustar00zuulzuul00000000000000{ "availability_zone_profiles": [ { "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", "name": "some_az", "provider_name": "amphora", "availability_zone_data": "{\"compute_zone\": \"az1\"}" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-show-curl0000664000175000017500000000021200000000000026750 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-show-response.json0000664000175000017500000000037600000000000030624 0ustar00zuulzuul00000000000000{ "availability_zone_profile": { "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", "name": "some_az", "provider_name": "amphora", "availability_zone_data": "{\"compute_zone\": \"az1\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-update-curl0000664000175000017500000000046600000000000027265 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d 
'{"availability_zone_profile":{"name":"other_az","provider_name":"amphora","availability_zone_data":"{\"compute_zone\": \"az2\"}"}}' http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-update-request.json0000664000175000017500000000030500000000000030750 0ustar00zuulzuul00000000000000{ "availability_zone_profile": { "name": "other_az", "provider_name": "amphora", "availability_zone_data": "{\"compute_zone\": \"az2\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/availabilityzoneprofile-update-response.json0000664000175000017500000000037700000000000031127 0ustar00zuulzuul00000000000000{ "availability_zone_profile": { "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", "name": "other_az", "provider_name": "amphora", "availability_zone_data": "{\"compute_zone\": \"az2\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-create-curl0000664000175000017500000000043600000000000023605 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"flavor":{"name":"Basic","description":"A basic standalone Octavia load balancer.","enabled":true,"flavor_profile_id":"5712097e-0092-45dc-bff0-ab68b61ad51a"}}' http://198.51.100.10:9876/v2.0/lbaas/flavors ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-create-request.json0000664000175000017500000000032400000000000025274 0ustar00zuulzuul00000000000000{ "flavor": { "name": "Basic", "description": "A basic standalone Octavia load balancer.", "enabled": true, "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-create-response.json0000664000175000017500000000043600000000000025446 0ustar00zuulzuul00000000000000{ "flavor": { "id": "8f94060c-8d5b-4472-9cfd-e8a2b909481d", "name": "Basic", "description": "A basic standalone Octavia load balancer.", "enabled": true, "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-delete-curl0000664000175000017500000000017400000000000023603 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavors/8f94060c-8d5b-4472-9cfd-e8a2b909481d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-list-curl0000664000175000017500000000012400000000000023307 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavors ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-profile-list-curl0000664000175000017500000000013300000000000024745 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles 
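A flavor must be bound to an existing flavor profile through ``flavor_profile_id``, so the profile list call above is often combined with the flavor create call. A minimal sketch of that workflow, not one of the packaged examples; it assumes a valid token in ``TOKEN``, ``jq``, and at least one existing flavor profile.

.. code-block:: bash

   # Pick an existing flavor profile ID from the list call above.
   FP_ID=$(curl -s -X GET -H "X-Auth-Token: $TOKEN" \
     http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles \
     | jq -r '.flavorprofiles[0].id')

   # Create a user-visible flavor bound to that profile.
   curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" \
     -d "{\"flavor\": {\"name\": \"Basic\", \"description\": \"A basic standalone Octavia load balancer.\", \"enabled\": true, \"flavor_profile_id\": \"$FP_ID\"}}" \
     http://198.51.100.10:9876/v2.0/lbaas/flavors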
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-show-curl0000664000175000017500000000017100000000000023316 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavors/8f94060c-8d5b-4472-9cfd-e8a2b909481d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-show-response.json0000664000175000017500000000043600000000000025163 0ustar00zuulzuul00000000000000{ "flavor": { "id": "8f94060c-8d5b-4472-9cfd-e8a2b909481d", "name": "Basic", "description": "A basic standalone Octavia load balancer.", "enabled": true, "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-update-curl0000664000175000017500000000041000000000000023614 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"flavor":{"name":"Basic","description":"A basic standalone Octavia load balancer.","enabled":false}}' http://198.51.100.10:9876/v2.0/lbaas/flavors/8f94060c-8d5b-4472-9cfd-e8a2b909481d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-update-request.json0000664000175000017500000000022000000000000025306 0ustar00zuulzuul00000000000000{ "flavor": { "name": "Basic", "description": "A basic standalone Octavia load balancer.", "enabled": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavor-update-response.json0000664000175000017500000000043700000000000025466 0ustar00zuulzuul00000000000000{ "flavor": { "id": "8f94060c-8d5b-4472-9cfd-e8a2b909481d", "name": "Basic", "description": "A basic standalone Octavia load balancer.", "enabled": false, "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-create-curl0000664000175000017500000000041600000000000025164 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"flavorprofile":{"name":"amphora-act-stdby","provider_name":"amphora","flavor_data":"{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}"}}' http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-create-request.json0000664000175000017500000000031300000000000026653 0ustar00zuulzuul00000000000000{ "flavorprofile": { "name": "amphora-act-stdby", "provider_name": "amphora", "flavor_data": "{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-create-response.json0000664000175000017500000000040500000000000027023 0ustar00zuulzuul00000000000000{ "flavorprofile": { "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", "name": "amphora-act-stdby", "provider_name": "amphora", "flavor_data": "{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}" 
} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-delete-curl0000664000175000017500000000020300000000000025155 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-show-curl0000664000175000017500000000020000000000000024670 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-show-response.json0000664000175000017500000000040500000000000026540 0ustar00zuulzuul00000000000000{ "flavorprofile": { "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", "name": "amphora-act-stdby", "provider_name": "amphora", "flavor_data": "{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-update-curl0000664000175000017500000000045300000000000025204 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"flavorprofile":{"name":"amphora-standalone","provider_name":"amphora","flavor_data":"{\"loadbalancer_topology\": \"SINGLE\"}"}}' http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-update-request.json0000664000175000017500000000030400000000000026672 0ustar00zuulzuul00000000000000{ "flavorprofile": { "name": "amphora-standalone", "provider_name": "amphora", "flavor_data": "{\"loadbalancer_topology\": \"SINGLE\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofile-update-response.json0000664000175000017500000000037600000000000027051 0ustar00zuulzuul00000000000000{ "flavorprofile": { "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", "name": "amphora-standalone", "provider_name": "amphora", "flavor_data": "{\"loadbalancer_topology\": \"SINGLE\"}" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavorprofiles-list-response.json0000664000175000017500000000041600000000000026720 0ustar00zuulzuul00000000000000{ "flavorprofiles": [ { "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", "name": "amphora-act-stdby", "provider_name": "amphora", "flavor_data": "{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/flavors-list-response.json0000664000175000017500000000046300000000000025341 0ustar00zuulzuul00000000000000{ "flavors": [ { "id": "8f94060c-8d5b-4472-9cfd-e8a2b909481d", "name": "Basic", "description": "A basic standalone Octavia load balancer.", "enabled": true, "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" } ] } 
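One detail these examples highlight: ``flavor_data`` (like ``availability_zone_data``) is a JSON document embedded as a string, which is why the quotes are escaped in the request bodies above. A minimal sketch that builds such a payload with ``jq`` instead of escaping by hand, assuming a valid token in ``TOKEN``; the names reuse values from the examples:

.. code-block:: bash

   # jq's tostring serializes the inner object into the escaped string
   # form the API expects for flavor_data.
   jq -n '{flavorprofile: {name: "amphora-standalone", provider_name: "amphora", flavor_data: ({loadbalancer_topology: "SINGLE"} | tostring)}}' \
     | curl -X POST -H "Content-Type: application/json" \
         -H "X-Auth-Token: $TOKEN" -d @- \
         http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles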
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-create-curl0000664000175000017500000000071600000000000025172 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"healthmonitor":{"name":"super-pool-health-monitor","admin_state_up":true,"pool_id":"4029d267-3983-4224-a3d0-afb3fe16a2cd","delay":"10","expected_codes":"200","max_retries":"1","http_method":"GET","timeout":"5","url_path":"/","type":"HTTP","max_retries_down":3,"tags":["test_tag"],"http_version":1.1,"domain_name":"testlab.com"}}' http://198.51.100.10:9876/v2/lbaas/healthmonitors ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-create-request.json0000664000175000017500000000074300000000000026665 0ustar00zuulzuul00000000000000{ "healthmonitor": { "name": "super-pool-health-monitor", "admin_state_up": true, "pool_id": "4029d267-3983-4224-a3d0-afb3fe16a2cd", "delay": "10", "expected_codes": "200", "max_retries": "1", "http_method": "GET", "timeout": "5", "url_path": "/", "type": "HTTP", "max_retries_down": 3, "tags": ["test_tag"], "http_version": 1.1, "domain_name": "testlab.com" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-create-response.json0000664000175000017500000000146200000000000027032 0ustar00zuulzuul00000000000000{ "healthmonitor": { "project_id": "e3cd678b11784734bc366148aa37580e", "name": "super-pool-health-monitor", "admin_state_up": true, "pools": [ { "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd" } ], "created_at": "2017-05-11T23:53:47", "provisioning_status": "ACTIVE", "updated_at": "2017-05-11T23:53:47", "delay": 10, "expected_codes": "200", "max_retries": 1, "http_method": "GET", "timeout": 5, "max_retries_down": 3, "url_path": "/", "type": "HTTP", "id": "8ed3c5ac-6efa-420c-bedb-99ba14e58db5", "operating_status": "ONLINE", "tags": ["test_tag"], "http_version": 1.1, "domain_name": "testlab.com" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-delete-curl0000664000175000017500000000020100000000000025156 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/healthmonitors/8ed3c5ac-6efa-420c-bedb-99ba14e58db5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-list-curl0000664000175000017500000000020500000000000024673 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/healthmonitors?project_id=e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-show-curl0000664000175000017500000000017600000000000024707 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/healthmonitors/8ed3c5ac-6efa-420c-bedb-99ba14e58db5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-show-response.json0000664000175000017500000000145100000000000026545 
0ustar00zuulzuul00000000000000{ "healthmonitor": { "project_id": "e3cd678b11784734bc366148aa37580e", "name": "super-pool-health-monitor", "admin_state_up": true, "pools": [ { "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd" } ], "created_at": "2017-05-11T23:53:47", "provisioning_status": "ACTIVE", "updated_at": "2017-05-11T23:53:47", "delay": 10, "expected_codes": "200", "max_retries": 1, "http_method": "GET", "timeout": 5, "max_retries_down": 3, "url_path": "/", "type": "HTTP", "id": "8ed3c5ac-6efa-420c-bedb-99ba14e58db5", "operating_status": "ONLINE", "tags": ["test_tag"], "http_version": 1.0, "domain_name": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-update-curl0000664000175000017500000000064600000000000025213 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"healthmonitor":{"name":"super-pool-health-monitor-updated","admin_state_up":true,"delay":5,"expected_codes":"200","http_method":"HEAD","timeout":2,"url_path":"/index.html","max_retries":2,"max_retries_down":2,"tags":["updated_tag"],"http_version":1.1}}' http://198.51.100.10:9876/v2/lbaas/healthmonitors/8ed3c5ac-6efa-420c-bedb-99ba14e58db5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-update-request.json0000664000175000017500000000057100000000000026703 0ustar00zuulzuul00000000000000{ "healthmonitor": { "name": "super-pool-health-monitor-updated", "admin_state_up": true, "delay": 5, "expected_codes": "200", "http_method": "HEAD", "timeout": 2, "url_path": "/index.html", "max_retries": 2, "max_retries_down": 2, "tags": ["updated_tag"], "http_version": 1.1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitor-update-response.json0000664000175000017500000000150600000000000027050 0ustar00zuulzuul00000000000000{ "healthmonitor": { "project_id": "e3cd678b11784734bc366148aa37580e", "name": "super-pool-health-monitor-updated", "admin_state_up": true, "pools": [ { "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd" } ], "created_at": "2017-05-11T23:53:47", "provisioning_status": "PENDING_UPDATE", "updated_at": "2017-05-11T23:53:47", "delay": 5, "expected_codes": "200", "max_retries": 2, "http_method": "HEAD", "timeout": 2, "max_retries_down": 2, "url_path": "/index.html", "type": "HTTP", "id": "8ed3c5ac-6efa-420c-bedb-99ba14e58db5", "operating_status": "ONLINE", "tags": ["updated_tag"], "http_version": 1.1, "domain_name": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/healthmonitors-list-response.json0000664000175000017500000000163600000000000026730 0ustar00zuulzuul00000000000000{ "healthmonitors": [ { "project_id": "e3cd678b11784734bc366148aa37580e", "name": "super-pool-health-monitor", "admin_state_up": true, "pools": [ { "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd" } ], "created_at": "2017-05-11T23:53:47", "provisioning_status": "ACTIVE", "updated_at": "2017-05-11T23:53:47", "delay": 10, "expected_codes": "200", "max_retries": 1, "http_method": "GET", "timeout": 5, "max_retries_down": 3, "url_path": "/", "type": "HTTP", "id": "8ed3c5ac-6efa-420c-bedb-99ba14e58db5", "operating_status": "ONLINE", "tags": ["test_tag"], "http_version": 1.0, 
"domain_name": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/http-header-insertion-obj.json0000664000175000017500000000015000000000000026036 0ustar00zuulzuul00000000000000{ "insert_headers": { "X-Forwarded-For": "true", "X-Forwarded-Port": "true" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policies-list-curl0000664000175000017500000000020100000000000024064 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies?project_id=e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policies-list-response.json0000664000175000017500000000171500000000000025740 0ustar00zuulzuul00000000000000{ "l7policies": [ { "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", "description": "Redirect requests to example.com", "admin_state_up": true, "rules": [ { "id": "efd6a3f8-73bf-47f0-8ae6-503ebda57372" } ], "created_at": "2017-06-24T23:25:14", "provisioning_status": "ACTIVE", "updated_at": "2017-06-24T23:30:05", "redirect_http_code": 302, "redirect_pool_id": null, "redirect_prefix": null, "redirect_url": "http://www.example.com", "action": "REDIRECT_TO_URL", "position": 1, "project_id": "e3cd678b11784734bc366148aa37580e", "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", "operating_status": "ONLINE", "name": "redirect-example.com", "tags": ["test_tag"] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-create-curl0000664000175000017500000000064500000000000024060 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"l7policy":{"description":"Redirect requests to example.com","admin_state_up":true,"listener_id":"023f2e34-7806-443b-bfae-16c324569a3d","redirect_http_code":301,"redirect_url":"http://www.example.com","name":"redirect-example.com","action":"REDIRECT_TO_URL","position":1,"tags":["test_tag"]}}' http://198.51.100.10:9876/v2/lbaas/l7policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-create-request.json0000664000175000017500000000061400000000000025547 0ustar00zuulzuul00000000000000{ "l7policy": { "description": "Redirect requests to example.com", "admin_state_up": true, "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", "redirect_url": "http://www.example.com", "redirect_http_code": 301, "name": "redirect-example.com", "action": "REDIRECT_TO_URL", "position": 1, "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-create-response.json0000664000175000017500000000172400000000000025720 0ustar00zuulzuul00000000000000{ "l7policy": [ { "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", "description": "Redirect requests to example.com", "admin_state_up": true, "rules": [ { "id": "efd6a3f8-73bf-47f0-8ae6-503ebda57372" } ], "created_at": "2017-06-24T23:25:14", "provisioning_status": "PENDING_CREATE", "updated_at": "2017-06-24T23:30:05", "redirect_http_code": 301, "redirect_pool_id": null, "redirect_prefix": 
null, "redirect_url": "http://www.example.com", "action": "REDIRECT_TO_URL", "position": 1, "project_id": "e3cd678b11784734bc366148aa37580e", "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", "operating_status": "OFFLINE", "name": "redirect-example.com", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-delete-curl0000664000175000017500000000017500000000000024055 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-show-curl0000664000175000017500000000017200000000000023570 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-show-response.json0000664000175000017500000000170300000000000025432 0ustar00zuulzuul00000000000000{ "l7policy": { "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", "description": "Redirect requests to example.com", "admin_state_up": true, "rules": [ { "id": "efd6a3f8-73bf-47f0-8ae6-503ebda57372" } ], "created_at": "2017-06-24T23:25:14", "provisioning_status": "ACTIVE", "updated_at": "2017-06-24T23:30:05", "redirect_http_code": 302, "redirect_pool_id": null, "redirect_prefix": null, "redirect_url": "http://www.example.com", "action": "REDIRECT_TO_URL", "position": 1, "project_id": "e3cd678b11784734bc366148aa37580e", "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", "operating_status": "ONLINE", "name": "redirect-example.com", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-update-curl0000664000175000017500000000065000000000000024073 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"l7policy":{"description":"Redirect requests to images.example.com","admin_state_up":true,"redirect_http_code":301,"redirect_url":"http://images.example.com","name":"redirect-images.example.com","action":"REDIRECT_TO_URL","position":1,"tags":["updated_tag"]}}' http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-update-request.json0000664000175000017500000000054100000000000025565 0ustar00zuulzuul00000000000000{ "l7policy": { "description": "Redirect requests to images.example.com", "admin_state_up": true, "redirect_http_code": 301, "redirect_url": "http://images.example.com", "name": "redirect-images.example.com", "action": "REDIRECT_TO_URL", "position": 1, "tags": ["updated_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7policy-update-response.json0000664000175000017500000000171600000000000025740 0ustar00zuulzuul00000000000000{ "l7policy": { "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", "description": "Redirect requests to example.com", "admin_state_up": true, "rules": [ { "id":
"efd6a3f8-73bf-47f0-8ae6-503ebda57372" } ], "created_at": "2017-06-24T23:25:14", "provisioning_status": "PENDING_UPDATE", "updated_at": "2017-06-24T23:30:05", "redirect_http_code": 301, "redirect_pool_id": null, "redirect_prefix": null, "redirect_url": "http://www.example.com", "action": "REDIRECT_TO_URL", "position": 1, "project_id": "e3cd678b11784734bc366148aa37580e", "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", "operating_status": "ONLINE", "name": "redirect-example.com", "tags": ["updated_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-create-curl0000664000175000017500000000044600000000000023527 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"rule":{"compare_type":"REGEX","invert":false,"type":"PATH","value":"/images*","admin_state_up":true,"tags":["test_tag"]}}' http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-create-request.json0000664000175000017500000000030400000000000025213 0ustar00zuulzuul00000000000000{ "rule": { "compare_type": "REGEX", "invert": false, "type": "PATH", "value": "/images*", "admin_state_up": true, "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-create-response.json0000664000175000017500000000107300000000000025365 0ustar00zuulzuul00000000000000{ "rule": { "created_at": "2017-06-27T15:52:27", "compare_type": "REGEX", "provisioning_status": "PENDING_CREATE", "invert": false, "admin_state_up": true, "updated_at": "2017-06-27T15:52:28", "value": "/images*", "key": null, "project_id": "e3cd678b11784734bc366148aa37580e", "type": "PATH", "id": "16621dbb-a736-4888-a57a-3ecd53df784c", "operating_status": "OFFLINE", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-delete-curl0000664000175000017500000000025000000000000023517 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-show-curl0000664000175000017500000000024500000000000023241 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-show-response.json0000664000175000017500000000106200000000000025100 0ustar00zuulzuul00000000000000{ "rule": { "created_at": "2017-06-27T15:52:27", "compare_type": "REGEX", "provisioning_status": "ACTIVE", "invert": false, "admin_state_up": true, "updated_at": "2017-06-27T15:52:28", "value": "/images*", "key": null, "project_id": "e3cd678b11784734bc366148aa37580e", "type": "PATH", "id": "16621dbb-a736-4888-a57a-3ecd53df784c", "operating_status": "ONLINE", "tags": ["test_tag"] } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-update-curl0000664000175000017500000000052400000000000023543 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"rule":{"compare_type":"REGEX","invert":true,"type":"PATH","value":"/images/special*","admin_state_up":true,"tags":["updated_tag"]}}' http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-update-request.json0000664000175000017500000000031600000000000025235 0ustar00zuulzuul00000000000000{ "rule": { "compare_type": "REGEX", "invert": true, "type": "PATH", "value": "/images/special*", "admin_state_up": true, "tags": ["updated_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rule-update-response.json0000664000175000017500000000110400000000000025377 0ustar00zuulzuul00000000000000{ "rule": { "created_at": "2017-06-27T15:52:27", "compare_type": "REGEX", "provisioning_status": "PENDING_UPDATE", "invert": true, "admin_state_up": true, "updated_at": "2017-06-27T15:58:28", "value": "/images/special*", "key": null, "project_id": "e3cd678b11784734bc366148aa37580e", "type": "PATH", "id": "16621dbb-a736-4888-a57a-3ecd53df784c", "operating_status": "ONLINE", "tags": ["updated_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rules-list-curl0000664000175000017500000000020000000000000023406 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/l7rules-list-response.json0000664000175000017500000000107300000000000025260 0ustar00zuulzuul00000000000000{ "rules": [ { "created_at": "2017-06-27T15:52:27", "compare_type": "REGEX", "provisioning_status": "ACTIVE", "invert": false, "admin_state_up": true, "updated_at": "2017-06-27T15:52:28", "value": "/images*", "key": null, "project_id": "e3cd678b11784734bc366148aa37580e", "type": "PATH", "id": "16621dbb-a736-4888-a57a-3ecd53df784c", "operating_status": "ONLINE", "tags": ["test_tag"] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-create-curl0000664000175000017500000000236300000000000024142 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"listener": {"protocol": "TERMINATED_HTTPS", "description": "A great TLS listener", "admin_state_up": true, "connection_limit": 200, "protocol_port": "443", "loadbalancer_id": "607226db-27ef-4d41-ae89-f2a800e9c2db", "name": "great_tls_listener", "insert_headers": {"X-Forwarded-For": "true", "X-Forwarded-Port": "true"}, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": ["http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", 
"http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee"], "timeout_client_data": 50000, "timeout_member_connect": 5000, "timeout_member_data": 50000, "timeout_tcp_inspect": 0, "tags": ["test_tag"], "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", "client_authentication": "MANDATORY", "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", "allowed_cidrs": ["192.0.2.0/24", "198.51.100.0/24"], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256"}}' http://198.51.100.10:9876/v2/lbaas/listeners ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-create-request.json0000664000175000017500000000260700000000000025636 0ustar00zuulzuul00000000000000{ "listener": { "protocol": "TERMINATED_HTTPS", "description": "A great TLS listener", "admin_state_up": true, "connection_limit": 200, "protocol_port": "443", "loadbalancer_id": "607226db-27ef-4d41-ae89-f2a800e9c2db", "name": "great_tls_listener", "insert_headers": { "X-Forwarded-For": "true", "X-Forwarded-Port": "true" }, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": [ "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" ], "timeout_client_data": 50000, "timeout_member_connect": 5000, "timeout_member_data": 50000, "timeout_tcp_inspect": 0, "tags": ["test_tag"], "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", "client_authentication": "MANDATORY", "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", "allowed_cidrs": [ "192.0.2.0/24", "198.51.100.0/24" ], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-create-response.json0000664000175000017500000000360100000000000025777 0ustar00zuulzuul00000000000000{ "listener": { "description": "A great TLS listener", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "protocol": "TERMINATED_HTTPS", "protocol_port": 443, "provisioning_status": "PENDING_CREATE", "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "loadbalancers": [ { "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" } ], "insert_headers": { "X-Forwarded-Port": "true", "X-Forwarded-For": "true" }, "created_at": "2017-02-28T00:42:44", "updated_at": "2017-02-28T00:44:30", "id": "023f2e34-7806-443b-bfae-16c324569a3d", "operating_status": "OFFLINE", "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", "sni_container_refs": [ "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" ], "l7policies": [ { "id": "5e618272-339d-4a80-8d14-dbc093091bb1" } ], "name": "great_tls_listener", "timeout_client_data": 50000, "timeout_member_connect": 5000, "timeout_member_data": 50000, "timeout_tcp_inspect": 0, "tags": ["test_tag"], "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", 
"client_authentication": "MANDATORY", "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", "allowed_cidrs": [ "192.0.2.0/24", "198.51.100.0/24" ], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-delete-curl0000664000175000017500000000017400000000000024137 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-show-curl0000664000175000017500000000017100000000000023652 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-show-response.json0000664000175000017500000000357000000000000025521 0ustar00zuulzuul00000000000000{ "listener": { "description": "A great TLS listener", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "protocol": "TERMINATED_HTTPS", "protocol_port": 443, "provisioning_status": "ACTIVE", "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "loadbalancers": [ { "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" } ], "insert_headers": { "X-Forwarded-Port": "true", "X-Forwarded-For": "true" }, "created_at": "2017-02-28T00:42:44", "updated_at": "2017-02-28T00:44:30", "id": "023f2e34-7806-443b-bfae-16c324569a3d", "operating_status": "ONLINE", "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", "sni_container_refs": [ "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" ], "l7policies": [ { "id": "5e618272-339d-4a80-8d14-dbc093091bb1" } ], "name": "great_tls_listener", "timeout_client_data": 50000, "timeout_member_connect": 5000, "timeout_member_data": 50000, "timeout_tcp_inspect": 0, "tags": ["test_tag"], "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", "client_authentication": "MANDATORY", "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", "allowed_cidrs": [ "192.0.2.0/24", "198.51.100.0/24" ], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-stats-curl0000664000175000017500000000017700000000000024036 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d/stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-stats-response.json0000664000175000017500000000027700000000000025700 0ustar00zuulzuul00000000000000{ "stats": { "bytes_in": 65671420, "total_connections": 26189172, "active_connections": 48629, "bytes_out": 774771186, "request_errors": 0 } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-update-curl0000664000175000017500000000173700000000000024165 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"listener": {"description": "An updated great TLS listener", "admin_state_up": true, "connection_limit": 200, "name": "great_updated_tls_listener", "insert_headers": {"X-Forwarded-For": "false", "X-Forwarded-Port": "true"}, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": ["http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee"], "timeout_client_data": 100000, "timeout_member_connect": 1000, "timeout_member_data": 100000, "timeout_tcp_inspect": 5, "tags": ["updated_tag"], "client_ca_tls_container_ref": null, "allowed_cidrs": ["192.0.2.0/24", "198.51.100.0/24"], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256"}}' http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-update-request.json0000664000175000017500000000215200000000000025650 0ustar00zuulzuul00000000000000{ "listener": { "description": "An updated great TLS listener", "admin_state_up": true, "connection_limit": 200, "name": "great_updated_tls_listener", "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", "insert_headers": { "X-Forwarded-For": "false", "X-Forwarded-Port": "true" }, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": [ "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" ], "timeout_client_data": 100000, "timeout_member_connect": 1000, "timeout_member_data": 100000, "timeout_tcp_inspect": 5, "tags": ["updated_tag"], "client_ca_tls_container_ref": null, "allowed_cidrs": [ "192.0.2.0/24", "198.51.100.0/24" ], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listener-update-response.json0000664000175000017500000000337700000000000026030 0ustar00zuulzuul00000000000000{ "listener": { "description": "An updated great TLS listener", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "protocol": "TERMINATED_HTTPS", "protocol_port": 443, "provisioning_status": "PENDING_UPDATE", "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "loadbalancers": [ { "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" } ], "insert_headers": { "X-Forwarded-Port": "true", "X-Forwarded-For": "false" }, "created_at": "2017-02-28T00:42:44", "updated_at": "2017-02-28T00:44:30", "id": "023f2e34-7806-443b-bfae-16c324569a3d", "operating_status": "OFFLINE", "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", "sni_container_refs": [ "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" ], "l7policies": [ { 
"id": "5e618272-339d-4a80-8d14-dbc093091bb1" } ], "name": "great_updated_tls_listener", "timeout_client_data": 100000, "timeout_member_connect": 1000, "timeout_member_data": 100000, "timeout_tcp_inspect": 5, "tags": ["updated_tag"], "client_ca_tls_container_ref": null, "client_authentication": "NONE", "client_crl_container_ref": null, "allowed_cidrs": [ "192.0.2.0/24", "198.51.100.0/24" ], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listeners-list-curl0000664000175000017500000000020000000000000024021 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/listeners?project_id=e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/listeners-list-response.json0000664000175000017500000000417200000000000025676 0ustar00zuulzuul00000000000000{ "listeners": [ { "description": "A great TLS listener", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "protocol": "TERMINATED_HTTPS", "protocol_port": 443, "provisioning_status": "ACTIVE", "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "loadbalancers": [ { "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" } ], "insert_headers": { "X-Forwarded-Port": "true", "X-Forwarded-For": "true" }, "created_at": "2017-02-28T00:42:44", "updated_at": "2017-02-28T00:44:30", "id": "023f2e34-7806-443b-bfae-16c324569a3d", "operating_status": "ONLINE", "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", "sni_container_refs": [ "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" ], "l7policies": [ { "id": "58284ac9-673e-47ff-9dcb-09871a1956c4", "id": "5e618272-339d-4a80-8d14-dbc093091bb1" } ], "name": "great_tls_listener", "timeout_client_data": 50000, "timeout_member_connect": 5000, "timeout_member_data": 50000, "timeout_tcp_inspect": 0, "tags": ["test_tag"], "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", "client_authentication": "NONE", "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", "allowed_cidrs": [ "192.0.2.0/24", "198.51.100.0/24" ], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-create-curl0000664000175000017500000000107600000000000024724 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"loadbalancer": {"description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "flavor_id": "a7ae5d5a-d855-4f9a-b187-af66b53f4d04", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "provider": "octavia", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "availability_zone": "my_az", "tags": ["test_tag"]}}' http://198.51.100.10:9876/v2/lbaas/loadbalancers ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-create-request.json0000664000175000017500000000074300000000000026417 0ustar00zuulzuul00000000000000{ "loadbalancer": { "description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "provider": "octavia", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "availability_zone": "my_az", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-create-response.json0000664000175000017500000000154500000000000026566 0ustar00zuulzuul00000000000000{ "loadbalancer": { "description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "provisioning_status": "PENDING_CREATE", "flavor_id": "", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", "provider": "octavia", "created_at": "2017-02-28T00:41:44", "updated_at": "2017-02-28T00:43:30", "id": "607226db-27ef-4d41-ae89-f2a800e9c2db", "operating_status": "OFFLINE", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "availability_zone": "my_az", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-delete-curl0000664000175000017500000000020000000000000024707 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/4b9b652c-537a-44bf-bbe8-85a690625597 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-failover-curl0000664000175000017500000000020600000000000025262 0ustar00zuulzuul00000000000000curl -X PUT -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/4a13c573-623c-4d23-8a9c-581dc17ceb1f/failover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-full-create-request.json0000664000175000017500000000573600000000000027366 0ustar00zuulzuul00000000000000{ "loadbalancer": { "description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "flavor_id": "", "listeners": [ { "name": "http_listener", "protocol": "HTTP", "protocol_port": 80, "default_pool": { "name": "rr_pool", "protocol": "HTTP", "lb_algorithm": "ROUND_ROBIN", "healthmonitor": { "type": "HTTP", "delay": "3", "expected_codes": "200,201,202", "http_method": "GET", "max_retries": 2, "timeout": 1, "url_path": "/index.html" }, "members": [ { "address": "192.0.2.16", "protocol_port": 80 }, { "address": "192.0.2.19", "protocol_port": 80 } ] } }, { "name": "https_listener", "protocol": "HTTPS", "protocol_port": 443, "default_pool": { "name": "https_pool" }, "tags": ["test_tag"] }, { "name": "redirect_listener", "protocol": "HTTP", "protocol_port": 8080, "l7policies": [ { "action": "REDIRECT_TO_URL", "name": "redirect_policy", "redirect_url": "https://www.example.com/", "admin_state_up": true } ] } ], "pools": [ { 
"name": "https_pool", "protocol": "HTTPS", "lb_algorithm": "ROUND_ROBIN", "healthmonitor": { "type": "HTTPS", "delay": "3", "max_retries": 2, "timeout": 1 }, "members": [ { "address": "192.0.2.51", "protocol_port": 80 }, { "address": "192.0.2.52", "protocol_port": 80 } ] } ], "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "provider": "octavia", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "availability_zone": "my_az", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-full-create-response.json0000664000175000017500000001656200000000000027533 0ustar00zuulzuul00000000000000{ "loadbalancer": { "description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "provisioning_status": "ACTIVE", "flavor_id": "", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "listeners": [ { "l7policies": [], "protocol": "HTTP", "description": "", "default_tls_container_ref": null, "admin_state_up": true, "default_pool": { "id": "c8cec227-410a-4a5b-af13-ecf38c2b0abb" }, "project_id": "e3cd678b11784734bc366148aa37580e", "default_tls_container_id": null, "connection_limit": -1, "sni_container_refs": [], "protocol_port": 80, "id": "a99995c6-4f04-4ed3-a37f-ae58f6e7e5e1", "name": "http_listener" }, { "l7policies": [], "protocol": "HTTPS", "description": "", "default_tls_container_ref": null, "admin_state_up": true, "default_pool": { "id": "b0577aff-c1f9-40c6-9a3b-7b1d2a669136" }, "project_id": "e3cd678b11784734bc366148aa37580e", "default_tls_container_id": null, "connection_limit": -1, "sni_container_refs": [], "protocol_port": 443, "id": "73c6c564-f215-48e9-91d6-f10bb3454954", "name": "https_listener", "tags": ["test_tag"] }, { "l7policies": [ { "description": "", "admin_state_up": true, "rules": [], "project_id": "e3cd678b11784734bc366148aa37580e", "listener_id": "95de30ec-67f4-437b-b3f3-22c5d9ef9828", "redirect_url": "https://www.example.com/", "action": "REDIRECT_TO_URL", "position": 1, "id": "d0553837-f890-4981-b99a-f7cbd6a76577", "name": "redirect_policy" } ], "protocol": "HTTP", "description": "", "default_tls_container_ref": null, "admin_state_up": true, "default_pool": null, "project_id": "e3cd678b11784734bc366148aa37580e", "default_tls_container_id": null, "connection_limit": -1, "sni_container_refs": [], "protocol_port": 8080, "id": "95de30ec-67f4-437b-b3f3-22c5d9ef9828", "name": "redirect_listener" } ], "vip_address": "203.0.113.50", "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", "provider": "octavia", "pools": [ { "lb_algorithm": "ROUND_ROBIN", "protocol": "HTTP", "description": "", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "session_persistence": null, "healthmonitor": { "name": "", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "delay": 3, "expected_codes": "200,201,202", "max_retries": 2, "http_method": "GET", "timeout": 1, "max_retries_down": 3, "url_path": "/index.html", "type": "HTTP", "id": "a8a2aa3f-d099-4752-8265-e6472f8147f9" }, "members": [ { "name": "", "weight": 1, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "project_id": "e3cd678b11784734bc366148aa37580e", "address": "192.0.2.16", "protocol_port": 80, "id": "7d19ad6c-d549-453e-a5cd-05382c6be96a" }, { "name": 
"", "weight": 1, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "project_id": "e3cd678b11784734bc366148aa37580e", "address": "192.0.2.19", "protocol_port": 80, "id": "a167402b-caa6-41d5-b4d4-bde7f2cbfa5e" } ], "id": "c8cec227-410a-4a5b-af13-ecf38c2b0abb", "name": "rr_pool" }, { "lb_algorithm": "ROUND_ROBIN", "protocol": "HTTPS", "description": "", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "session_persistence": null, "healthmonitor": { "name": "", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "delay": 3, "expected_codes": "200,201,202", "max_retries": 2, "http_method": "GET", "timeout": 1, "max_retries_down": 3, "url_path": "/index.html", "type": "HTTPS", "id": "d5bb7712-26b7-4809-8c14-3b407c0cb00d" }, "members": [ { "name": "", "weight": 1, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "project_id": "e3cd678b11784734bc366148aa37580e", "address": "192.0.2.51", "protocol_port": 80, "id": "f83832d5-1f22-45fa-866a-4abea36e0886" }, { "name": "", "weight": 1, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "project_id": "e3cd678b11784734bc366148aa37580e", "address": "192.0.2.52", "protocol_port": 80, "id": "f83832d5-1f22-45fa-866a-4abea36e0886" } ], "id": "b0577aff-c1f9-40c6-9a3b-7b1d2a669136", "name": "https_pool" } ], "created_at": "2017-02-28T00:41:44", "updated_at": "2017-02-28T00:43:30", "id": "607226db-27ef-4d41-ae89-f2a800e9c2db", "operating_status": "ONLINE", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "availability_zone": "my_az", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-show-curl0000664000175000017500000000017500000000000024440 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/8a562351-f0fb-424c-a0af-513461424ea5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-show-response.json0000664000175000017500000000153200000000000026277 0ustar00zuulzuul00000000000000{ "loadbalancer": { "description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "provisioning_status": "PENDING_CREATE", "flavor_id": "", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", "provider": "octavia", "created_at": "2017-02-28T00:41:44", "updated_at": "2017-02-28T00:43:30", "id": "8a562351-f0fb-424c-a0af-513461424ea5", "operating_status": "ONLINE", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "availability_zone": "my_az", "tags": [] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-stats-curl0000664000175000017500000000020300000000000024606 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/4a13c573-623c-4d23-8a9c-581dc17ceb1f/stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 
octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-stats-response.json0000664000175000017500000000030100000000000026446 0ustar00zuulzuul00000000000000{ "stats": { "bytes_in": 131342840, "total_connections": 52378345, "active_connections": 97258, "bytes_out": 1549542372, "request_errors": 0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-status-curl0000664000175000017500000000020400000000000024774 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/bda6f032-80d3-414a-b395-e79c374e3929/status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-status-response.json0000664000175000017500000001152100000000000026641 0ustar00zuulzuul00000000000000{ "statuses": { "loadbalancer": { "name": "excellent_load_balancer", "provisioning_status": "ACTIVE", "listeners": [ { "name": "HTTP_listener", "provisioning_status": "ACTIVE", "pools": [ { "name": "HTTP_pool", "provisioning_status": "ACTIVE", "healthmonitor": { "type": "HTTP", "id": "0b608787-ea2d-48c7-89a1-8b8c24fa3b17", "name": "HTTP_healthmonitor", "provisioning_status": "ACTIVE" }, "members": [ { "name": "", "provisioning_status": "ACTIVE", "address": "192.0.2.20", "protocol_port": 80, "id": "3c6857f4-057a-405a-9134-bdeaa8796c8a", "operating_status": "ERROR" }, { "name": "", "provisioning_status": "ACTIVE", "address": "192.0.2.21", "protocol_port": 80, "id": "f7495909-1706-4c91-83b4-641dab6962ac", "operating_status": "ONLINE" } ], "id": "89a47f78-cf81-480b-ad74-bba4177eeb81", "operating_status": "DEGRADED" } ], "l7policies": [], "id": "78febaf6-1e63-47c6-af5f-7b5e23fd7094", "operating_status": "DEGRADED" }, { "name": "redirect_listener", "provisioning_status": "ACTIVE", "pools": [], "l7policies": [ { "action": "REDIRECT_TO_URL", "rules": [ { "type": "PATH", "id": "27f3007a-a1cb-4e17-9696-0e578d617715", "provisioning_status": "ACTIVE" } ], "id": "2e8f3139-0673-43f9-aae4-c7a9460e3233", "name": "redirect_policy", "provisioning_status": "ACTIVE" } ], "id": "1341fbaf-ad4f-4cfe-a943-ad5e14e664cb", "operating_status": "ONLINE" } ], "pools": [ { "name": "HTTP_pool", "provisioning_status": "ACTIVE", "healthmonitor": { "type": "HTTP", "id": "0b608787-ea2d-48c7-89a1-8b8c24fa3b17", "name": "HTTP_healthmonitor", "provisioning_status": "ACTIVE" }, "members": [ { "name": "", "provisioning_status": "ACTIVE", "address": "192.0.2.20", "protocol_port": 80, "id": "3c6857f4-057a-405a-9134-bdeaa8796c8a", "operating_status": "ERROR" }, { "name": "", "provisioning_status": "ACTIVE", "address": "192.0.2.21", "protocol_port": 80, "id": "f7495909-1706-4c91-83b4-641dab6962ac", "operating_status": "ONLINE" } ], "id": "89a47f78-cf81-480b-ad74-bba4177eeb81", "operating_status": "DEGRADED" }, { "name": "source_ip_pool", "provisioning_status": "ACTIVE", "healthmonitor": {}, "members": [], "id": "8189d6a9-646e-4d23-b742-548dab991951", "operating_status": "ONLINE" } ], "id": "84faceee-cb97-48d0-93df-9e41d40d4cb4", "operating_status": "DEGRADED" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-update-curl0000664000175000017500000000057700000000000024750 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"loadbalancer": 
{"description": "Temporarily disabled load balancer", "admin_state_up": false, "name": "disabled_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "tags": ["updated_tag"]}}' http://198.51.100.10:9876/v2/lbaas/loadbalancers/8b6fc468-07d5-4d8b-a0b9-695060e72c31 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-update-request.json0000664000175000017500000000041500000000000026432 0ustar00zuulzuul00000000000000{ "loadbalancer": { "description": "Temporarily disabled load balancer", "admin_state_up": false, "name": "disabled_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "tags": ["updated_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancer-update-response.json0000664000175000017500000000151700000000000026604 0ustar00zuulzuul00000000000000{ "loadbalancer": { "description": "Temporarily disabled load balancer", "admin_state_up": false, "project_id": "e3cd678b11784734bc366148aa37580e", "provisioning_status": "PENDING_UPDATE", "flavor_id": "", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", "provider": "octavia", "created_at": "2017-02-28T00:41:44", "updated_at": "2017-02-28T00:43:30", "id": "8b6fc468-07d5-4d8b-a0b9-695060e72c31", "operating_status": "ONLINE", "name": "disabled_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "tags": ["updated_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancers-list-curl0000664000175000017500000000020400000000000024607 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers?project_id=e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/loadbalancers-list-response.json0000664000175000017500000000231100000000000026451 0ustar00zuulzuul00000000000000{ "loadbalancers": [ { "description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "provisioning_status": "ACTIVE", "flavor_id": "", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "listeners": [ { "id": "023f2e34-7806-443b-bfae-16c324569a3d" } ], "vip_address": "203.0.113.50", "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", "provider": "octavia", "pools": [ { "id": "9aa16cdc-8d18-47b9-aba9-ec044531a79f" } ], "created_at": "2017-02-28T00:41:44", "updated_at": "2017-02-28T00:43:30", "id": "607226db-27ef-4d41-ae89-f2a800e9c2db", "operating_status": "ONLINE", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "availability_zone": "my_az", "tags": [] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-batch-update-curl0000664000175000017500000000110000000000000024646 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d 
'{"members":[{"name":"web-server-1","weight":"20","admin_state_up":true,"subnet_id":"bbb35f84-35cc-4b2f-84c2-a6a29bba68aa","address":"192.0.2.16","protocol_port":"80","monitor_port":8080,"tags":["updated_tag"]},{"name":"web-server-2","weight":"10","admin_state_up":true,"subnet_id":"bbb35f84-35cc-4b2f-84c2-a6a29bba68aa","address":"192.0.2.17","protocol_port":"80","monitor_port":8080,"tags":["updated_tag"]}]}' http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd/members ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-batch-update-request.json0000664000175000017500000000124200000000000026350 0ustar00zuulzuul00000000000000{ "members": [ { "name": "web-server-1", "weight": 20, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "address": "192.0.2.16", "protocol_port": 80, "monitor_port": 8080, "tags": ["updated_tag"] }, { "name": "web-server-2", "weight": 10, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "address": "192.0.2.17", "protocol_port": 80, "monitor_port": 8080, "tags": ["updated_tag"] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-create-curl0000664000175000017500000000060400000000000023560 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"member":{"name":"web-server-1","weight":"20","admin_state_up":true,"subnet_id":"bbb35f84-35cc-4b2f-84c2-a6a29bba68aa","address":"192.0.2.16","protocol_port":"80","monitor_port":8080,"backup":false,"tags":["test_tag"]}}' http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd/members ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-create-request.json0000664000175000017500000000050300000000000025251 0ustar00zuulzuul00000000000000{ "member": { "name": "web-server-1", "weight": "20", "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "address": "192.0.2.16", "protocol_port": "80", "monitor_port": 8080, "backup": false, "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-create-response.json0000664000175000017500000000117500000000000025425 0ustar00zuulzuul00000000000000{ "member": { "monitor_port": 8080, "project_id": "e3cd678b11784734bc366148aa37580e", "name": "web-server-1", "weight": 20, "backup": false, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "created_at": "2017-05-11T17:21:34", "provisioning_status": "ACTIVE", "monitor_address": null, "updated_at": "2017-05-11T17:21:37", "address": "192.0.2.16", "protocol_port": 80, "id": "957a1ace-1bd2-449b-8455-820b6e4b63f3", "operating_status": "NO_MONITOR", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-delete-curl0000664000175000017500000000024500000000000023560 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd/members/957a1ace-1bd2-449b-8455-820b6e4b63f3 ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-show-curl0000664000175000017500000000024200000000000023273 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools/24a43e68-36de-45f6-89cf-c03df583131d/members/957a1ace-1bd2-449b-8455-820b6e4b63f3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-show-response.json0000664000175000017500000000117500000000000025142 0ustar00zuulzuul00000000000000{ "member": { "monitor_port": 8080, "project_id": "e3cd678b11784734bc366148aa37580e", "name": "web-server-1", "weight": 20, "backup": false, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "created_at": "2017-05-11T17:21:34", "provisioning_status": "ACTIVE", "monitor_address": null, "updated_at": "2017-05-11T17:21:37", "address": "192.0.2.16", "protocol_port": 80, "id": "957a1ace-1bd2-449b-8455-820b6e4b63f3", "operating_status": "NO_MONITOR", "tags": ["test_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-update-curl0000664000175000017500000000055600000000000023603 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"member":{"name":"web-server-1-2","weight":0,"admin_state_up":true,"monitor_address":"192.0.2.40","monitor_port":8888,"backup":false,"tags":["updated_tag"]}}' http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd/members/957a1ace-1bd2-449b-8455-820b6e4b63f3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-update-request.json0000664000175000017500000000036500000000000025276 0ustar00zuulzuul00000000000000{ "member": { "name": "web-server-1-2", "weight": 0, "admin_state_up": true, "monitor_address": "192.0.2.40", "monitor_port": 8888, "backup": false, "tags": ["updated_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/member-update-response.json0000664000175000017500000000121000000000000025442 0ustar00zuulzuul00000000000000{ "member": { "monitor_port": 8888, "project_id": "e3cd678b11784734bc366148aa37580e", "name": "web-server-1-2", "weight": 0, "backup": false, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "created_at": "2017-05-11T17:21:34", "provisioning_status": "PENDING_UPDATE", "monitor_address": "192.0.2.40", "updated_at": "2017-05-11T17:21:37", "address": "192.0.2.16", "protocol_port": 80, "id": "957a1ace-1bd2-449b-8455-820b6e4b63f3", "operating_status": "NO_MONITOR", "tags": ["updated_tag"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/members-list-curl0000664000175000017500000000025100000000000023451 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools/24a43e68-36de-45f6-89cf-c03df583131d/members?project_id=e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/members-list-response.json0000664000175000017500000000132200000000000025312 
0ustar00zuulzuul00000000000000{ "members": [ { "monitor_port": 8080, "project_id": "e3cd678b11784734bc366148aa37580e", "name": "web-server-1", "weight": 20, "backup": false, "admin_state_up": true, "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "created_at": "2017-05-11T17:21:34", "provisioning_status": "ACTIVE", "monitor_address": null, "updated_at": "2017-05-11T17:21:37", "address": "192.0.2.16", "protocol_port": 80, "id": "957a1ace-1bd2-449b-8455-820b6e4b63f3", "operating_status": "NO_MONITOR", "tags": ["test_tag"] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-create-curl0000664000175000017500000000143700000000000023267 0ustar00zuulzuul00000000000000curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"pool":{"lb_algorithm":"ROUND_ROBIN","protocol":"HTTP","description":"Super Round Robin Pool","admin_state_up":true,"session_persistence":{"cookie_name":"ChocolateChip","type":"APP_COOKIE"},"listener_id":"023f2e34-7806-443b-bfae-16c324569a3d","name":"super-pool","tags":["test_tag"],"tls_container_ref":"http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6","ca_tls_container_ref":"http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb","crl_container_ref":"http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b","tls_enabled":true,"tls_ciphers":"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256"}}' http://198.51.100.10:9876/v2/lbaas/pools ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-create-request.json0000664000175000017500000000153000000000000024754 0ustar00zuulzuul00000000000000{ "pool": { "lb_algorithm": "ROUND_ROBIN", "protocol": "HTTP", "description": "Super Round Robin Pool", "admin_state_up": true, "session_persistence": { "cookie_name": "ChocolateChip", "type": "APP_COOKIE" }, "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", "name": "super-pool", "tags": ["test_tag"], "tls_container_ref": "http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6", "ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb", "crl_container_ref": "http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b", "tls_enabled": true, "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-create-response.json0000664000175000017500000000254500000000000025131 0ustar00zuulzuul00000000000000{ "pool": { "lb_algorithm": "ROUND_ROBIN", "protocol": "HTTP", "description": "Super Round Robin Pool", "admin_state_up": true, "loadbalancers": [ { "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" } ], "created_at": "2017-05-10T18:14:44", "provisioning_status": "ACTIVE", "updated_at": "2017-05-10T23:08:12", "session_persistence": { "cookie_name": "ChocolateChip", "type": "APP_COOKIE" }, "listeners": [ { "id": "023f2e34-7806-443b-bfae-16c324569a3d" } ], "members": [], "healthmonitor_id": null, "project_id": "e3cd678b11784734bc366148aa37580e", "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd", "operating_status": "ONLINE", "name": "super-pool", "tags": ["test_tag"], "tls_container_ref": 
"http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6", "ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb", "crl_container_ref": "http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b", "tls_enabled": true, "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-delete-curl0000664000175000017500000000017000000000000023257 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-session-persistence-obj.json0000664000175000017500000000006700000000000026604 0ustar00zuulzuul00000000000000{"cookie_name": "my_app_cookie", "type": "APP_COOKIE"} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-show-curl0000664000175000017500000000016500000000000023001 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools/24a43e68-36de-45f6-89cf-c03df583131d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-show-response.json0000664000175000017500000000254600000000000024647 0ustar00zuulzuul00000000000000{ "pool": { "lb_algorithm": "ROUND_ROBIN", "protocol": "HTTP", "description": "Super Round Robin Pool", "admin_state_up": true, "loadbalancers": [ { "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" } ], "created_at": "2017-05-10T18:14:44", "provisioning_status": "ACTIVE", "updated_at": "2017-05-10T23:08:12", "session_persistence": { "cookie_name": "ChocolateChip", "type": "APP_COOKIE" }, "listeners": [ { "id": "023f2e34-7806-443b-bfae-16c324569a3d" } ], "members": [], "healthmonitor_id": null, "project_id": "e3cd678b11784734bc366148aa37580e", "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd", "operating_status": "ONLINE", "name": "super-pool", "tags": ["test_tag"], "tls_container_ref": "http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6", "ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb", "crl_container_ref": "http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b", "tls_enabled": false, "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-update-curl0000664000175000017500000000107200000000000023301 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"pool":{"lb_algorithm":"LEAST_CONNECTIONS","session_persistence":{"type":"SOURCE_IP"},"description":"second description","name":"second_name","tags":["updated_tag"],"tls_container_ref":"http://198.51.100.10:9311/v1/containers/c1cd501d-3cf9-4873-a11b-a74bebcde929","ca_tls_container_ref":null,"crl_container_ref":null,"tls_enabled":false,"tls_ciphers":"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256"}}' http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-update-request.json0000664000175000017500000000106700000000000025000 0ustar00zuulzuul00000000000000{ "pool": { "lb_algorithm": "LEAST_CONNECTIONS", "session_persistence": { "type": "SOURCE_IP" }, "description": "Super Least Connections Pool", "name": "super-least-conn-pool", "tags": ["updated_tag"], "tls_container_ref": "http://198.51.100.10:9311/v1/containers/c1cd501d-3cf9-4873-a11b-a74bebcde929", "ca_tls_container_ref": null, "crl_container_ref": null, "tls_enabled": false, "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pool-update-response.json0000664000175000017500000000235000000000000025142 0ustar00zuulzuul00000000000000{ "pool": { "lb_algorithm": "LEAST_CONNECTIONS", "protocol": "HTTP", "description": "Super Least Connections Pool", "admin_state_up": true, "loadbalancers": [ { "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" } ], "created_at": "2017-05-10T18:14:44", "provisioning_status": "PENDING_UPDATE", "updated_at": "2017-05-10T23:08:12", "session_persistence": { "cookie_name": null, "type": "SOURCE_IP" }, "listeners": [ { "id": "023f2e34-7806-443b-bfae-16c324569a3d" } ], "members": [], "healthmonitor_id": null, "project_id": "e3cd678b11784734bc366148aa37580e", "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd", "operating_status": "ONLINE", "name": "super-least-conn-pool", "tags": ["updated_tag"], "tls_container_ref": "http://198.51.100.10:9311/v1/containers/c1cd501d-3cf9-4873-a11b-a74bebcde929", "ca_tls_container_ref": null, "crl_container_ref": null, "tls_enabled": false, "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pools-list-curl0000664000175000017500000000017400000000000023157 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools?project_id=e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/pools-list-response.json0000664000175000017500000000325100000000000025017 0ustar00zuulzuul00000000000000{ "pools": [ { "lb_algorithm": "ROUND_ROBIN", "protocol": "HTTP", "description": "My round robin pool", "admin_state_up": true, "loadbalancers": [ { "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" } ], "created_at": "2017-04-13T18:14:44", "provisioning_status": "ACTIVE", "updated_at": "2017-04-13T23:08:12", "session_persistence": { "cookie_name": null, "type": "SOURCE_IP" }, "listeners": [ { "id": "023f2e34-7806-443b-bfae-16c324569a3d" } ], "members": [ { "id": "5bc73753-348f-4b5a-8f9c-10bd7b30dc35" }, { "id": "692e8358-f8fd-4b92-bbca-6e4b97c75571" } ], "healthmonitor_id": null, "project_id": "e3cd678b11784734bc366148aa37580e", "id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", "operating_status": "ONLINE", "name": "round_robin_pool", "tags": ["test_tag"], "tls_container_ref": "http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6", "ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb", "crl_container_ref": 
"http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b", "tls_enabled": true, "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/provider-availability-zone-capability-show-curl0000664000175000017500000000017300000000000031421 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/providers/amphora/availability_zone_capabilities ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/provider-availability-zone-capability-show-response.json0000664000175000017500000000021600000000000033260 0ustar00zuulzuul00000000000000{ "availability_zone_capabilities": [ { "name": "compute_zone", "description": "The compute availability zone." } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/provider-flavor-capability-show-curl0000664000175000017500000000016000000000000027263 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/providers/amphora/flavor_capabilities ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/provider-flavor-capability-show-response.json0000664000175000017500000000035100000000000031126 0ustar00zuulzuul00000000000000{ "flavor_capabilities": [ { "name": "loadbalancer_topology", "description": "The load balancer topology. One of: SINGLE - One amphora per load balancer. ACTIVE_STANDBY - Two amphora per load balancer." } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/provider-list-curl0000664000175000017500000000012400000000000023650 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/providers ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/provider-list-response.json0000664000175000017500000000041200000000000025511 0ustar00zuulzuul00000000000000{ "providers": [ { "name": "amphora", "description": "The Octavia Amphora driver." }, { "name": "octavia", "description": "Deprecated alias of the Octavia Amphora driver." 
} ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quota-reset-curl0000664000175000017500000000016500000000000023323 0ustar00zuulzuul00000000000000curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/quotas/e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quota-show-curl0000664000175000017500000000016200000000000023156 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/quotas/e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quota-update-curl0000664000175000017500000000036000000000000023460 0ustar00zuulzuul00000000000000curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"quota":{"loadbalancer":10,"listener":-1,"member":50,"pool":-1,"healthmonitor":-1}}' http://198.51.100.10:9876/v2/lbaas/quotas/e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quota-update-request.json0000664000175000017500000000022300000000000025151 0ustar00zuulzuul00000000000000{ "quota": { "loadbalancer": 10, "listener": -1, "member": 50, "pool": -1, "healthmonitor": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quota-update-response.json0000664000175000017500000000022300000000000025317 0ustar00zuulzuul00000000000000{ "quota": { "loadbalancer": 10, "listener": -1, "member": 50, "pool": -1, "healthmonitor": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quotas-defaults-curl0000664000175000017500000000013200000000000024165 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/quotas/defaults ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quotas-defaults-response.json0000664000175000017500000000022300000000000026027 0ustar00zuulzuul00000000000000{ "quota": { "loadbalancer": 50, "listener": -1, "member": -1, "pool": -1, "healthmonitor": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quotas-list-curl0000664000175000017500000000017500000000000023340 0ustar00zuulzuul00000000000000curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/quotas?project_id=e3cd678b11784734bc366148aa37580e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/examples/quotas-list-response.json0000664000175000017500000000037500000000000025203 0ustar00zuulzuul00000000000000{ "quotas": [ { "loadbalancer": 5, "member": 50, "healthmonitor": -1, "listener": null, "project_id": "e3cd678b11784734bc366148aa37580e", "pool": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 
octavia-6.2.2/api-ref/source/v2/examples/quotas-show-response.json0000664000175000017500000000022200000000000025177 0ustar00zuulzuul00000000000000{ "quota": { "loadbalancer": 5, "listener": -1, "member": 50, "pool": -1, "healthmonitor": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/flavor.inc0000664000175000017500000001261400000000000020334 0ustar00zuulzuul00000000000000.. -*- rst -*- List Flavors ============ .. rest_method:: GET /v2.0/lbaas/flavors List all available flavors. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. The list might be empty. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields Curl Example ------------ .. literalinclude:: examples/flavor-list-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - description: description - enabled: enabled - flavor_profile_id: flavor-profile-id - flavors: flavors - id: flavor-id - name: name Response Example ---------------- .. literalinclude:: examples/flavors-list-response.json :language: javascript Create Flavor ============= .. rest_method:: POST /v2.0/lbaas/flavors Creates a flavor. If the API cannot fulfill the request due to insufficient data or data that is not valid, the service returns the HTTP ``Bad Request (400)`` response code with information about the failure in the response body. Validation errors require that you correct the error and submit the request again. If you are not an administrative user the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 201 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - description: description-optional - enabled: enabled-optional - flavor: flavor - flavor_profile_id: flavor-profile-id - name: name Request Example --------------- .. literalinclude:: examples/flavor-create-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/flavor-create-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - description: description - enabled: enabled - flavor_profile_id: flavor-profile-id - flavor: flavor - id: flavor-id - name: name Response Example ---------------- .. literalinclude:: examples/flavor-create-response.json :language: javascript Show Flavor Details =================== .. rest_method:: GET /v2.0/lbaas/flavors/{flavor_id} Shows the details of a flavor. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. This operation does not require a request body. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 401 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - flavor_id: path-flavor-id Curl Example ------------ .. literalinclude:: examples/flavor-show-curl :language: bash Response Parameters ------------------- .. 
rest_parameters:: ../parameters.yaml - description: description - enabled: enabled - flavor_profile_id: flavor-profile-id - flavor: flavor - id: flavor-id - name: name Response Example ---------------- .. literalinclude:: examples/flavor-show-response.json :language: javascript Update a Flavor =============== .. rest_method:: PUT /v2.0/lbaas/flavors/{flavor_id} Update a flavor. If you are not an administrative user the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - description: description-optional - enabled: enabled-optional - flavor: flavor - flavor_id: path-flavor-id - name: name-optional Request Example --------------- .. literalinclude:: examples/flavor-update-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/flavor-update-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - description: description - enabled: enabled - flavor_profile_id: flavor-profile-id - flavor: flavor - id: flavor-id - name: name Response Example ---------------- .. literalinclude:: examples/flavor-update-response.json :language: javascript Remove a Flavor =============== .. rest_method:: DELETE /v2.0/lbaas/flavors/{flavor_id} Remove a flavor and its associated configuration. If any load balancers are using this flavor the service returns the HTTP ``Conflict (409)`` response code. If you are not an administrative user the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 204 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - flavor_id: path-flavor-id Curl Example ------------ .. literalinclude:: examples/flavor-delete-curl :language: bash Response -------- There is no body content for the response of a successful DELETE request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/flavorprofile.inc0000664000175000017500000001355000000000000021715 0ustar00zuulzuul00000000000000.. -*- rst -*- List Flavor Profiles ==================== .. rest_method:: GET /v2.0/lbaas/flavorprofiles List all available flavor profiles. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. If you are not an administrative user the service returns the HTTP ``Forbidden (403)`` response code. The list might be empty. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields Curl Example ------------ .. literalinclude:: examples/flavor-profile-list-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - flavor_data: flavor-data - flavorprofiles: flavorprofiles - id: flavor-profile-id - name: name - provider_name: provider-name Response Example ---------------- .. literalinclude:: examples/flavorprofiles-list-response.json :language: javascript Create Flavor Profile ===================== .. 
rest_method:: POST /v2.0/lbaas/flavorprofiles Creates a flavor profile. If the API cannot fulfill the request due to insufficient data or data that is not valid, the service returns the HTTP ``Bad Request (400)`` response code with information about the failure in the response body. Validation errors require that you correct the error and submit the request again. If you are not an administrative user the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 201 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - flavor_data: flavor-data - flavorprofile: flavorprofile - name: name - provider_name: provider-name Request Example --------------- .. literalinclude:: examples/flavorprofile-create-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/flavorprofile-create-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - flavor_data: flavor-data - flavorprofile: flavorprofile - id: flavor-profile-id - name: name - provider_name: provider-name Response Example ---------------- .. literalinclude:: examples/flavorprofile-create-response.json :language: javascript Show Flavor Profile Details =========================== .. rest_method:: GET /v2.0/lbaas/flavorprofiles/{flavorprofile_id} Shows the details of a flavor profile. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. If you are not an administrative user the service returns the HTTP ``Forbidden (403)`` response code. This operation does not require a request body. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - flavorprofile_id: path-flavorprofile-id Curl Example ------------ .. literalinclude:: examples/flavorprofile-show-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - flavor_data: flavor-data - flavorprofile: flavorprofile - id: flavor-profile-id - name: name - provider_name: provider-name Response Example ---------------- .. literalinclude:: examples/flavorprofile-show-response.json :language: javascript Update a Flavor Profile ======================= .. rest_method:: PUT /v2.0/lbaas/flavorprofiles/{flavorprofile_id} Update a flavor profile. If you are not an administrative user the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - flavor_data: flavor-data-optional - flavorprofile: flavorprofile - flavorprofile_id: path-flavorprofile-id - name: name-optional - provider_name: provider-name-optional Request Example --------------- .. literalinclude:: examples/flavorprofile-update-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/flavorprofile-update-curl :language: bash Response Parameters ------------------- .. 
rest_parameters:: ../parameters.yaml - flavor_data: flavor-data - flavorprofile: flavorprofile - id: flavor-profile-id - name: name - provider_name: provider-name Response Example ---------------- .. literalinclude:: examples/flavorprofile-update-response.json :language: javascript Remove a Flavor Profile ======================= .. rest_method:: DELETE /v2.0/lbaas/flavorprofiles/{flavorprofile_id} Remove a flavor profile and its associated configuration. If any flavors are using this flavor profile the service returns the HTTP ``Conflict (409)`` response code. If you are not an administrative user the service returns the HTTP ``Forbidden (403)`` response code. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 204 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - flavorprofile_id: path-flavorprofile-id Curl Example ------------ .. literalinclude:: examples/flavorprofile-delete-curl :language: bash Response -------- There is no body content for the response of a successful DELETE request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/general.inc0000664000175000017500000006573300000000000020466 0ustar00zuulzuul00000000000000.. -*- rst -*- This section introduces readers to the OpenStack Octavia v2 RESTful HTTP API and provides guidelines on how to use it. .. note:: To clarify the Octavia API versioning, we have updated the endpoint to support both the previously documented /v2.0 and the new path of /v2. They are exactly the same API and /v2.0 will be a supported alias for the life of the v2 API. Service Endpoints ================= All API calls described throughout the rest of this document require authentication with the `OpenStack Identity service `_. After authentication, the base ``endpoint URL`` for the ``service type`` of ``load-balancer`` and ``service name`` of ``octavia`` can be extracted from the service catalog returned with the identity token. **Example token snippet with service catalog** .. code:: { "token": { "catalog": [ { "endpoints": [ { "url": "http://198.51.100.10:9876/", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "cd1c3c2dc6434c739ed0a12015373754" } ], "type": "load-balancer", "id": "1209701aecd3453e9803119cd28cb013", "name": "octavia" } ] } } For instance, if the ``endpoint URL`` is ``http://198.51.100.10:9876/`` then the full API call for ``/v2/lbaas/loadbalancers`` is ``http://198.51.100.10:9876/v2/lbaas/loadbalancers``. Depending on the deployment, the ``load-balancer`` ``endpoint URL`` might be http or https, use a custom port or path, and include your project id. The only way to know the URLs for your deployment is by using the service catalog. The ``load-balancer`` ``endpoint URL`` should never be hard coded in applications, even if they are only expected to work at a single site. It should always be discovered from the Identity token. As such, for the rest of this document we will be using shorthand where ``GET /v2/lbaas/loadbalancers`` really means ``GET {your_load-balancer_endpoint_URL}/v2/lbaas/loadbalancers``. Neutron-lbaas and Octavia v2 APIs ================================= The Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API and is a superset of the neutron-lbaas v2 API. This is intended to provide a simple migration path for deployments currently using the neutron-lbaas v2 API.
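For example, a deployment migrating off the ``neutron`` endpoint can confirm the ``load-balancer`` endpoint to use by querying the service catalog. The following is a minimal sketch, assuming the ``openstack`` command line client is installed and credentials are sourced; the endpoint URL shown is the illustrative value from the catalog example above and ``<token>`` is a placeholder for a valid Keystone token.

.. code::

   # Look up the load-balancer (Octavia) entry in the service catalog.
   openstack catalog show load-balancer

   # Call the Octavia v2 API directly at the discovered endpoint,
   # rather than going through the deprecated neutron proxy.
   curl -X GET -H "X-Auth-Token: <token>" http://198.51.100.10:9876/v2/lbaas/loadbalancers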
You can update the endpoint your application is using from the keystone service catalog to use the ``octavia`` endpoint instead of the ``neutron`` endpoint for load balancer activities. During the neutron-lbaas deprecation period a pass-through proxy will be included in neutron to allow requests via neutron and the neutron-lbaas v2 API to continue to function. Users are strongly encouraged to update their applications to access load balancing via the Octavia v2 API. .. warning:: Load balancing functions accessed via the neutron endpoint are deprecated and will be removed in a future release. Users are strongly encouraged to migrate to using the octavia endpoint. Authentication and authorization ================================ The Octavia API v2 uses the `OpenStack Identity service `_ as the default authentication service. When Keystone is enabled, users that submit requests to the Octavia service must provide an authentication token in the **X-Auth-Token** request header. You obtain the token by authenticating to the Keystone endpoint. When Keystone is enabled, the ``project_id`` attribute is not required in create requests because the project ID is derived from the authentication token. The default authorization settings allow only administrative users to create resources on behalf of a different project. Octavia uses information received from Keystone to authorize user requests. Octavia handles the following types of authorization policies: - **Operation-based policies** specify access criteria for specific operations, possibly with fine-grained control over specific attributes. - **Resource-based policies** control access to a specific resource. Permissions might or might not be granted depending on the permissions configured for the resource. The actual authorization policies enforced in Octavia might vary from deployment to deployment. Request and response formats ============================ The Octavia API v2 supports JSON data serialization request and response formats only. Request format -------------- The Octavia API v2 only accepts requests with the JSON data serialization format. The ``Content-Type`` header is ignored. Response format --------------- The Octavia API v2 always responds with the JSON data serialization format. The ``Accept`` header is ignored. Query extension A ``.json`` extension can be added to the request URI. For example, the following requests are equivalent: - **GET** *publicURL*/loadbalancers - **GET** *publicURL*/loadbalancers.json .. _filtering: Filtering and column selection ============================== The Octavia API v2 supports filtering based on all top level attributes of a resource. Filters are applicable to all list requests. For example, the following request returns all loadbalancers named ``foobar``: .. code:: GET /v2/lbaas/loadbalancers?name=foobar When you specify multiple filters, the Octavia API v2 returns only objects that meet all filtering criteria. The operation applies an AND condition among the filters. Note ---- Octavia does not offer an OR mechanism for filters. Alternatively, you can issue a distinct request for each filter and build a response set from the received responses on the client-side. Filtering by Tags ----------------- **New in version 2.5** Most Octavia resources support adding tags to the resource attributes. Octavia supports advanced filtering using these tags.
The following tag filters are supported by the Octavia API: - ``tags`` - Return the list of entities that have this tag or tags. - ``tags-any`` - Return the list of entities that have one or more of the given tags. - ``not-tags`` - Return the list of entities that do not have one or more of the given tags. - ``not-tags-any`` - Return the list of entities that do not have at least one of the given tags. When supplying a list of tags, the tags should be provided in a comma-separated list. For example, if you would like to get the list of load balancers with both the "red" and "blue" tags, you would request: .. code:: GET /v2/lbaas/loadbalancers?tags=red,blue To get a list of load balancers that have the "red" or "blue" tag, you would request: .. code:: GET /v2/lbaas/loadbalancers?tags-any=red,blue For a list of load balancers that do not have the "red" tag, you would request: .. code:: GET /v2/lbaas/loadbalancers?not-tags=red To get a list of load balancers that don't have either the "red" or "blue" tag, you would request: .. code:: GET /v2/lbaas/loadbalancers?not-tags-any=red,blue Tag filters can also be combined in the same request: .. code:: GET /v2/lbaas/loadbalancers?tags=red,blue&tags-any=green,orange Column Selection ---------------- By default, Octavia returns all attributes for any show or list call. The Octavia API v2 has a mechanism to limit the set of attributes returned; for example, you can return only the ``id`` attribute. You can use the ``fields`` query parameter to control the attributes returned from the Octavia API v2. For example, the following request returns only ``id`` and ``name`` for each load balancer: .. code:: GET /v2/lbaas/loadbalancers.json?fields=id&fields=name Synchronous versus asynchronous plug-in behavior ================================================ The Octavia API v2 presents a logical model of load balancers consisting of listeners, pools, and members. It is up to the OpenStack Octavia plug-in to communicate with the underlying infrastructure to ensure load balancing is consistent with the logical model. A plug-in might perform these operations asynchronously. When an API client modifies the logical model by issuing an HTTP **POST**, **PUT**, or **DELETE** request, the API call might return before the plug-in modifies underlying virtual and physical load balancing devices. However, an API client is guaranteed that all subsequent API calls properly reflect the changed logical model. For example, if a client issues an HTTP **PUT** request to set the weight of a member, there is no guarantee that the new weight will be in effect when the HTTP call returns. This is indicated by an HTTP response code of 202. You can use the ``provisioning_status`` attribute to determine whether the Octavia plug-in has successfully completed the configuration of the resource. Bulk-create =========== The Octavia v2 API does not support bulk create. You cannot create more than one load balancer per API call. The Octavia v2 API does support single call create which allows you to create a fully populated load balancer in one API call. This is discussed in the load balancer create section of this reference. Pagination ========== To reduce load on the service, list operations will return a maximum number of items at a time. To navigate the collection, the parameters limit, marker and page\_reverse can be set in the URI. For example: .. code:: ?limit=100&marker=1234&page_reverse=False The ``marker`` parameter is the ID of the last item in the previous list. The ``limit`` parameter sets the page size.
The ``page_reverse`` parameter sets the page direction. These parameters are optional. If the client requests a limit beyond the maximum limit configured by the deployment, the server returns the maximum limit number of items. For convenience, list responses contain atom "next" links and "previous" links. The last page in the list requested with 'page\_reverse=False' will not contain "next" link, and the last page in the list requested with 'page\_reverse=True' will not contain "previous" link. The following examples illustrate two pages with three items. The first page was retrieved through: .. code:: GET http://198.51.100.10:9876/v2/lbaas/loadbalancers.json?limit=2 If a particular plug-in does not support pagination operations the Octavia API v2 will emulate the pagination behavior so that users can expect the same behavior regardless of the particular plug-in running in the background. **Example load balancer list, first page: JSON request** .. code:: GET /v2/lbaas/loadbalancers.json?limit=2 HTTP/1.1 Host: 198.51.100.10:9876 Content-Type: application/json Accept: application/json **Example load balancer list, first page: JSON response** .. code:: { "loadbalancers": [ { "admin_state_up": true, "listeners": [], "vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea", "pools": [], "provider": "octavia", "description": "Best App load balancer 1", "name": "bestapplb1", "operating_status": "ONLINE", "id": "34d5f4a5-cbbc-43a0-878f-b8a26370e6e7", "provisioning_status": "ACTIVE", "vip_port_id": "1e20d91d-8df9-4c15-9778-28bc89226c19", "vip_address": "203.0.113.10", "project_id": "bf325b04-e7b1-4002-9b10-f4984630367f" }, { "admin_state_up": true, "listeners": [], "vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea", "pools": [], "provider": "octavia", "description": "Second Best App load balancer 1", "name": "2ndbestapplb1", "operating_status": "ONLINE", "id": "0fdb0ca7-0a38-4aea-891c-daaed40bcafe", "provisioning_status": "ACTIVE", "vip_port_id": "21f7ac04-6824-4222-93cf-46e0d70607f9", "vip_address": "203.0.113.20", "project_id": "bf325b04-e7b1-4002-9b10-f4984630367f" } ], "loadbalancers_links": [ { "href": "http://198.51.100.10:9876/v2/lbaas/loadbalancers.json?limit=2&marker=0fdb0ca7-0a38-4aea-891c-daaed40bcafe", "rel": "next" }, { "href": "http://198.51.100.10:9876/v2/lbaas/loadbalancers.json?limit=2&marker=34d5f4a5-cbbc-43a0-878f-b8a26370e6e7&page_reverse=True", "rel": "previous" } ] } The last page won't show the "next" links **Example load balancer list, last page: JSON request** .. code:: GET /v2/lbaas/loadbalancers.json?limit=2&marker=4ef465f3-0233-44af-b93d-9d3eae4daf85 HTTP/1.1 Host: 198.51.100.10:9876 Content-Type: application/json Accept: application/json **Example load balancer list, last page: JSON response** .. code:: { "loadbalancers": [ { "admin_state_up": true, "listeners": [], "vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea", "pools": [], "provider": "octavia", "description": "Other App load balancer 1", "name": "otherapplb1", "operating_status": "ONLINE", "id": "4ef465f3-0233-44af-b93d-9d3eae4daf85", "provisioning_status": "ACTIVE", "vip_port_id": "f777a1c7-7f59-4a36-ad34-24dfebaf19e6", "vip_address": "203.0.113.50", "project_id": "bf325b04-e7b1-4002-9b10-f4984630367f" } ], "loadbalancers_links": [ { "href": "http://198.51.100.10:9876/v2/lbaas/loadbalancers.json?limit=2&marker=4ef465f3-0233-44af-b93d-9d3eae4daf85&page_reverse=True", "rel": "previous" } ] } Sorting ======= Sorting is determined through the use of the 'sort' query string parameter. 
The value of this parameter is a comma-separated list of sort keys. Sort directions can optionally be appended to each sort key, separated by the ':' character. The supported sort directions are either 'asc' for ascending or 'desc' for descending. The caller may (but is not required to) specify a sort direction for each key. If a sort direction is not specified for a key, then a default is set by the server. For example: - Only sort keys specified: + ``sort=key1,key2,key3`` + 'key1' is the first key, 'key2' is the second key, etc. + Sort directions are defaulted by the server - Some sort directions specified: + ``sort=key1:asc,key2,key3`` + Any sort key without a corresponding direction is defaulted + 'key1' is the first key (ascending order), 'key2' is the second key (direction defaulted by the server), etc. - Equal number of sort keys and directions specified: + ``sort=key1:asc,key2:desc,key3:asc`` + Each key is paired with the corresponding direction + 'key1' is the first key (ascending order), 'key2' is the second key (descending order), etc. You can also use the ``sort_key`` and ``sort_dir`` parameters to sort the results of list operations. Currently sorting does not work with extended attributes of a resource. The ``sort_key`` and ``sort_dir`` can be repeated, and the number of ``sort_key`` and ``sort_dir`` provided must be the same. The ``sort_dir`` parameter indicates in which direction to sort. Acceptable values are ``asc`` (ascending) and ``desc`` (descending). If a particular plug-in does not support sorting operations the Octavia API v2 emulates the sorting behavior so that users can expect the same behavior regardless of the particular plug-in that runs in the background. Response Codes ============== The following HTTP response status codes are used by the Octavia v2 API. Success ------- +------+----------------------------------------------------------------+ | Code | Description | +======+================================================================+ | 200 | - The synchronous request was successful | +------+----------------------------------------------------------------+ | 202 | - The asynchronous request was accepted and is being processed | +------+----------------------------------------------------------------+ | 204 | - The request was successful, no content to return | | | - The entity was successfully deleted | +------+----------------------------------------------------------------+ Faults ------ The Octavia API v2 returns an error response if a failure occurs while processing a request. Octavia uses only standard HTTP error codes. 4\ *nn* errors indicate problems in the particular request being sent from the client.
+------+----------------------------------------------------------------+ | Code | Description | +======+================================================================+ | 400 | - Bad request | | | - Malformed request URI or body requested | | | - The request could not be understood | | | - Invalid values entered | | | - Bulk operations disallowed | | | - Validation failed | | | - Method not allowed for request body (such as trying to | | | update attributes that can be specified at create-time only) | +------+----------------------------------------------------------------+ | 401 | - Unauthorized: Access is denied due to invalid credentials | +------+----------------------------------------------------------------+ | 403 | - Policy does not allow current user to do this operation | | | - The project is over quota for the request | +------+----------------------------------------------------------------+ | 404 | - Not Found | | | - Non existent URI | | | - Resource not found | +------+----------------------------------------------------------------+ | 409 | - Conflict | | | - The resource is in an immutable state | +------+----------------------------------------------------------------+ | 500 | - Internal server error | +------+----------------------------------------------------------------+ | 503 | - Service unavailable | | | - The project is busy with other requests, try again later | +------+----------------------------------------------------------------+ Status Codes ============ Octavia API v2 entities have two status codes present in the response body. The ``provisioning_status`` describes the lifecycle status of the entity while the ``operating_status`` provides the observed status of the entity. For example, a member may be in a ``provisioning_status`` of ``PENDING_UPDATE`` and have an ``operating_status`` of ``ONLINE``. This would indicate that an update operation is occurring on this member and it is in an immutable state but it is healthy and able to service requests. This situation could occur if the user made a request to update the weight of the member. .. _op_status: Operating Status Codes ---------------------- +------------+--------------------------------------------------------------+ | Code | Description | +============+==============================================================+ | ONLINE | - Entity is operating normally | | | - All pool members are healthy | +------------+--------------------------------------------------------------+ | DRAINING | - The member is not accepting new connections | +------------+--------------------------------------------------------------+ | OFFLINE | - Entity is administratively disabled | +------------+--------------------------------------------------------------+ | DEGRADED | - One or more of the entity's components are in ERROR | +------------+--------------------------------------------------------------+ | ERROR | - The entity has failed | | | - The member is failing its health monitoring checks | | | - All of the pool members are in ERROR | +------------+--------------------------------------------------------------+ | NO_MONITOR | - No health monitor is configured for this entity and its | | | status is unknown | +------------+--------------------------------------------------------------+ ..
_prov_status: Provisioning Status Codes ------------------------- +----------------+----------------------------------------------------------+ | Code | Description | +================+==========================================================+ | ACTIVE | - The entity was provisioned successfully | +----------------+----------------------------------------------------------+ | DELETED | - The entity has been successfully deleted | +----------------+----------------------------------------------------------+ | ERROR | - Provisioning failed | +----------------+----------------------------------------------------------+ | PENDING_CREATE | - The entity is being created | +----------------+----------------------------------------------------------+ | PENDING_UPDATE | - The entity is being updated | +----------------+----------------------------------------------------------+ | PENDING_DELETE | - The entity is being deleted | +----------------+----------------------------------------------------------+ Entities in a ``PENDING_*`` state are immutable and cannot be modified until the requested operation completes. The entity will return to the ``ACTIVE`` provisioning status once the asynchronous operation completes. An entity in ``ERROR`` has failed provisioning. The entity may be deleted and recreated. .. _valid_protocol: Protocol Combinations (Listener/Pool) ===================================== The listener and pool can be associated through the listener's ``default_pool_id`` or l7policy's ``redirect_pool_id``. Both the listener and the pool must set the protocol parameter, but the association between them isn't arbitrary: only certain protocol combinations are valid. Valid protocol combinations --------------------------- .. |1| unicode:: U+2002 .. nut ( ) .. |2| unicode:: U+2003 .. mutton ( ) .. |listener| replace:: |2| |2| Listener .. |1Y| replace:: |1| Y .. |1N| replace:: |1| N .. |2Y| replace:: |2| Y .. |2N| replace:: |2| N .. |8Y| replace:: |2| |2| |2| |2| Y .. |8N| replace:: |2| |2| |2| |2| N +-------------+-------+--------+------+-------------------+------+ || |listener| || HTTP || HTTPS || TCP || TERMINATED_HTTPS || UDP | || Pool || || || || || | +=============+=======+========+======+===================+======+ | HTTP | |2Y| | |2N| | |1Y| | |8Y| | |1N| | +-------------+-------+--------+------+-------------------+------+ | HTTPS | |2N| | |2Y| | |1Y| | |8N| | |1N| | +-------------+-------+--------+------+-------------------+------+ | PROXY | |2Y| | |2Y| | |1Y| | |8Y| | |1N| | +-------------+-------+--------+------+-------------------+------+ | TCP | |2N| | |2Y| | |1Y| | |8N| | |1N| | +-------------+-------+--------+------+-------------------+------+ | UDP | |2N| | |2N| | |1N| | |8N| | |1Y| | +-------------+-------+--------+------+-------------------+------+ "Y" means the combination is valid and "N" means invalid. The HTTPS protocol is HTTPS pass-through. For most providers, this is treated as a TCP protocol. Some advanced providers may support HTTPS session persistence features by using the session ID. The Amphora provider treats HTTPS as a TCP flow, but currently does not support HTTPS session persistence using the session ID. The pool protocol of PROXY will use the listener protocol as the pool protocol but will wrap that protocol in the proxy protocol. In the case of listener protocol TERMINATED_HTTPS, a pool protocol of PROXY will be HTTP wrapped in the proxy protocol.
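For example, one valid combination from the table above is a ``TERMINATED_HTTPS`` listener with a ``PROXY`` pool. The following is a minimal sketch of such a pool create request; the listener ID and ``<token>`` are illustrative placeholders, not values from a real deployment.

.. code::

   # Create a PROXY protocol pool as the default pool of an existing
   # TERMINATED_HTTPS listener. Per the table above, this is a valid
   # listener/pool protocol combination.
   curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: <token>" \
     -d '{"pool": {"name": "proxy_pool", "protocol": "PROXY", "lb_algorithm": "ROUND_ROBIN", "listener_id": "<listener-uuid>"}}' \
     http://198.51.100.10:9876/v2/lbaas/pools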
Protocol Combinations (Pool/Health Monitor) =========================================== Pools and health monitors are also related with regard to protocol. Pools set the protocol parameter for the real member connections, and the health monitor sets a type for health checks. Health check types are limited based on the protocol of the pool. Valid protocol combinations --------------------------- .. |Health Monitor| replace:: |2| |2| Health Monitor .. |UDPCONNECT| replace:: UDP-CONNECT .. |4Y| replace:: |2| |2| Y .. |4N| replace:: |2| |2| N .. |5Y| replace:: |2| |2| |1| Y .. |5N| replace:: |2| |2| |1| N +-------------------+-------+--------+-------+------+------------+---------------+ || |Health Monitor| || HTTP || HTTPS || PING || TCP || TLS-HELLO || |UDPCONNECT| | || Pool || || || || || || | +===================+=======+========+=======+======+============+===============+ | HTTP | |2Y| | |2Y| | |1Y| | |1Y| | |4Y| | |5N| | +-------------------+-------+--------+-------+------+------------+---------------+ | HTTPS | |2Y| | |2Y| | |1Y| | |1Y| | |4Y| | |5N| | +-------------------+-------+--------+-------+------+------------+---------------+ | PROXY | |2Y| | |2Y| | |1Y| | |1Y| | |4Y| | |5N| | +-------------------+-------+--------+-------+------+------------+---------------+ | TCP | |2Y| | |2Y| | |1Y| | |1Y| | |4Y| | |5N| | +-------------------+-------+--------+-------+------+------------+---------------+ | UDP | |2Y| | |2N| | |1N| | |1Y| | |4N| | |5Y| | +-------------------+-------+--------+-------+------+------------+---------------+ "Y" means the combination is valid and "N" means invalid. These combinations are mostly as you'd expect for all non-UDP pool protocols: non-UDP pools can have health monitors with any check type besides UDP-CONNECT. For UDP pools however, things are a little more complicated. UDP Pools support UDP-CONNECT but also HTTP and TCP checks. HTTPS checks are technically feasible but have not yet been implemented. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/healthmonitor.inc0000664000175000017500000002557400000000000021731 0ustar00zuulzuul00000000000000.. -*- rst -*- List Health Monitors ==================== .. rest_method:: GET /v2/lbaas/healthmonitors Lists all health monitors for the project. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. Administrative users can specify a project ID that is different than their own to list health monitors for other projects. The list might be empty. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - project_id: project_id_query Curl Example ------------ .. literalinclude:: examples/healthmonitor-list-curl :language: bash Response Parameters ------------------- .. 
rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up - created_at: created_at - delay: healthmonitor-delay - domain_name: healthmonitor-domain_name - expected_codes: healthmonitor-expected_codes - http_method: healthmonitor-http_method - http_version: healthmonitor-http_version - id: healthmonitor-id - max_retries: healthmonitor-max-retries - max_retries_down: healthmonitor-max-retries-down - name: name - operating_status: operating_status - pool_id: pool-id - project_id: project_id - provisioning_status: provisioning_status - tags: tags - timeout: healthmonitor-timeout - type: healthmonitor-type - updated_at: updated_at - url_path: healthmonitor-url_path Response Example ---------------- .. literalinclude:: examples/healthmonitors-list-response.json :language: javascript Create Health Monitor ===================== .. rest_method:: POST /v2/lbaas/healthmonitors Creates a health monitor on a pool. Health monitors define how the load balancer monitors backend servers to determine if they are available to service requests. This operation provisions a new health monitor by using the configuration that you define in the request object. After the API validates the request and starts the provisioning process, the API returns a response object that contains a unique ID and the status of provisioning the health monitor. In the response, the health monitor :ref:`provisioning status` is ``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. If the status is ``PENDING_CREATE``, issue GET ``/v2/lbaas/healthmonitors/{healthmonitor_id}`` to view the progress of the provisioning operation. When the health monitor status changes to ``ACTIVE``, the health monitor is successfully provisioned and is ready for further configuration. If the API cannot fulfill the request due to insufficient data or data that is not valid, the service returns the HTTP ``Bad Request (400)`` response code with information about the failure in the response body. Validation errors require that you correct the error and submit the request again. Specifying a project_id is deprecated. The health monitor will inherit the project_id of the parent load balancer. At a minimum, you must specify these health monitor attributes: - ``delay`` The interval, in seconds, between health checks. - ``max_retries`` The number of successful checks before changing the operating status of the member to ONLINE. - ``pool_id`` The pool to monitor. - ``timeout`` The time, in seconds, after which a health check times out. - ``type`` The type of health monitor. One of ``HTTP``, ``HTTPS``, ``PING``, ``TCP``, ``TLS-HELLO``, or ``UDP-CONNECT``. Some attributes receive default values if you omit them from the request: - ``admin_state_up`` The default is ``true``. - ``expected_codes`` The expected HTTP status codes to get from a successful health check. The default is ``200``. - ``http_method`` The default is ``GET``. - ``http_version`` The default is ``1.0``. - ``max_retries_down`` The default is ``3``. - ``url_path`` The default is ``/``. To create a health monitor, the parent load balancer must have an ``ACTIVE`` provisioning status. .. rest_status_code:: success ../http-status.yaml - 201 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 409 - 500 - 503 Request ------- .. 
rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up-default-optional - delay: healthmonitor-delay - domain_name: healthmonitor-domain_name-optional - expected_codes: healthmonitor-expected_codes-optional - http_method: healthmonitor-http_method-optional - http_version: healthmonitor-http_version-optional - name: name-optional - max_retries: healthmonitor-max-retries - max_retries_down: healthmonitor-max-retries-down-optional - pool_id: pool-id - project_id: project_id-optional-deprecated - tags: tags-optional - timeout: healthmonitor-timeout - type: healthmonitor-type - url_path: healthmonitor-url_path-optional Request Example ---------------- .. literalinclude:: examples/healthmonitor-create-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/healthmonitor-create-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up - created_at: created_at - delay: healthmonitor-delay - domain_name: healthmonitor-domain_name - expected_codes: healthmonitor-expected_codes - http_method: healthmonitor-http_method - http_version: healthmonitor-http_version - id: healthmonitor-id - max_retries: healthmonitor-max-retries - max_retries_down: healthmonitor-max-retries-down - name: name - operating_status: operating_status - pool_id: pool-id - project_id: project_id - provisioning_status: provisioning_status - tags: tags - timeout: healthmonitor-timeout - type: healthmonitor-type - updated_at: updated_at - url_path: healthmonitor-url_path Response Example ---------------- .. literalinclude:: examples/healthmonitor-create-response.json :language: javascript Show Health Monitor details =========================== .. rest_method:: GET /v2/lbaas/healthmonitors/{healthmonitor_id} Shows the details of a health monitor. If you are not an administrative user and the parent load balancer does not belong to your project, the service returns the HTTP ``Forbidden (403)`` response code. This operation does not require a request body. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - healthmonitor_id: path-healthmonitor-id Curl Example ------------ .. literalinclude:: examples/healthmonitor-show-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up - created_at: created_at - delay: healthmonitor-delay - domain_name: healthmonitor-domain_name - expected_codes: healthmonitor-expected_codes - http_method: healthmonitor-http_method - http_version: healthmonitor-http_version - id: healthmonitor-id - max_retries: healthmonitor-max-retries - max_retries_down: healthmonitor-max-retries-down - name: name - operating_status: operating_status - pool_id: pool-id - project_id: project_id - provisioning_status: provisioning_status - tags: tags - timeout: healthmonitor-timeout - type: healthmonitor-type - updated_at: updated_at - url_path: healthmonitor-url_path Response Example ---------------- .. literalinclude:: examples/healthmonitor-show-response.json :language: javascript Update a Health Monitor ======================= .. rest_method:: PUT /v2/lbaas/healthmonitors/{healthmonitor_id} Update an existing health monitor. If the request is valid, the service returns the ``Accepted (202)`` response code. 
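For example (a minimal sketch; the health monitor ID and ``<token>`` are illustrative placeholders), an update of the check interval followed by the status poll described next might look like:

.. code::

   # Update the health monitor check interval (delay).
   curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: <token>" \
     -d '{"healthmonitor": {"delay": 5}}' \
     http://198.51.100.10:9876/v2/lbaas/healthmonitors/<healthmonitor-uuid>

   # Poll the health monitor until provisioning_status returns to ACTIVE.
   curl -X GET -H "X-Auth-Token: <token>" \
     http://198.51.100.10:9876/v2/lbaas/healthmonitors/<healthmonitor-uuid>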
To confirm the update, check that the health monitor provisioning status is ``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET operation to poll the health monitor object for changes. This operation returns the updated health monitor object with the ``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. .. rest_status_code:: success ../http-status.yaml - 202 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up-default-optional - delay: healthmonitor-delay-optional - domain_name: healthmonitor-domain_name-optional - expected_codes: healthmonitor-expected_codes-optional - healthmonitor_id: path-healthmonitor-id - http_method: healthmonitor-http_method-optional - http_version: healthmonitor-http_version-optional - max_retries: healthmonitor-max-retries-optional - max_retries_down: healthmonitor-max-retries-down-optional - name: name-optional - tags: tags-optional - timeout: healthmonitor-timeout-optional - url_path: healthmonitor-url_path-optional Request Example --------------- .. literalinclude:: examples/healthmonitor-update-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/healthmonitor-update-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up - created_at: created_at - delay: healthmonitor-delay - domain_name: healthmonitor-domain_name - expected_codes: healthmonitor-expected_codes - http_method: healthmonitor-http_method - http_version: healthmonitor-http_version - id: healthmonitor-id - max_retries: healthmonitor-max-retries - max_retries_down: healthmonitor-max-retries-down - name: name - operating_status: operating_status - pool_id: pool-id - project_id: project_id - provisioning_status: provisioning_status - tags: tags - timeout: healthmonitor-timeout - type: healthmonitor-type - updated_at: updated_at - url_path: healthmonitor-url_path Response Example ---------------- .. literalinclude:: examples/healthmonitor-update-response.json :language: javascript Remove a Health Monitor ======================= .. rest_method:: DELETE /v2/lbaas/healthmonitors/{healthmonitor_id} Removes a health monitor and its associated configuration from the project. The API immediately purges any and all configuration data, depending on the configuration settings. You cannot recover it. .. rest_status_code:: success ../http-status.yaml - 204 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - healthmonitor_id: path-healthmonitor-id Curl Example ------------ .. literalinclude:: examples/healthmonitor-delete-curl :language: bash Response -------- There is no body content for the response of a successful DELETE request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/index.rst0000664000175000017500000000213200000000000020203 0ustar00zuulzuul00000000000000:tocdepth: 3 ======================== Octavia API v2 (Current) ======================== .. rest_expand_all:: -------------------- General API Overview -------------------- .. include:: general.inc -------------- Load Balancers -------------- .. include:: loadbalancer.inc --------- Listeners --------- .. include:: listener.inc ----- Pools ----- .. include:: pool.inc ------- Members ------- .. 
include:: member.inc -------------- Health Monitor -------------- .. include:: healthmonitor.inc ----------- L7 Policies ----------- .. include:: l7policy.inc -------- L7 Rules -------- .. include:: l7rule.inc ------ Quotas ------ .. include:: quota.inc --------- Providers --------- .. include:: provider.inc ------- Flavors ------- .. include:: flavor.inc --------------- Flavor Profiles --------------- .. include:: flavorprofile.inc ------------------ Availability Zones ------------------ .. include:: availabilityzone.inc -------------------------- Availability Zone Profiles -------------------------- .. include:: availabilityzoneprofile.inc -------- Amphorae -------- .. include:: amphora.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/l7policy.inc0000664000175000017500000002335000000000000020604 0ustar00zuulzuul00000000000000.. -*- rst -*- List L7 Policies ================ .. rest_method:: GET /v2/lbaas/l7policies Lists all L7 policies for the project. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. Administrative users can specify a project ID that is different than their own to list L7 policies for other projects. The list might be empty. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - project_id: project_id_query Curl Example ------------ .. literalinclude:: examples/l7policies-list-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - action: l7policy-action - admin_state_up: admin_state_up - created_at: created_at - description: description - id: l7policy-id - listener_id: listener-id - name: name - operating_status: operating_status - position: l7policy-position - project_id: project_id - provisioning_status: provisioning_status - redirect_http_code: l7policy-redirect-http-code - redirect_pool_id: l7policy-redirect-pool_id - redirect_prefix: l7policy-redirect-prefix - redirect_url: l7policy-redirect-url - rules: l7policy-rule-ids - tags: tags - updated_at: updated_at Response Example ---------------- .. literalinclude:: examples/l7policies-list-response.json :language: javascript Create an L7 Policy =================== .. rest_method:: POST /v2/lbaas/l7policies Creates a L7 policy. This operation provisions a new L7 policy by using the configuration that you define in the request object. After the API validates the request and starts the provisioning process, the API returns a response object that contains a unique ID and the status of provisioning the L7 policy. In the response, the L7 policy :ref:`provisioning status` is ``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. If the status is ``PENDING_CREATE``, issue GET ``/v2/lbaas/l7policies/{l7policy_id}`` to view the progress of the provisioning operation. When the L7 policy status changes to ``ACTIVE``, the L7 policy is successfully provisioned and is ready for further configuration. If the API cannot fulfill the request due to insufficient data or data that is not valid, the service returns the HTTP ``Bad Request (400)`` response code with information about the failure in the response body. Validation errors require that you correct the error and submit the request again. 
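As a sketch of the minimum required attributes (the listener ID and ``<token>`` are illustrative placeholders), a redirect policy could be created with:

.. code::

   # Create an L7 policy that redirects matching requests to a URL.
   # action and listener_id are required; redirect_url is required
   # when the action is REDIRECT_TO_URL.
   curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: <token>" \
     -d '{"l7policy": {"action": "REDIRECT_TO_URL", "listener_id": "<listener-uuid>", "redirect_url": "http://www.example.com/"}}' \
     http://198.51.100.10:9876/v2/lbaas/l7policies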
All the rules associated with a given policy are logically ANDed together. A request must match all the policy's rules to match the policy. If you need to express a logical OR operation between rules, then do this by creating multiple policies with the same action. If a new policy is created with a position that matches that of an existing policy, then the new policy is inserted at the given position. L7 policies with ``action`` of ``REDIRECT_TO_URL`` will return the default HTTP ``Found (302)`` response code with the ``redirect_url``. Also, specify ``redirect_http_code`` to configure the needed HTTP response code, such as, 301, 302, 303, 307 and 308. L7 policies with ``action`` of ``REJECT`` will return a ``Forbidden (403)`` response code to the requester. .. note:: Pools of type ``UDP`` cannot be used in L7 policies at this time. .. rest_status_code:: success ../http-status.yaml - 201 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 409 - 500 - 503 Request ------- .. rest_parameters:: ../parameters.yaml - action: l7policy-action - admin_state_up: admin_state_up-default-optional - description: description-optional - listener_id: listener-id - name: name-optional - position: l7policy-position-optional - project_id: project_id-optional - redirect_http_code: l7policy-redirect-http-code-optional - redirect_pool_id: l7policy-redirect-pool_id-optional - redirect_prefix: l7policy-redirect-prefix-optional - redirect_url: l7policy-redirect-url-optional - tags: tags-optional Request Example ---------------- .. literalinclude:: examples/l7policy-create-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/l7policy-create-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - action: l7policy-action - admin_state_up: admin_state_up - created_at: created_at - description: description - id: l7policy-id - listener_id: listener-id - name: name - operating_status: operating_status - position: l7policy-position - project_id: project_id - provisioning_status: provisioning_status - redirect_http_code: l7policy-redirect-http-code - redirect_pool_id: l7policy-redirect-pool_id - redirect_prefix: l7policy-redirect-prefix - redirect_url: l7policy-redirect-url - rules: l7policy-rule-ids - tags: tags - updated_at: updated_at Response Example ---------------- .. literalinclude:: examples/l7policy-create-response.json :language: javascript Show L7 Policy details ========================== .. rest_method:: GET /v2/lbaas/l7policies/{l7policy_id} Shows the details of a L7 policy. If you are not an administrative user and the L7 policy object does not belong to your project, the service returns the HTTP ``Forbidden (403)`` response code. This operation does not require a request body. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - l7policy_id: path-l7policy-id Curl Example ------------ .. literalinclude:: examples/l7policy-show-curl :language: bash Response Parameters ------------------- .. 
rest_parameters:: ../parameters.yaml - action: l7policy-action - admin_state_up: admin_state_up - created_at: created_at - description: description - id: l7policy-id - listener_id: listener-id - name: name - operating_status: operating_status - position: l7policy-position - project_id: project_id - provisioning_status: provisioning_status - redirect_http_code: l7policy-redirect-http-code - redirect_pool_id: l7policy-redirect-pool_id - redirect_prefix: l7policy-redirect-prefix - redirect_url: l7policy-redirect-url - rules: l7policy-rule-ids - tags: tags - updated_at: updated_at Response Example ---------------- .. literalinclude:: examples/l7policy-show-response.json :language: javascript Update a L7 Policy ================== .. rest_method:: PUT /v2/lbaas/l7policies/{l7policy_id} Updates a L7 policy. If the request is valid, the service returns the ``Accepted (202)`` response code. To confirm the update, check that the L7 policy provisioning status is ``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET operation to poll the L7 policy object for changes. This operation returns the updated L7 policy object with the ``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. If a policy is updated with a position that matches that of an existing policy, then the updated policy is inserted at the given position. .. rest_status_code:: success ../http-status.yaml - 202 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - action: l7policy-action-optional - admin_state_up: admin_state_up-default-optional - description: description-optional - l7policy_id: path-l7policy-id - name: name-optional - position: l7policy-position-optional - redirect_http_code: l7policy-redirect-http-code-optional - redirect_pool_id: l7policy-redirect-pool_id-optional - redirect_prefix: l7policy-redirect-prefix-optional - redirect_url: l7policy-redirect-url-optional - tags: tags-optional Request Example --------------- .. literalinclude:: examples/l7policy-update-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/l7policy-update-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - action: l7policy-action - admin_state_up: admin_state_up - created_at: created_at - description: description - id: l7policy-id - listener_id: listener-id - name: name - operating_status: operating_status - position: l7policy-position - project_id: project_id - provisioning_status: provisioning_status - redirect_http_code: l7policy-redirect-http-code - redirect_pool_id: l7policy-redirect-pool_id - redirect_prefix: l7policy-redirect-prefix - redirect_url: l7policy-redirect-url - rules: l7policy-rule-ids - tags: tags - updated_at: updated_at Response Example ---------------- .. literalinclude:: examples/l7policy-update-response.json :language: javascript Remove a L7 Policy ================== .. rest_method:: DELETE /v2/lbaas/l7policies/{l7policy_id} Removes a L7 policy and its associated configuration from the project. The API immediately purges any and all configuration data, depending on the configuration settings. You cannot recover it. .. rest_status_code:: success ../http-status.yaml - 204 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - l7policy_id: path-l7policy-id Curl Example ------------ .. 
Remove an L7 Policy
===================

.. rest_method:: DELETE /v2/lbaas/l7policies/{l7policy_id}

Removes an L7 policy and its associated configuration from the project.

The API immediately purges any and all configuration data, depending on the
configuration settings. You cannot recover it.

.. rest_status_code:: success ../http-status.yaml

   - 204

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - l7policy_id: path-l7policy-id

Curl Example
------------

.. literalinclude:: examples/l7policy-delete-curl
   :language: bash

Response
--------

There is no body content for the response of a successful DELETE request.

octavia-6.2.2/api-ref/source/v2/l7rule.inc

.. -*- rst -*-

List L7 Rules
=============

.. rest_method:: GET /v2/lbaas/l7policies/{l7policy_id}/rules

Lists all L7 rules for the project.

Use the ``fields`` query parameter to control which fields are returned in
the response body. Additionally, you can filter results by using query
string parameters. For information, see :ref:`filtering`.

Administrative users can specify a project ID that is different than their
own to list L7 rules for other projects.

The list might be empty.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - l7policy_id: path-l7policy-id
   - project_id: project_id_query

Curl Example
------------

.. literalinclude:: examples/l7rules-list-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - compare_type: l7rule-compare_type
   - created_at: created_at
   - id: l7rule-id
   - invert: l7rule-invert
   - key: l7rule-key
   - operating_status: operating_status
   - project_id: project_id
   - provisioning_status: provisioning_status
   - tags: tags
   - type: l7rule-type
   - updated_at: updated_at
   - value: l7rule-value

Response Example
----------------

.. literalinclude:: examples/l7rules-list-response.json
   :language: javascript

Create an L7 Rule
=================

.. rest_method:: POST /v2/lbaas/l7policies/{l7policy_id}/rules

Creates an L7 rule.

This operation provisions a new L7 rule by using the configuration that you
define in the request object. After the API validates the request and starts
the provisioning process, the API returns a response object that contains a
unique ID and the status of provisioning the L7 rule.

In the response, the L7 rule :ref:`provisioning status <prov_status>` is
``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``.

If the status is ``PENDING_CREATE``, issue GET
``/v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}`` to view the progress
of the provisioning operation. When the L7 rule status changes to ``ACTIVE``,
the L7 rule is successfully provisioned and is ready for further
configuration.

If the API cannot fulfill the request due to insufficient data or data that
is not valid, the service returns the HTTP ``Bad Request (400)`` response
code with information about the failure in the response body. Validation
errors require that you correct the error and submit the request again.

All the rules associated with a given policy are logically ANDed together. A
request must match all the policy's rules to match the policy.

If you need to express a logical OR operation between rules, then do this by
creating multiple policies with the same action.

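For instance, a sketch of two rules on one policy that must both match
(placeholder token, endpoint, and policy UUID), assuming the ``PATH`` and
``COOKIE`` rule types with ``STARTS_WITH`` and ``EQUAL_TO`` compare types:

.. code-block:: bash

   # Rule 1: the request path must start with /api.
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"rule": {"type": "PATH", "compare_type": "STARTS_WITH", "value": "/api"}}' \
     http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules

   # Rule 2: a cookie named "session" must also equal "prod".
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"rule": {"type": "COOKIE", "compare_type": "EQUAL_TO", "key": "session", "value": "prod"}}' \
     http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules
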
.. rest_status_code:: success ../http-status.yaml

   - 201

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500
   - 503

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-default-optional
   - compare_type: l7rule-compare_type
   - invert: l7rule-invert-optional
   - key: l7rule-key-optional
   - l7policy_id: path-l7policy-id
   - project_id: project_id-optional
   - tags: tags-optional
   - type: l7rule-type
   - value: l7rule-value

Request Example
---------------

.. literalinclude:: examples/l7rule-create-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/l7rule-create-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - compare_type: l7rule-compare_type
   - created_at: created_at
   - id: l7rule-id
   - invert: l7rule-invert
   - key: l7rule-key
   - operating_status: operating_status
   - project_id: project_id
   - provisioning_status: provisioning_status
   - tags: tags
   - type: l7rule-type
   - updated_at: updated_at
   - value: l7rule-value

Response Example
----------------

.. literalinclude:: examples/l7rule-create-response.json
   :language: javascript

Show L7 Rule details
====================

.. rest_method:: GET /v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}

Shows the details of an L7 rule.

If you are not an administrative user and the L7 rule object does not belong
to your project, the service returns the HTTP ``Forbidden (403)`` response
code.

This operation does not require a request body.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 401
   - 403
   - 404
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - l7policy_id: path-l7policy-id
   - l7rule_id: path-l7rule-id

Curl Example
------------

.. literalinclude:: examples/l7rule-show-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - compare_type: l7rule-compare_type
   - created_at: created_at
   - id: l7rule-id
   - invert: l7rule-invert
   - key: l7rule-key
   - operating_status: operating_status
   - project_id: project_id
   - provisioning_status: provisioning_status
   - tags: tags
   - type: l7rule-type
   - updated_at: updated_at
   - value: l7rule-value

Response Example
----------------

.. literalinclude:: examples/l7rule-show-response.json
   :language: javascript

Update an L7 Rule
=================

.. rest_method:: PUT /v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}

Updates an L7 rule.

If the request is valid, the service returns the ``Accepted (202)`` response
code. To confirm the update, check that the L7 rule provisioning status is
``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET operation to poll
the L7 rule object for changes.

This operation returns the updated L7 rule object with the ``ACTIVE``,
``PENDING_UPDATE``, or ``ERROR`` provisioning status.

.. rest_status_code:: success ../http-status.yaml

   - 202

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-default-optional
   - compare_type: l7rule-compare_type-optional
   - invert: l7rule-invert-optional
   - key: l7rule-key-optional
   - l7policy_id: path-l7policy-id
   - l7rule_id: path-l7rule-id
   - tags: tags-optional
   - type: l7rule-type-optional
   - value: l7rule-value-optional

Request Example
---------------

.. literalinclude:: examples/l7rule-update-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/l7rule-update-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - compare_type: l7rule-compare_type
   - created_at: created_at
   - id: l7rule-id
   - invert: l7rule-invert
   - key: l7rule-key
   - operating_status: operating_status
   - project_id: project_id
   - provisioning_status: provisioning_status
   - tags: tags
   - type: l7rule-type
   - updated_at: updated_at
   - value: l7rule-value

Response Example
----------------

.. literalinclude:: examples/l7rule-update-response.json
   :language: javascript

Remove an L7 Rule
=================

.. rest_method:: DELETE /v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}

Removes an L7 rule and its associated configuration from the project.

The API immediately purges any and all configuration data, depending on the
configuration settings. You cannot recover it.

.. rest_status_code:: success ../http-status.yaml

   - 204

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - l7policy_id: path-l7policy-id
   - l7rule_id: path-l7rule-id

Curl Example
------------

.. literalinclude:: examples/l7rule-delete-curl
   :language: bash

Response
--------

There is no body content for the response of a successful DELETE request.

octavia-6.2.2/api-ref/source/v2/listener.inc

.. -*- rst -*-

List Listeners
==============

.. rest_method:: GET /v2/lbaas/listeners

Lists all listeners for the project.

Use the ``fields`` query parameter to control which fields are returned in
the response body. Additionally, you can filter results by using query
string parameters. For information, see :ref:`filtering`.

Administrative users can specify a project ID that is different than their
own to list listeners for other projects.

The list might be empty.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - project_id: project_id_query

Curl Example
------------

.. literalinclude:: examples/listeners-list-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - allowed_cidrs: allowed_cidrs
   - client_authentication: client_authentication
   - client_ca_tls_container_ref: client_ca_tls_container_ref
   - client_crl_container_ref: client_crl_container_ref
   - connection_limit: connection_limit
   - created_at: created_at
   - default_pool_id: default_pool_id
   - default_tls_container_ref: default_tls_container_ref
   - description: description
   - id: listener-id
   - insert_headers: insert_headers
   - l7policies: l7policy-ids
   - listener: listener
   - loadbalancers: loadbalancer-ids
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol: protocol
   - protocol_port: protocol_port
   - provisioning_status: provisioning_status
   - sni_container_refs: sni_container_refs
   - tags: tags
   - timeout_client_data: timeout_client_data
   - timeout_member_connect: timeout_member_connect
   - timeout_member_data: timeout_member_data
   - timeout_tcp_inspect: timeout_tcp_inspect
   - tls_ciphers: tls_ciphers
   - updated_at: updated_at

Response Example
----------------

.. literalinclude:: examples/listeners-list-response.json
   :language: javascript

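As a sketch of the filtering described above, an administrative user might
list another project's listeners like this (placeholder token, endpoint, and
project UUID):

.. code-block:: bash

   # Filter the listener list by a specific project ID (admin only).
   curl -H "X-Auth-Token: $ADMIN_TOKEN" \
     "http://198.51.100.10:9876/v2/lbaas/listeners?project_id=e3cd678b-11f8-4cdd-92e1-7c78b1fdf36d"
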
Create Listener
===============

.. rest_method:: POST /v2/lbaas/listeners

Creates a listener for a load balancer.

The listener configures a port and protocol for the load balancer to listen
on for incoming requests. A load balancer may have zero or more listeners
configured.

This operation provisions a new listener by using the configuration that you
define in the request object. After the API validates the request and starts
the provisioning process, the API returns a response object that contains a
unique ID and the status of provisioning the listener.

In the response, the listener :ref:`provisioning status <prov_status>` is
``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``.

If the status is ``PENDING_CREATE``, issue GET
``/v2/lbaas/listeners/{listener_id}`` to view the progress of the
provisioning operation. When the listener status changes to ``ACTIVE``, the
listener is successfully provisioned and is ready for further configuration.

If the API cannot fulfill the request due to insufficient data or data that
is not valid, the service returns the HTTP ``Bad Request (400)`` response
code with information about the failure in the response body. Validation
errors require that you correct the error and submit the request again.

Specifying a project_id is deprecated. The listener will inherit the
project_id of the parent load balancer.

You can configure all documented features of the listener at creation time
by specifying the additional elements or attributes in the request.

To create a listener, the parent load balancer must have an ``ACTIVE``
provisioning status.

.. rest_status_code:: success ../http-status.yaml

   - 201

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500
   - 503

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-default-optional
   - allowed_cidrs: allowed_cidrs-optional
   - client_authentication: client_authentication-optional
   - client_ca_tls_container_ref: client_ca_tls_container_ref-optional
   - client_crl_container_ref: client_crl_container_ref-optional
   - connection_limit: connection_limit-optional
   - default_pool: pool-optional
   - default_pool_id: default_pool_id-optional
   - default_tls_container_ref: default_tls_container_ref-optional
   - description: description-optional
   - insert_headers: insert_headers-optional
   - l7policies: l7policies-optional
   - listeners: listener
   - loadbalancer_id: loadbalancer-id
   - name: name-optional
   - project_id: project_id-optional-deprecated
   - protocol: protocol
   - protocol_port: protocol_port
   - sni_container_refs: sni_container_refs-optional
   - tags: tags-optional
   - timeout_client_data: timeout_client_data-optional
   - timeout_member_connect: timeout_member_connect-optional
   - timeout_member_data: timeout_member_data-optional
   - timeout_tcp_inspect: timeout_tcp_inspect-optional
   - tls_ciphers: tls_ciphers-optional

.. _header_insertions:

Supported HTTP Header Insertions
--------------------------------

.. note::
   Both the key and the values are always specified as strings when
   specifying header insertions.

+-------------------------+--------+------------------------------------------------+
| Key                     | Value  | Description                                    |
+=========================+========+================================================+
| X-Forwarded-For         | string | When "``true``" a ``X-Forwarded-For`` header   |
|                         |        | is inserted into the request to the backend    |
|                         |        | ``member`` that specifies the client IP        |
|                         |        | address.                                       |
+-------------------------+--------+------------------------------------------------+
| X-Forwarded-Port        | string | When "``true``" a ``X-Forwarded-Port`` header  |
|                         |        | is inserted into the request to the backend    |
|                         |        | ``member`` that specifies the listener port.   |
+-------------------------+--------+------------------------------------------------+
| X-Forwarded-Proto       | string | When "``true``" a ``X-Forwarded-Proto`` header |
|                         |        | is inserted into the request to the backend    |
|                         |        | ``member``. HTTP for the HTTP listener         |
|                         |        | protocol type, HTTPS for the TERMINATED_HTTPS  |
|                         |        | listener protocol type.                        |
|                         |        | **New in version 2.1**                         |
+-------------------------+--------+------------------------------------------------+
| X-SSL-Client-Verify     | string | When "``true``" a ``X-SSL-Client-Verify``      |
|                         |        | header is inserted into the request to the     |
|                         |        | backend ``member`` that contains 0 if the      |
|                         |        | client authentication was successful, or an    |
|                         |        | error number greater than 0 that aligns with   |
|                         |        | the OpenSSL verify error codes.                |
+-------------------------+--------+------------------------------------------------+
| X-SSL-Client-Has-Cert   | string | When "``true``" a ``X-SSL-Client-Has-Cert``    |
|                         |        | header is inserted into the request to the     |
|                         |        | backend ``member`` that is ``true`` if a       |
|                         |        | client authentication certificate was          |
|                         |        | presented, and ``false`` if not. Does not      |
|                         |        | indicate validity.                             |
+-------------------------+--------+------------------------------------------------+
| X-SSL-Client-DN         | string | When "``true``" a ``X-SSL-Client-DN`` header   |
|                         |        | is inserted into the request to the backend    |
|                         |        | ``member`` that contains the full              |
|                         |        | Distinguished Name of the certificate          |
|                         |        | presented by the client.                       |
+-------------------------+--------+------------------------------------------------+
| X-SSL-Client-CN         | string | When "``true``" a ``X-SSL-Client-CN`` header   |
|                         |        | is inserted into the request to the backend    |
|                         |        | ``member`` that contains the Common Name from  |
|                         |        | the full Distinguished Name of the certificate |
|                         |        | presented by the client.                       |
+-------------------------+--------+------------------------------------------------+
| X-SSL-Issuer            | string | When "``true``" a ``X-SSL-Issuer`` header is   |
|                         |        | inserted into the request to the backend       |
|                         |        | ``member`` that contains the full              |
|                         |        | Distinguished Name of the client certificate   |
|                         |        | issuer.                                        |
+-------------------------+--------+------------------------------------------------+
| X-SSL-Client-SHA1       | string | When "``true``" a ``X-SSL-Client-SHA1`` header |
|                         |        | is inserted into the request to the backend    |
|                         |        | ``member`` that contains the SHA-1 fingerprint |
|                         |        | of the certificate presented by the client in  |
|                         |        | hex string format.                             |
+-------------------------+--------+------------------------------------------------+
| X-SSL-Client-Not-Before | string | When "``true``" a ``X-SSL-Client-Not-Before``  |
|                         |        | header is inserted into the request to the     |
|                         |        | backend ``member`` that contains the start     |
|                         |        | date presented by the client as a formatted    |
|                         |        | string YYMMDDhhmmss[Z].                        |
+-------------------------+--------+------------------------------------------------+
| X-SSL-Client-Not-After  | string | When "``true``" a ``X-SSL-Client-Not-After``   |
|                         |        | header is inserted into the request to the     |
|                         |        | backend ``member`` that contains the end date  |
|                         |        | presented by the client as a formatted string  |
|                         |        | YYMMDDhhmmss[Z].                               |
+-------------------------+--------+------------------------------------------------+

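Tying the table above to the request body: ``insert_headers`` is a map of
the header names to enable, with the values given as strings. A minimal
sketch of a listener create request enabling two insertions (placeholder
token, endpoint, and load balancer UUID):

.. code-block:: bash

   # Enable two header insertions on a new HTTP listener.
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"listener": {"loadbalancer_id": "607226db-27ef-4d41-ae89-f2a800e9c2db",
          "protocol": "HTTP", "protocol_port": 80,
          "insert_headers": {"X-Forwarded-For": "true", "X-Forwarded-Port": "true"}}}' \
     http://198.51.100.10:9876/v2/lbaas/listeners
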
Request Example
---------------

.. literalinclude:: examples/listener-create-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/listener-create-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - allowed_cidrs: allowed_cidrs
   - client_authentication: client_authentication
   - client_ca_tls_container_ref: client_ca_tls_container_ref
   - client_crl_container_ref: client_crl_container_ref
   - connection_limit: connection_limit
   - created_at: created_at
   - default_pool_id: default_pool_id
   - default_tls_container_ref: default_tls_container_ref
   - description: description
   - id: listener-id
   - insert_headers: insert_headers
   - l7policies: l7policy-ids
   - listener: listener
   - loadbalancers: loadbalancer-ids
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol: protocol
   - protocol_port: protocol_port
   - provisioning_status: provisioning_status
   - sni_container_refs: sni_container_refs
   - tags: tags
   - timeout_client_data: timeout_client_data
   - timeout_member_connect: timeout_member_connect
   - timeout_member_data: timeout_member_data
   - timeout_tcp_inspect: timeout_tcp_inspect
   - tls_ciphers: tls_ciphers
   - updated_at: updated_at

Response Example
----------------

.. literalinclude:: examples/listener-create-response.json
   :language: javascript

Show Listener details
=====================

.. rest_method:: GET /v2/lbaas/listeners/{listener_id}

Shows the details of a listener.

If you are not an administrative user and the parent load balancer does not
belong to your project, the service returns the HTTP ``Forbidden (403)``
response code.

This operation does not require a request body.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 401
   - 403
   - 404
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - listener_id: path-listener-id

Curl Example
------------

.. literalinclude:: examples/listener-show-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - allowed_cidrs: allowed_cidrs
   - client_authentication: client_authentication
   - client_ca_tls_container_ref: client_ca_tls_container_ref
   - client_crl_container_ref: client_crl_container_ref
   - connection_limit: connection_limit
   - created_at: created_at
   - default_pool_id: default_pool_id
   - default_tls_container_ref: default_tls_container_ref
   - description: description
   - id: listener-id
   - insert_headers: insert_headers
   - l7policies: l7policy-ids
   - listener: listener
   - loadbalancers: loadbalancer-ids
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol: protocol
   - protocol_port: protocol_port
   - provisioning_status: provisioning_status
   - sni_container_refs: sni_container_refs
   - tags: tags
   - timeout_client_data: timeout_client_data
   - timeout_member_connect: timeout_member_connect
   - timeout_member_data: timeout_member_data
   - timeout_tcp_inspect: timeout_tcp_inspect
   - tls_ciphers: tls_ciphers
   - updated_at: updated_at

Response Example
----------------

.. literalinclude:: examples/listener-show-response.json
   :language: javascript

Update a Listener
=================

.. rest_method:: PUT /v2/lbaas/listeners/{listener_id}

Update an existing listener.

If the request is valid, the service returns the ``Accepted (202)`` response
code.
To confirm the update, check that the listener provisioning status is
``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET operation to poll
the listener object for changes.

This operation returns the updated listener object with the ``ACTIVE``,
``PENDING_UPDATE``, or ``ERROR`` provisioning status.

.. rest_status_code:: success ../http-status.yaml

   - 202

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-default-optional
   - allowed_cidrs: allowed_cidrs-optional
   - client_authentication: client_authentication-optional
   - client_ca_tls_container_ref: client_ca_tls_container_ref-optional
   - client_crl_container_ref: client_crl_container_ref-optional
   - connection_limit: connection_limit-optional
   - default_pool_id: default_pool_id-optional
   - default_tls_container_ref: default_tls_container_ref-optional
   - description: description-optional
   - insert_headers: insert_headers-optional
   - listener_id: path-listener-id
   - name: name-optional
   - sni_container_refs: sni_container_refs-optional
   - tags: tags-optional
   - timeout_client_data: timeout_client_data-optional
   - timeout_member_connect: timeout_member_connect-optional
   - timeout_member_data: timeout_member_data-optional
   - timeout_tcp_inspect: timeout_tcp_inspect-optional
   - tls_ciphers: tls_ciphers-optional

Request Example
---------------

.. literalinclude:: examples/listener-update-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/listener-update-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - allowed_cidrs: allowed_cidrs
   - client_authentication: client_authentication
   - client_ca_tls_container_ref: client_ca_tls_container_ref
   - client_crl_container_ref: client_crl_container_ref
   - connection_limit: connection_limit
   - created_at: created_at
   - default_pool_id: default_pool_id
   - default_tls_container_ref: default_tls_container_ref
   - description: description
   - id: listener-id
   - insert_headers: insert_headers
   - l7policies: l7policy-ids
   - listener: listener
   - loadbalancers: loadbalancer-ids
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol: protocol
   - protocol_port: protocol_port
   - provisioning_status: provisioning_status
   - sni_container_refs: sni_container_refs
   - tags: tags
   - timeout_client_data: timeout_client_data
   - timeout_member_connect: timeout_member_connect
   - timeout_member_data: timeout_member_data
   - timeout_tcp_inspect: timeout_tcp_inspect
   - tls_ciphers: tls_ciphers
   - updated_at: updated_at

Response Example
----------------

.. literalinclude:: examples/listener-update-response.json
   :language: javascript

Remove a Listener
=================

.. rest_method:: DELETE /v2/lbaas/listeners/{listener_id}

Removes a listener and its associated configuration from the project.

The API immediately purges any and all configuration data, depending on the
configuration settings. You cannot recover it.

.. rest_status_code:: success ../http-status.yaml

   - 204

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - listener_id: path-listener-id

Curl Example
------------

.. literalinclude:: examples/listener-delete-curl
   :language: bash

Response
--------

There is no body content for the response of a successful DELETE request.

Get Listener statistics
=======================

.. rest_method:: GET /v2/lbaas/listeners/{listener_id}/stats

Shows the current statistics for a listener.

This operation returns the statistics of a listener object identified by
listener_id.

If you are not an administrative user and the parent load balancer does not
belong to your project, the service returns the HTTP ``Forbidden (403)``
response code.

This operation does not require a request body.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 401
   - 403
   - 404
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - listener_id: path-listener-id

Curl Example
------------

.. literalinclude:: examples/listener-stats-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - stats: stats
   - active_connections: active_connections
   - bytes_in: bytes_in
   - bytes_out: bytes_out
   - request_errors: request_errors
   - total_connections: total_connections

Response Example
----------------

.. literalinclude:: examples/listener-stats-response.json
   :language: javascript

octavia-6.2.2/api-ref/source/v2/loadbalancer.inc

.. -*- rst -*-

List Load Balancers
===================

.. rest_method:: GET /v2/lbaas/loadbalancers

Lists all load balancers for the project.

Use the ``fields`` query parameter to control which fields are returned in
the response body. Additionally, you can filter results by using query
string parameters. For information, see :ref:`filtering`.

Administrative users can specify a project ID that is different than their
own to list load balancers for other projects.

The list might be empty.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - project_id: project_id_query

Curl Example
------------

.. literalinclude:: examples/loadbalancers-list-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - availability_zone: availability-zone-name
   - created_at: created_at
   - description: description
   - flavor_id: flavor-id
   - id: loadbalancer-id
   - listeners: listeners
   - loadbalancers: loadbalancers
   - name: name
   - operating_status: operating_status
   - pools: pools_ids
   - project_id: project_id
   - provider: provider
   - provisioning_status: provisioning_status
   - tags: tags
   - updated_at: updated_at
   - vip_address: vip_address
   - vip_network_id: vip_network_id
   - vip_port_id: vip_port_id
   - vip_qos_policy_id: vip_qos_policy_id
   - vip_subnet_id: vip_subnet_id

Response Example
----------------

.. literalinclude:: examples/loadbalancers-list-response.json
   :language: javascript

Create a Load Balancer
======================

.. rest_method:: POST /v2/lbaas/loadbalancers

Creates a load balancer.

This operation provisions a new load balancer by using the configuration
that you define in the request object. After the API validates the request
and starts the provisioning process, the API returns a response object that
contains a unique ID and the status of provisioning the load balancer.

In the response, the load balancer :ref:`provisioning status <prov_status>`
is ``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``.

If the status is ``PENDING_CREATE``, issue GET
``/v2/lbaas/loadbalancers/{loadbalancer_id}`` to view the progress of the
provisioning operation. When the load balancer status changes to ``ACTIVE``,
the load balancer is successfully provisioned and is ready for further
configuration.

If the API cannot fulfill the request due to insufficient data or data that
is not valid, the service returns the HTTP ``Bad Request (400)`` response
code with information about the failure in the response body. Validation
errors require that you correct the error and submit the request again.

Administrative users can specify a project ID that is different than their
own to create load balancers for other projects.

An optional ``flavor_id`` attribute can be used to create the load balancer
using a pre-configured octavia flavor. Flavors are created by the operator
to allow custom load balancer configurations, such as allocating more memory
for the load balancer.

An optional ``vip_qos_policy_id`` attribute from Neutron can be used to
apply QoS policies on a load balancer VIP; you can also pass a ``null``
value to remove the QoS policy.

You can also specify the ``provider`` attribute when you create a load
balancer. The ``provider`` attribute specifies which backend should be used
to create the load balancer. This could be the default provider
(``octavia``) or a vendor supplied ``provider`` if one has been installed.

Specifying a Virtual IP (VIP) is mandatory. There are three ways to specify
a VIP network for the load balancer:

1. Provide a ``vip_port_id``. Providing a neutron port ID for the
   ``vip_port_id`` tells octavia to use this port for the VIP. Some port
   settings may be changed or removed as required by octavia, but the IP
   address will be retained. If the port has more than one subnet you must
   specify either the ``vip_subnet_id`` or ``vip_address`` to clarify which
   address should be used for the VIP.

2. Provide a ``vip_network_id``. If neither a ``vip_subnet_id`` nor a
   ``vip_address`` is specified, octavia will select a subnet from this
   network, preferring IPv4 over IPv6 subnets.

3. Provide a ``vip_subnet_id``. Specifying a neutron subnet ID will tell
   octavia to create a neutron port on this subnet and allocate an IP
   address from the subnet if the ``vip_address`` was not specified. If
   ``vip_address`` was specified, octavia will attempt to allocate the
   ``vip_address`` from the subnet for the VIP address.

.. rest_status_code:: success ../http-status.yaml

   - 201

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 500
   - 503

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-default-optional
   - availability_zone: availability-zone-name-optional
   - description: description-optional
   - flavor_id: flavor-id-optional
   - listeners: listeners-optional
   - loadbalancer: loadbalancer
   - name: name-optional
   - project_id: project_id-optional
   - provider: provider-optional
   - tags: tags-optional
   - vip_address: vip_address-optional
   - vip_network_id: vip_network_id-optional
   - vip_port_id: vip_port_id-optional
   - vip_qos_policy_id: vip_qos_policy_id-optional
   - vip_subnet_id: vip_subnet_id-optional

Request Example
---------------

.. literalinclude:: examples/loadbalancer-create-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/loadbalancer-create-curl
   :language: bash

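To illustrate the three VIP options described above, minimal create request
sketches might look like the following. All UUIDs, the token, and the
endpoint are placeholders:

.. code-block:: bash

   OCTAVIA=http://198.51.100.10:9876   # placeholder endpoint

   # Option 1: VIP from an existing neutron port (IP address is retained).
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"loadbalancer": {"name": "lb-from-port",
          "vip_port_id": "f7c4e093-45a7-4df6-b312-7c4f7e4d8e21"}}' \
     $OCTAVIA/v2/lbaas/loadbalancers

   # Option 2: VIP network; octavia picks a subnet, preferring IPv4.
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"loadbalancer": {"name": "lb-from-network",
          "vip_network_id": "94a0d4b7-8b1e-4d16-b3b4-94d9fdd33bf1"}}' \
     $OCTAVIA/v2/lbaas/loadbalancers

   # Option 3: VIP subnet, optionally with a fixed VIP address.
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"loadbalancer": {"name": "lb-from-subnet",
          "vip_subnet_id": "48e0c792-4c9a-4a7e-8b8c-9e7c4f7e4d8e",
          "vip_address": "192.0.2.10"}}' \
     $OCTAVIA/v2/lbaas/loadbalancers
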
Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - availability_zone: availability-zone-name
   - created_at: created_at
   - description: description
   - flavor_id: flavor-id
   - id: loadbalancer-id
   - listeners: listeners
   - loadbalancer: loadbalancer
   - name: name
   - operating_status: operating_status
   - pools: pools_ids
   - project_id: project_id
   - provider: provider
   - provisioning_status: provisioning_status
   - tags: tags
   - updated_at: updated_at
   - vip_address: vip_address
   - vip_network_id: vip_network_id
   - vip_port_id: vip_port_id
   - vip_qos_policy_id: vip_qos_policy_id
   - vip_subnet_id: vip_subnet_id

Response Example
----------------

.. literalinclude:: examples/loadbalancer-create-response.json
   :language: javascript

Creating a Fully Populated Load Balancer
----------------------------------------

You can configure all documented features of the load balancer at creation
time by specifying the additional elements or attributes in the request.

Note: all pools must have names, and each pool must only be fully defined
once. To reference a pool from multiple objects, supply the pool name only
for all subsequent references.

Request Example
---------------

.. literalinclude:: examples/loadbalancer-full-create-request.json
   :language: javascript

Response Example
----------------

.. literalinclude:: examples/loadbalancer-full-create-response.json
   :language: javascript

Show Load Balancer details
==========================

.. rest_method:: GET /v2/lbaas/loadbalancers/{loadbalancer_id}

Shows the details of a load balancer.

If you are not an administrative user and the load balancer object does not
belong to your project, the service returns the HTTP ``Forbidden (403)``
response code.

This operation does not require a request body.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 401
   - 403
   - 404
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - loadbalancer_id: path-loadbalancer-id

Curl Example
------------

.. literalinclude:: examples/loadbalancer-show-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - availability_zone: availability-zone-name
   - created_at: created_at
   - description: description
   - flavor_id: flavor-id
   - id: loadbalancer-id
   - loadbalancer: loadbalancer
   - listeners: listeners
   - name: name
   - operating_status: operating_status
   - pools: pools_ids
   - project_id: project_id
   - provider: provider
   - provisioning_status: provisioning_status
   - tags: tags
   - updated_at: updated_at
   - vip_address: vip_address
   - vip_network_id: vip_network_id
   - vip_port_id: vip_port_id
   - vip_qos_policy_id: vip_qos_policy_id
   - vip_subnet_id: vip_subnet_id

Response Example
----------------

.. literalinclude:: examples/loadbalancer-show-response.json
   :language: javascript

Update a Load Balancer
======================

.. rest_method:: PUT /v2/lbaas/loadbalancers/{loadbalancer_id}

Updates a load balancer.

If the request is valid, the service returns the ``Accepted (202)`` response
code. To confirm the update, check that the load balancer provisioning
status is ``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET
operation to poll the load balancer object for changes.

This operation returns the updated load balancer object with the ``ACTIVE``,
``PENDING_UPDATE``, or ``ERROR`` provisioning status.

.. rest_status_code:: success ../http-status.yaml

   - 202

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-optional
   - description: description-optional
   - loadbalancer: loadbalancer
   - loadbalancer_id: path-loadbalancer-id
   - name: name-optional
   - tags: tags-optional
   - vip_qos_policy_id: vip_qos_policy_id-optional

Request Example
---------------

.. literalinclude:: examples/loadbalancer-update-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/loadbalancer-update-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - created_at: created_at
   - description: description
   - flavor_id: flavor-id
   - id: loadbalancer-id
   - listeners: listeners
   - loadbalancer: loadbalancer
   - name: name
   - operating_status: operating_status
   - pools: pools_ids
   - project_id: project_id
   - provider: provider
   - provisioning_status: provisioning_status
   - tags: tags
   - updated_at: updated_at
   - vip_address: vip_address
   - vip_network_id: vip_network_id
   - vip_port_id: vip_port_id
   - vip_qos_policy_id: vip_qos_policy_id
   - vip_subnet_id: vip_subnet_id

Response Example
----------------

.. literalinclude:: examples/loadbalancer-update-response.json
   :language: javascript

Remove a Load Balancer
======================

.. rest_method:: DELETE /v2/lbaas/loadbalancers/{loadbalancer_id}

Removes a load balancer and its associated configuration from the project.

The optional parameter ``cascade`` when defined as ``true`` will delete all
child objects of the load balancer.

The API immediately purges any and all configuration data, depending on the
configuration settings. You cannot recover it.

.. rest_status_code:: success ../http-status.yaml

   - 204

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - cascade: cascade-delete
   - loadbalancer_id: path-loadbalancer-id

Curl Example
------------

.. literalinclude:: examples/loadbalancer-delete-curl
   :language: bash

Response
--------

There is no body content for the response of a successful DELETE request.

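A sketch of the cascade delete described above, assuming ``cascade`` is
passed as a query parameter (placeholder token, endpoint, and load balancer
UUID):

.. code-block:: bash

   # Delete the load balancer and all of its child objects.
   curl -X DELETE -H "X-Auth-Token: $TOKEN" \
     "http://198.51.100.10:9876/v2/lbaas/loadbalancers/8a562351-f0fb-4a29-97f9-ccc1415d4d45?cascade=true"
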
Get Load Balancer statistics
============================

.. rest_method:: GET /v2/lbaas/loadbalancers/{loadbalancer_id}/stats

Shows the current statistics for a load balancer.

This operation returns the statistics of a load balancer object identified
by loadbalancer_id.

If you are not an administrative user and the load balancer object does not
belong to your project, the service returns the HTTP ``Forbidden (403)``
response code.

This operation does not require a request body.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 401
   - 403
   - 404
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - loadbalancer_id: path-loadbalancer-id

Curl Example
------------

.. literalinclude:: examples/loadbalancer-stats-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - stats: stats
   - active_connections: active_connections
   - bytes_in: bytes_in
   - bytes_out: bytes_out
   - request_errors: request_errors
   - total_connections: total_connections

Response Example
----------------

.. literalinclude:: examples/loadbalancer-stats-response.json
   :language: javascript

Get the Load Balancer status tree
=================================

.. rest_method:: GET /v2/lbaas/loadbalancers/{loadbalancer_id}/status

Shows the status tree for a load balancer.

This operation returns a status tree for a load balancer object, by load
balancer ID.

``provisioning_status`` is the status associated with the lifecycle of the
resource. See :ref:`prov_status` for descriptions of the status codes.

``operating_status`` is the observed status of the resource. See
:ref:`op_status` for descriptions of the status codes.

If you are not an administrative user and the load balancer object does not
belong to your project, the service returns the HTTP ``Forbidden (403)``
response code.

If the operation succeeds, the returned element is a status tree that
contains the load balancer and all provisioning and operating statuses for
its children.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 401
   - 403
   - 404
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - loadbalancer_id: path-loadbalancer-id

Curl Example
------------

.. literalinclude:: examples/loadbalancer-status-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - action: action
   - address: address
   - healthmonitor: healthmonitor-status
   - id: id
   - l7policies: l7policies-status-object-list
   - l7rules: l7rules-status-object-list
   - listeners: listeners-status-object-list
   - loadbalancer: loadbalancer-status
   - members: members-status-object-list
   - name: name
   - operating_status: operating_status
   - pools: pools-status-list
   - protocol_port: protocol_port
   - provisioning_status: provisioning_status
   - statuses: statuses
   - type: type

Response Example
----------------

.. literalinclude:: examples/loadbalancer-status-response.json
   :language: javascript

Failover a load balancer
========================

.. rest_method:: PUT /v2/lbaas/loadbalancers/{loadbalancer_id}/failover

Performs a failover of a load balancer.

This operation is only available to users with load balancer administrative
rights.

.. rest_status_code:: success ../http-status.yaml

   - 202

.. rest_status_code:: error ../http-status.yaml

   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - loadbalancer_id: path-loadbalancer-id

Curl Example
------------

.. literalinclude:: examples/loadbalancer-failover-curl
   :language: bash

Response
--------

There is no body content for the response of a successful failover request.

octavia-6.2.2/api-ref/source/v2/member.inc

.. -*- rst -*-

List Members
============

.. rest_method:: GET /v2/lbaas/pools/{pool_id}/members

Lists all members for the project.

Use the ``fields`` query parameter to control which fields are returned in
the response body. Additionally, you can filter results by using query
string parameters. For information, see :ref:`filtering`.

Administrative users can specify a project ID that is different than their
own to list members for other projects.

The list might be empty.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - pool_id: path-pool-id
   - project_id: project_id_query

Curl Example
------------

.. literalinclude:: examples/members-list-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - address: address-member
   - admin_state_up: admin_state_up
   - backup: backup
   - created_at: created_at
   - id: member-id
   - monitor_address: monitor_address
   - monitor_port: monitor_port
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol_port: protocol_port-member
   - provisioning_status: provisioning_status
   - subnet_id: subnet_id
   - tags: tags
   - updated_at: updated_at
   - weight: weight

Response Example
----------------

.. literalinclude:: examples/members-list-response.json
   :language: javascript

Create Member
=============

.. rest_method:: POST /v2/lbaas/pools/{pool_id}/members

This operation provisions a member and adds it to a pool by using the
configuration that you define in the request object. After the API validates
the request and starts the provisioning process, it returns a response
object, which contains a unique ID.

In the response, the member :ref:`provisioning status <prov_status>` is
``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``.

If the status is ``PENDING_CREATE``, issue GET
``/v2/lbaas/pools/{pool_id}/members/{member_id}`` to view the progress of
the provisioning operation. When the member status changes to ``ACTIVE``,
the member is successfully provisioned and is ready for further
configuration.

If the API cannot fulfill the request due to insufficient data or data that
is not valid, the service returns the HTTP ``Bad Request (400)`` response
code with information about the failure in the response body. Validation
errors require that you correct the error and submit the request again.

At a minimum, you must specify these member attributes:

- ``address``. The IP address of the backend member to receive traffic from
  the load balancer.

- ``protocol_port``. The port on which the backend member listens for
  traffic.

Some attributes receive default values if you omit them from the request:

- ``admin_state_up``. Default is ``true``.

- ``backup``. Default is ``false``.

- ``weight``. Default is ``1``.

If you omit the ``subnet_id`` parameter, the ``vip_subnet_id`` for the
parent load balancer will be used for the member subnet UUID.

The member ``address`` does not necessarily need to belong to the
``subnet_id`` subnet. Members only need to be routable from that subnet,
either via its default route or via ``host_routes`` defined on the subnet.

Administrative users can specify a project ID that is different than their
own to create members for other projects.

``monitor_address`` and/or ``monitor_port`` can be used to have the health
monitor, if one is configured for the pool, connect to an alternate IP
address and port when executing a health check on the member.

To create a member, the load balancer must have an ``ACTIVE`` provisioning
status.

.. rest_status_code:: success ../http-status.yaml

   - 201

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500
   - 503

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-default-optional
   - address: address
   - backup: backup-optional
   - monitor_address: monitor_address-optional
   - monitor_port: monitor_port-optional
   - name: name-optional
   - pool_id: path-pool-id
   - project_id: project_id-optional-deprecated
   - protocol_port: protocol_port
   - subnet_id: subnet_id-optional
   - tags: tags-optional
   - weight: weight-optional

Request Example
---------------

.. literalinclude:: examples/member-create-request.json
   :language: javascript

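Building on the request example above, a minimal sketch of a member whose
health checks target an alternate address and port via ``monitor_address``
and ``monitor_port`` (placeholder token, endpoint, and pool UUID):

.. code-block:: bash

   # Health checks probe 192.0.2.40:8080 while traffic goes to 192.0.2.16:80.
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"member": {"address": "192.0.2.16", "protocol_port": 80,
          "monitor_address": "192.0.2.40", "monitor_port": 8080}}' \
     http://198.51.100.10:9876/v2/lbaas/pools/ddb2b28f-89e9-45d3-a329-a359c3e39e4a/members
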
Curl Example
------------

.. literalinclude:: examples/member-create-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - address: address-member
   - admin_state_up: admin_state_up
   - backup: backup
   - created_at: created_at
   - id: member-id
   - monitor_address: monitor_address
   - monitor_port: monitor_port
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol_port: protocol_port-member
   - provisioning_status: provisioning_status
   - subnet_id: subnet_id
   - tags: tags
   - updated_at: updated_at
   - weight: weight

Response Example
----------------

.. literalinclude:: examples/member-create-response.json
   :language: javascript

Show Member details
===================

.. rest_method:: GET /v2/lbaas/pools/{pool_id}/members/{member_id}

Shows the details of a pool member.

If you are not an administrative user and the parent load balancer does not
belong to your project, the service returns the HTTP ``Forbidden (403)``
response code.

This operation does not require a request body.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 401
   - 403
   - 404
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - member_id: path-member-id
   - pool_id: path-pool-id

Curl Example
------------

.. literalinclude:: examples/member-show-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - address: address-member
   - admin_state_up: admin_state_up
   - backup: backup
   - created_at: created_at
   - id: member-id
   - monitor_address: monitor_address
   - monitor_port: monitor_port
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol_port: protocol_port-member
   - provisioning_status: provisioning_status
   - subnet_id: subnet_id
   - tags: tags
   - updated_at: updated_at
   - weight: weight

Response Example
----------------

.. literalinclude:: examples/member-show-response.json
   :language: javascript

Update a Member
===============

.. rest_method:: PUT /v2/lbaas/pools/{pool_id}/members/{member_id}

Update an existing member.

If the request is valid, the service returns the ``Accepted (202)`` response
code. To confirm the update, check that the member provisioning status is
``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET operation to poll
the member object for changes.

Setting the member weight to ``0`` means that the member will not receive
new requests but will finish any existing connections. This "drains" the
backend member of active connections.

This operation returns the updated member object with the ``ACTIVE``,
``PENDING_UPDATE``, or ``ERROR`` provisioning status.

.. rest_status_code:: success ../http-status.yaml

   - 202

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-default-optional
   - backup: backup-optional
   - member_id: path-member-id
   - monitor_address: monitor_address-optional
   - monitor_port: monitor_port-optional
   - name: name-optional
   - pool_id: path-pool-id
   - tags: tags-optional
   - weight: weight-optional

Request Example
---------------

.. literalinclude:: examples/member-update-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/member-update-curl
   :language: bash

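For example, a sketch of draining a member as described above by setting its
weight to ``0`` (placeholder token, endpoint, pool and member UUIDs):

.. code-block:: bash

   # Stop sending new requests to the member; existing connections finish.
   curl -X PUT -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"member": {"weight": 0}}' \
     http://198.51.100.10:9876/v2/lbaas/pools/ddb2b28f-89e9-45d3-a329-a359c3e39e4a/members/957a1ace-1bd2-449b-8455-820b6e4b63f3
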
Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - address: address-member
   - admin_state_up: admin_state_up
   - backup: backup
   - created_at: created_at
   - id: member-id
   - monitor_address: monitor_address
   - monitor_port: monitor_port
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol_port: protocol_port-member
   - provisioning_status: provisioning_status
   - subnet_id: subnet_id
   - tags: tags
   - updated_at: updated_at
   - weight: weight

Response Example
----------------

.. literalinclude:: examples/member-update-response.json
   :language: javascript

Batch Update Members
====================

.. rest_method:: PUT /v2/lbaas/pools/{pool_id}/members

Set the state of members for a pool in one API call. This may include
creating new members, deleting old members, and updating existing members.
Existing members are matched based on address/port combination.

For example, assume a pool currently has two members. These members have the
following address/port combinations: '192.0.2.15:80' and '192.0.2.16:80'.
Now assume a PUT request is made that includes members with address/port
combinations: '192.0.2.16:80' and '192.0.2.17:80'.

The member '192.0.2.15:80' will be deleted, because it was not in the
request.

The member '192.0.2.16:80' will be updated to match the request data for
that member, because it was matched.

The member '192.0.2.17:80' will be created, because no such member existed.

The optional parameter ``additive_only`` when defined as ``true`` will skip
deletions for members missing from the provided list. If this were set in
the above example, the member '192.0.2.15:80' would have remained in the
pool.

If the request is valid, the service returns the ``Accepted (202)`` response
code. To confirm the updates, check that the member provisioning statuses
are ``ACTIVE`` for new or updated members, and that any unspecified members
were correctly deleted. If the statuses are ``PENDING_UPDATE`` or
``PENDING_DELETE``, use GET to poll the member objects for changes.

.. rest_status_code:: success ../http-status.yaml

   - 202

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500
   - 503

Request
-------

.. rest_parameters:: ../parameters.yaml

   - additive_only: additive-only
   - admin_state_up: admin_state_up-default-optional
   - address: address
   - backup: backup-optional
   - monitor_address: monitor_address-optional
   - monitor_port: monitor_port-optional
   - name: name-optional
   - pool_id: path-pool-id
   - project_id: project_id-optional-deprecated
   - protocol_port: protocol_port
   - subnet_id: subnet_id-optional
   - tags: tags-optional
   - weight: weight-optional

Request Example
---------------

.. literalinclude:: examples/member-batch-update-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/member-batch-update-curl
   :language: bash

Response
--------

There is no body content for the response of a successful PUT request.

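As a sketch of the ``additive_only`` behavior described above, assuming it
is passed as a query parameter and that the request body wraps the member
list in a ``members`` key as in the request example (placeholder token,
endpoint, and pool UUID):

.. code-block:: bash

   # Create/update the listed members, but skip deleting unlisted ones.
   curl -X PUT -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"members": [{"address": "192.0.2.16", "protocol_port": 80},
                      {"address": "192.0.2.17", "protocol_port": 80}]}' \
     "http://198.51.100.10:9876/v2/lbaas/pools/ddb2b28f-89e9-45d3-a329-a359c3e39e4a/members?additive_only=true"
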
Remove a Member
===============

.. rest_method:: DELETE /v2/lbaas/pools/{pool_id}/members/{member_id}

Removes a member and its associated configuration from the pool.

The API immediately purges any and all configuration data, depending on the
configuration settings. You cannot recover it.

.. rest_status_code:: success ../http-status.yaml

   - 204

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - member_id: path-member-id
   - pool_id: path-pool-id

Curl Example
------------

.. literalinclude:: examples/member-delete-curl
   :language: bash

Response
--------

There is no body content for the response of a successful DELETE request.

octavia-6.2.2/api-ref/source/v2/pool.inc

.. -*- rst -*-

List Pools
==========

.. rest_method:: GET /v2/lbaas/pools

Lists all pools for the project.

Use the ``fields`` query parameter to control which fields are returned in
the response body. Additionally, you can filter results by using query
string parameters. For information, see :ref:`filtering`.

Administrative users can specify a project ID that is different than their
own to list pools for other projects.

The list might be empty.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - project_id: project_id_query

Curl Example
------------

.. literalinclude:: examples/pools-list-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up
   - ca_tls_container_ref: ca_tls_container_ref
   - created_at: created_at
   - crl_container_ref: crl_container_ref
   - description: description
   - healthmonitor_id: healthmonitor-id
   - id: pool-id
   - lb_algorithm: lb-algorithm
   - listeners: listener-ids
   - loadbalancers: loadbalancer-ids
   - members: member-ids
   - name: name
   - operating_status: operating_status
   - project_id: project_id
   - protocol: protocol-pools
   - provisioning_status: provisioning_status
   - session_persistence: session_persistence
   - tags: tags
   - tls_ciphers: tls_ciphers
   - tls_container_ref: tls_container_ref
   - tls_enabled: tls_enabled
   - updated_at: updated_at

Response Example
----------------

.. literalinclude:: examples/pools-list-response.json
   :language: javascript

Create Pool
===========

.. rest_method:: POST /v2/lbaas/pools

Creates a pool for a load balancer.

The pool defines how requests should be balanced across the backend member
servers.

This operation provisions a pool by using the configuration that you define
in the request object. After the API validates the request and starts the
provisioning process, the API returns a response object, which contains a
unique ID.

In the response, the pool :ref:`provisioning status <prov_status>` is
``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``.

If the status is ``PENDING_CREATE``, issue GET ``/v2/lbaas/pools/{pool_id}``
to view the progress of the provisioning operation. When the pool status
changes to ``ACTIVE``, the pool is successfully provisioned and is ready for
further configuration.

At a minimum, you must specify these pool attributes:

- ``protocol`` The protocol for which this pool and its members listen. A
  valid value is ``HTTP``, ``HTTPS``, ``PROXY``, ``TCP``, or ``UDP``.

- ``lb_algorithm`` The load-balancer algorithm, such as ``ROUND_ROBIN``,
  ``LEAST_CONNECTIONS``, ``SOURCE_IP`` and ``SOURCE_IP_PORT``, that
  distributes traffic to the pool members. The load-balancer provider must
  support this algorithm.

- ``listener_id`` The ID of the listener in which this pool becomes the
  default pool. Each listener has only one default pool.

---OR---

- ``loadbalancer_id`` The ID of the load balancer under which this pool will
  be created. Each load balancer can have zero or more pools associated with
  it. These pools can be used for L7 policies.

.. note::
   Either ``listener_id`` or ``loadbalancer_id`` must be specified.

Some attributes receive default values if you omit them from the request:

- ``admin_state_up`` Default is ``true``.

- ``name`` Default is an empty string.

- ``description`` Default is an empty string.

If the API cannot fulfill the request due to insufficient data or data that
is not valid, the service returns the HTTP ``Bad Request (400)`` response
code with information about the failure in the response body. Validation
errors require that you correct the error and submit the request again.

Specifying a project_id is deprecated. The pool will inherit the project_id
of the parent load balancer.

You can configure all documented features of the pool at creation time by
specifying the additional elements or attributes in the request.

To create a pool, the parent load balancer must have an ``ACTIVE``
provisioning status.

``SOURCE_IP_PORT`` algorithm is available from version 2.13.

.. rest_status_code:: success ../http-status.yaml

   - 201

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500
   - 503

Request
-------

.. rest_parameters:: ../parameters.yaml

   - admin_state_up: admin_state_up-default-optional
   - ca_tls_container_ref: ca_tls_container_ref-optional
   - crl_container_ref: crl_container_ref-optional
   - description: description-optional
   - lb_algorithm: lb-algorithm
   - listener_id: listener-id-pool-optional
   - loadbalancer_id: loadbalancer-id-pool-optional
   - name: name-optional
   - project_id: project_id-optional-deprecated
   - protocol: protocol-pools
   - session_persistence: session_persistence-optional
   - tags: tags-optional
   - tls_enabled: tls_enabled-optional
   - tls_ciphers: tls_ciphers-optional
   - tls_container_ref: tls_container_ref-optional

.. _session_persistence:

Pool Session Persistence
------------------------

Pool session persistence tells the load balancer to attempt to send future
requests from a client to the same backend member as the initial request.

When the pool has no session persistence, the session persistence object is
``null``.

Octavia currently supports three session persistence methods:

+-----------------+----------------------------------------------------------+
| Method          | Description                                              |
+=================+==========================================================+
| ``APP_COOKIE``  | Use the specified ``cookie_name`` to send future         |
|                 | requests to the same member.                             |
+-----------------+----------------------------------------------------------+
| ``HTTP_COOKIE`` | The load balancer will generate a cookie that is         |
|                 | inserted into the response. This cookie will be used     |
|                 | to send future requests to the same member.              |
+-----------------+----------------------------------------------------------+
| ``SOURCE_IP``   | The source IP address on the request will be hashed      |
|                 | to send future requests to the same member.              |
+-----------------+----------------------------------------------------------+

Pool Session Persistence Object
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. rest_parameters:: ../parameters.yaml

   - type: session_persistence_type
   - cookie_name: session_persistence_cookie
   - persistence_timeout: session_persistence_timeout
   - persistence_granularity: session_persistence_granularity

Pool Session Persistence Object Example
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. literalinclude:: examples/pool-session-persistence-obj.json
   :language: javascript

Request Example
---------------

.. literalinclude:: examples/pool-create-request.json
   :language: javascript

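Complementing the request example above, minimal sketches of the two
attachment forms described earlier (placeholder token, endpoint, and UUIDs):

.. code-block:: bash

   # As the default pool of an existing listener.
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"pool": {"protocol": "HTTP", "lb_algorithm": "ROUND_ROBIN",
          "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d"}}' \
     http://198.51.100.10:9876/v2/lbaas/pools

   # Or attached directly to a load balancer, e.g. for use by L7 policies.
   curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
     -d '{"pool": {"protocol": "HTTP", "lb_algorithm": "ROUND_ROBIN",
          "loadbalancer_id": "607226db-27ef-4d41-ae89-f2a800e9c2db"}}' \
     http://198.51.100.10:9876/v2/lbaas/pools
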
literalinclude:: examples/pool-create-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up - ca_tls_container_ref: ca_tls_container_ref - created_at: created_at - crl_container_ref: crl_container_ref - description: description - healthmonitor_id: healthmonitor-id - id: pool-id - lb_algorithm: lb-algorithm - listeners: listener-ids - loadbalancers: loadbalancer-ids - members: member-ids - name: name - operating_status: operating_status - project_id: project_id - protocol: protocol-pools - provisioning_status: provisioning_status - session_persistence: session_persistence - tags: tags - tls_enabled: tls_enabled - tls_ciphers: tls_ciphers - tls_container_ref: tls_container_ref - updated_at: updated_at Response Example ---------------- .. literalinclude:: examples/pool-create-response.json :language: javascript Show Pool details ================= .. rest_method:: GET /v2/lbaas/pools/{pool_id} Shows the details of a pool. If you are not an administrative user and the parent load balancer does not belong to your project, the service returns the HTTP ``Forbidden (403)`` response code. This operation does not require a request body. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 401 - 403 - 404 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - pool_id: path-pool-id Curl Example ------------ .. literalinclude:: examples/pool-show-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up - ca_tls_container_ref: ca_tls_container_ref - created_at: created_at - crl_container_ref: crl_container_ref - description: description - healthmonitor_id: healthmonitor-id - id: pool-id - lb_algorithm: lb-algorithm - listeners: listener-ids - loadbalancers: loadbalancer-ids - members: member-ids - name: name - operating_status: operating_status - project_id: project_id - protocol: protocol-pools - provisioning_status: provisioning_status - session_persistence: session_persistence - tags: tags - tls_enabled: tls_enabled - tls_ciphers: tls_ciphers - tls_container_ref: tls_container_ref - updated_at: updated_at Response Example ---------------- .. literalinclude:: examples/pool-show-response.json :language: javascript Update a Pool ============= .. rest_method:: PUT /v2/lbaas/pools/{pool_id} Update an existing pool. If the request is valid, the service returns the ``Accepted (202)`` response code. To confirm the update, check that the pool provisioning status is ``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET operation to poll the pool object for changes. This operation returns the updated pool object with the ``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. .. rest_status_code:: success ../http-status.yaml - 202 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up-default-optional - ca_tls_container_ref: ca_tls_container_ref-optional - crl_container_ref: crl_container_ref-optional - description: description-optional - lb_algorithm: lb-algorithm-optional - name: name-optional - pool_id: path-pool-id - session_persistence: session_persistence-optional - tags: tags-optional - tls_enabled: tls_enabled-optional - tls_ciphers: tls_ciphers-optional - tls_container_ref: tls_container_ref-optional Request Example --------------- .. 
literalinclude:: examples/pool-update-request.json :language: javascript Curl Example ------------ .. literalinclude:: examples/pool-update-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - admin_state_up: admin_state_up - ca_tls_container_ref: ca_tls_container_ref - created_at: created_at - crl_container_ref: crl_container_ref - description: description - healthmonitor_id: healthmonitor-id - id: pool-id - lb_algorithm: lb-algorithm - listeners: listener-ids - loadbalancers: loadbalancer-ids - members: member-ids - name: name - operating_status: operating_status - project_id: project_id - protocol: protocol-pools - provisioning_status: provisioning_status - session_persistence: session_persistence - tags: tags - tls_enabled: tls_enabled - tls_ciphers: tls_ciphers - tls_container_ref: tls_container_ref - updated_at: updated_at Response Example ---------------- .. literalinclude:: examples/pool-update-response.json :language: javascript Remove a Pool ============= .. rest_method:: DELETE /v2/lbaas/pools/{pool_id} Removes a pool and its associated configuration from the load balancer. The API immediately purges any and all configuration data, depending on the configuration settings. You cannot recover it. .. rest_status_code:: success ../http-status.yaml - 204 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - pool_id: path-pool-id Curl Example ------------ .. literalinclude:: examples/pool-delete-curl :language: bash Response -------- There is no body content for the response of a successful DELETE request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/provider.inc0000664000175000017500000000647500000000000020705 0ustar00zuulzuul00000000000000.. -*- rst -*- List Providers ============== .. rest_method:: GET /v2/lbaas/providers Lists all enabled provider drivers. Use the ``fields`` query parameter to control which fields are returned in the response body. The list might be empty. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields Curl Example ------------ .. literalinclude:: examples/provider-list-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - name: provider-name - description: provider-description Response Example ---------------- .. literalinclude:: examples/provider-list-response.json :language: javascript Show Provider Flavor Capabilities ================================= .. rest_method:: GET /v2/lbaas/providers/{provider}/flavor_capabilities Shows the provider driver flavor capabilities. These are the features of the provider driver that can be configured in an Octavia flavor. This API returns a list of dictionaries with the name and description of each flavor capability of the provider. The list might be empty and a provider driver may not implement this feature. **New in version 2.6** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - provider: path-provider Curl Example ------------ .. 
literalinclude:: examples/provider-flavor-capability-show-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - flavor_capabilities: flavor-capabilities - name: flavor-capability-name - description: flavor-capability-description Response Example ---------------- .. literalinclude:: examples/provider-flavor-capability-show-response.json :language: javascript Show Provider Availability Zone Capabilities ============================================ .. rest_method:: GET /v2/lbaas/providers/{provider}/availability_zone_capabilities Shows the provider driver availability zone capabilities. These are the features of the provider driver that can be configured in an Octavia availability zone. This API returns a list of dictionaries with the name and description of each availability zone capability of the provider. The list might be empty and a provider driver may not implement this feature. **New in version 2.14** .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 403 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - provider: path-provider Curl Example ------------ .. literalinclude:: examples/provider-availability-zone-capability-show-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - availability_zone_capabilities: availability-zone-capabilities - name: availability-zone-capability-name - description: availability-zone-capability-description Response Example ---------------- .. literalinclude:: examples/provider-availability-zone-capability-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/api-ref/source/v2/quota.inc0000664000175000017500000001257200000000000020177 0ustar00zuulzuul00000000000000.. -*- rst -*- List Quota ========== .. rest_method:: GET /v2/lbaas/quotas Lists all quotas for the project. Use the ``fields`` query parameter to control which fields are returned in the response body. Additionally, you can filter results by using query string parameters. For information, see :ref:`filtering`. Administrative users can specify a project ID that is different than their own to list quotas for other projects. If the quota is listed as ``null`` the quota is using the deployment default quota settings. A quota of ``-1`` means the quota is unlimited. The list might be empty. .. rest_status_code:: success ../http-status.yaml - 200 .. rest_status_code:: error ../http-status.yaml - 400 - 401 - 500 Request ------- .. rest_parameters:: ../parameters.yaml - fields: fields - project_id: project_id_query Curl Example ------------ .. literalinclude:: examples/quotas-list-curl :language: bash Response Parameters ------------------- .. rest_parameters:: ../parameters.yaml - healthmonitor: quota-health_monitor - listener: quota-listener - loadbalancer: quota-load_balancer - member: quota-member - pool: quota-pool - project_id: project_id Response Example ---------------- .. literalinclude:: examples/quotas-list-response.json :language: javascript Show Quota Defaults =================== .. rest_method:: GET /v2/lbaas/quotas/defaults Show the quota defaults configured for the deployment. A quota of ``-1`` means the quota is unlimited. .. rest_status_code:: success ../http-status.yaml - 200 .. 
rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 500

Request
-------

There are no request parameters for the show quota defaults API.

Curl Example
------------

.. literalinclude:: examples/quotas-defaults-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - healthmonitor: quota-health_monitor
   - listener: quota-listener
   - loadbalancer: quota-load_balancer
   - member: quota-member
   - pool: quota-pool

Response Example
----------------

.. literalinclude:: examples/quotas-defaults-response.json
   :language: javascript

Show Project Quota
==================

.. rest_method:: GET /v2/lbaas/quotas/{project_id}

Show the quota for the project.

Use the ``fields`` query parameter to control which fields are
returned in the response body. Additionally, you can filter results
by using query string parameters. For information, see
:ref:`filtering`.

Administrative users can specify a project ID that is different than their own
to show quota for other projects.

A quota of ``-1`` means the quota is unlimited.

.. rest_status_code:: success ../http-status.yaml

   - 200

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - fields: fields
   - project_id: path-project-id

Curl Example
------------

.. literalinclude:: examples/quota-show-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - healthmonitor: quota-health_monitor
   - listener: quota-listener
   - loadbalancer: quota-load_balancer
   - member: quota-member
   - pool: quota-pool

Response Example
----------------

.. literalinclude:: examples/quotas-show-response.json
   :language: javascript

Update a Quota
==============

.. rest_method:: PUT /v2/lbaas/quotas/{project_id}

Updates a quota for a project.

If the request is valid, the service returns the ``Accepted (202)`` response
code. This operation returns the updated quota object.

If the quota is specified as ``null`` the quota will use the deployment
default quota settings.

Specifying a quota of ``-1`` means the quota is unlimited. Specifying a quota
of ``0`` means the project cannot create any of that resource.

.. rest_status_code:: success ../http-status.yaml

   - 202

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - healthmonitor: quota-health_monitor-optional
   - listener: quota-listener-optional
   - loadbalancer: quota-load_balancer-optional
   - member: quota-member-optional
   - pool: quota-pool-optional
   - project_id: path-project-id

Request Example
---------------

.. literalinclude:: examples/quota-update-request.json
   :language: javascript

Curl Example
------------

.. literalinclude:: examples/quota-update-curl
   :language: bash

Response Parameters
-------------------

.. rest_parameters:: ../parameters.yaml

   - healthmonitor: quota-health_monitor
   - listener: quota-listener
   - loadbalancer: quota-load_balancer
   - member: quota-member
   - pool: quota-pool

Response Example
----------------

.. literalinclude:: examples/quota-update-response.json
   :language: javascript
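As a hedged illustration of the three quota values discussed above (the
token, endpoint, and project ID are placeholders), a single request could
make load balancers unlimited, cap pools at 10, and return the member quota
to the deployment default:

.. code-block:: bash

   curl -X PUT -H "Content-Type: application/json" \
        -H "X-Auth-Token: <token>" \
        -d '{"quota": {"loadbalancer": -1, "pool": 10, "member": null}}' \
        http://<octavia-api>/v2/lbaas/quotas/<project-uuid>

Reset a Quota
=============

.. rest_method:: DELETE /v2/lbaas/quotas/{project_id}

Resets a project quota to use the deployment default quota.

.. rest_status_code:: success ../http-status.yaml

   - 204

.. rest_status_code:: error ../http-status.yaml

   - 400
   - 401
   - 403
   - 404
   - 409
   - 500

Request
-------

.. rest_parameters:: ../parameters.yaml

   - project_id: path-project-id

Curl Example
------------

..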
literalinclude:: examples/quota-reset-curl :language: bash Response -------- There is no body content for the response of a successful DELETE request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/babel.cfg0000664000175000017500000000002100000000000014711 0ustar00zuulzuul00000000000000[python: **.py] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/bin/0000775000175000017500000000000000000000000013742 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/bin/create_dual_intermediate_CA.sh0000775000175000017500000001673000000000000021655 0ustar00zuulzuul00000000000000#!/bin/bash # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!" echo "Please use the Octavia Certificate Configuration guide:" echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # This script produces weak security PKI to save resources in the test gates. # It should be modified to use stronger encryption (aes256), better pass # phrases, and longer keys (4096). 
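# For instance, a hardened variant of the key-generation calls below would
# look something like the following (illustrative only, matching the advice
# above; the passphrase file name is hypothetical):
#   openssl genrsa -aes256 -out private/ca.key.pem -passout file:ca_passphrase.txt 4096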
# Please see the Octavia Certificate Configuration guide:
# https://docs.openstack.org/octavia/latest/admin/guides/certificates.html

set -x -e

CA_PATH=dual_ca

rm -rf $CA_PATH
mkdir $CA_PATH
chmod 700 $CA_PATH
cd $CA_PATH
mkdir -p etc/octavia/certs
chmod 700 etc/octavia/certs

###### Client Root CA
mkdir client_ca
cd client_ca
mkdir certs crl newcerts private
chmod 700 private
touch index.txt
echo 1000 > serial

# Create the client CA private key
openssl genrsa -aes128 -out private/ca.key.pem -passout pass:not-secure-passphrase 2048
chmod 400 private/ca.key.pem

# Create the client CA root certificate
openssl req -config ../../openssl.cnf -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientRootCA" -passin pass:not-secure-passphrase

###### Client Intermediate CA
mkdir intermediate_ca
mkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private
chmod 700 intermediate_ca/private
touch intermediate_ca/index.txt
echo 1000 > intermediate_ca/serial

# Create the client intermediate CA private key
openssl genrsa -aes128 -out intermediate_ca/private/intermediate.ca.key.pem -passout pass:not-secure-passphrase 2048
chmod 400 intermediate_ca/private/intermediate.ca.key.pem

# Create the client intermediate CA certificate signing request
openssl req -config ../../openssl.cnf -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/client_intermediate.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientIntermediateCA" -passin pass:not-secure-passphrase

# Create the client intermediate CA certificate
openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/client_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch

# Create the client CA certificate chain
cat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem

###### Create the client key and certificate
openssl genrsa -aes128 -out intermediate_ca/private/controller.key.pem -passout pass:not-secure-passphrase 2048
chmod 400 intermediate_ca/private/controller.key.pem

# Create the client controller certificate signing request
openssl req -config ../../openssl.cnf -key intermediate_ca/private/controller.key.pem -new -sha256 -out intermediate_ca/controller.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=OctaviaController" -passin pass:not-secure-passphrase

# Create the client controller certificate
openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions usr_cert -days 1825 -notext -md sha256 -in intermediate_ca/controller.csr -out intermediate_ca/certs/controller.cert.pem -passin pass:not-secure-passphrase -batch

# Build the concatenated client cert and key
openssl rsa -in intermediate_ca/private/controller.key.pem -out intermediate_ca/private/client.cert-and-key.pem -passin pass:not-secure-passphrase
cat intermediate_ca/certs/controller.cert.pem >> intermediate_ca/private/client.cert-and-key.pem

# We are done with the client CA
cd ..
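# At this point the complete client-side PKI exists under client_ca/. As an
# illustrative (non-essential) spot check, you could inspect the freshly
# issued controller certificate with:
#   openssl x509 -in client_ca/intermediate_ca/certs/controller.cert.pem -noout -subject -issuer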
###### Stash the octavia default client CA cert files cp client_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/client_ca.cert.pem chmod 444 etc/octavia/certs/client_ca.cert.pem cp client_ca/intermediate_ca/private/client.cert-and-key.pem etc/octavia/certs/client.cert-and-key.pem chmod 600 etc/octavia/certs/client.cert-and-key.pem ###### Server Root CA mkdir server_ca cd server_ca mkdir certs crl newcerts private chmod 700 private touch index.txt echo 1000 > serial # Create the server CA private key openssl genrsa -aes128 -out private/ca.key.pem -passout pass:not-secure-passphrase 2048 chmod 400 private/ca.key.pem # Create the server CA root certificate openssl req -config ../../openssl.cnf -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ServerRootCA" -passin pass:not-secure-passphrase ###### Server Intermediate CA mkdir intermediate_ca mkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private chmod 700 intermediate_ca/private touch intermediate_ca/index.txt echo 1000 > intermediate_ca/serial # Create the server intermediate CA private key openssl genrsa -aes128 -out intermediate_ca/private/intermediate.ca.key.pem -passout pass:not-secure-passphrase 2048 chmod 400 intermediate_ca/private/intermediate.ca.key.pem # Create the server intermediate CA certificate signing request openssl req -config ../../openssl.cnf -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/server_intermediate.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ServerIntermediateCA" -passin pass:not-secure-passphrase # Create the server intermediate CA certificate openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/server_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch # Create the server CA certificate chain cat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem # We are done with the server CA cd .. ###### Stash the octavia default server CA cert files cp server_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/server_ca-chain.cert.pem chmod 444 etc/octavia/certs/server_ca-chain.cert.pem cp server_ca/intermediate_ca/certs/intermediate.cert.pem etc/octavia/certs/server_ca.cert.pem chmod 400 etc/octavia/certs/server_ca.cert.pem cp server_ca/intermediate_ca/private/intermediate.ca.key.pem etc/octavia/certs/server_ca.key.pem chmod 400 etc/octavia/certs/server_ca.key.pem ##### Validate the Octavia PKI files set +x echo "################# Verifying the Octavia files ###########################" openssl verify -CAfile etc/octavia/certs/client_ca.cert.pem etc/octavia/certs/client.cert-and-key.pem openssl verify -CAfile etc/octavia/certs/server_ca-chain.cert.pem etc/octavia/certs/server_ca.cert.pem # We are done, stop enforcing shell errexit set +e echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!" echo "Please use the Octavia Certificate Configuration guide:" echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
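For reference, a hedged sketch of exercising this script by hand (the
checkout path is a placeholder; the script must be launched from bin/ so its
../../openssl.cnf references resolve to the openssl.cnf shipped alongside it):

cd /path/to/octavia/bin          # hypothetical checkout location
bash create_dual_intermediate_CA.sh
# the generated PKI then lives under bin/dual_ca/, for example:
openssl x509 -in dual_ca/etc/octavia/certs/client_ca.cert.pem -noout -subject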
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/bin/create_single_CA_intermediate_CA.sh0000775000175000017500000001300300000000000022542 0ustar00zuulzuul00000000000000#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!"
echo "Single CA mode is insecure, do not use this! It is for testing only."
echo "Please use the Octavia Certificate Configuration guide:"
echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"

# This script produces weak security PKI to save resources in the test gates.
# A single CA should never be used in a production deployment. This script
# exists purely to test legacy migrations / deployments where someone
# accidentally used a single CA.

set -x -e

CA_PATH=single_ca

rm -rf $CA_PATH
mkdir $CA_PATH
chmod 700 $CA_PATH
cd $CA_PATH
mkdir -p etc/octavia/certs
chmod 700 etc/octavia/certs

###### Client Root CA
mkdir client_ca
cd client_ca
mkdir certs crl newcerts private
chmod 700 private
touch index.txt
echo 1000 > serial

# Create the client CA private key
openssl genrsa -aes128 -out private/ca.key.pem -passout pass:not-secure-passphrase 2048
chmod 400 private/ca.key.pem

# Create the client CA root certificate
openssl req -config ../../openssl.cnf -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientRootCA" -passin pass:not-secure-passphrase

###### Client Intermediate CA
mkdir intermediate_ca
mkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private
chmod 700 intermediate_ca/private
touch intermediate_ca/index.txt
echo 1000 > intermediate_ca/serial

# Create the client intermediate CA private key
openssl genrsa -aes128 -out intermediate_ca/private/intermediate.ca.key.pem -passout pass:not-secure-passphrase 2048
chmod 400 intermediate_ca/private/intermediate.ca.key.pem

# Create the client intermediate CA certificate signing request
openssl req -config ../../openssl.cnf -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/client_intermediate.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientIntermediateCA" -passin pass:not-secure-passphrase

# Create the client intermediate CA certificate
openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/client_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch

# Create the client CA certificate chain
cat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem

###### Create the client key and certificate
openssl genrsa -aes128 -out intermediate_ca/private/controller.key.pem -passout pass:not-secure-passphrase 2048
chmod 400 intermediate_ca/private/controller.key.pem

# Create the client controller certificate signing request
openssl req -config ../../openssl.cnf -key intermediate_ca/private/controller.key.pem -new -sha256 -out intermediate_ca/controller.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=OctaviaController" -passin pass:not-secure-passphrase

# Create the controller client certificate
openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions usr_cert -days 1825 -notext -md sha256 -in intermediate_ca/controller.csr -out intermediate_ca/certs/controller.cert.pem -passin pass:not-secure-passphrase -batch

# Build the concatenated client cert and key
openssl rsa -in intermediate_ca/private/controller.key.pem -out intermediate_ca/private/client.cert-and-key.pem -passin pass:not-secure-passphrase
cat intermediate_ca/certs/controller.cert.pem >> intermediate_ca/private/client.cert-and-key.pem

# We are done with the client CA
cd ..

###### Stash the octavia default cert files
cp client_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/client_ca.cert.pem
chmod 444 etc/octavia/certs/client_ca.cert.pem
cp client_ca/intermediate_ca/private/client.cert-and-key.pem etc/octavia/certs/client.cert-and-key.pem
chmod 600 etc/octavia/certs/client.cert-and-key.pem
cp client_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/server_ca.cert.pem
chmod 444 etc/octavia/certs/server_ca.cert.pem
cp client_ca/intermediate_ca/private/intermediate.ca.key.pem etc/octavia/certs/server_ca.key.pem
chmod 600 etc/octavia/certs/server_ca.key.pem

##### Validate the Octavia PKI files
set +x
echo "################# Verifying the Octavia files ###########################"
openssl verify -CAfile etc/octavia/certs/client_ca.cert.pem etc/octavia/certs/client.cert-and-key.pem
openssl verify -CAfile etc/octavia/certs/server_ca.cert.pem etc/octavia/certs/server_ca.cert.pem

# We are done, stop enforcing shell errexit
set +e

echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!"
echo "Single CA mode is insecure, do not use this! It is for testing only."
echo "Please use the Octavia Certificate Configuration guide:"
echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/bin/openssl.cnf0000664000175000017500000001046200000000000016120 0ustar00zuulzuul00000000000000# OpenSSL root CA configuration file.

[ ca ]
# `man ca`
default_ca = CA_default

[ CA_default ]
# Directory and file locations.
dir = ./
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rand

# The root key and root certificate.
private_key = $dir/private/ca.key.pem
certificate = $dir/certs/ca.cert.pem

# For certificate revocation lists.
crlnumber = $dir/crlnumber
crl = $dir/crl/ca.crl.pem
crl_extensions = crl_ext
default_crl_days = 30

# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256

name_opt = ca_default
cert_opt = ca_default
# 10 years
default_days = 7300
preserve = no
policy = policy_strict

[ CA_intermediate ]
# Directory and file locations.
dir = ./intermediate_ca
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rand

# The root key and root certificate.
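# NOTE: these relative paths resolve against the directory the openssl
# command is invoked from -- the scripts in bin/ run
# 'openssl ca -name CA_intermediate' from each CA's top-level directory,
# so the key and certificate below are that directory's root CA files.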
private_key = ./private/ca.key.pem
certificate = ./certs/ca.cert.pem

# For certificate revocation lists.
crlnumber = $dir/crlnumber
crl = $dir/crl/ca.crl.pem
crl_extensions = crl_ext
default_crl_days = 30

# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256

name_opt = ca_default
cert_opt = ca_default
# 5 years
default_days = 3650
preserve = no
policy = policy_strict

[ policy_strict ]
# The root CA should only sign intermediate certificates that match.
# See the POLICY FORMAT section of `man ca`.
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional

[ req ]
# Options for the `req` tool (`man req`).
default_bits = 2048
distinguished_name = req_distinguished_name
string_mask = utf8only

# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256

# Extension to add when the -x509 option is used.
x509_extensions = v3_ca

[ req_distinguished_name ]
# See <https://en.wikipedia.org/wiki/Certificate_signing_request>.
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name
localityName = Locality Name
0.organizationName = Organization Name
organizationalUnitName = Organizational Unit Name
commonName = Common Name
emailAddress = Email Address

# Optionally, specify some defaults.
countryName_default = US
stateOrProvinceName_default = Oregon
localityName_default = Corvallis
0.organizationName_default = OpenStack
organizationalUnitName_default = Octavia
emailAddress_default =
commonName_default = example.org

[ v3_ca ]
# Extensions for a typical CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true
keyUsage = critical, digitalSignature, cRLSign, keyCertSign

[ v3_intermediate_ca ]
# Extensions for a typical intermediate CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true, pathlen:0
keyUsage = critical, digitalSignature, cRLSign, keyCertSign

[ usr_cert ]
# Extensions for client certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, email
nsComment = "OpenSSL Generated Client Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, emailProtection

[ server_cert ]
# Extensions for server certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = server
nsComment = "OpenSSL Generated Server Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer:always
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth

[ crl_ext ]
# Extension for CRLs (`man x509v3_config`).
authorityKeyIdentifier=keyid:always
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/bindep.txt0000664000175000017500000000162500000000000015200 0ustar00zuulzuul00000000000000# Docs package dependencies
graphviz [doc]

# PDF Docs package dependencies
make [doc]
fonts-freefont-otf [doc platform:dpkg]
fonts-liberation [doc platform:dpkg]
texlive-latex-base [doc platform:dpkg]
texlive-latex-extra [doc platform:dpkg]
texlive-xetex [doc platform:dpkg]
texlive-fonts-recommended [doc platform:dpkg]
xindy [doc platform:dpkg]
latexmk [doc platform:dpkg]
texlive [doc platform:rpm]
texlive-fncychap [doc platform:rpm]
texlive-titlesec [doc platform:rpm]
texlive-tabulary [doc platform:rpm]
texlive-framed [doc platform:rpm]
texlive-wrapfig [doc platform:rpm]
texlive-upquote [doc platform:rpm]
texlive-capt-of [doc platform:rpm]
texlive-needspace [doc platform:rpm]
texlive-polyglossia [doc platform:rpm]
texlive-xindy [doc platform:rpm]
latexmk [doc platform:rpm]
python3-sphinxcontrib-svg2pdfconverter-common [doc platform:rpm]
librsvg2-tools [doc platform:rpm]
librsvg2-bin [doc platform:dpkg]
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/0000775000175000017500000000000000000000000014776 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/README.md0000664000175000017500000000151200000000000016254 0ustar00zuulzuul00000000000000This directory contains the octavia devstack plugin.

To configure octavia, you will need to enable the octavia devstack plugin
and enable the octavia services by editing the [[local|localrc]] section of
your local.conf file.

1) Enable the plugin

To enable the octavia plugin, add a line of the form:

    enable_plugin octavia <octavia-repo> [GITREF]

where <octavia-repo> is the URL of an octavia repository and
[GITREF] is an optional git ref (branch/ref/tag). The default is master.

For example

    enable_plugin octavia https://opendev.org/openstack/octavia master

2) Enable the Octavia services

For example

    ENABLED_SERVICES+=,octavia,o-api,o-cw,o-hk,o-hm,o-da

For more information, see the "Externally Hosted Plugins" section of
https://docs.openstack.org/devstack/latest/plugins.html
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/contrib/0000775000175000017500000000000000000000000016436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/contrib/new-octavia-devstack.sh0000775000175000017500000000374600000000000023026 0ustar00zuulzuul00000000000000#!/bin/bash
#
# These instructions assume an Ubuntu-based host or VM for running devstack.
# Please note that if you are running this in a VM, it is vitally important
# that the underlying hardware have nested virtualization enabled or you will
# experience very poor amphora performance.

# Set up the packages we need. Ubuntu package manager is assumed.
apt-get update
apt-get install git vim -y

# TODO(sbalukoff): Add prerequisites for other distributions.
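# For example, an RPM-based host would presumably need something like the
# following (untested sketch, not part of the upstream script):
#   dnf install -y git vim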
# Clone the devstack repo
git clone https://github.com/openstack-dev/devstack.git $HOME/devstack

cat <<EOF > $HOME/devstack/localrc
enable_plugin barbican https://opendev.org/openstack/barbican
enable_plugin neutron https://opendev.org/openstack/neutron
enable_plugin octavia https://opendev.org/openstack/octavia
LIBS_FROM_GIT+=python-octaviaclient
KEYSTONE_TOKEN_FORMAT=fernet
DATABASE_PASSWORD=secretdatabase
RABBIT_PASSWORD=secretrabbit
ADMIN_PASSWORD=secretadmin
SERVICE_PASSWORD=secretservice
SERVICE_TOKEN=111222333444
# Enable Logging
LOGFILE=/opt/stack/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=True
# Pre-requisite
ENABLED_SERVICES=key,rabbit,mysql
# Nova
ENABLED_SERVICES+=,n-api,n-obj,n-cpu,n-cond,n-sch
# Placement service needed for Nova
ENABLED_SERVICES+=,placement-api,placement-client
# Glance
ENABLED_SERVICES+=,g-api,g-reg
# Neutron
ENABLED_SERVICES+=,neutron-api,neutron-agent,neutron-dhcp,neutron-l3
ENABLED_SERVICES+=,neutron-metadata-agent,neutron-qos
# Tempest (optional)
#ENABLED_SERVICES+=,tempest
# Octavia
ENABLED_SERVICES+=,octavia,o-api,o-cw,o-hm,o-hk,o-da
EOF

# Create the stack user
$HOME/devstack/tools/create-stack-user.sh

# Move everything into place
mv $HOME/devstack /opt/stack/
chown -R stack:stack /opt/stack/devstack/

# Fix permissions on current tty so screens can attach
chmod go+rw `tty`

# Stack that stack!
su - stack -c /opt/stack/devstack/stack.sh

# Add environment variables for auth/endpoints
echo 'source /opt/stack/devstack/openrc admin admin' >> /opt/stack/.bashrc

# Drop into a shell
exec su - stack
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3222165 octavia-6.2.2/devstack/etc/0000775000175000017500000000000000000000000015551 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/etc/octavia/0000775000175000017500000000000000000000000017177 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/etc/octavia/haproxy.cfg0000664000175000017500000000110300000000000021345 0ustar00zuulzuul00000000000000global
    daemon
    log /dev/log local0
    log /dev/log local1 notice

defaults
    log global
    retries 3
    option redispatch
    timeout connect 5000
    timeout client 50000
    timeout server 50000

frontend octavia-frontend-api
    option httplog
    bind 0.0.0.0:OCTAVIA_PORT
    mode http
    default_backend octavia-backend-api

backend octavia-backend-api
    mode http
    balance roundrobin
    # the devstack plugin will add entries here looking like:
    # server octavia-main <host-ip>:<port> weight 1
    # server octavia-second <host-ip>:<port> weight 1
    #
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/etc/rsyslog/0000775000175000017500000000000000000000000017253 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/etc/rsyslog/10-octavia-log-offloading.conf0000664000175000017500000000122500000000000024653 0ustar00zuulzuul00000000000000# Work around CentOS/RHEL umask override of file permissions
# Note: This is a global rsyslog setting, you probably do not want to set
# this outside of testing!
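# To exercise the routing rules below by hand you could, for example, emit a
# test record from a shell once rsyslog is listening (illustrative only --
# substitute the real substituted port value for <ADMIN_PORT>):
#   logger --server 127.0.0.1 --port <ADMIN_PORT> --udp -p local1.info "amphora admin test"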
$umask 0000 # provides UDP syslog reception module(load="imudp") input(type="imudp" port=["%ADMIN_PORT%", "%TENANT_PORT%"]) if ($inputname == "imudp" and $syslogfacility-text == "local0" and $syslogseverity-text == "info") then { action(type="omfile" FileCreateMode="0644" File="/var/log/octavia-tenant-traffic.log")&stop } if ($inputname == "imudp" and $syslogfacility-text != "local0") then { action(type="omfile" FileCreateMode="0644" File="/var/log/octavia-amphora.log")&stop } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3222165 octavia-6.2.2/devstack/files/0000775000175000017500000000000000000000000016100 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/files/debs/0000775000175000017500000000000000000000000017015 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/files/debs/octavia0000664000175000017500000000003300000000000020362 0ustar00zuulzuul00000000000000golang debootstrap rsyslog ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/files/rpms/0000775000175000017500000000000000000000000017061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/files/rpms/octavia0000664000175000017500000000003300000000000020426 0ustar00zuulzuul00000000000000golang debootstrap rsyslog ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/files/rpms-suse/0000775000175000017500000000000000000000000020036 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/files/rpms-suse/octavia0000664000175000017500000000003400000000000021404 0ustar00zuulzuul00000000000000debootstrap dpkg go rsyslog ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/files/wsgi/0000775000175000017500000000000000000000000017051 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/files/wsgi/octavia-api.template0000664000175000017500000000302300000000000023001 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is a template Apache2 configuration file for using the # Octavia API through mod_wsgi. This version assumes you are # running devstack to configure the software. 
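# NOTE: the %UPPERCASE% tokens in this template are placeholders; the
# devstack plugin (_configure_octavia_apache_wsgi in devstack/plugin.sh)
# substitutes them with sed at stack time. For example,
# %OCTAVIA_SERVICE_PORT% typically becomes the devstack Octavia API port
# (9876 unless overridden in local.conf).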
Listen %OCTAVIA_SERVICE_PORT%

<VirtualHost *:%OCTAVIA_SERVICE_PORT%>
    WSGIDaemonProcess octavia-wsgi processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
    WSGIProcessGroup octavia-wsgi
    WSGIScriptAlias / /usr/local/bin/octavia-wsgi
    WSGIApplicationGroup %{GLOBAL}

    ErrorLog /var/log/%APACHE_NAME%/octavia_error.log
    <IfVersion >= 2.4>
        ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    CustomLog /var/log/%APACHE_NAME%/octavia_access.log combined

    %SSLENGINE%
    %SSLCERTFILE%
    %SSLKEYFILE%

    <Directory /usr/local/bin>
        WSGIProcessGroup octavia-wsgi
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/plugin.sh0000664000175000017500000010130400000000000016627 0ustar00zuulzuul00000000000000#!/usr/bin/env bash

# devstack plugin for octavia

GET_PIP_CACHE_LOCATION=/opt/stack/cache/files/get-pip.py

function octavia_install {
    setup_develop $OCTAVIA_DIR
    if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then
        if ! [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then
            if [[ ${DISTRO} =~ (rhel|centos) ]]; then
                install_package qemu-kvm
            else
                install_package qemu
            fi
        fi
    fi
}

function octaviaclient_install {
    if use_library_from_git "python-octaviaclient"; then
        git_clone_by_name "python-octaviaclient"
        setup_dev_lib "python-octaviaclient"
    else
        pip_install_gr python-octaviaclient
    fi
}

function octavia_lib_install {
    if use_library_from_git "octavia-lib"; then
        git_clone_by_name "octavia-lib"
        setup_dev_lib "octavia-lib"
    else
        pip_install_gr octavia-lib
    fi
}

function install_diskimage_builder {
    if use_library_from_git "diskimage-builder"; then
        GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL
        GITDIR["diskimage-builder"]=$DISKIMAGE_BUILDER_DIR
        GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF
        git_clone_by_name "diskimage-builder"
        setup_dev_lib -bindep "diskimage-builder"
    fi
}

function set_octavia_worker_image_owner_id {
    image_id=$(openstack image list --property name=${OCTAVIA_AMP_IMAGE_NAME} -f value -c ID)
    owner_id=$(openstack image show ${image_id} -c owner -f value)
    iniset $OCTAVIA_CONF controller_worker amp_image_owner_id ${owner_id}
}

function build_octavia_worker_image {
    # set up diskimage-builder if we need to
    install_diskimage_builder

    # Pull in DIB local elements if they are defined in devstack
    if [ -n "$DIB_LOCAL_ELEMENTS" ]; then
        export DIB_LOCAL_ELEMENTS=$DIB_LOCAL_ELEMENTS
    fi

    # pull the agent code from the current code zuul has a reference to
    if [ -n "$DIB_REPOLOCATION_pip_and_virtualenv" ]; then
        export DIB_REPOLOCATION_pip_and_virtualenv=$DIB_REPOLOCATION_pip_and_virtualenv
    elif [ -f $GET_PIP_CACHE_LOCATION ] ; then
        export DIB_REPOLOCATION_pip_and_virtualenv=file://$GET_PIP_CACHE_LOCATION
    fi
    export DIB_REPOLOCATION_amphora_agent=$OCTAVIA_DIR
    export DIB_REPOREF_amphora_agent=$(git --git-dir="$OCTAVIA_DIR/.git" log -1 --pretty="format:%H")

    TOKEN=$(openstack token issue -f value -c id)
    die_if_not_set $LINENO TOKEN "Keystone failed to get token."
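    # Shell tracing (-x) is passed through to diskimage-create.sh below
    # unless OCTAVIA_DIB_TRACING is explicitly set to 0 in local.conf.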
octavia_dib_tracing_arg= if [ "$OCTAVIA_DIB_TRACING" != "0" ]; then octavia_dib_tracing_arg="-x" fi if [[ ${OCTAVIA_AMP_BASE_OS:+1} ]] ; then export PARAM_OCTAVIA_AMP_BASE_OS='-i '$OCTAVIA_AMP_BASE_OS fi if [[ ${OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID:+1} ]] ; then export PARAM_OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID='-d '$OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID fi if [[ ${OCTAVIA_AMP_IMAGE_SIZE:+1} ]] ; then export PARAM_OCTAVIA_AMP_IMAGE_SIZE='-s '$OCTAVIA_AMP_IMAGE_SIZE fi # Use the infra pypi mirror if it is available if [[ -e /etc/ci/mirror_info.sh ]]; then source /etc/ci/mirror_info.sh fi if [[ ${NODEPOOL_PYPI_MIRROR:+1} ]]; then if [[ ${DIB_LOCAL_ELEMENTS:+1} ]]; then export DIB_LOCAL_ELEMENTS="${DIB_LOCAL_ELEMENTS} pypi" else export DIB_LOCAL_ELEMENTS='pypi' fi export DIB_PYPI_MIRROR_URL=$NODEPOOL_PYPI_MIRROR export DIB_PYPI_MIRROR_URL_1=$NODEPOOL_WHEEL_MIRROR export DIB_PIP_RETRIES=0 fi if ! [ -f $OCTAVIA_AMP_IMAGE_FILE ]; then local dib_logs=/var/log/dib-build if [[ -e ${dib_logs} ]]; then sudo rm -rf ${dib_logs} fi sudo mkdir -m755 ${dib_logs} sudo chown $STACK_USER ${dib_logs} # Build amphora image with master DIB in a Python 3 virtual environment ( DIB_VENV_DIR=$(mktemp -d) DIB_GIT_DIR=/tmp/dib-octavia python3 -m venv $DIB_VENV_DIR export USE_PYTHON3=True export PATH=$DIB_VENV_DIR/bin:$PATH if ! [ -d $DIB_GIT_DIR ]; then git clone ${GITREPO["diskimage-builder"]} $DIB_GIT_DIR fi (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) | sed '/diskimage-builder/d' > $DIB_VENV_DIR/u-c.txt pip install -c $DIB_VENV_DIR/u-c.txt $DIB_GIT_DIR $OCTAVIA_DIR/diskimage-create/diskimage-create.sh -l ${dib_logs}/$(basename $OCTAVIA_AMP_IMAGE_FILE).log $octavia_dib_tracing_arg -o $OCTAVIA_AMP_IMAGE_FILE ${PARAM_OCTAVIA_AMP_BASE_OS:-} ${PARAM_OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID:-} ${PARAM_OCTAVIA_AMP_IMAGE_SIZE:-} ) fi upload_image file://${OCTAVIA_AMP_IMAGE_FILE} $TOKEN } function _configure_octavia_apache_wsgi { # Make sure mod_wsgi is enabled in apache # This is important for multinode where other services have not yet # enabled it. 
install_apache_wsgi

    local octavia_apache_conf
    octavia_apache_conf=$(apache_site_config_for octavia)

    # Use the alternate port if we are running multinode behind haproxy
    if [ $OCTAVIA_NODE != 'standalone' ] && [ $OCTAVIA_NODE != 'api' ]; then
        local octavia_api_port=$OCTAVIA_HA_PORT
    else
        local octavia_api_port=$OCTAVIA_PORT
    fi
    local octavia_ssl=""
    local octavia_certfile=""
    local octavia_keyfile=""
    local venv_path=""

    if is_ssl_enabled_service octavia; then
        octavia_ssl="SSLEngine On"
        octavia_certfile="SSLCertificateFile $OCTAVIA_SSL_CERT"
        octavia_keyfile="SSLCertificateKeyFile $OCTAVIA_SSL_KEY"
    fi

    if [[ ${USE_VENV} = True ]]; then
        venv_path="python-path=${PROJECT_VENV["octavia"]}/lib/$(python_version)/site-packages"
    fi

    sudo cp ${OCTAVIA_DIR}/devstack/files/wsgi/octavia-api.template $octavia_apache_conf
    sudo sed -e "
        s|%OCTAVIA_SERVICE_PORT%|$octavia_api_port|g;
        s|%USER%|$APACHE_USER|g;
        s|%APACHE_NAME%|$APACHE_NAME|g;
        s|%SSLENGINE%|$octavia_ssl|g;
        s|%SSLCERTFILE%|$octavia_certfile|g;
        s|%SSLKEYFILE%|$octavia_keyfile|g;
        s|%VIRTUALENV%|$venv_path|g
        s|%APIWORKERS%|$API_WORKERS|g;
    " -i $octavia_apache_conf
}

function _configure_octavia_apache_uwsgi {
    write_uwsgi_config "$OCTAVIA_UWSGI_CONF" "$OCTAVIA_UWSGI_APP" "/$OCTAVIA_SERVICE_TYPE"
}

function _cleanup_octavia_apache_wsgi {
    if [[ "$WSGI_MODE" == "uwsgi" ]]; then
        remove_uwsgi_config "$OCTAVIA_UWSGI_CONF" "$OCTAVIA_UWSGI_APP"
        restart_apache_server
    else
        sudo rm -f $(apache_site_config_for octavia)
        restart_apache_server
    fi
}

function _start_octavia_apache_wsgi {
    if [[ "$WSGI_MODE" == "uwsgi" ]]; then
        run_process o-api "$OCTAVIA_BIN_DIR/uwsgi --ini $OCTAVIA_UWSGI_CONF"
        enable_apache_site octavia-wsgi
    else
        enable_apache_site octavia
    fi
    restart_apache_server
}

function _stop_octavia_apache_wsgi {
    if [[ "$WSGI_MODE" == "uwsgi" ]]; then
        disable_apache_site octavia-wsgi
        stop_process o-api
    else
        disable_apache_site octavia
    fi
    restart_apache_server
}

function create_octavia_accounts {
    create_service_user $OCTAVIA

    # Increase the octavia account secgroups quota
    # This is important for concurrent tempest testing
    openstack quota set --secgroups 100 $OCTAVIA_PROJECT_NAME
    openstack quota set --secgroup-rules 1000 $OCTAVIA_PROJECT_NAME

    octavia_service=$(get_or_create_service "octavia" $OCTAVIA_SERVICE_TYPE "Octavia Load Balancing Service")

    if [[ "$WSGI_MODE" == "uwsgi" ]] && [[ "$OCTAVIA_NODE" == "main" ]] ; then
        get_or_create_endpoint $octavia_service "$REGION_NAME" "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/$OCTAVIA_SERVICE_TYPE"
    elif [[ "$WSGI_MODE" == "uwsgi" ]]; then
        get_or_create_endpoint $octavia_service "$REGION_NAME" "$OCTAVIA_PROTOCOL://$SERVICE_HOST/$OCTAVIA_SERVICE_TYPE"
    else
        get_or_create_endpoint $octavia_service "$REGION_NAME" "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/"
    fi
}

function install_redis {
    if is_fedora; then
        install_package redis
    elif is_ubuntu; then
        install_package redis-server
    elif is_suse; then
        install_package redis
    else
        exit_distro_not_supported "redis installation"
    fi

    start_service redis

    pip_install_gr redis
}

function stop_redis {
    stop_service redis || true
}

function uninstall_redis {
    if is_fedora; then
        uninstall_package redis
    elif is_ubuntu; then
        uninstall_package redis-server
    elif is_suse; then
        uninstall_package redis
    fi

    pip_uninstall redis
}

function octavia_configure {
    sudo mkdir -m 755 -p $OCTAVIA_CONF_DIR
    safe_chown $STACK_USER $OCTAVIA_CONF_DIR

    sudo mkdir -m 700 -p $OCTAVIA_RUN_DIR
    safe_chown $STACK_USER $OCTAVIA_RUN_DIR

    if !
[ -e $OCTAVIA_CONF ] ; then cp $OCTAVIA_DIR/etc/octavia.conf $OCTAVIA_CONF fi if ! [ -e $OCTAVIA_AUDIT_MAP ] ; then cp $OCTAVIA_DIR/etc/audit/octavia_api_audit_map.conf.sample $OCTAVIA_AUDIT_MAP fi # Use devstack logging configuration setup_logging $OCTAVIA_CONF iniset $OCTAVIA_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL # Change bind host iniset $OCTAVIA_CONF api_settings bind_host $(ipv6_unquote $SERVICE_HOST) iniset $OCTAVIA_CONF api_settings api_handler queue_producer iniset $OCTAVIA_CONF database connection "mysql+pymysql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_HOST}:3306/octavia" if [[ ${OCTAVIA_ENABLE_AMPHORAV2_PROVIDER} == True ]]; then iniset $OCTAVIA_CONF task_flow persistence_connection "mysql+pymysql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_HOST}:3306/octavia_persistence" fi # Configure keystone auth_token for all users configure_keystone_authtoken_middleware $OCTAVIA_CONF octavia # Ensure config is set up properly for authentication as admin iniset $OCTAVIA_CONF service_auth auth_url $OS_AUTH_URL iniset $OCTAVIA_CONF service_auth auth_type password iniset $OCTAVIA_CONF service_auth username $OCTAVIA_USERNAME iniset $OCTAVIA_CONF service_auth password $OCTAVIA_PASSWORD iniset $OCTAVIA_CONF service_auth user_domain_name $OCTAVIA_USER_DOMAIN_NAME iniset $OCTAVIA_CONF service_auth project_name $OCTAVIA_PROJECT_NAME iniset $OCTAVIA_CONF service_auth project_domain_name $OCTAVIA_PROJECT_DOMAIN_NAME iniset $OCTAVIA_CONF service_auth cafile $SSL_BUNDLE_FILE iniset $OCTAVIA_CONF service_auth memcached_servers $SERVICE_HOST:11211 # Setting other required default options iniset $OCTAVIA_CONF controller_worker amphora_driver ${OCTAVIA_AMPHORA_DRIVER} iniset $OCTAVIA_CONF controller_worker compute_driver ${OCTAVIA_COMPUTE_DRIVER} iniset $OCTAVIA_CONF controller_worker volume_driver ${OCTAVIA_VOLUME_DRIVER} iniset $OCTAVIA_CONF controller_worker network_driver ${OCTAVIA_NETWORK_DRIVER} iniset $OCTAVIA_CONF controller_worker amp_image_tag ${OCTAVIA_AMP_IMAGE_TAG} iniuncomment $OCTAVIA_CONF health_manager heartbeat_key iniset $OCTAVIA_CONF health_manager heartbeat_key ${OCTAVIA_HEALTH_KEY} iniset $OCTAVIA_CONF house_keeping amphora_expiry_age ${OCTAVIA_AMP_EXPIRY_AGE} iniset $OCTAVIA_CONF house_keeping load_balancer_expiry_age ${OCTAVIA_LB_EXPIRY_AGE} iniset $OCTAVIA_CONF DEFAULT transport_url $(get_transport_url) iniset $OCTAVIA_CONF oslo_messaging rpc_thread_pool_size 2 iniset $OCTAVIA_CONF oslo_messaging topic octavia_prov # Uncomment other default options iniuncomment $OCTAVIA_CONF haproxy_amphora base_path iniuncomment $OCTAVIA_CONF haproxy_amphora base_cert_dir iniuncomment $OCTAVIA_CONF haproxy_amphora connection_max_retries iniuncomment $OCTAVIA_CONF haproxy_amphora connection_retry_interval iniuncomment $OCTAVIA_CONF haproxy_amphora rest_request_conn_timeout iniuncomment $OCTAVIA_CONF haproxy_amphora rest_request_read_timeout iniuncomment $OCTAVIA_CONF controller_worker amp_active_retries iniuncomment $OCTAVIA_CONF controller_worker amp_active_wait_sec iniuncomment $OCTAVIA_CONF controller_worker workers iniuncomment $OCTAVIA_CONF controller_worker loadbalancer_topology iniset $OCTAVIA_CONF controller_worker loadbalancer_topology ${OCTAVIA_LB_TOPOLOGY} # devstack optimizations for tempest runs iniset $OCTAVIA_CONF haproxy_amphora connection_max_retries 1500 iniset $OCTAVIA_CONF haproxy_amphora connection_retry_interval 1 iniset $OCTAVIA_CONF haproxy_amphora rest_request_conn_timeout ${OCTAVIA_AMP_CONN_TIMEOUT} iniset $OCTAVIA_CONF haproxy_amphora 
rest_request_read_timeout ${OCTAVIA_AMP_READ_TIMEOUT}
    iniset $OCTAVIA_CONF controller_worker amp_active_retries 100
    iniset $OCTAVIA_CONF controller_worker amp_active_wait_sec 2
    iniset $OCTAVIA_CONF controller_worker workers 2

    if [[ -a $OCTAVIA_SSH_DIR ]] ; then
        rm -rf $OCTAVIA_SSH_DIR
    fi
    mkdir -m755 $OCTAVIA_SSH_DIR

    if [[ "$(trueorfalse False OCTAVIA_USE_PREGENERATED_SSH_KEY)" == "True" ]]; then
        cp -fp ${OCTAVIA_PREGENERATED_SSH_KEY_PATH} ${OCTAVIA_AMP_SSH_KEY_PATH}
        cp -fp ${OCTAVIA_PREGENERATED_SSH_KEY_PATH}.pub ${OCTAVIA_AMP_SSH_KEY_PATH}.pub
        chmod 0600 ${OCTAVIA_AMP_SSH_KEY_PATH}
    else
        ssh-keygen -b $OCTAVIA_AMP_SSH_KEY_BITS -t $OCTAVIA_AMP_SSH_KEY_TYPE -N "" -f ${OCTAVIA_AMP_SSH_KEY_PATH}
    fi
    iniset $OCTAVIA_CONF controller_worker amp_ssh_key_name ${OCTAVIA_AMP_SSH_KEY_NAME}

    if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] || [ $OCTAVIA_NODE == 'api' ]; then
        recreate_database_mysql octavia
        octavia-db-manage upgrade head
        if [[ ${OCTAVIA_ENABLE_AMPHORAV2_PROVIDER} == True ]]; then
            recreate_database_mysql octavia_persistence
            octavia-db-manage upgrade_persistence
        fi
    fi

    if [[ -a $OCTAVIA_CERTS_DIR ]] ; then
        rm -rf $OCTAVIA_CERTS_DIR
    fi

    # amphorav2 requires a redis installation
    if [[ ${OCTAVIA_ENABLE_AMPHORAV2_PROVIDER} == True ]]; then
        install_redis
    fi

    if [[ "$(trueorfalse False OCTAVIA_USE_PREGENERATED_CERTS)" == "True" ]]; then
        cp -rfp ${OCTAVIA_PREGENERATED_CERTS_DIR} ${OCTAVIA_CERTS_DIR}
    else
        pushd $OCTAVIA_DIR/bin
        source create_dual_intermediate_CA.sh
        mkdir -p ${OCTAVIA_CERTS_DIR}/private
        chmod 700 ${OCTAVIA_CERTS_DIR}/private
        cp -p etc/octavia/certs/server_ca.cert.pem ${OCTAVIA_CERTS_DIR}/
        cp -p etc/octavia/certs/server_ca-chain.cert.pem ${OCTAVIA_CERTS_DIR}/
        cp -p etc/octavia/certs/server_ca.key.pem ${OCTAVIA_CERTS_DIR}/private/
        cp -p etc/octavia/certs/client_ca.cert.pem ${OCTAVIA_CERTS_DIR}/
        cp -p etc/octavia/certs/client.cert-and-key.pem ${OCTAVIA_CERTS_DIR}/private/
        popd
    fi

    iniset $OCTAVIA_CONF certificates ca_certificate ${OCTAVIA_CERTS_DIR}/server_ca.cert.pem
    iniset $OCTAVIA_CONF certificates ca_private_key ${OCTAVIA_CERTS_DIR}/private/server_ca.key.pem
    iniset $OCTAVIA_CONF certificates ca_private_key_passphrase not-secure-passphrase
    iniset $OCTAVIA_CONF controller_worker client_ca ${OCTAVIA_CERTS_DIR}/client_ca.cert.pem
    iniset $OCTAVIA_CONF haproxy_amphora client_cert ${OCTAVIA_CERTS_DIR}/private/client.cert-and-key.pem
    iniset $OCTAVIA_CONF haproxy_amphora server_ca ${OCTAVIA_CERTS_DIR}/server_ca-chain.cert.pem

    # Controller side symmetric encryption, not used for PKI
    iniset $OCTAVIA_CONF certificates server_certs_key_passphrase insecure-key-do-not-use-this-key

    if [[ "$OCTAVIA_USE_LEGACY_RBAC" == "True" ]]; then
        cp $OCTAVIA_DIR/etc/policy/admin_or_owner-policy.json $OCTAVIA_CONF_DIR/policy.json
    fi

    # create dhclient.conf file for dhclient
    sudo mkdir -m755 -p $OCTAVIA_DHCLIENT_DIR
    sudo cp $OCTAVIA_DIR/etc/dhcp/dhclient.conf $OCTAVIA_DHCLIENT_CONF

    if [[ "$OCTAVIA_USE_MOD_WSGI" == "True" ]]; then
        if [[ "$WSGI_MODE" == "uwsgi" ]]; then
            _configure_octavia_apache_uwsgi
        else
            _configure_octavia_apache_wsgi
        fi
    fi

    if [ $OCTAVIA_NODE == 'main' ]; then
        configure_octavia_api_haproxy
        # make sure octavia is reachable from haproxy
        iniset $OCTAVIA_CONF api_settings bind_port ${OCTAVIA_HA_PORT}
        iniset $OCTAVIA_CONF api_settings bind_host 0.0.0.0
    fi
    if [ $OCTAVIA_NODE != 'main' ] && [ $OCTAVIA_NODE != 'standalone' ] ; then
        # make sure octavia is reachable from haproxy from main node
        iniset $OCTAVIA_CONF api_settings bind_port ${OCTAVIA_HA_PORT}
        iniset $OCTAVIA_CONF api_settings
if [ $OCTAVIA_NODE != 'main' ] && [ $OCTAVIA_NODE != 'standalone' ] ; then # make sure octavia is reachable from haproxy from main node iniset $OCTAVIA_CONF api_settings bind_port ${OCTAVIA_HA_PORT} iniset $OCTAVIA_CONF api_settings bind_host 0.0.0.0 fi # set default graceful_shutdown_timeout to 300 sec (5 minutes) # TODO(gthiemonge) update this value after persistent taskflow commits are # merged iniset $OCTAVIA_CONF DEFAULT graceful_shutdown_timeout 300 } function create_mgmt_network_interface { if [ $OCTAVIA_MGMT_PORT_IP != 'auto' ]; then SUBNET_ID=$(openstack subnet show lb-mgmt-subnet -f value -c id) PORT_FIXED_IP="--fixed-ip subnet=$SUBNET_ID,ip-address=$OCTAVIA_MGMT_PORT_IP" fi MGMT_PORT_ID=$(openstack port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --host=$(hostname) -c id -f value --network lb-mgmt-net $PORT_FIXED_IP octavia-health-manager-$OCTAVIA_NODE-listen-port) MGMT_PORT_MAC=$(openstack port show -c mac_address -f value $MGMT_PORT_ID) MGMT_PORT_IP=$(openstack port show -f yaml -c fixed_ips $MGMT_PORT_ID | awk -v IP_VER=$SERVICE_IP_VERSION '{FS=",|";gsub(",","");gsub("'\''","");for(line = 1; line <= NF; ++line) {if ($line ~ /^.*- ip_address:/) {split($line, word, " ");if ((IP_VER == "4" || IP_VER == "") && word[3] ~ /\./) print word[3];if (IP_VER == "6" && word[3] ~ /:/) print word[3];} else {split($line, word, " ");for(ind in word) {if (word[ind] ~ /^ip_address=/) {split(word[ind], token, "=");if ((IP_VER == "4" || IP_VER == "") && token[2] ~ /\./) print token[2];if (IP_VER == "6" && token[2] ~ /:/) print token[2];}}}}}') if function_exists octavia_create_network_interface_device ; then octavia_create_network_interface_device o-hm0 $MGMT_PORT_ID $MGMT_PORT_MAC elif [[ $NEUTRON_AGENT == "openvswitch" || $Q_AGENT == "openvswitch" ]]; then sudo ovs-vsctl -- --may-exist add-port ${OVS_BRIDGE:-br-int} o-hm0 -- set Interface o-hm0 type=internal -- set Interface o-hm0 external-ids:iface-status=active -- set Interface o-hm0 external-ids:attached-mac=$MGMT_PORT_MAC -- set Interface o-hm0 external-ids:iface-id=$MGMT_PORT_ID -- set Interface o-hm0 external-ids:skip_cleanup=true elif [[ $NEUTRON_AGENT == "linuxbridge" || $Q_AGENT == "linuxbridge" ]]; then if ! ip link show o-hm0 ; then sudo ip link add o-hm0 type veth peer name o-bhm0 NETID=$(openstack network show lb-mgmt-net -c id -f value) BRNAME=brq$(echo $NETID|cut -c 1-11) sudo brctl addif $BRNAME o-bhm0 sudo ip link set o-bhm0 up fi else die "Unknown network controller.
Please define octavia_create_network_interface_device" fi sudo ip link set dev o-hm0 address $MGMT_PORT_MAC if [ $SERVICE_IP_VERSION == '6' ] ; then # Allow the required IPv6 ICMP messages sudo ip6tables -I INPUT -i o-hm0 -p ipv6-icmp -j ACCEPT sudo ip6tables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_HM_LISTEN_PORT -j ACCEPT sudo ip6tables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_AMP_LOG_ADMIN_PORT -j ACCEPT sudo ip6tables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_AMP_LOG_TENANT_PORT -j ACCEPT else sudo iptables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_HM_LISTEN_PORT -j ACCEPT sudo iptables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_AMP_LOG_ADMIN_PORT -j ACCEPT sudo iptables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_AMP_LOG_TENANT_PORT -j ACCEPT fi if [ $OCTAVIA_CONTROLLER_IP_PORT_LIST == 'auto' ] ; then iniset $OCTAVIA_CONF health_manager controller_ip_port_list $MGMT_PORT_IP:$OCTAVIA_HM_LISTEN_PORT else iniset $OCTAVIA_CONF health_manager controller_ip_port_list $OCTAVIA_CONTROLLER_IP_PORT_LIST fi iniset $OCTAVIA_CONF health_manager bind_ip $MGMT_PORT_IP iniset $OCTAVIA_CONF health_manager bind_port $OCTAVIA_HM_LISTEN_PORT iniset $OCTAVIA_CONF amphora_agent admin_log_targets "${MGMT_PORT_IP}:${OCTAVIA_AMP_LOG_ADMIN_PORT}" iniset $OCTAVIA_CONF amphora_agent tenant_log_targets "${MGMT_PORT_IP}:${OCTAVIA_AMP_LOG_TENANT_PORT}" # Setting these here as the devstack rsyslog configuration expects # these values. iniset $OCTAVIA_CONF amphora_agent user_log_facility 0 iniset $OCTAVIA_CONF amphora_agent administrative_log_facility 1 } function build_mgmt_network { # Create network and attach a subnet openstack network create lb-mgmt-net if [ $SERVICE_IP_VERSION == '6' ] ; then openstack subnet create --subnet-range $OCTAVIA_MGMT_SUBNET_IPV6 --allocation-pool start=$OCTAVIA_MGMT_SUBNET_IPV6_START,end=$OCTAVIA_MGMT_SUBNET_IPV6_END --network lb-mgmt-net --ip-version 6 --ipv6-address-mode slaac --ipv6-ra-mode slaac lb-mgmt-subnet else openstack subnet create --subnet-range $OCTAVIA_MGMT_SUBNET --allocation-pool start=$OCTAVIA_MGMT_SUBNET_START,end=$OCTAVIA_MGMT_SUBNET_END --network lb-mgmt-net lb-mgmt-subnet fi # Create security group and rules # Used for the amphora lb-mgmt-net ports openstack security group create lb-mgmt-sec-grp if [ $SERVICE_IP_VERSION == '6' ] ; then openstack security group rule create --protocol ipv6-icmp --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp openstack security group rule create --protocol tcp --dst-port 22 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp openstack security group rule create --protocol tcp --dst-port 9443 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp else openstack security group rule create --protocol icmp lb-mgmt-sec-grp openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp fi # Create security group and rules # Used for the health manager port openstack security group create lb-health-mgr-sec-grp if [ $SERVICE_IP_VERSION == '6' ] ; then openstack security group rule create --protocol ipv6-icmp --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp openstack security group rule create --protocol udp --dst-port $OCTAVIA_HM_LISTEN_PORT --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp openstack security group rule create --protocol udp --dst-port $OCTAVIA_AMP_LOG_ADMIN_PORT --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp openstack security group rule create --protocol udp --dst-port $OCTAVIA_AMP_LOG_TENANT_PORT 
--ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp else openstack security group rule create --protocol udp --dst-port $OCTAVIA_HM_LISTEN_PORT lb-health-mgr-sec-grp openstack security group rule create --protocol udp --dst-port $OCTAVIA_AMP_LOG_ADMIN_PORT lb-health-mgr-sec-grp openstack security group rule create --protocol udp --dst-port $OCTAVIA_AMP_LOG_TENANT_PORT lb-health-mgr-sec-grp fi } function configure_lb_mgmt_sec_grp { OCTAVIA_MGMT_SEC_GRP_ID=$(openstack security group show lb-mgmt-sec-grp -f value -c id) iniset ${OCTAVIA_CONF} controller_worker amp_secgroup_list ${OCTAVIA_MGMT_SEC_GRP_ID} } function create_amphora_flavor { # Pass even if it exists to avoid race condition on multinode openstack flavor create --id auto --ram 1024 --disk ${OCTAVIA_AMP_IMAGE_SIZE:-2} --vcpus 1 --private m1.amphora -f value -c id --property hw_rng:allowed=True || true amp_flavor_id=$(openstack flavor show m1.amphora -f value -c id) iniset $OCTAVIA_CONF controller_worker amp_flavor_id $amp_flavor_id } function configure_octavia_api_haproxy { install_package haproxy cp ${OCTAVIA_DIR}/devstack/etc/octavia/haproxy.cfg ${OCTAVIA_CONF_DIR}/haproxy.cfg sed -i.bak "s/OCTAVIA_PORT/${OCTAVIA_PORT}/" ${OCTAVIA_CONF_DIR}/haproxy.cfg NODES=(${OCTAVIA_NODES//,/ }) for NODE in ${NODES[@]}; do DATA=(${NODE//:/ }) NAME=$(echo -e "${DATA[0]}" | tr -d '[[:space:]]') IP=$(echo -e "${DATA[1]}" | tr -d '[[:space:]]') echo " server octavia-${NAME} ${IP}:80 weight 1" >> ${OCTAVIA_CONF_DIR}/haproxy.cfg done } function configure_rsyslog { sudo cp ${OCTAVIA_DIR}/devstack/etc/rsyslog/10-octavia-log-offloading.conf /etc/rsyslog.d/ sudo sed -e " s|%ADMIN_PORT%|${OCTAVIA_AMP_LOG_ADMIN_PORT}|g; s|%TENANT_PORT%|${OCTAVIA_AMP_LOG_TENANT_PORT}|g; " -i /etc/rsyslog.d/10-octavia-log-offloading.conf } function octavia_start { if ! ps aux | grep -q [o]-hm0 && [ $OCTAVIA_NODE != 'api' ] ; then if [ $SERVICE_IP_VERSION == '6' ] ; then # This is probably out of scope here? Load it from config MGMT_PORT_IP=$(iniget $OCTAVIA_CONF health_manager bind_ip) sudo ip addr add $MGMT_PORT_IP/64 dev o-hm0 sudo ip link set o-hm0 up else sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF fi fi if [ $OCTAVIA_NODE == 'main' ]; then run_process $OCTAVIA_API_HAPROXY "/usr/sbin/haproxy -db -V -f ${OCTAVIA_CONF_DIR}/haproxy.cfg" fi if [[ "$OCTAVIA_USE_MOD_WSGI" == "True" ]]; then _start_octavia_apache_wsgi else run_process $OCTAVIA_API "$OCTAVIA_API_BINARY $OCTAVIA_API_ARGS" fi run_process $OCTAVIA_DRIVER_AGENT "$OCTAVIA_DRIVER_AGENT_BINARY $OCTAVIA_DRIVER_AGENT_ARGS" run_process $OCTAVIA_CONSUMER "$OCTAVIA_CONSUMER_BINARY $OCTAVIA_CONSUMER_ARGS" run_process $OCTAVIA_HOUSEKEEPER "$OCTAVIA_HOUSEKEEPER_BINARY $OCTAVIA_HOUSEKEEPER_ARGS" run_process $OCTAVIA_HEALTHMANAGER "$OCTAVIA_HEALTHMANAGER_BINARY $OCTAVIA_HEALTHMANAGER_ARGS" restart_service rsyslog } function octavia_stop { # octavia-specific stop actions if [[ "$OCTAVIA_USE_MOD_WSGI" == "True" ]]; then _stop_octavia_apache_wsgi else stop_process $OCTAVIA_API fi stop_process $OCTAVIA_DRIVER_AGENT stop_process $OCTAVIA_CONSUMER stop_process $OCTAVIA_HOUSEKEEPER stop_process $OCTAVIA_HEALTHMANAGER # Kill dhclient process started for o-hm0 interface pids=$(ps aux | awk '/[o]-hm0/ { print $2 }') [ ! 
-z "$pids" ] && sudo kill $pids if function_exists octavia_delete_network_interface_device ; then octavia_delete_network_interface_device o-hm0 elif [[ $NEUTRON_AGENT == "openvswitch" || $Q_AGENT == "openvswitch" ]]; then : # Do nothing elif [[ $NEUTRON_AGENT == "linuxbridge" || $Q_AGENT == "linuxbridge" ]]; then if ip link show o-hm0 ; then sudo ip link del o-hm0 fi else die "Unknown network controller. Please define octavia_delete_network_interface_device" fi if [[ ${OCTAVIA_ENABLE_AMPHORAV2_PROVIDER} == True ]]; then stop_redis fi } function octavia_cleanup { if [ ${OCTAVIA_AMP_IMAGE_NAME}x != x ] ; then rm -rf ${OCTAVIA_AMP_IMAGE_NAME}* fi if [ ${OCTAVIA_AMP_SSH_KEY_NAME}x != x ] ; then rm -f ${OCTAVIA_AMP_SSH_KEY_NAME}* fi if [ ${OCTAVIA_SSH_DIR}x != x ] ; then rm -rf ${OCTAVIA_SSH_DIR} fi if [ ${OCTAVIA_CONF_DIR}x != x ] ; then sudo rm -rf ${OCTAVIA_CONF_DIR} fi if [ ${OCTAVIA_RUN_DIR}x != x ] ; then sudo rm -rf ${OCTAVIA_RUN_DIR} fi if [ ${OCTAVIA_AMP_SSH_KEY_PATH}x != x ] ; then rm -f ${OCTAVIA_AMP_SSH_KEY_PATH} ${OCTAVIA_AMP_SSH_KEY_PATH}.pub fi if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then if [ ${OCTAVIA_AMP_SSH_KEY_NAME}x != x ] ; then openstack keypair delete ${OCTAVIA_AMP_SSH_KEY_NAME} fi fi if [[ "$OCTAVIA_USE_MOD_WSGI" == "True" ]]; then _cleanup_octavia_apache_wsgi fi sudo rm -rf $OCTAVIA_DIR/bin/dual_ca sudo rm -rf $OCTAVIA_DIR/bin/single_ca sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR if [[ ${OCTAVIA_ENABLE_AMPHORAV2_PROVIDER} == True ]]; then uninstall_redis fi sudo rm -f /etc/rsyslog.d/10-octavia-log-offloading.conf restart_service rsyslog } function add_load-balancer_roles { openstack role create load-balancer_observer openstack role create load-balancer_global_observer openstack role create load-balancer_member openstack role create load-balancer_admin openstack role create load-balancer_quota_admin openstack role add --user demo --project demo load-balancer_member } function octavia_init { if [ $OCTAVIA_NODE != 'main' ] && [ $OCTAVIA_NODE != 'standalone' ] && [ $OCTAVIA_NODE != 'api' ]; then # without the other services enabled apparently we don't have # credentials at this point # TOP_DIR=$(cd $(dirname "$0") && pwd) source ${TOP_DIR}/openrc admin admin OCTAVIA_AMP_NETWORK_ID=$(openstack network show lb-mgmt-net -f value -c id) iniset $OCTAVIA_CONF controller_worker amp_boot_network_list ${OCTAVIA_AMP_NETWORK_ID} fi if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then # things that should only happen on the ha main node / or once if ! openstack keypair show ${OCTAVIA_AMP_SSH_KEY_NAME} ; then openstack keypair create --public-key ${OCTAVIA_AMP_SSH_KEY_PATH}.pub ${OCTAVIA_AMP_SSH_KEY_NAME} fi # Check if an amphora image is already loaded AMPHORA_IMAGE_NAME=$(openstack image list --property name=${OCTAVIA_AMP_IMAGE_NAME} -f value -c Name) export AMPHORA_IMAGE_NAME if [ "$AMPHORA_IMAGE_NAME" == ${OCTAVIA_AMP_IMAGE_NAME} ]; then echo "Found existing amphora image: $AMPHORA_IMAGE_NAME" echo "Skipping amphora image build" export DISABLE_AMP_IMAGE_BUILD=True fi if ! [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then build_octavia_worker_image fi OCTAVIA_AMP_IMAGE_ID=$(openstack image list -f value --property name=${OCTAVIA_AMP_IMAGE_NAME} -c ID) if [ -n "$OCTAVIA_AMP_IMAGE_ID" ]; then openstack image set --tag ${OCTAVIA_AMP_IMAGE_TAG} --property hw_architecture='x86_64' --property hw_rng_model=virtio ${OCTAVIA_AMP_IMAGE_ID} fi # Create a management network. 
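# The build_mgmt_network helper invoked below amounts to roughly the following
# (illustrative sketch using the IPv4 defaults from devstack/settings; the
# helper also covers the IPv6 case and creates the lb-mgmt-sec-grp and
# lb-health-mgr-sec-grp security groups):
#   openstack network create lb-mgmt-net
#   openstack subnet create --subnet-range 192.168.0.0/24 \
#       --allocation-pool start=192.168.0.2,end=192.168.0.200 \
#       --network lb-mgmt-net lb-mgmt-subnet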
build_mgmt_network OCTAVIA_AMP_NETWORK_ID=$(openstack network show lb-mgmt-net -f value -c id) iniset $OCTAVIA_CONF controller_worker amp_boot_network_list ${OCTAVIA_AMP_NETWORK_ID} create_octavia_accounts add_load-balancer_roles elif [ $OCTAVIA_NODE == 'api' ] ; then create_octavia_accounts add_load-balancer_roles fi if [ $OCTAVIA_NODE != 'api' ] ; then create_mgmt_network_interface create_amphora_flavor configure_lb_mgmt_sec_grp configure_rsyslog fi if ! [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then set_octavia_worker_image_owner_id fi } function _configure_tempest { iniset $TEMPEST_CONFIG service_available octavia "True" } # check for service enabled if is_service_enabled $OCTAVIA; then if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then # main-ha node stuff only if ! is_service_enabled $NEUTRON_ANY; then die "The neutron-api/q-svc service must be enabled to use $OCTAVIA" fi if [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then echo "Found DISABLE_AMP_IMAGE_BUILD == True" echo "Skipping amphora image build" fi fi if [[ "$1" == "stack" && "$2" == "install" ]]; then # Perform installation of service source echo_summary "Installing octavia" octavia_lib_install octavia_install octaviaclient_install elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Configure after the other layer 1 and 2 services have been configured # TODO: need to make sure this runs after LBaaS V2 configuration echo_summary "Configuring octavia" octavia_configure elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # Initialize and start the octavia service echo_summary "Initializing Octavia" octavia_init echo_summary "Starting Octavia" octavia_start elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then if is_service_enabled tempest; then # Configure Tempest for Octavia _configure_tempest fi fi fi if [[ "$1" == "unstack" ]]; then # Shut down Octavia services if is_service_enabled $OCTAVIA; then echo_summary "Stopping octavia" octavia_stop fi fi if [[ "$1" == "clean" ]]; then # Remember clean.sh first calls unstack.sh if is_service_enabled $OCTAVIA; then echo_summary "Cleaning up octavia" octavia_cleanup fi fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/pregenerated/0000775000175000017500000000000000000000000017443 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/pregenerated/certs/0000775000175000017500000000000000000000000020563 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/pregenerated/certs/client_ca.cert.pem0000664000175000017500000000241600000000000024146 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIDjTCCAnWgAwIBAgIJAPJtDNgcwPTZMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV BAYTAlVTMQ8wDQYDVQQIDAZEZW5pYWwxFDASBgNVBAcMC1NwcmluZ2ZpZWxkMQww CgYDVQQKDANEaXMxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAgFw0xNjEwMTQx MzQzNDJaGA8yMDY2MTAwMjEzNDM0MlowXDELMAkGA1UEBhMCVVMxDzANBgNVBAgM BkRlbmlhbDEUMBIGA1UEBwwLU3ByaW5nZmllbGQxDDAKBgNVBAoMA0RpczEYMBYG A1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB CgKCAQEAxptZMcFHFsCXWUxWNOkXXARCvAkZ7MeXDAyKzadWup9Trzn3qdz1h6+e VbPBYTiJeuvX7RWpfN3lhFqy9Y+Fu0ip98zZE7ZjbvUx13BQBkXiJpqsYIoD6IK1 Lh4J9Exllzy7bTQ0f/IX1yrRztXkpRM5KvcbfUrGAMEy4SW6Idc6ZI+lwxvVIhqZ KXAyTBg4f8hMhPO5RYFyaxS2PdNDaTLrvb1aDiuYLqcpDcr4/0YSg0iejklMHovC
oLK/uEFgRGYDSX+Os1CUdtnVzLpkFHZtomtEB0kUug4lZpGQckappLq+dWNTu43O tJzbEa9lpYT8P/nie94tBQYx5+HgSwIDAQABo1AwTjAdBgNVHQ4EFgQUBpJ+Zoky aGdQtMu9NzcoqOPc+yMwHwYDVR0jBBgwFoAUBpJ+ZokyaGdQtMu9NzcoqOPc+yMw DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAJe8mlfQ69kyrIuIdbTtg Kl7ndj7MGQnmNfxytBB5gqUFwswEPKs4VTp3Pp+EStJZxJ8qeeG9B+g3oU3Rhpqc CDhIyCW8shE2ACKLl0zRRk91LDyXASI4UyvjgN71Ti91VZ3oPVvTIefG6CMeI9oD Spl6TbPzCOl2rFrTWmdwM3qIVpmhGntdWnA6btga6Fz7dRwUPwycJyhzfLmnjRlQ 3+QxmF2T5iIYw4B1Lsiz1uy27egMuq2M4Hvd2pSGhCB9l/3ZmEXvbF1aFVcnoEHH /aHqOCx2fQTty1M+qnvofs1dNJlyyxq2LuE4r4wocSTRVfexaichhtsSkjQJ60w1 VA== -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/pregenerated/certs/private/0000775000175000017500000000000000000000000022235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/pregenerated/certs/private/client.cert-and-key.pem0000664000175000017500000001373500000000000026511 0ustar00zuulzuul00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 1 (0x1) Signature Algorithm: sha256WithRSAEncryption Issuer: C=US, ST=Denial, L=Springfield, O=Dis, CN=www.example.com Validity Not Before: Oct 14 13:43:42 2016 GMT Not After : Oct 2 13:43:42 2066 GMT Subject: C=US, ST=Denial, O=Dis, CN=www.example.com Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (2048 bit) Modulus: 00:d3:57:2f:a9:3b:cb:e3:71:ef:db:42:f0:af:c8: 58:95:39:65:93:48:d7:c0:71:db:6b:11:95:3c:92: 01:fa:d6:32:ed:83:53:a6:b7:3f:f1:f4:ba:65:42: f0:b6:53:69:48:94:08:ae:2c:f5:80:53:24:e0:98: 31:21:74:e3:f5:ef:c8:77:76:80:89:02:52:9f:9d: 69:f0:b1:e5:83:55:6f:ec:dd:aa:e7:92:09:d1:a1: 17:e4:cc:42:69:13:82:42:3c:71:e2:d4:e8:22:5f: b1:74:c9:2c:31:0a:70:5c:42:f7:77:d1:e1:76:83: 8f:f1:a2:06:20:55:e3:ea:fa:65:5c:83:89:7e:32: 20:8b:45:2a:51:0b:34:f1:f5:77:15:7b:fc:f0:6d: e4:34:7d:54:8e:8a:f3:0a:a6:f1:7f:d6:65:2b:b3: ef:82:17:31:97:f4:71:5f:67:d7:80:11:d5:43:82: 2f:0f:4e:39:49:45:0c:a8:8e:1a:29:7a:4e:bf:94: c8:af:42:2f:9f:bb:e9:43:18:f3:a6:9a:e8:c8:ad: eb:df:2c:94:fe:2b:a7:60:27:fc:b8:1a:3c:2e:6e: f3:60:51:e7:0c:53:70:de:88:b6:6d:6c:6a:21:17: 0a:17:d3:e5:94:fc:13:79:33:8f:6d:e7:89:b7:66: 7e:29 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Comment: OpenSSL Generated Certificate X509v3 Subject Key Identifier: 73:7C:07:15:F5:CD:48:57:D5:D2:79:DF:0E:C4:E8:7A:29:1F:03:16 X509v3 Authority Key Identifier: keyid:06:92:7E:66:89:32:68:67:50:B4:CB:BD:37:37:28:A8:E3:DC:FB:23 Signature Algorithm: sha256WithRSAEncryption be:89:f1:3e:5e:3b:72:80:96:8b:74:d5:1d:06:14:02:d9:35: b5:41:ed:6e:43:c1:d6:b9:1d:07:08:4e:c6:93:d0:a2:b8:93: 81:71:34:d1:a8:f5:1d:d5:48:6f:14:af:14:65:69:1b:4e:9f: 87:25:a2:62:fa:99:fd:c1:e6:ce:fb:87:44:38:b7:8b:c7:1d: 88:0e:61:2f:14:d8:61:b2:bd:01:b5:a6:6c:11:76:b6:57:a1: 03:cb:6a:8e:dc:97:25:33:75:49:a9:44:d7:08:6c:3d:ae:2e: fe:4e:69:47:c1:3b:43:6a:fe:89:10:9f:3a:7f:7b:28:61:3b: 4a:62:bb:c3:7f:01:7b:90:e1:38:e2:83:b4:c1:4f:ac:5a:12: 9b:5e:4b:64:9e:50:d8:6f:79:7f:8f:f1:4e:4b:eb:9e:0e:b7: 64:36:9c:cf:bc:7c:bd:a4:1e:37:a6:5f:2e:b1:24:88:50:cc: 68:91:95:b8:9e:2a:00:5a:fb:28:eb:a1:9b:4f:54:cd:01:d3: 90:34:b4:5d:aa:db:2e:90:37:0b:a6:8c:3c:80:43:c2:88:2d: 00:b1:a1:5d:fe:4e:98:02:57:5f:fb:fc:78:7d:59:04:96:9c: 2a:1a:be:ca:5b:87:2b:66:bc:55:6f:14:dd:85:e1:b5:4b:6f: f7:c7:dd:eb -----BEGIN CERTIFICATE----- MIIDmjCCAoKgAwIBAgIBATANBgkqhkiG9w0BAQsFADBcMQswCQYDVQQGEwJVUzEP 
MA0GA1UECAwGRGVuaWFsMRQwEgYDVQQHDAtTcHJpbmdmaWVsZDEMMAoGA1UECgwD RGlzMRgwFgYDVQQDDA93d3cuZXhhbXBsZS5jb20wIBcNMTYxMDE0MTM0MzQyWhgP MjA2NjEwMDIxMzQzNDJaMEYxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIDAZEZW5pYWwx DDAKBgNVBAoMA0RpczEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkq hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA01cvqTvL43Hv20Lwr8hYlTllk0jXwHHb axGVPJIB+tYy7YNTprc/8fS6ZULwtlNpSJQIriz1gFMk4JgxIXTj9e/Id3aAiQJS n51p8LHlg1Vv7N2q55IJ0aEX5MxCaROCQjxx4tToIl+xdMksMQpwXEL3d9HhdoOP 8aIGIFXj6vplXIOJfjIgi0UqUQs08fV3FXv88G3kNH1UjorzCqbxf9ZlK7Pvghcx l/RxX2fXgBHVQ4IvD045SUUMqI4aKXpOv5TIr0Ivn7vpQxjzpproyK3r3yyU/iun YCf8uBo8Lm7zYFHnDFNw3oi2bWxqIRcKF9PllPwTeTOPbeeJt2Z+KQIDAQABo3sw eTAJBgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBD ZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUc3wHFfXNSFfV0nnfDsToeikfAxYwHwYDVR0j BBgwFoAUBpJ+ZokyaGdQtMu9NzcoqOPc+yMwDQYJKoZIhvcNAQELBQADggEBAL6J 8T5eO3KAlot01R0GFALZNbVB7W5Dwda5HQcITsaT0KK4k4FxNNGo9R3VSG8UrxRl aRtOn4clomL6mf3B5s77h0Q4t4vHHYgOYS8U2GGyvQG1pmwRdrZXoQPLao7clyUz dUmpRNcIbD2uLv5OaUfBO0Nq/okQnzp/eyhhO0piu8N/AXuQ4Tjig7TBT6xaEpte S2SeUNhveX+P8U5L654Ot2Q2nM+8fL2kHjemXy6xJIhQzGiRlbieKgBa+yjroZtP VM0B05A0tF2q2y6QNwumjDyAQ8KILQCxoV3+TpgCV1/7/Hh9WQSWnCoavspbhytm vFVvFN2F4bVLb/fH3es= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDTVy+pO8vjce/b QvCvyFiVOWWTSNfAcdtrEZU8kgH61jLtg1Omtz/x9LplQvC2U2lIlAiuLPWAUyTg mDEhdOP178h3doCJAlKfnWnwseWDVW/s3arnkgnRoRfkzEJpE4JCPHHi1OgiX7F0 ySwxCnBcQvd30eF2g4/xogYgVePq+mVcg4l+MiCLRSpRCzTx9XcVe/zwbeQ0fVSO ivMKpvF/1mUrs++CFzGX9HFfZ9eAEdVDgi8PTjlJRQyojhopek6/lMivQi+fu+lD GPOmmujIrevfLJT+K6dgJ/y4GjwubvNgUecMU3DeiLZtbGohFwoX0+WU/BN5M49t 54m3Zn4pAgMBAAECggEAZu5MwUDlYaZJauHkdci/FBa7WQueQRVzB2et5q06F6Ah d7qBkG4pz78g1VbQBA0F9xpaS/KLs29LQ7P8Ic5bhJm/aiemHJSsBx9UzKzoGpoP BC9GILjo3Vd3WrD9G04sH/Ruh0qosK0osbeVNWFfLiBThOEMzXrwLYB7OV57viJI 4YAXGOzOgK3aMHF8cYRRgTDIi2dGAMH1EyIIB8gKYlp1PdMmaTOk2LBhechuImRX 4LgvM1fUdJ7utyQKEXMJEg+wzV9BMlX6nvM3vVWdYZy2Hsu9DDyJUFYQk9cDpXNP RF4jjLUtz6gEZOlotOQgPWqLANJrt/BdVfyeA97psQKBgQD7SeNlQd2bu8GfH0vB mjzSWmJ3nDnpeaUR9MIYVQ6zNlvYPjM2BMVQtE5+VWK15YOjD5L9SoresNKubrSv wzNFeqf6Dvq7zJ+6Rkst7GcRV/P3D4C3ZeKeDNjVm4eMRCa5ttIJlLmfqffeLO9M RSanNjnjwWENgsXCCvlVBfc9ZQKBgQDXTY8X9ug9xVlqBR4TMfzXBadzP+nDqYd9 MkH3tEltLba0vP4vKyjQa8A9FMzSRr9bv13mNpAbFEDGnhzv1l5OlHTM6tG//Rxq nnhmFLFWZl8WowP0LiPTafrDjGEX/7iDAJjAtSacBBm6EGaM8igWEQT0WXwsQbTw rlRolJ5DdQKBgQDgMBJ80x+IAiGC+iPXLOjYbqTsu2d7YfigJXJIzRHZV0Tnjs6X gfgbwVFKKplvWL1xa8Ki0a9FcBH2Z3QyXv9OHFjiohyWEb/rKy2FYiSt938Dy0P1 2yMsCKAnKqPqwx6dj3qh65sT1Er8X7B6pjMO+TT6ehtBN4uBS9MYRMNIdQKBgQDU 6UztTOzDUSqn7mGcZ916IYxDK1wXcsmapB2aQD4wanl4aEEREiQtX7DednhKJU5N A4RvCVweezvHbkp9Xscp/CM5FanQqxPz17yGbkYkg93au+BIE2y4P+CMioDlw6uK WQe14i5JMMDkQB25mirMD46PuQJTnbK6JBsyxG1xlQKBgGtcSY0AyVq00p0kkxNm KhzI+17T0j0CuczJ/X+NvCUjLsx3NTJatRkJNYHWG7jUqs1vvtZbHVspQeteMlEi rNE/xz98iG2eC8AdW+TmZvySmIZgoAoPuopUvBzRiyfLQVh4pPuikbTDghEn+CSG WSyOd/I4JsH4xQFJC89nnm5M -----END PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/pregenerated/certs/private/server_ca.key.pem0000664000175000017500000000334600000000000025506 0ustar00zuulzuul00000000000000-----BEGIN RSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED DEK-Info: AES-128-CBC,B6C2D5A9657E9635BE06551CAD6EF969 N90cGt5rEntmiPvIAQwbO9W02blpDRZLJYMJeqttqxttnq6+InYQL3M4nJmR8XVz /bCjWhMQlh5kEKzBtjhu5xFXqYhF3q9UcA6/13VY4gicrSHwwpoVLP0X2IXFp6ub t4haSggaH6F2ZxF9DJCVG6+GyqOpuTPlGD4QiEf40NTo7x2H+JCEveLsIaSUljTV W/XZDk1RSo8hMpr+huqCQOZxfhEuM76gSK8wPW3nCzVoBMCk/1RpMcXq8A7FT9gd 
0V+2jwucDPOEVrTLmYjh/Aln6ATdte2l/b9XKPnAoVW6psYw83pu2hXtjgfCI+ey IbRvzJ9djPvx0qhEu/EQIcKLFfNt/+OExm7rce8+O6NcB1x+bFbvCLamPYQxtcjE xjqOWD0QT+VtIdqnG631jctN2mocmhVWfmp6le1RlkwfKSsbS1lb6Lcj/TasTlai 5c6hfYB83drlJUw0374PuWn8Tb62HGaROK8JEG07CcgNT1l8KXHrCpLzwEQvRtP+ Bze+mlbjScm21ny280huQz5hiNdDrH9q/YzVHcHEVICAnimEsZeaQCyEt0Um9h56 gvTZ6Udh/SeetBsL77hQ3EwDYs2nNdacaOIu5tASrfdMXWdSiLiNR8zK7y7x4a0b GrgrerYJPWdb2axy4rrhzzlPRTHCJL1gA/E3CYC5mObk07tCMoQt7Ak3dofto9jG 1CSRLGqbP31k7tXBOLCwNAYekQkDWRQV4u0vf2aWJdLjxLwiX7424E6p/cvaUi5B Sv+Iit3Zuee7Tq6DK0rv+5oWZmyfC/rzHcqmAMUhnjfBBlcI1N22BrBEBpfX6zq1 DnIwiS9ayJMzaExSS+tBuqoHuoLMo2Fn++NpYxIUrwtQBvAD1Qxqx6QacTGFK025 UpyV/ML+FdENujwU6KYYdciHX3E7nU4UYC/qwT7u9B/k3OiTS37GSlnz4ZkU34cF UiBcN2gXqYYxsonD37vUX40oTjrQYaQJbWcGgcyNw7Z5U4GV7t1ZFcxNBuE485pE jqZiDkeP5zmk+r9AB7djUpcowQ0TpPs1SthPsllv/LidusA8DwmeGp063fa1wScv gH6iJ40HRc7ffwN4ikk409L8awjpSA+HyXC+BsjIaG9uyaoy6XpjjQHrl/kZgeS2 Nm3wvq00OFKYLi8UgmXlrRNMyNc/osTSAesdJeaiNHUM/+nrdTL1SaOvht/6i07B bG7Vqv3LtpWvd8fDhSPR/1eiBaYBzDJ+jx25oX5Wbv4/AbsG5/BEgfrBJnMddPyv Y8X6LY3IpUqRx1sf1L3ia3YxWp5r3bfcCQvVL0W6brEKxbw8BTHFrS3qaBOOfLrC XuiMKEUcSlexxYnYcJr1RnBYQ4HqcAOCbqQAhXqFv5nge+5gSskP8MS/FtGZ0+nm wi2ak3WmZbpr08mVnjHVhhxnuuVm7esYhNJLwXvSITXfUPPgpjvzYe0ABLdtWVuo s4NsU/1XG33I4r+gnrHQyFxsgaZ3rr5VpcbTHLzDzBgTRWk06AZB/nxyfAexE67U VHRL+4FP+ee5CxpWkT8i0/n2PJ/U/42+pglZmxEzIw76PqcT0aqmnpSwsEnnMH0w -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/pregenerated/certs/server_ca-chain.cert.pem0000664000175000017500000000241600000000000025256 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIDjTCCAnWgAwIBAgIJAPJtDNgcwPTZMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV BAYTAlVTMQ8wDQYDVQQIDAZEZW5pYWwxFDASBgNVBAcMC1NwcmluZ2ZpZWxkMQww CgYDVQQKDANEaXMxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAgFw0xNjEwMTQx MzQzNDJaGA8yMDY2MTAwMjEzNDM0MlowXDELMAkGA1UEBhMCVVMxDzANBgNVBAgM BkRlbmlhbDEUMBIGA1UEBwwLU3ByaW5nZmllbGQxDDAKBgNVBAoMA0RpczEYMBYG A1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB CgKCAQEAxptZMcFHFsCXWUxWNOkXXARCvAkZ7MeXDAyKzadWup9Trzn3qdz1h6+e VbPBYTiJeuvX7RWpfN3lhFqy9Y+Fu0ip98zZE7ZjbvUx13BQBkXiJpqsYIoD6IK1 Lh4J9Exllzy7bTQ0f/IX1yrRztXkpRM5KvcbfUrGAMEy4SW6Idc6ZI+lwxvVIhqZ KXAyTBg4f8hMhPO5RYFyaxS2PdNDaTLrvb1aDiuYLqcpDcr4/0YSg0iejklMHovC oLK/uEFgRGYDSX+Os1CUdtnVzLpkFHZtomtEB0kUug4lZpGQckappLq+dWNTu43O tJzbEa9lpYT8P/nie94tBQYx5+HgSwIDAQABo1AwTjAdBgNVHQ4EFgQUBpJ+Zoky aGdQtMu9NzcoqOPc+yMwHwYDVR0jBBgwFoAUBpJ+ZokyaGdQtMu9NzcoqOPc+yMw DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAJe8mlfQ69kyrIuIdbTtg Kl7ndj7MGQnmNfxytBB5gqUFwswEPKs4VTp3Pp+EStJZxJ8qeeG9B+g3oU3Rhpqc CDhIyCW8shE2ACKLl0zRRk91LDyXASI4UyvjgN71Ti91VZ3oPVvTIefG6CMeI9oD Spl6TbPzCOl2rFrTWmdwM3qIVpmhGntdWnA6btga6Fz7dRwUPwycJyhzfLmnjRlQ 3+QxmF2T5iIYw4B1Lsiz1uy27egMuq2M4Hvd2pSGhCB9l/3ZmEXvbF1aFVcnoEHH /aHqOCx2fQTty1M+qnvofs1dNJlyyxq2LuE4r4wocSTRVfexaichhtsSkjQJ60w1 VA== -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/pregenerated/certs/server_ca.cert.pem0000664000175000017500000000241600000000000024176 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIDjTCCAnWgAwIBAgIJAPJtDNgcwPTZMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV BAYTAlVTMQ8wDQYDVQQIDAZEZW5pYWwxFDASBgNVBAcMC1NwcmluZ2ZpZWxkMQww CgYDVQQKDANEaXMxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAgFw0xNjEwMTQx MzQzNDJaGA8yMDY2MTAwMjEzNDM0MlowXDELMAkGA1UEBhMCVVMxDzANBgNVBAgM BkRlbmlhbDEUMBIGA1UEBwwLU3ByaW5nZmllbGQxDDAKBgNVBAoMA0RpczEYMBYG 
A1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB CgKCAQEAxptZMcFHFsCXWUxWNOkXXARCvAkZ7MeXDAyKzadWup9Trzn3qdz1h6+e VbPBYTiJeuvX7RWpfN3lhFqy9Y+Fu0ip98zZE7ZjbvUx13BQBkXiJpqsYIoD6IK1 Lh4J9Exllzy7bTQ0f/IX1yrRztXkpRM5KvcbfUrGAMEy4SW6Idc6ZI+lwxvVIhqZ KXAyTBg4f8hMhPO5RYFyaxS2PdNDaTLrvb1aDiuYLqcpDcr4/0YSg0iejklMHovC oLK/uEFgRGYDSX+Os1CUdtnVzLpkFHZtomtEB0kUug4lZpGQckappLq+dWNTu43O tJzbEa9lpYT8P/nie94tBQYx5+HgSwIDAQABo1AwTjAdBgNVHQ4EFgQUBpJ+Zoky aGdQtMu9NzcoqOPc+yMwHwYDVR0jBBgwFoAUBpJ+ZokyaGdQtMu9NzcoqOPc+yMw DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAJe8mlfQ69kyrIuIdbTtg Kl7ndj7MGQnmNfxytBB5gqUFwswEPKs4VTp3Pp+EStJZxJ8qeeG9B+g3oU3Rhpqc CDhIyCW8shE2ACKLl0zRRk91LDyXASI4UyvjgN71Ti91VZ3oPVvTIefG6CMeI9oD Spl6TbPzCOl2rFrTWmdwM3qIVpmhGntdWnA6btga6Fz7dRwUPwycJyhzfLmnjRlQ 3+QxmF2T5iIYw4B1Lsiz1uy27egMuq2M4Hvd2pSGhCB9l/3ZmEXvbF1aFVcnoEHH /aHqOCx2fQTty1M+qnvofs1dNJlyyxq2LuE4r4wocSTRVfexaichhtsSkjQJ60w1 VA== -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/pregenerated/regenerate-certs.sh0000775000175000017500000000102500000000000023237 0ustar00zuulzuul00000000000000#!/bin/bash GEN_DIR=/tmp/certs rm -rf $GEN_DIR bash ../../bin/create_certificates.sh $GEN_DIR $(pwd)/../../etc/certificates/openssl.cnf for file in client.key client.pem ca_01.pem private/cakey.pem; do cp -v $GEN_DIR/$file certs/$file done echo "" echo Validating client cert with CA: openssl verify -verbose -CAfile certs/ca_01.pem certs/client.pem echo "" echo CA expiration time: openssl x509 -enddate -noout -in certs/ca_01.pem echo "" echo Client cert expiration time: openssl x509 -enddate -noout -in certs/client.pem ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/pregenerated/ssh-keys/0000775000175000017500000000000000000000000021211 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/pregenerated/ssh-keys/octavia_ssh_key0000664000175000017500000000322000000000000024304 0ustar00zuulzuul00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA/V449K2GRGBMypMBVkIBZRfDFVDUeJvEebVlCuNW33bmcblS x8LB3+oCclhhfToCrtHO5Hndk2oMCJyQRojCcuox4Uauq5I+0cIo1mowmdlqFIDP 7YQEJVnJZQah96F468LY3dc9fyp+2Y3XVeeOjY3ChuBSUaQUb58aNH7lSgS/QwHv /6sYRmej16CBmYK+NQlxgBFShA9M1F+DNVBnk229iP2+uwfmQyCAv188Ts/tDb3e 974QOmv+vJqJo0nnJIYQd0jOlLIiDfHC3+JsWslYGk8YbBeLsxkdljnLHpIY47pb i6L4Sy993tlb/2XfbCyw+L+dFoZhynNxyt/c4QIDAQABAoIBAFDaqq5aWcikOp1C wGB4e9148cZxnvxGKTL10iLhXa2+Udfk3iflXN1J3jIDRkkiJA0J405CHZWXd/Of kuMPbY4icnyDg+Y4q1dg8ItMI+pU2Wdlm/Ud9fy9ZGma7kEKBH6oFXDl6TgVpZlj jF5boMBHhtZn650mEWd1jHVIMX+m1Z3lA9dA3qsDTLDmh5IPeH4InWumCn59qw3Z lMu8cKZLpiAJNEx428P0DbOMpTMgmgFIrRFMQeMRHukxf1X6UeHS3UgHUmTnA2jG IbGJShNQywxI1pAJKR6BgUJqxZZ1ukcWl8gO4bedkaTejJWIp65KwI7xMNPgYQEO V+8PfGECgYEA/v9r/ypQzkUEsxyNUqKhJ/02rgSSGdzQT52Fi82O1e2j63PbRPBL izkA9LkDoxz2RDnG6H3BFfj0QrCbDiV2DqtxBp+xu+mua60JysnjoTRQo1rXS/kr cDLsNL0q3s/dBNwUCwyoveHdX5V72E5ueqY/vhRbjHV66hzNno7ryqUCgYEA/l0p LIovymkpqG9wAquvyQXLbQk5qx71CXX0yjip5BEcPmQrEIbV0CwUtL3wKmuHx8xR dyyvTwSYWANHFVzB85itpAnRdJcRz02SU/4Qq2pMXbp/6oBK3CwAW0xp0l3k4Yol +SnfZkaQ8jcNDSb5oYxjsl0Jj40T7V3MTCd4QI0CgYEAoUYYHqy7qIl8PG+9bdsP g8QhFhQr9xFx6jidIttiECkZOCvxLPuxO59U3HI7O6lwk5vbEmWeffATRC6AEoVc 0lBZzq+ncEqOFum8vLXNMsJskbQ9YH55m5+JRp2xhHQAvDcYshhSjK1SHkbjqd2J ACcvP1+Ouxn+IB0RasvHk0UCgYEAgDhd5QHTjWjtguaJxA7fkanGHbSkyUnVo2s+ 
diGSIlEtt5Wuz6noZgOSfHmycu+5hlHMTxLLXD2ovdUJJA+aBT1Vanc4ilkMtT8Z IBXWOVJgJG86w+7fzZSwqVUfkteZ5MdK1Qryfg/cSPzPK24WMAUgzGxxwVcQUHsT 3N+YkpECgYB4fzJ10b4ZuYYQRSUAxcfQXTqAR1LH9WS0axGQhJrpxtUe9Jur1eJV NF+o9kcAhFqVCuoJXFn/puDqsYNz4MBYHMXd8S7DVbdOyZs0h/F3lLyTmWS99tjt cG7xtFl7/75WcbgITcJSbeACKGpC6g6U2vFF5IeM4wA0gOwY1G24fw== -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/pregenerated/ssh-keys/octavia_ssh_key.pub0000664000175000017500000000061200000000000025073 0ustar00zuulzuul00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD9Xjj0rYZEYEzKkwFWQgFlF8MVUNR4m8R5tWUK41bfduZxuVLHwsHf6gJyWGF9OgKu0c7ked2TagwInJBGiMJy6jHhRq6rkj7RwijWajCZ2WoUgM/thAQlWcllBqH3oXjrwtjd1z1/Kn7ZjddV546NjcKG4FJRpBRvnxo0fuVKBL9DAe//qxhGZ6PXoIGZgr41CXGAEVKED0zUX4M1UGeTbb2I/b67B+ZDIIC/XzxOz+0Nvd73vhA6a/68momjSeckhhB3SM6UsiIN8cLf4mxayVgaTxhsF4uzGR2WOcsekhjjuluLovhLL33e2Vv/Zd9sLLD4v50WhmHKc3HK39zh vagrant@main ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/samples/0000775000175000017500000000000000000000000016442 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/README-Vagrant.md0000664000175000017500000000463300000000000021327 0ustar00zuulzuul00000000000000This file describes how to use Vagrant (http://www.vagrantup.com) to create a devstack virtual environment that contains two nova instances running a simple web server and a working Neutron LBaaS Version 2 load balancer backed by Octavia. 1) Install vagrant on your host machine. Vagrant is available for Windows, Mac OS, and most Linux distributions. Download and install the package appropriate for your system. On Ubuntu, simply type: sudo apt-get install vagrant 2) Copy the sample files from this directory to any appropriate directory mkdir $HOME/lbaas-octavia-vagrant # or any other appropriate directory cp -rfp ./* $HOME/lbaas-octavia-vagrant # run from octavia/devstack/samples 3) Continue either with the single node deployment (6GB RAM minimum), or with the multinode deployment (12GB RAM minimum). Single node deployment ~~~~~~~~~~~~~~~~~~~~~~ 1) Create and deploy the environment VM cd $HOME/lbaas-octavia-vagrant/singlenode vagrant up Alternatively, you can specify the number of vcpus or memory: VM_CPUS=4 VM_MEMORY=8192 vagrant up 2) Wait for the vagrant VM to boot and install, typically 20-30 minutes 3) SSH into the vagrant box vagrant ssh 4) Continue on the common section below
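(Optional) Before moving on, a quick smoke test of the Octavia control plane can be run from inside the VM; the commands below are illustrative and their availability depends on the installed python-octaviaclient version:

    source ~/devstack/openrc admin admin
    openstack loadbalancer provider list
    openstack loadbalancer amphora list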
Multinode ~~~~~~~~~ This will create an environment where the octavia services are replicated across two nodes, and in front of the octavia api, an haproxy is configured to distribute traffic among both API servers, and provide failure tolerance. Please note that the database is a single mysql instance, with no clustering. 1) Create and deploy the environment VMs cd $HOME/lbaas-octavia-vagrant/multinode vagrant up main 2) Wait for the main node to be deployed, and then start the second node vagrant up second 3) Log in to the main node, and run local-manual.sh now that everything is deployed vagrant ssh main cd devstack ./local-manual.sh logout 4) SSH into any of the vagrant boxes: vagrant ssh main vagrant ssh second 5) Continue on the common section below Common to multinode and single node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1) Determine the loadbalancer IP: source openrc admin admin openstack loadbalancer show lb1 -f value -c vip_address 2) Make HTTP requests to test your load balancer: curl <vip>, where <vip> is the VIP address for lb1. The subsequent invocations of "curl <vip>" should demonstrate that the load balancer is alternating between two member nodes. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/samples/multinode/0000775000175000017500000000000000000000000020442 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/multinode/Vagrantfile0000775000175000017500000000247400000000000022635 0ustar00zuulzuul00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : require '../providers.rb' Vagrant.configure(2) do |config| config.vm.define 'main' do |main| configure_providers(main.vm) main.vm.network "private_network", ip:"192.168.42.10" main.vm.hostname = "main" main.vm.provision "shell", privileged: false, inline: <<-SHELL #!/usr/bin/env bash set -e sudo apt-get update sudo apt-get -y upgrade sudo apt-get -y install git git clone https://opendev.org/openstack/devstack cp /vagrant/local.conf ~/devstack cp /vagrant/local.sh ~/devstack/local-manual.sh cp /vagrant/webserver.sh ~/devstack cd ~/devstack ./stack.sh SHELL end config.vm.define 'second' do |second| configure_providers(second.vm) second.vm.network "private_network", ip:"192.168.42.11" second.vm.hostname = "second" second.vm.provision "shell", privileged: false, inline: <<-SHELL #!/usr/bin/env bash set -e sudo apt-get update sudo apt-get -y upgrade sudo apt-get -y install git git clone https://opendev.org/openstack/devstack cp /vagrant/local-2.conf ~/devstack/local.conf cd ~/devstack ./stack.sh SHELL end end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/multinode/local-2.conf0000664000175000017500000000356400000000000022552 0ustar00zuulzuul00000000000000[[local|localrc]] # The name of the RECLONE environment variable is a bit misleading. It doesn't actually # reclone repositories, rather it uses git fetch to make sure the repos are current. RECLONE=True
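# Note: enable_plugin accepts an optional third argument (a git ref) when a
# plugin must be pinned to a branch instead of master, e.g. (illustrative):
#   enable_plugin octavia https://opendev.org/openstack/octavia stable/ussuri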
# Load the external LBaaS plugin. enable_plugin neutron https://opendev.org/openstack/neutron enable_plugin octavia https://opendev.org/openstack/octavia LIBS_FROM_GIT+=python-octaviaclient DATABASE_PASSWORD=password ADMIN_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password RABBIT_PASSWORD=password # Enable Logging LOGFILE=$DEST/logs/stack.sh.log VERBOSE=True LOG_COLOR=True # Nova enable_service n-cpu # Neutron enable_service neutron enable_service neutron-agent enable_service neutron-qos # LBaaS V2 and Octavia enable_service octavia enable_service o-api enable_service o-cw enable_service o-hm enable_service o-hk #NOTE(mangelajo): there are possibly bugs in the housekeeper that need to be # addressed to make it fully stateless. Now as per @lingxian # the housekeeper could create more spare amphoras than needed # in parallel nodes. OCTAVIA_USE_PREGENERATED_CERTS=True OCTAVIA_USE_PREGENERATED_SSH_KEY=True OCTAVIA_CONTROLLER_IP_PORT_LIST=192.168.0.3:5555,192.168.0.4:5555 OCTAVIA_NODE=second # we are not enabling the mysql service here, but this is necessary # to get the connection string constructed DATABASE_TYPE=mysql NEUTRON_CORE_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan LOGFILE=$DEST/logs/stack.sh.log # Old log files are automatically removed after 7 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. LOGDAYS=2 HOST_IP=192.168.42.11 SERVICE_HOST=192.168.42.10 MULTI_HOST=1 NEUTRON_SERVICE_HOST=$SERVICE_HOST MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=$SERVICE_HOST:9292 NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/multinode/local.conf0000664000175000017500000000363600000000000022413 0ustar00zuulzuul00000000000000[[local|localrc]] # The name of the RECLONE environment variable is a bit misleading. It doesn't actually # reclone repositories, rather it uses git fetch to make sure the repos are current. RECLONE=True # Load the external Octavia plugin.
enable_plugin barbican https://opendev.org/openstack/barbican enable_plugin neutron https://opendev.org/openstack/neutron enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard enable_plugin octavia https://opendev.org/openstack/octavia LIBS_FROM_GIT+=python-octaviaclient DATABASE_PASSWORD=password ADMIN_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password RABBIT_PASSWORD=password # Enable Logging LOGFILE=$DEST/logs/stack.sh.log VERBOSE=True LOG_COLOR=True # Pre-requisites enable_service rabbit enable_service mysql enable_service key # Horizon enable_service horizon # Nova enable_service n-api enable_service n-cpu enable_service n-cond enable_service n-sch # Placement service needed for Nova enable_service placement-api enable_service placement-client # Glance enable_service g-api enable_service g-reg # Neutron enable_service neutron enable_service neutron-api enable_service neutron-agent enable_service neutron-dhcp enable_service neutron-l3 enable_service neutron-metadata-agent enable_service neutron-qos # Octavia enable_service octavia enable_service o-cw enable_service o-hm enable_service o-hk enable_service o-api enable_service o-api-ha enable_service o-da OCTAVIA_USE_PREGENERATED_CERTS=True OCTAVIA_USE_PREGENERATED_SSH_KEY=True OCTAVIA_CONTROLLER_IP_PORT_LIST=192.168.0.3:5555,192.168.0.4:5555 OCTAVIA_NODE=main OCTAVIA_NODES=main:192.168.42.10,second:192.168.42.11 NEUTRON_CORE_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan LOGFILE=$DEST/logs/stack.sh.log # Old log files are automatically removed after 7 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. LOGDAYS=2 HOST_IP=192.168.42.10 MULTI_HOST=1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/multinode/local.sh0000775000175000017500000001027600000000000022101 0ustar00zuulzuul00000000000000#!/usr/bin/env bash set -ex # Sample ``local.sh`` that configures two simple webserver instances and sets # up a Neutron LBaaS Version 2 loadbalancer backed by Octavia. 
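# A timeout-guarded variant of the wait_for_loadbalancer_active helper defined
# below. This is an illustrative sketch and is not called by default; the
# 600 second budget is an assumption, not an Octavia-defined value.
function wait_for_loadbalancer_active_or_timeout {
    local lb_name=$1
    local timeout=${2:-600}
    local waited=0
    while [ $(openstack loadbalancer show $lb_name -f value -c provisioning_status) != "ACTIVE" ]; do
        sleep 2
        waited=$((waited + 2))
        if [ $waited -ge $timeout ]; then
            echo "Timed out waiting for $lb_name to become ACTIVE"
            return 1
        fi
    done
}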
# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) BOOT_DELAY=60 # Import common functions source ${TOP_DIR}/functions # Use openrc + stackrc for settings source ${TOP_DIR}/stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} # Polling functions function wait_for_loadbalancer_active { lb_name=$1 while [ $(openstack loadbalancer show $lb_name -f value -c provisioning_status) != "ACTIVE" ]; do sleep 2 done } if is_service_enabled nova; then # Unset DOMAIN env variables that are not needed for keystone v2 and set OpenStack demo user auth unset OS_USER_DOMAIN_ID unset OS_PROJECT_DOMAIN_ID source ${TOP_DIR}/openrc demo demo # Create an SSH key to use for the instances DEVSTACK_LBAAS_SSH_KEY_NAME=DEVSTACK_LBAAS_SSH_KEY_RSA DEVSTACK_LBAAS_SSH_KEY_DIR=${TOP_DIR} DEVSTACK_LBAAS_SSH_KEY=${DEVSTACK_LBAAS_SSH_KEY_DIR}/${DEVSTACK_LBAAS_SSH_KEY_NAME} rm -f ${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY} ssh-keygen -b 2048 -t rsa -f ${DEVSTACK_LBAAS_SSH_KEY} -N "" openstack keypair create --public-key=${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY_NAME} # Add tcp/22,80 and icmp to default security group openstack security group rule create --protocol tcp --dst-port 22:22 default openstack security group rule create --protocol tcp --dst-port 80:80 default openstack security group rule create --protocol icmp default # Boot some instances NOVA_BOOT_ARGS="--key-name ${DEVSTACK_LBAAS_SSH_KEY_NAME} --image $(openstack image show cirros-0.3.5-x86_64-disk -f value -c id) --flavor 1 --nic net-id=$(openstack network show private -f value -c id)" openstack server create ${NOVA_BOOT_ARGS} node1 openstack server create ${NOVA_BOOT_ARGS} node2 echo "Waiting ${BOOT_DELAY} seconds for instances to boot" sleep ${BOOT_DELAY} IP1=$(openstack server show node1 | awk '/private/ {ip = substr($4, 9, length($4)-9) ; if (ip ~ "\\.") print ip ; else print $5}') IP2=$(openstack server show node2 | awk '/private/ {ip = substr($4, 9, length($4)-9) ; if (ip ~ "\\.") print ip ; else print $5}') touch ~/.ssh/known_hosts ssh-keygen -R ${IP1} ssh-keygen -R ${IP2} # Get Neutron router namespace details NAMESPACE_NAME='qrouter-'$(openstack router show router1 -f value -c id) NAMESPACE_CMD_PREFIX='sudo ip netns exec' # Run a simple web server on the instances chmod 0755 ${TOP_DIR}/webserver.sh $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP1}:webserver.sh $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP2}:webserver.sh $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME ssh -o UserKnownHostsFile=/dev/null -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no -q cirros@${IP1} "screen -d -m sh webserver.sh" $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME ssh -o UserKnownHostsFile=/dev/null -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no -q cirros@${IP2} "screen -d -m sh webserver.sh" fi if is_service_enabled octavia; then SUBNET_ID=$(openstack subnet show private-subnet -f value -c id) openstack loadbalancer create --name lb1 --vip-subnet-id $SUBNET_ID wait_for_loadbalancer_active lb1 openstack loadbalancer listener create lb1 --protocol HTTP --protocol-port 80 --name listener1 wait_for_loadbalancer_active lb1 openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 wait_for_loadbalancer_active lb1 openstack loadbalancer member create --subnet-id $SUBNET_ID --address 
${IP1} --protocol-port 80 pool1 wait_for_loadbalancer_active lb1 openstack loadbalancer member create --subnet-id $SUBNET_ID --address ${IP2} --protocol-port 80 pool1 fi echo "How to test load balancing:" echo "" echo "${NAMESPACE_CMD_PREFIX} ${NAMESPACE_NAME} curl $(openstack loadbalancer show lb1 -f value -c vip_address)" echo "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/multinode/webserver.sh0000664000175000017500000000042600000000000023004 0ustar00zuulzuul00000000000000#!/bin/sh MYIP=$(/sbin/ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}'); OUTPUT_STR="Welcome to $MYIP\r" OUTPUT_LEN=${#OUTPUT_STR} while true; do echo -e "HTTP/1.0 200 OK\r\nContent-Length: ${OUTPUT_LEN}\r\n\r\n${OUTPUT_STR}" | sudo nc -l -p 80 done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/providers.rb0000664000175000017500000000102300000000000021000 0ustar00zuulzuul00000000000000# defaults VM_MEMORY = ENV['VM_MEMORY'] || "8192" VM_CPUS = ENV['VM_CPUS'] || "1" def configure_providers(vm) vm.provider "virtualbox" do |vb, config| config.vm.box = "ubuntu/bionic64" vb.gui = true vb.memory = VM_MEMORY vb.cpus = VM_CPUS end vm.provider "libvirt" do |lb, config| config.vm.box = "celebdor/bionic64" config.vm.synced_folder './', '/vagrant', type: 'rsync' lb.nested = true lb.memory = VM_MEMORY lb.cpus = VM_CPUS lb.suspend_mode = 'managedsave' end end ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3542166 octavia-6.2.2/devstack/samples/singlenode/0000775000175000017500000000000000000000000020571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/singlenode/Vagrantfile0000775000175000017500000000702100000000000022761 0ustar00zuulzuul00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # All Vagrant configuration is done below. The "2" in Vagrant.configure # configures the configuration version (we support older styles for # backwards compatibility). Please don't change it unless you know what # you're doing. Vagrant.configure(2) do |config| # defaults VM_MEMORY = ENV['VM_MEMORY'] || "8192" VM_CPUS = ENV['VM_CPUS'] || "1" # The most common configuration options are documented and commented below. # For a complete reference, please see the online documentation at # https://docs.vagrantup.com. # Every Vagrant development environment requires a box. You can search for # boxes at https://atlas.hashicorp.com/search. config.vm.box = "ubuntu/xenial64" # Disable automatic box update checking. If you disable this, then # boxes will only be checked for updates when the user runs # `vagrant box outdated`. This is not recommended. # config.vm.box_check_update = false # Create a forwarded port mapping which allows access to a specific port # within the machine from a port on the host machine. In the example below, # accessing "localhost:8080" will access port 80 on the guest machine. #config.vm.network "forwarded_port", guest: 80, host: 8080 # Create a private network, which allows host-only access to the machine # using a specific IP. # config.vm.network "private_network", ip: "192.168.33.10" # Create a public network, which generally matched to bridged network. # Bridged networks make the machine appear as another physical device on # your network. 
# config.vm.network "public_network" # Share an additional folder to the guest VM. The first argument is # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. # config.vm.synced_folder "../data", "/vagrant_data" # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. # Example for VirtualBox: # config.vm.provider "virtualbox" do |vb| # Display the VirtualBox GUI when booting the machine vb.gui = true # Customize the amount of memory on the VM: vb.memory = VM_MEMORY vb.cpus = VM_CPUS end config.vm.provider "libvirt" do |lb, config| config.vm.box = "celebdor/xenial64" config.vm.synced_folder './', '/vagrant', type: 'rsync' lb.nested = true lb.memory = VM_MEMORY lb.cpus = VM_CPUS lb.suspend_mode = 'managedsave' end # # View the documentation for the provider you are using for more # information on available options # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies # such as FTP and Heroku are also available. See the documentation at # https://docs.vagrantup.com/v2/push/atlas.html for more information. # config.push.define "atlas" do |push| # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" # end # Enable provisioning with a shell script. Additional provisioners such as # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the # documentation for more information about their specific syntax and use. config.vm.provision "shell", privileged: false, inline: <<-SHELL #!/usr/bin/env bash sudo apt-get update sudo apt-get -y upgrade sudo apt-get -y install git git clone https://opendev.org/openstack/devstack cp /vagrant/local.* /vagrant/webserver.sh ~/devstack cd ~/devstack ./stack.sh SHELL end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/singlenode/local.conf0000664000175000017500000000447200000000000022541 0ustar00zuulzuul00000000000000# Sample ``local.conf`` that builds a devstack with neutron LBaaS Version 2 # NOTE: Copy this file to the root DevStack directory for it to work properly. # ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``. # This gives it the ability to override any variables set in ``stackrc``. # Also, most of the settings in ``stack.sh`` are written to only be set if no # value has already been set; this lets ``local.conf`` effectively override the # default values. # The ``localrc`` section replaces the old ``localrc`` configuration file. # Note that if ``localrc`` is present it will be used in favor of this section. [[local|localrc]] # The name of the RECLONE environment variable is a bit misleading. It doesn't actually # reclone repositories, rather it uses git fetch to make sure the repos are current. RECLONE=True # Load the external Octavia plugin. 
enable_plugin barbican https://opendev.org/openstack/barbican enable_plugin neutron https://opendev.org/openstack/neutron enable_plugin octavia https://opendev.org/openstack/octavia enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard LIBS_FROM_GIT+=python-octaviaclient DATABASE_PASSWORD=password ADMIN_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password RABBIT_PASSWORD=password # Enable Logging LOGFILE=$DEST/logs/stack.sh.log VERBOSE=True LOG_COLOR=True # Pre-requisites enable_service rabbit enable_service mysql enable_service key # Horizon enable_service horizon # Nova enable_service n-api enable_service n-cpu enable_service n-cond enable_service n-sch # Placement service needed for Nova enable_service placement-api enable_service placement-client # Glance enable_service g-api enable_service g-reg # Neutron enable_service neutron enable_service neutron-api enable_service neutron-agent enable_service neutron-dhcp enable_service neutron-l3 enable_service neutron-metadata-agent enable_service neutron-qos # Octavia enable_service octavia enable_service o-cw enable_service o-hm enable_service o-hk enable_service o-api enable_service o-da # enable DVR NEUTRON_CORE_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan Q_DVR_MODE=dvr_snat LOGFILE=$DEST/logs/stack.sh.log # Old log files are automatically removed after 7 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. LOGDAYS=2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/singlenode/local.sh0000775000175000017500000001027600000000000022230 0ustar00zuulzuul00000000000000#!/usr/bin/env bash set -ex # Sample ``local.sh`` that configures two simple webserver instances and sets # up a Neutron LBaaS Version 2 loadbalancer backed by Octavia. 
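# Once this script completes, round-robin balancing can be spot-checked from
# the router namespace with something like the following (illustrative; see
# the hints echoed at the end of this script):
#   LB_VIP=$(openstack loadbalancer show lb1 -f value -c vip_address)
#   for i in 1 2 3 4; do
#       sudo ip netns exec qrouter-<router-id> curl -s http://$LB_VIP/
#   done
# Alternating "Welcome to <member ip>" responses show both members serving.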
# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) BOOT_DELAY=60 # Import common functions source ${TOP_DIR}/functions # Use openrc + stackrc for settings source ${TOP_DIR}/stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} # Polling functions function wait_for_loadbalancer_active { lb_name=$1 while [ $(openstack loadbalancer show $lb_name -f value -c provisioning_status) != "ACTIVE" ]; do sleep 2 done } if is_service_enabled nova; then # Unset DOMAIN env variables that are not needed for keystone v2 and set OpenStack demo user auth unset OS_USER_DOMAIN_ID unset OS_PROJECT_DOMAIN_ID source ${TOP_DIR}/openrc demo demo # Create an SSH key to use for the instances DEVSTACK_LBAAS_SSH_KEY_NAME=DEVSTACK_LBAAS_SSH_KEY_RSA DEVSTACK_LBAAS_SSH_KEY_DIR=${TOP_DIR} DEVSTACK_LBAAS_SSH_KEY=${DEVSTACK_LBAAS_SSH_KEY_DIR}/${DEVSTACK_LBAAS_SSH_KEY_NAME} rm -f ${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY} ssh-keygen -b 2048 -t rsa -f ${DEVSTACK_LBAAS_SSH_KEY} -N "" openstack keypair create --public-key=${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY_NAME} # Add tcp/22,80 and icmp to default security group openstack security group rule create --protocol tcp --dst-port 22:22 default openstack security group rule create --protocol tcp --dst-port 80:80 default openstack security group rule create --protocol icmp default # Boot some instances NOVA_BOOT_ARGS="--key-name ${DEVSTACK_LBAAS_SSH_KEY_NAME} --image $(openstack image show cirros-0.3.5-x86_64-disk -f value -c id) --flavor 1 --nic net-id=$(openstack network show private -f value -c id)" openstack server create ${NOVA_BOOT_ARGS} node1 openstack server create ${NOVA_BOOT_ARGS} node2 echo "Waiting ${BOOT_DELAY} seconds for instances to boot" sleep ${BOOT_DELAY} IP1=$(openstack server show node1 | awk '/private/ {ip = substr($4, 9, length($4)-9) ; if (ip ~ "\\.") print ip ; else print $5}') IP2=$(openstack server show node2 | awk '/private/ {ip = substr($4, 9, length($4)-9) ; if (ip ~ "\\.") print ip ; else print $5}') touch ~/.ssh/known_hosts ssh-keygen -R ${IP1} ssh-keygen -R ${IP2} # Get Neutron router namespace details NAMESPACE_NAME='qrouter-'$(openstack router show router1 -f value -c id) NAMESPACE_CMD_PREFIX='sudo ip netns exec' # Run a simple web server on the instances chmod 0755 ${TOP_DIR}/webserver.sh $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP1}:webserver.sh $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP2}:webserver.sh $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME ssh -o UserKnownHostsFile=/dev/null -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no -q cirros@${IP1} "screen -d -m sh webserver.sh" $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME ssh -o UserKnownHostsFile=/dev/null -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no -q cirros@${IP2} "screen -d -m sh webserver.sh" fi if is_service_enabled octavia; then SUBNET_ID=$(openstack subnet show private-subnet -f value -c id) openstack loadbalancer create --name lb1 --vip-subnet-id $SUBNET_ID wait_for_loadbalancer_active lb1 openstack loadbalancer listener create lb1 --protocol HTTP --protocol-port 80 --name listener1 wait_for_loadbalancer_active lb1 openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 wait_for_loadbalancer_active lb1 openstack loadbalancer member create --subnet-id $SUBNET_ID --address 
${IP1} --protocol-port 80 pool1 wait_for_loadbalancer_active lb1 openstack loadbalancer member create --subnet-id $SUBNET_ID --address ${IP2} --protocol-port 80 pool1 fi echo "How to test load balancing:" echo "" echo "${NAMESPACE_CMD_PREFIX} ${NAMESPACE_NAME} curl $(openstack loadbalancer show lb1 -f value -c vip_address)" echo "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/samples/singlenode/webserver.sh0000664000175000017500000000042600000000000023133 0ustar00zuulzuul00000000000000#!/bin/sh MYIP=$(/sbin/ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}'); OUTPUT_STR="Welcome to $MYIP\r" OUTPUT_LEN=${#OUTPUT_STR} while true; do echo -e "HTTP/1.0 200 OK\r\nContent-Length: ${OUTPUT_LEN}\r\n\r\n${OUTPUT_STR}" | sudo nc -l -p 80 done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/settings0000664000175000017500000001330700000000000016565 0ustar00zuulzuul00000000000000# settings for octavia devstack plugin OCTAVIA=${OCTAVIA:-"octavia"} OCTAVIA_DIR=${OCTAVIA_DIR:-"${DEST}/octavia"} DISKIMAGE_BUILDER_REPO_URL=${DISKIMAGE_BUILDER_REPO_URL:-${GIT_BASE}/openstack/diskimage-builder.git} DISKIMAGE_BUILDER_REPO_REF=${DISKIMAGE_BUILDER_REPO_REF:-master} DISKIMAGE_BUILDER_DIR=$DEST/diskimage-builder OCTAVIA_BIN_DIR=${OCTAVIA_BIN_DIR:-$(get_python_exec_prefix)} OCTAVIA_CONF_DIR=${OCTAVIA_CONF_DIR:-"/etc/octavia"} OCTAVIA_SSH_DIR=${OCTAVIA_SSH_DIR:-${OCTAVIA_CONF_DIR}/.ssh} OCTAVIA_CERTS_DIR=${OCTAVIA_CERTS_DIR:-${OCTAVIA_CONF_DIR}/certs} # This needs to be under /etc/dhcp for apparmor # See https://storyboard.openstack.org/#!/story/1673269 OCTAVIA_DHCLIENT_DIR=${OCTAVIA_DHCLIENT_DIR:-"/etc/dhcp/octavia"} OCTAVIA_DHCLIENT_CONF=${OCTAVIA_DHCLIENT_CONF:-${OCTAVIA_DHCLIENT_DIR}/dhclient.conf} OCTAVIA_CONF=${OCTAVIA_CONF:-${OCTAVIA_CONF_DIR}/octavia.conf} OCTAVIA_AUDIT_MAP=${OCTAVIA_AUDIT_MAP:-${OCTAVIA_CONF_DIR}/octavia_api_audit_map.conf} OCTAVIA_RUN_DIR=${OCTAVIA_RUN_DIR:-"/var/run/octavia"} OCTAVIA_AMPHORA_DRIVER=${OCTAVIA_AMPHORA_DRIVER:-"amphora_haproxy_rest_driver"} OCTAVIA_NETWORK_DRIVER=${OCTAVIA_NETWORK_DRIVER:-"allowed_address_pairs_driver"} OCTAVIA_COMPUTE_DRIVER=${OCTAVIA_COMPUTE_DRIVER:-"compute_nova_driver"} OCTAVIA_VOLUME_DRIVER=${OCTAVIA_VOLUME_DRIVER:-"volume_noop_driver"} OCTAVIA_USERNAME=${OCTAVIA_ADMIN_USER:-"admin"} OCTAVIA_PASSWORD=${OCTAVIA_PASSWORD:-${ADMIN_PASSWORD}} OCTAVIA_PROJECT_NAME=${OCTAVIA_PROJECT_NAME:-$OCTAVIA_USERNAME} OCTAVIA_USER_DOMAIN_NAME=${OCTAVIA_USER_DOMAIN_NAME:-"Default"} OCTAVIA_PROJECT_DOMAIN_NAME=${OCTAVIA_PROJECT_DOMAIN_NAME:-"Default"} OCTAVIA_PROTOCOL=${OCTAVIA_PROTOCOL:-$SERVICE_PROTOCOL} OCTAVIA_PORT=${OCTAVIA_PORT:-"9876"} OCTAVIA_HA_PORT=${OCTAVIA_HA_PORT:-"9875"} OCTAVIA_HM_LISTEN_PORT=${OCTAVIA_HM_LISTEN_PORT:-"5555"} OCTAVIA_ENABLE_AMPHORAV2_PROVIDER=${OCTAVIA_ENABLE_AMPHORAV2_PROVIDER:-False} OCTAVIA_MGMT_SUBNET=${OCTAVIA_MGMT_SUBNET:-"192.168.0.0/24"} OCTAVIA_MGMT_SUBNET_START=${OCTAVIA_MGMT_SUBNET_START:-"192.168.0.2"} OCTAVIA_MGMT_SUBNET_END=${OCTAVIA_MGMT_SUBNET_END:-"192.168.0.200"} OCTAVIA_MGMT_SUBNET_IPV6=${OCTAVIA_MGMT_SUBNET_IPV6:-"fd00:0:0:42::/64"} OCTAVIA_MGMT_SUBNET_IPV6_START=${OCTAVIA_MGMT_SUBNET_IPV6_START:-"fd00:0:0:42:0:0:0:2"} OCTAVIA_MGMT_SUBNET_IPV6_END=${OCTAVIA_MGMT_SUBNET_IPV6_END:-"fd00:0:0:42:ffff:ffff:ffff:ffff"} OCTAVIA_AMP_SSH_KEY_BITS=${OCTAVIA_SSH_KEY_BITS:-"2048"} OCTAVIA_AMP_SSH_KEY_TYPE=${OCTAVIA_SSH_KEY_TYPE:-"rsa"} 
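# Note the override knobs for the two settings above are OCTAVIA_SSH_KEY_BITS
# and OCTAVIA_SSH_KEY_TYPE (without AMP); like every ${VAR:-default} in this
# file they can be set from local.conf, e.g. (illustrative):
#   OCTAVIA_SSH_KEY_BITS=4096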
OCTAVIA_AMP_SSH_KEY_PATH=${OCTAVIA_SSH_KEY_PATH:-${OCTAVIA_SSH_DIR}/octavia_ssh_key} OCTAVIA_AMP_SSH_KEY_NAME=${OCTAVIA_AMP_SSH_KEY_NAME:-"octavia_ssh_key"} OCTAVIA_AMP_FLAVOR_ID=${OCTAVIA_AMP_FLAVOR_ID:-"10"} OCTAVIA_AMP_IMAGE_NAME=${OCTAVIA_AMP_IMAGE_NAME:-"amphora-x64-haproxy"} OCTAVIA_AMP_IMAGE_FILE=${OCTAVIA_AMP_IMAGE_FILE:-${OCTAVIA_DIR}/diskimage-create/${OCTAVIA_AMP_IMAGE_NAME}.qcow2} OCTAVIA_AMP_IMAGE_TAG="amphora" OCTAVIA_AMP_CONN_TIMEOUT=${OCTAVIA_AMP_CONN_TIMEOUT:-"10"} OCTAVIA_AMP_READ_TIMEOUT=${OCTAVIA_AMP_READ_TIMEOUT:-"120"} OCTAVIA_AMP_LOG_ADMIN_PORT=${OCTAVIA_AMP_LOG_ADMIN_PORT:="10514"} OCTAVIA_AMP_LOG_TENANT_PORT=${OCTAVIA_AMP_LOG_TENANT_PORT:="20514"} OCTAVIA_HEALTH_KEY=${OCTAVIA_HEALTH_KEY:-"insecure"} OCTAVIA_LB_TOPOLOGY=${OCTAVIA_LB_TOPOLOGY:-"SINGLE"} OCTAVIA_AMP_EXPIRY_AGE=${OCTAVIA_AMP_EXPIRY_AGE:-"3600"} OCTAVIA_LB_EXPIRY_AGE=${OCTAVIA_LB_EXPIRY_AGE:-"3600"} OCTAVIA_USE_MOD_WSGI=${OCTAVIA_USE_MOD_WSGI:-True} OCTAVIA_API_BINARY=${OCTAVIA_API_BINARY:-${OCTAVIA_BIN_DIR}/octavia-api} OCTAVIA_CONSUMER_BINARY=${OCTAVIA_CONSUMER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-worker} OCTAVIA_HOUSEKEEPER_BINARY=${OCTAVIA_HOUSEKEEPER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-housekeeping} OCTAVIA_HEALTHMANAGER_BINARY=${OCTAVIA_HEALTHMANAGER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-health-manager} OCTAVIA_DRIVER_AGENT_BINARY=${OCTAVIA_DRIVER_AGENT_BINARY:-${OCTAVIA_BIN_DIR}/octavia-driver-agent} OCTAVIA_API_ARGS=${OCTAVIA_API_ARGS:-" --config-file $OCTAVIA_CONF"} OCTAVIA_CONSUMER_ARGS=${OCTAVIA_CONSUMER_ARGS:-" --config-file $OCTAVIA_CONF"} OCTAVIA_HOUSEKEEPER_ARGS=${OCTAVIA_HOUSEKEEPER_ARGS:-" --config-file $OCTAVIA_CONF"} OCTAVIA_HEALTHMANAGER_ARGS=${OCTAVIA_HEALTHMANAGER_ARGS:-" --config-file $OCTAVIA_CONF"} OCTAVIA_DRIVER_AGENT_ARGS=${OCTAVIA_DRIVER_AGENT_ARGS:-" --config-file $OCTAVIA_CONF"} OCTAVIA_API="o-api" OCTAVIA_CONSUMER="o-cw" OCTAVIA_HOUSEKEEPER="o-hk" OCTAVIA_HEALTHMANAGER="o-hm" OCTAVIA_SERVICE="octavia" OCTAVIA_API_HAPROXY="o-api-ha" OCTAVIA_DRIVER_AGENT="o-da" # Client settings GITREPO["python-octaviaclient"]=${OCTAVIACLIENT_REPO:-${GIT_BASE}/openstack/python-octaviaclient.git} GITBRANCH["python-octaviaclient"]=${OCTAVIACLIENT_BRANCH:-master} GITDIR["python-octaviaclient"]=$DEST/python-octaviaclient # Library settings GITREPO["octavia-lib"]=${OCTAVIA_LIB_REPO:-${GIT_BASE}/openstack/octavia-lib.git} GITBRANCH["octavia-lib"]=${OCTAVIA_LIB_BRANCH:-master} GITDIR["octavia-lib"]=$DEST/octavia-lib NEUTRON_ANY=${NEUTRON_ANY:-"q-svc neutron-api"} # HA-deployment related settings OCTAVIA_USE_PREGENERATED_SSH_KEY=${OCTAVIA_USE_PREGENERATED_SSH_KEY:-"False"} OCTAVIA_PREGENERATED_SSH_KEY_PATH=${OCTAVIA_PREGENERATED_SSH_KEY_PATH:-"${OCTAVIA_DIR}/devstack/pregenerated/ssh-keys/octavia_ssh_key"} OCTAVIA_USE_PREGENERATED_CERTS=${OCTAVIA_USE_PREGENERATED_CERTS:-"False"} OCTAVIA_PREGENERATED_CERTS_DIR=${OCTAVIA_PREGENERATED_CERTS_DIR:-"${OCTAVIA_DIR}/devstack/pregenerated/certs"} OCTAVIA_NODE=${OCTAVIA_NODE:-"standalone"} OCTAVIA_CONTROLLER_IP_PORT_LIST=${OCTAVIA_CONTROLLER_IP_PORT_LIST:-"auto"} OCTAVIA_MGMT_PORT_IP=${OCTAVIA_MGMT_PORT_IP:-"auto"} OCTAVIA_DIB_TRACING=${OCTAVIA_DIB_TRACING:-"1"} OCTAVIA_SERVICE_TYPE="load-balancer" OCTAVIA_UWSGI_APP=${OCTAVIA_UWSGI_APP:-${OCTAVIA_BIN_DIR}/octavia-wsgi} OCTAVIA_UWSGI_CONF=${OCTAVIA_UWSGI_CONF:-${OCTAVIA_CONF_DIR}/octavia-uwsgi.ini} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/devstack/upgrade/0000775000175000017500000000000000000000000016425 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/upgrade/resources.sh0000775000175000017500000001166100000000000021003 0ustar00zuulzuul00000000000000#!/bin/bash set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $TOP_DIR/openrc admin demo set -o xtrace OCTAVIA_GRENADE_DIR=$(dirname $0) INSTANCE_USER_DATA_FILE=$OCTAVIA_GRENADE_DIR/vm_user_data.sh DEFAULT_INSTANCE_FLAVOR=${DEFAULT_INSTANCE_FLAVOR:-m1.tiny} PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} # $1: desired provisioning_status # $2: desired operating_status # $3..n: command with arguments and parameters # TODO(cgoncalves): set timeout function _wait_for_status { while true; do eval $("${@:3}" -f shell -c provisioning_status -c operating_status) [[ $operating_status == "$2" && $provisioning_status == "$1" ]] && break if [ $provisioning_status == "ERROR" ]; then die $LINENO "ERROR creating load balancer" fi sleep 10 done } function create { # TODO(cgoncalves): make create idempotent for resiliency in testing # NOTE(cgoncalves): OS_USERNAME=demo is set to overcome security group name collision sc_rule_id=$(OS_USERNAME=demo openstack security group rule create -f value -c id --protocol tcp --ingress --dst-port 80 default) resource_save octavia sc_rule_id $sc_rule_id # create VMs vm1_ips=$(openstack server create -f value -c addresses --user-data $INSTANCE_USER_DATA_FILE --flavor $DEFAULT_INSTANCE_FLAVOR --image $DEFAULT_IMAGE_NAME --network $PRIVATE_NETWORK_NAME --wait vm1) vm2_ips=$(openstack server create -f value -c addresses --user-data $INSTANCE_USER_DATA_FILE --flavor $DEFAULT_INSTANCE_FLAVOR --image $DEFAULT_IMAGE_NAME --network $PRIVATE_NETWORK_NAME --wait vm2) vm1_ipv4=$(echo $vm1_ips | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+') vm2_ipv4=$(echo $vm2_ips | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+') openstack loadbalancer create --name lb1 --vip-subnet-id $PUBLIC_SUBNET_NAME _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1 _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer listener show listener1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer pool show pool1 openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path / --name hm1 pool1 _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer healthmonitor show hm1 openstack loadbalancer member create --subnet-id $PRIVATE_SUBNET_NAME --address $vm1_ipv4 --protocol-port 80 pool1 --name member1 _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer member show pool1 member1 openstack loadbalancer member create --subnet-id $PRIVATE_SUBNET_NAME --address $vm2_ipv4 --protocol-port 80 pool1 --name member2 _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer member show pool1 member2 lb_vip_ip=$(openstack loadbalancer show -f value -c vip_address lb1) resource_save octavia lb_vip_ip $lb_vip_ip echo "Octavia create: SUCCESS" } function verify { # verify control plane openstack loadbalancer show -f value -c operating_status lb1 | grep -q ONLINE openstack loadbalancer listener show -f value -c operating_status listener1 |
grep -q ONLINE openstack loadbalancer pool show -f value -c operating_status pool1 | grep -q ONLINE openstack loadbalancer healthmonitor show -f value -c operating_status hm1 | grep -q ONLINE openstack loadbalancer member show -f value -c operating_status pool1 member1 | grep -q ONLINE openstack loadbalancer member show -f value -c operating_status pool1 member2 | grep -q ONLINE # verify data plane lb_vip_ip=$(resource_get octavia lb_vip_ip) curl --include -D lb.out $lb_vip_ip grep -q "^HTTP/1.1 200 OK" lb.out echo "Octavia verify: SUCCESS" } function verify_noapi { # verify data plane lb_vip_ip=$(resource_get octavia lb_vip_ip) curl --include -D lb.out $lb_vip_ip grep -q "^HTTP/1.1 200 OK" lb.out echo "Octavia verify_noapi: SUCCESS" } function destroy { sc_rule_id=$(resource_get octavia sc_rule_id) # make destroy idempotent for resiliency in testing openstack loadbalancer show lb1 && openstack loadbalancer delete --cascade lb1 openstack server show vm1 && openstack server delete vm1 openstack server show vm2 && openstack server delete vm2 openstack security group rule show $sc_rule_id && openstack security group rule delete $sc_rule_id echo "Octavia destroy: SUCCESS" } # Dispatcher case $1 in "create") create ;; "verify_noapi") verify_noapi ;; "verify") verify ;; "destroy") destroy ;; "force_destroy") set +o errexit destroy ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/upgrade/settings0000664000175000017500000000015600000000000020212 0ustar00zuulzuul00000000000000register_project_for_upgrade octavia register_db_to_save octavia BASE_RUN_SMOKE=False TARGET_RUN_SMOKE=False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/upgrade/shutdown.sh0000775000175000017500000000111200000000000020632 0ustar00zuulzuul00000000000000#!/bin/bash set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions # We need base DevStack functions for this source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache source $BASE_DEVSTACK_DIR/lib/neutron OCTAVIA_DEVSTACK_DIR=$(dirname $(dirname $0)) source $OCTAVIA_DEVSTACK_DIR/settings source $OCTAVIA_DEVSTACK_DIR/plugin.sh set -o xtrace octavia_stop # sanity check that service is actually down ensure_services_stopped o-api o-cw o-hk o-hm ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/upgrade/upgrade.sh0000775000175000017500000000400700000000000020414 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-octavia`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "********************************************************************" echo "ERROR: Abort $0" echo "********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the
first error that occurred. set -o errexit # Upgrade octavia # ============ # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/apache source $TARGET_DEVSTACK_DIR/lib/tls source $(dirname $(dirname $BASH_SOURCE))/settings source $(dirname $(dirname $BASH_SOURCE))/plugin.sh # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following along as the install occurs. set -o xtrace # Save current config files for posterity [[ -d $SAVE_DIR/etc.octavia ]] || cp -pr $OCTAVIA_CONF_DIR $SAVE_DIR/etc.octavia # Install the target octavia octavia_lib_install octavia_install # calls upgrade-octavia for specific release upgrade_project octavia $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # Migrate the database octavia-db-manage upgrade head || die $LINENO "DB migration error" octavia_start # Don't succeed unless the services come up ensure_services_started o-api o-cw o-hm o-hk set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/devstack/upgrade/vm_user_data.sh0000775000175000017500000000023100000000000021431 0ustar00zuulzuul00000000000000#!/bin/sh -v Body=$(hostname) Response="HTTP/1.1 200 OK\r\nContent-Length: ${#Body}\r\n\r\n$Body" while true; do echo -e $Response | nc -llp 80 done ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/diskimage-create/0000775000175000017500000000000000000000000016370 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/diskimage-create/README.rst0000664000175000017500000003074000000000000020063 0ustar00zuulzuul00000000000000=============================== Building Octavia Amphora Images =============================== Octavia is an operator-grade reference implementation for Load Balancing as a Service (LBaaS) for OpenStack. The component of Octavia that does the load balancing is known as amphora. Amphora may be a virtual machine, may be a container, or may run on bare metal. Creating images for bare metal amphora installs is outside the scope of this version but may be added in a future release. Prerequisites ============= Python pip should be installed as well as the python modules found in the requirements.txt file. To do so, you can use the following command on Ubuntu: .. code:: bash $ # Install python pip $ sudo apt install python-pip $ # Eventually create a virtualenv $ sudo apt install python-virtualenv $ virtualenv octavia_disk_image_create $ source octavia_disk_image_create/bin/activate $ # Install octavia requirements $ cd octavia/diskimage-create $ pip install -r requirements.txt Your cache directory should have at least 1GB available, the working directory will need ~1.5GB, and your image destination will need ~500MB The script will use the version of diskimage-builder installed on your system, or it can be overridden by setting the following environment variables: .. code-block:: bash DIB_REPO_PATH = //diskimage-builder DIB_ELEMENTS = //diskimage-builder/elements The following packages are required on each platform: Ubuntu ..
code:: bash $ sudo apt install qemu-utils git kpartx debootstrap Fedora, CentOS and Red Hat Enterprise Linux .. code:: bash $ sudo dnf install qemu-img git e2fsprogs policycoreutils-python-utils Test Prerequisites ------------------ The tox image tests require libguestfs-tools 1.24 or newer. Libguestfs allows testing the Amphora image without requiring root privileges. On Ubuntu systems you also need to give read access to the kernels for the user running the tests: .. code:: bash $ sudo chmod 0644 /boot/vmlinuz* Usage ===== This script and associated elements will build Amphora images. Current support is with an Ubuntu base OS and HAProxy. The script can use Fedora as a base OS, but these images will not initially be tested or supported. As the project progresses and/or the diskimage-builder project adds support for additional base OS options they may become available for Amphora images. This does not mean that they are necessarily supported or tested. .. note:: If your cloud has multiple hardware architectures available to nova, remember to set the appropriate hw_architecture property on the image when you load it into glance. For example, when loading an amphora image built for "amd64" you would add "--property hw_architecture='x86_64'" to your "openstack image create" command line. The script will use environment variables to customize the build beyond the Octavia project defaults, such as adding elements. The supported and tested image is created by using the diskimage-create.sh defaults (no command line parameters or environment variables set). As the project progresses we may add additional supported configurations. Command syntax: .. code-block:: $ diskimage-create.sh [-a i386 | **amd64** | armhf | ppc64le ] [-b **haproxy** ] [-c **~/.cache/image-create** | ] [-d **bionic**/**8** | ] [-e] [-f] [-g **repository branch** | stable/train | stable/stein | ... ] [-h] [-i **ubuntu-minimal** | fedora | centos-minimal | rhel ] [-k ] [-l ] [-n] [-o **amphora-x64-haproxy** | ] [-p] [-r ] [-s **2** | ] [-t **qcow2** | tar | vhd | raw ] [-v] [-w ] [-x] '-a' is the architecture type for the image (default: amd64) '-b' is the backend type (default: haproxy) '-c' is the path to the cache directory (default: ~/.cache/image-create) '-d' distribution release id (default on ubuntu: bionic) '-e' enable complete mandatory access control systems when available (default: permissive) '-f' disable tmpfs for build '-g' build the image for a specific OpenStack Git branch (default: current repository branch) '-h' display help message '-i' is the base OS (default: ubuntu-minimal) '-k' is the kernel meta package name, currently only for ubuntu-minimal base OS (default: linux-image-virtual) '-l' is output logfile (default: none) '-n' disable sshd (default: enabled) '-o' is the output image file name '-p' install amphora-agent from distribution packages (default: disabled) '-r' enable the root account in the generated image (default: disabled) '-s' is the image size to produce in gigabytes (default: 2) '-t' is the image type (default: qcow2) '-v' display the script version '-w' working directory for image building (default: .) '-x' enable tracing for diskimage-builder Building Images for Alternate Branches ====================================== By default, the diskimage-create.sh script will build an amphora image using the Octavia Git branch of the repository. If you need an image for a specific branch, such as "stable/train", you need to specify the "-g" option with the branch name. An example for "stable/train" would be: ..
code-block:: bash diskimage-create.sh -g stable/train Advanced Git Branch/Reference Based Images ------------------------------------------ If you need to build an image from a local repository or with a specific Git reference or branch, you will need to set some environment variables for diskimage-builder. .. note:: These advanced settings will override the "-g" diskimage-create.sh setting. Building From a Local Octavia Repository ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Set the DIB_REPOLOCATION_amphora_agent variable to the location of the Git repository containing the amphora agent: .. code-block:: bash export DIB_REPOLOCATION_amphora_agent=/opt/stack/octavia Building With a Specific Git Reference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Set the DIB_REPOREF_amphora_agent variable to point to the Git branch or reference of the amphora agent: .. code-block:: bash export DIB_REPOREF_amphora_agent=refs/changes/40/674140/7 See the `Environment Variables`_ section below for additional information and examples. Amphora Agent Upper Constraints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You may also need to specify which version of the OpenStack upper-constraints.txt file will be used to build the image. For example, to specify the "stable/train" upper constraints Git branch, set the following environment variable: .. code-block:: bash export DIB_REPOLOCATION_upper_constraints=https://opendev.org/openstack/requirements/raw/branch/stable/train/upper-constraints.txt See `Dependency Management for OpenStack Projects `_ for more information. Environment Variables ===================== These are optional environment variables that can be set to override the script defaults. DIB_REPOLOCATION_amphora_agent - Location of the amphora-agent code that will be installed in the image. - Default: https://opendev.org/openstack/octavia - Example: /tmp/octavia DIB_REPOREF_amphora_agent - The Git reference to check out for the amphora-agent code inside the image. - Default: The current branch - Example: stable/stein - Example: refs/changes/40/674140/7 DIB_REPOLOCATION_upper_constraints - Location of the upper-constraints.txt file used for the image. - Default: The upper-constraints.txt for the current branch - Example: https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt - Example: https://opendev.org/openstack/requirements/raw/branch/stable/train/upper-constraints.txt CLOUD_INIT_DATASOURCES - Comma-separated list of cloud-init datasources - Default: ConfigDrive - Options: NoCloud, ConfigDrive, OVF, MAAS, Ec2, - Reference: https://launchpad.net/cloud-init DIB_DISTRIBUTION_MIRROR - URL to a mirror for the base OS selected - Default: None DIB_ELEMENTS - Override the elements used to build the image - Default: None DIB_LOCAL_ELEMENTS - Elements to add to the build (requires DIB_LOCAL_ELEMENTS_PATH to be specified) - Default: None DIB_LOCAL_ELEMENTS_PATH - Path to the local elements directory - Default: None DIB_REPO_PATH - Directory containing diskimage-builder - Default: /diskimage-builder - Reference: https://github.com/openstack/diskimage-builder OCTAVIA_REPO_PATH - Directory containing octavia - Default: - Reference: https://github.com/openstack/octavia Using distribution packages for amphora agent --------------------------------------------- By default, the amphora agent is installed from the Octavia Git repository. To use distribution packages, use the "-p" option. Note that this needs a base system image with the required repositories enabled (for example RDO repositories for CentOS/Fedora).
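As a minimal sketch (the base OS, release id, and image size shown here are
illustrative choices, not project recommendations, and assume the needed
package repositories are already enabled), a package-based CentOS build
could be invoked as:

.. code-block:: bash

   $ ./diskimage-create.sh -i centos-minimal -d 8 -p -s 3

The ``-s 3`` reflects that RHEL/CentOS based amphora images require a size
of at least 3GB.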
One of these variables must be set: DIB_LOCAL_IMAGE - Path to the locally downloaded image - Default: None DIB_CLOUD_IMAGES - Directory base URL to download the image from - Default: depends on the distribution RHEL specific variables ------------------------ Building a RHEL-based image requires: - a Red Hat Enterprise Linux KVM Guest Image, manually downloaded from the Red Hat Customer Portal. Set the DIB_LOCAL_IMAGE variable to point to the file. More details at: /elements/rhel - a Red Hat subscription for the matching Red Hat OpenStack Platform repository if you want to install the amphora agent from the official distribution package (requires setting -p option in diskimage-create.sh). Set the needed registration parameters depending on your configuration. More details at: /elements/rhel-common Here is an example with Customer Portal registration and OSP 15 repository: .. code:: bash $ export DIB_LOCAL_IMAGE='/tmp/rhel-server-8.0-x86_64-kvm.qcow2' $ export REG_METHOD='portal' REG_REPOS='rhel-8-server-openstack-15-rpms' $ export REG_USER='' REG_PASSWORD='' REG_AUTO_ATTACH=true This example uses registration via a Satellite (the activation key must enable an OSP repository): .. code:: bash $ export DIB_LOCAL_IMAGE='/tmp/rhel-server-8.1-x86_64-kvm.qcow2' $ export REG_METHOD='satellite' REG_ACTIVATION_KEY="" $ export REG_SAT_URL="" REG_ORG="" Building in a virtualenv with tox --------------------------------- To make use of a virtualenv for Python dependencies you may run ``tox``. Note that you may still need to install binary dependencies on the host for the build to succeed. If you wish to customize your build, modify ``tox.ini`` to pass on relevant environment variables or command line arguments to the ``diskimage-create.sh`` script. .. code:: bash $ tox -e build Container Support ================= The Docker command line required to import a tar file created with this script is: .. code:: bash $ docker import - image:amphora-x64-haproxy < amphora-x64-haproxy.tar References ========== This documentation and script(s) leverage prior work by the OpenStack TripleO and Sahara teams. Thank you to everyone that worked on them for providing a great foundation for creating Octavia Amphora images. * https://opendev.org/openstack/diskimage-builder * https://opendev.org/openstack/tripleo-image-elements * https://opendev.org/openstack/sahara-image-elements Copyright ========= Copyright 2014 Hewlett-Packard Development Company, L.P. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/diskimage-create/diskimage-create.sh0000775000175000017500000004333400000000000022134 0ustar00zuulzuul00000000000000#!/bin/bash # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # set -e usage() { echo echo "Usage: $(basename $0)" echo " [-a i386 | **amd64** | armhf | ppc64le]" echo " [-b **haproxy** ]" echo " [-c **~/.cache/image-create** | ]" echo " [-d **bionic**/**8** | ]" echo " [-e]" echo " [-f]" echo " [-g **repository branch** | stable/train | stable/stein | ... ]" echo " [-h]" echo " [-i **ubuntu-minimal** | fedora | centos-minimal | rhel ]" echo " [-k ]" echo " [-l ]" echo " [-n]" echo " [-o **amphora-x64-haproxy** | ]" echo " [-p]" echo " [-r ]" echo " [-s **2** | ]" echo " [-t **qcow2** | tar | vhd | raw ]" echo " [-v]" echo " [-w ]" echo " [-x]" echo echo " '-a' is the architecture type for the image (default: amd64)" echo " '-b' is the backend type (default: haproxy)" echo " '-c' is the path to the cache directory (default: ~/.cache/image-create)" echo " '-d' distribution release id (default on ubuntu: bionic)" echo " '-e' enable complete mandatory access control systems when available (default: permissive)" echo " '-f' disable tmpfs for build" echo " '-g' build the image for a specific OpenStack Git branch (default: current repository branch)" echo " '-h' display this help message" echo " '-i' is the base OS (default: ubuntu-minimal)" echo " '-k' is the kernel meta package name, currently only for ubuntu-minimal base OS (default: linux-image-virtual)" echo " '-l' is output logfile (default: none)" echo " '-n' disable sshd (default: enabled)" echo " '-o' is the output image file name" echo " '-p' install amphora-agent from distribution packages (default: disabled)" echo " '-r' enable the root account in the generated image (default: disabled)" echo " '-s' is the image size to produce in gigabytes (default: 2)" echo " '-t' is the image type (default: qcow2)" echo " '-v' display the script version" echo " '-w' working directory for image building (default: .)" echo " '-x' enable tracing for diskimage-builder" echo exit 1 } version() { echo "Amphora disk image creation script version:"\ "`cat $OCTAVIA_REPO_PATH/diskimage-create/version.txt`" exit 1 } find_system_elements() { # List of possible system installation directories local system_prefixes="/usr/share /usr/local/share" for prefix in $system_prefixes; do if [ -d $prefix/$1 ]; then echo $prefix/$1 return fi done } # Figure out where our directory is located if [ -z $OCTAVIA_REPO_PATH ]; then AMP_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) OCTAVIA_REPO_PATH=${OCTAVIA_REPO_PATH:-${AMP_DIR%/*}} fi dib_enable_tracing= AMP_LOGFILE="" while getopts "a:b:c:d:efg:hi:k:l:no:pt:r:s:vw:x" opt; do case $opt in a) AMP_ARCH=$OPTARG if [ $AMP_ARCH != "i386" ] && \ [ $AMP_ARCH != "amd64" ] && \ [ $AMP_ARCH != "ppc64le" ] && \ [ $AMP_ARCH != "armhf" ]; then echo "Error: Unsupported architecture " $AMP_ARCH " specified" exit 3 fi ;; b) if [ $OPTARG == "haproxy" ]; then AMP_BACKEND=$OPTARG-octavia else echo "Error: Unsupported backend type " $OPTARG " specified" exit 3 fi ;; c) AMP_CACHEDIR=$OPTARG ;; d) AMP_DIB_RELEASE=$OPTARG ;; e) AMP_ENABLE_FULL_MAC_SECURITY=1 ;; f) AMP_DISABLE_TMP_FS='--no-tmpfs' ;; g) if [ -z "$DIB_REPOREF_amphora_agent" ]; then echo "Building image with amphora agent from
$OPTARG." export DIB_REPOREF_amphora_agent=$OPTARG else echo "Environment variable DIB_REPOREF_amphora_agent is set. Building the image with amphora agent $DIB_REPOREF_amphora_agent." fi if [ -z "$DIB_REPOLOCATION_upper_constraints" ]; then echo "Using upper constraints from https://opendev.org/openstack/requirements/raw/branch/$OPTARG/upper-constraints.txt." export DIB_REPOLOCATION_upper_constraints="https://opendev.org/openstack/requirements/raw/branch/$OPTARG/upper-constraints.txt" else echo "Environment variable DIB_REPOLOCATION_upper_constraints is set. Building the image with upper-constraints.txt from $DIB_REPOLOCATION_upper_constraints." fi ;; h) usage ;; i) AMP_BASEOS=$OPTARG if [ $AMP_BASEOS != "ubuntu" ] && \ [ $AMP_BASEOS != "ubuntu-minimal" ] && \ [ $AMP_BASEOS != "fedora" ] && \ [ $AMP_BASEOS != "centos" ] && \ [ $AMP_BASEOS != "centos-minimal" ] && \ [ $AMP_BASEOS != "rhel" ]; then echo "Error: Unsupported base OS " $AMP_BASEOS " specified" exit 3 fi if [ $AMP_BASEOS == "ubuntu" ]; then AMP_BASEOS="ubuntu-minimal" fi if [ $AMP_BASEOS == "centos" ]; then AMP_BASEOS="centos-minimal" fi ;; k) AMP_KERNEL=$OPTARG ;; l) AMP_LOGFILE="--logfile=$OPTARG" ;; n) AMP_DISABLE_SSHD=1 ;; o) AMP_OUTPUTFILENAME=$(readlink -f $OPTARG) amp_dir=$(dirname $AMP_OUTPUTFILENAME) if [ ! -d $amp_dir ]; then echo "Error: Directory $amp_dir does not exist" exit 3 fi ;; p) AMP_PACKAGE_INSTALL=1 ;; r) AMP_ROOTPW=$OPTARG ;; s) AMP_IMAGESIZE=$OPTARG if ! [[ $AMP_IMAGESIZE =~ ^[0-9]+$ ]]; then echo "Error: Invalid image size " $AMP_IMAGESIZE " specified" exit 3 fi ;; t) AMP_IMAGETYPE=$OPTARG if [ $AMP_IMAGETYPE != "qcow2" ] && \ [ $AMP_IMAGETYPE != "tar" ] && \ [ $AMP_IMAGETYPE != "vhd" ] && \ [ $AMP_IMAGETYPE != "raw" ]; then echo "Error: Unsupported image type " $AMP_IMAGETYPE " specified" exit 3 fi ;; v) version ;; w) AMP_WORKING_DIR=$OPTARG ;; x) dib_enable_tracing=1 ;; *) usage ;; esac done shift $((OPTIND-1)) if [ "$1" ]; then usage fi # Set the Octavia Amphora defaults if they aren't already set AMP_ARCH=${AMP_ARCH:-"amd64"} AMP_BACKEND=${AMP_BACKEND:-"haproxy-octavia"} AMP_CACHEDIR=${AMP_CACHEDIR:-"$HOME/.cache/image-create"} # Make sure we have an absolute path for the cache location mkdir -p $AMP_CACHEDIR AMP_CACHEDIR="$( cd "$AMP_CACHEDIR" && pwd )" AMP_BASEOS=${AMP_BASEOS:-"ubuntu-minimal"} if [ "$AMP_BASEOS" = "ubuntu-minimal" ]; then export DIB_RELEASE=${AMP_DIB_RELEASE:-"bionic"} elif [ "${AMP_BASEOS}" = "centos-minimal" ] || [ "${AMP_BASEOS}" = "rhel" ]; then export DIB_RELEASE=${AMP_DIB_RELEASE:-"8"} elif [ "${AMP_BASEOS}" = "fedora" ]; then export DIB_RELEASE=${AMP_DIB_RELEASE:-"28"} fi AMP_OUTPUTFILENAME=${AMP_OUTPUTFILENAME:-"$PWD/amphora-x64-haproxy"} AMP_IMAGETYPE=${AMP_IMAGETYPE:-"qcow2"} AMP_IMAGESIZE=${AMP_IMAGESIZE:-2} if [ "$AMP_BASEOS" = "ubuntu-minimal" ]; then export DIB_UBUNTU_KERNEL=${AMP_KERNEL:-"linux-image-virtual"} fi AMP_DISABLE_SSHD=${AMP_DISABLE_SSHD:-0} AMP_PACKAGE_INSTALL=${AMP_PACKAGE_INSTALL:-0} AMP_ENABLE_FULL_MAC_SECURITY=${AMP_ENABLE_FULL_MAC_SECURITY:-0} AMP_DISABLE_TMP_FS=${AMP_DISABLE_TMP_FS:-""} if [ "$AMP_BASEOS" = "rhel" -o "$AMP_BASEOS" = "centos-minimal" -o "$AMP_BASEOS" = "fedora" ] && [ "$AMP_IMAGESIZE" -lt 3 ]; then echo "RHEL/CentOS based amphora requires an image size of at least 3GB" exit 1 fi OCTAVIA_ELEMENTS_PATH=$OCTAVIA_REPO_PATH/elements if ! 
[ -d $OCTAVIA_ELEMENTS_PATH ]; then SYSTEM_OCTAVIA_ELEMENTS_PATH=$(find_system_elements octavia-image-elements) if [ -z ${SYSTEM_OCTAVIA_ELEMENTS_PATH} ]; then echo "ERROR: Octavia elements directory not found at: " $OCTAVIA_ELEMENTS_PATH " Exiting." exit 1 fi OCTAVIA_ELEMENTS_PATH=${SYSTEM_OCTAVIA_ELEMENTS_PATH} fi DIB_REPO_PATH=${DIB_REPO_PATH:-${OCTAVIA_REPO_PATH%/*}/diskimage-builder} if [ -d $DIB_REPO_PATH ]; then export PATH=$PATH:$DIB_REPO_PATH/bin else if ! disk-image-create --version > /dev/null 2>&1; then echo "ERROR: diskimage-builder repo directory not found at: " $DIB_REPO_PATH " or in path. Exiting." exit 1 fi fi # For system-wide installs, DIB will automatically find the elements, so we only check local path if [ "$DIB_LOCAL_ELEMENTS_PATH" ]; then export ELEMENTS_PATH=$OCTAVIA_ELEMENTS_PATH:$DIB_LOCAL_ELEMENTS_PATH else export ELEMENTS_PATH=$OCTAVIA_ELEMENTS_PATH fi export CLOUD_INIT_DATASOURCES=${CLOUD_INIT_DATASOURCES:-"ConfigDrive"} # Additional RHEL environment checks if [ "${AMP_BASEOS}" = "rhel" ]; then if [ -z "${DIB_LOCAL_IMAGE}" ]; then echo "DIB_LOCAL_IMAGE variable must be set and point to a RHEL base cloud image. Exiting." echo "For more information, see the README file in ${DIB_ELEMENTS_PATH}/elements/rhel" exit 1 fi fi # Find out what platform we are on if [ -e /etc/os-release ]; then platform=$(cat /etc/os-release | grep ^NAME= | sed -e 's/\(NAME="\)\(.*\)\("\)/\2/g') else platform=$(head -1 /etc/system-release | grep -e CentOS -e 'Red Hat Enterprise Linux' || :) if [ -z "$platform" ]; then echo -e "Unknown Host OS. Impossible to build images.\nAborting" exit 2 fi fi if [[ "$AMP_ROOTPW" ]] && [[ "$platform" != 'Ubuntu' ]] && ! [[ "$platform" =~ "Debian" ]]; then if [ "$(getenforce)" == "Enforcing" ]; then echo "A root password cannot be enabled for images built on this platform while SELinux is enabled." exit 1 fi fi if [ "$AMP_ROOTPW" ]; then echo "Warning: Using a root password in the image, NOT FOR PRODUCTION USAGE." fi # Make sure we have the required packages installed if [[ "$platform" = 'Ubuntu' || "$platform" =~ 'Debian' ]]; then PKG_LIST="qemu-utils git kpartx debootstrap" for pkg in $PKG_LIST; do if ! dpkg --get-selections 2> /dev/null | grep -q "^$pkg[[:space:]]*install$" >/dev/null; then echo "Required package " $pkg " is not installed. Exiting." echo "Binary dependencies on this platform are: ${PKG_LIST}" exit 1 fi done if [[ "$platform" = 'Ubuntu' ]]; then # Also check if we can build the BASEOS on this Ubuntu version UBUNTU_VERSION=`lsb_release -r | awk '{print $2}'` if [ "$AMP_BASEOS" != "ubuntu-minimal" ] && \ [ 1 -eq $(echo "$UBUNTU_VERSION < 16.04" | bc) ]; then echo "Ubuntu minimum version 16.04 required to build $AMP_BASEOS." echo "Earlier versions don't support the extended attributes required." exit 1 fi else # Check if we can build the BASEOS on this Debian version DEBIAN_VERSION=`lsb_release -r | awk '{print $2}'` # As minimal Ubuntu version is 14.04, for debian it is Debian 8 Jessie if [ "$AMP_BASEOS" != "ubuntu-minimal" ] && \ [ 1 -eq $(echo "$DEBIAN_VERSION < 8" | bc) ]; then echo "Debian minimum version 8 required to build $AMP_BASEOS." echo "Earlier versions don't support the extended attributes required." exit 1 fi fi elif [[ $platform =~ "SUSE" ]]; then # OpenSUSE # use rpm -q to check for qemu-tools and git-core PKG_LIST="qemu-tools git-core" for pkg in $PKG_LIST; do if ! rpm -q $pkg &> /dev/null; then echo "Required package " ${pkg/\*} " is not installed. Exiting." 
echo "Binary dependencies on this platform are: ${PKG_LIST}" exit 1 fi done elif [[ $platform =~ "Gentoo" ]]; then # Gentoo # Check /var/db for dev-vcs/git and app-emulation/[qemu|xen-tools] sys-fs/multipath-tools PKG_LIST="dev-vcs/git app-emulation/qemu|xen-tools sys-fs/multipath-tools" for pkg in $PKG_LIST; do if grep -qs '|' <<< "$pkg"; then c=$(cut -d / -f 1 <<<"$pkg") for p in $(cut -d / -f 2 <<<"$pkg" | tr "|" " "); do if [ -d /var/db/pkg/$c/$p-* ]; then continue 2 fi done echo "Required package " ${pkg/\*} " is not installed. Exiting." echo "Binary dependencies on this platform are: ${PKG_LIST}" exit 1 elif [ ! -d /var/db/pkg/$pkg-* ]; then echo "Required package " ${pkg/\*} " is not installed. Exiting." echo "Binary dependencies on this platform are: ${PKG_LIST}" exit 1 fi done else # fedora/centos/rhel # Actual qemu-img name may be qemu-img, qemu-img-ev, qemu-img-rhev, ... # "dnf|yum install qemu-img" works for all, but search requires wildcard PKG_LIST="qemu-img* git" for pkg in $PKG_LIST; do if ! rpm -qa $pkg ; then echo "Required package " ${pkg/\*} " is not installed. Exiting." echo "Binary dependencies on this platform are: ${PKG_LIST}" exit 1 fi done fi if [ "$AMP_WORKING_DIR" ]; then mkdir -p $AMP_WORKING_DIR TEMP=$(mktemp -d $AMP_WORKING_DIR/diskimage-create.XXXXXX) else TEMP=$(mktemp -d diskimage-create.XXXXXX) fi pushd $TEMP > /dev/null # Setup the elements list AMP_element_sequence=${AMP_element_sequence:-"base vm"} if [ "${AMP_BASEOS}" = "rhel" ] && [ "${DIB_RELEASE}" = "8" ]; then export DIB_INSTALLTYPE_pip_and_virtualenv=package fi AMP_element_sequence="$AMP_element_sequence ${AMP_BASEOS}" if [ "$AMP_PACKAGE_INSTALL" -eq 1 ]; then export DIB_INSTALLTYPE_amphora_agent=package else # We will need pip for amphora-agent AMP_element_sequence="$AMP_element_sequence pip-and-virtualenv" fi # Add our backend element (haproxy, etc.) 
AMP_element_sequence="$AMP_element_sequence $AMP_BACKEND" if [ "$AMP_ROOTPW" ]; then AMP_element_sequence="$AMP_element_sequence root-passwd" export DIB_PASSWORD=$AMP_ROOTPW fi # Add the Amphora Agent and Pyroute elements AMP_element_sequence="$AMP_element_sequence rebind-sshd" AMP_element_sequence="$AMP_element_sequence no-resolvconf" AMP_element_sequence="$AMP_element_sequence amphora-agent" AMP_element_sequence="$AMP_element_sequence sos" AMP_element_sequence="$AMP_element_sequence cloud-init-datasources" AMP_element_sequence="$AMP_element_sequence remove-default-ints" if [ "$AMP_ENABLE_FULL_MAC_SECURITY" -ne 1 ]; then # SELinux systems if [ "${AMP_BASEOS}" = "centos-minimal" ] || [ "${AMP_BASEOS}" = "fedora" ] || [ "${AMP_BASEOS}" = "rhel" ]; then AMP_element_sequence="$AMP_element_sequence selinux-permissive" fi fi # Disable the dnf makecache timer if [ "${AMP_BASEOS}" = "centos-minimal" ] || [ "${AMP_BASEOS}" = "fedora" ] || [ "${AMP_BASEOS}" = "rhel" ]; then AMP_element_sequence="$AMP_element_sequence disable-makecache" fi if [ "${AMP_BASEOS}" = "centos-minimal" ]; then export DIB_YUM_MINIMAL_CREATE_INTERFACES=0 fi # Add keepalived-octavia element AMP_element_sequence="$AMP_element_sequence keepalived-octavia" AMP_element_sequence="$AMP_element_sequence ipvsadmin" # Add pip-cache element AMP_element_sequence="$AMP_element_sequence pip-cache" # Add certificate ramfs element AMP_element_sequence="$AMP_element_sequence certs-ramfs" # Disable SSHD if requested if [ "$AMP_DISABLE_SSHD" -eq 1 ]; then AMP_element_sequence="$AMP_element_sequence remove-sshd" fi # Allow full elements override if [ "$DIB_ELEMENTS" ]; then AMP_element_sequence="$DIB_ELEMENTS" fi if [ "$DIB_LOCAL_ELEMENTS" ]; then AMP_element_sequence="$AMP_element_sequence $DIB_LOCAL_ELEMENTS" fi # Set Grub timeout to 0 (no timeout) for fast boot times export DIB_GRUB_TIMEOUT=${DIB_GRUB_TIMEOUT:-0} # Build the image export DIB_CLOUD_INIT_DATASOURCES=$CLOUD_INIT_DATASOURCES dib_trace_arg= if [ -n "$dib_enable_tracing" ]; then dib_trace_arg="-x" fi if [ "$USE_PYTHON3" = "False" ]; then export DIB_PYTHON_VERSION=2 fi disk-image-create $AMP_LOGFILE $dib_trace_arg -a $AMP_ARCH -o $AMP_OUTPUTFILENAME -t $AMP_IMAGETYPE --image-size $AMP_IMAGESIZE --image-cache $AMP_CACHEDIR $AMP_DISABLE_TMP_FS $AMP_element_sequence popd > /dev/null # out of $TEMP rm -rf $TEMP if [ -z "$DIB_REPOREF_amphora_agent" ]; then echo "Successfully built the amphora image using amphora-agent from the master branch." else echo "Successfully built the amphora using the $DIB_REPOREF_amphora_agent amphora-agent." fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/diskimage-create/image-tests.sh0000775000175000017500000000635300000000000021160 0ustar00zuulzuul00000000000000#!/bin/bash # # Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This file is necessary because tox cannot handle pipes in commands echo "Examining the Amphora image. 
This will take some time." if [ "$1" -a -f "$1" ]; then AMP_IMAGE_LOCATION=$1 elif [ "$1" ]; then AMP_IMAGE_LOCATION=$1/amphora-x64-haproxy.qcow2 else AMP_IMAGE_LOCATION=amphora-x64-haproxy.qcow2 fi if ! [ -f $AMP_IMAGE_LOCATION ]; then echo "ERROR: Amphora image not found at: $AMP_IMAGE_LOCATION" exit 1 fi # Check the image size (rounded in GB) AMP_IMAGE_SIZE=$(virt-filesystems --long --csv --blkdevs -a $AMP_IMAGE_LOCATION | \ awk -F ',' '$1 == "/dev/sda" { print int($3/1024^3 + 0.5)}') if [ $AMP_IMAGE_SIZE != 2 ]; then echo "ERROR: Amphora image did not pass the default size test" echo "On Ubuntu you may need to run 'sudo chmod 0644 /boot/vmlinuz*' for libguestfs" exit 1 else echo "Amphora image size is correct" fi # Get image information AMP_IMAGE_INFO=$(virt-inspector $AMP_IMAGE_LOCATION) # Check the distribution echo $AMP_IMAGE_INFO | \ virt-inspector --xpath \ '/operatingsystems/operatingsystem/distro' \ | grep -q 'ubuntu' if [ $? != 0 ]; then echo "ERROR: Amphora image is using the wrong default distribution" exit 1 else echo "Amphora image is using the correct distribution" fi echo $AMP_IMAGE_INFO | \ virt-inspector --xpath \ '/operatingsystems/operatingsystem/arch' \ | grep -q 'x86_64' if [ $? != 0 ]; then echo "ERROR: Amphora image is using the wrong default architecture" exit 1 else echo "Amphora image is using the correct architecture" fi echo $AMP_IMAGE_INFO | \ virt-inspector --xpath \ '/operatingsystems/operatingsystem/format' \ | grep -q 'installed' if [ $? != 0 ]; then echo "ERROR: Amphora image is in the wrong format (should be installed)" exit 1 else echo "Amphora image is using the correct format" fi # Check for HAProxy echo $AMP_IMAGE_INFO | \ virt-inspector --xpath \ '/operatingsystems/operatingsystem/applications/application/name[text()="haproxy"]' \ | grep -q 'haproxy' if [ $? != 0 ]; then echo "ERROR: Amphora image is missing the haproxy package" exit 1 else echo "HAProxy package found in the Amphora image" fi # Check for KeepAlived echo $AMP_IMAGE_INFO | \ virt-inspector --xpath \ '/operatingsystems/operatingsystem/applications/application/name[text()="keepalived"]' \ | grep -q 'keepalived' if [ $? != 0 ]; then echo "ERROR: Amphora image is missing the keepalived package" exit 1 else echo "keepalived package found in the Amphora image" fi echo "Amphora image looks good."
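# All image checks passed; exit 0 explicitly so callers such as the tox
# "test" environment can rely on the exit code.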
exit 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/diskimage-create/requirements.txt0000664000175000017500000000006700000000000021657 0ustar00zuulzuul00000000000000Babel>=1.3 diskimage-builder>=2.24.0 PyYAML six>=1.9.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/diskimage-create/test-requirements.txt0000664000175000017500000000002600000000000022627 0ustar00zuulzuul00000000000000bashate doc8 Pygments ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/diskimage-create/tox.ini0000664000175000017500000000244600000000000017711 0ustar00zuulzuul00000000000000[tox] minversion = 1.8 envlist = bashate,docs,build,test skipsdist = True [testenv] basepython = python3 envdir = {toxworkdir}/venv setenv = VIRTUAL_ENV={envdir} passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY DIB_* CLOUD_INIT_DATASOURCES OCTAVIA_REPO_PATH install_command = pip install -U {opts} {packages} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = virt-filesystems virt-inspector awk mkdir rm [testenv:docs] commands = doc8 README.rst ../elements/haproxy-octavia/README.rst ../elements/root-passwd/README.rst [testenv:bashate] commands = bashate diskimage-create.sh bashate image-tests.sh [testenv:build] # NOTE: specify cache directory explicitly with -c as the `diskimage-create.sh` # default is based off of `$HOME` which is not passed on in a `tox` environment. commands = ./diskimage-create.sh -o {toxinidir}/amphora-x64-haproxy -w {toxworkdir} -c {toxworkdir}/.cache [testenv:test] # Work around tox limitations with command pipes # https://bitbucket.org/hpk42/tox/issue/73/pipe-output-of-command-into-file commands = ./image-tests.sh {toxinidir}/.amp_tox_test rm -rf {toxinidir}/.amp_tox_test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/diskimage-create/version.txt0000664000175000017500000000000400000000000020610 0ustar00zuulzuul000000000000000.1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/0000775000175000017500000000000000000000000013737 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/requirements.txt0000664000175000017500000000161200000000000017223 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
# Docs os-api-ref>=1.4.0 # Apache-2.0 sphinxcontrib-actdiag>=0.8.5 # BSD sphinxcontrib-apidoc>=0.2.1 # BSD sphinxcontrib-blockdiag>=1.5.4 # BSD sphinxcontrib-nwdiag>=0.9.5 # BSD sphinxcontrib-seqdiag>=0.8.4 # BSD docutils>=0.11 # OSI-Approved Open Source, Public Domain sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD graphviz!=0.5.0,>=0.4 # MIT License openstackdocstheme>=1.20.0 # Apache-2.0 sadisplay>=0.4.8 # BSD reno>=2.5.0 # Apache-2.0 sphinx-feature-classification>=0.2.0 # Apache-2.0 # PDF Docs sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD # This needs to be installed after above modules pydot>=1.2.4 # MIT License pydotplus>=2.0.2 # MIT License pyparsing>=2.1.0 # MIT networkx>=1.10 # BSD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/0000775000175000017500000000000000000000000015237 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/_static/0000775000175000017500000000000000000000000016665 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/_static/.placeholder0000664000175000017500000000000000000000000021136 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/admin/0000775000175000017500000000000000000000000016327 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/amphora-image-build.rst0000664000175000017500000003074000000000000022671 0ustar00zuulzuul00000000000000=============================== Building Octavia Amphora Images =============================== Octavia is an operator-grade reference implementation for Load Balancing as a Service (LBaaS) for OpenStack. The component of Octavia that does the load balancing is known as amphora. Amphora may be a virtual machine, may be a container, or may run on bare metal. Creating images for bare metal amphora installs is outside the scope of this version but may be added in a future release. Prerequisites ============= Python pip should be installed as well as the python modules found in the requirements.txt file. To do so, you can use the following command on Ubuntu: .. code:: bash $ # Install python pip $ sudo apt install python-pip $ # Eventually create a virtualenv $ sudo apt install python-virtualenv $ virtualenv octavia_disk_image_create $ source octavia_disk_image_create/bin/activate $ # Install octavia requirements $ cd octavia/diskimage-create $ pip install -r requirements.txt Your cache directory should have at least 1GB available, the working directory will need ~1.5GB, and your image destination will need ~500MB The script will use the version of diskimage-builder installed on your system, or it can be overridden by setting the following environment variables: .. code-block:: bash DIB_REPO_PATH = //diskimage-builder DIB_ELEMENTS = //diskimage-builder/elements The following packages are required on each platform: Ubuntu .. code:: bash $ sudo apt install qemu-utils git kpartx debootstrap Fedora, CentOS and Red Hat Enterprise Linux .. 
code:: bash $ sudo dnf install qemu-img git e2fsprogs policycoreutils-python-utils Test Prerequisites ------------------ The tox image tests require libguestfs-tools 1.24 or newer. Libguestfs allows testing the Amphora image without requiring root privileges. On Ubuntu systems you also need to give read access to the kernels for the user running the tests: .. code:: bash $ sudo chmod 0644 /boot/vmlinuz* Usage ===== This script and associated elements will build Amphora images. Current support is with an Ubuntu base OS and HAProxy. The script can use Fedora as a base OS, but these images will not initially be tested or supported. As the project progresses and/or the diskimage-builder project adds support for additional base OS options they may become available for Amphora images. This does not mean that they are necessarily supported or tested. .. note:: If your cloud has multiple hardware architectures available to nova, remember to set the appropriate hw_architecture property on the image when you load it into glance. For example, when loading an amphora image built for "amd64" you would add "--property hw_architecture='x86_64'" to your "openstack image create" command line. The script will use environment variables to customize the build beyond the Octavia project defaults, such as adding elements. The supported and tested image is created by using the diskimage-create.sh defaults (no command line parameters or environment variables set). As the project progresses we may add additional supported configurations. Command syntax: .. code-block:: $ diskimage-create.sh [-a i386 | **amd64** | armhf | ppc64le ] [-b **haproxy** ] [-c **~/.cache/image-create** | ] [-d **bionic**/**8** | ] [-e] [-f] [-g **repository branch** | stable/train | stable/stein | ... ] [-h] [-i **ubuntu-minimal** | fedora | centos-minimal | rhel ] [-k ] [-l ] [-n] [-o **amphora-x64-haproxy** | ] [-p] [-r ] [-s **2** | ] [-t **qcow2** | tar | vhd | raw ] [-v] [-w ] [-x] '-a' is the architecture type for the image (default: amd64) '-b' is the backend type (default: haproxy) '-c' is the path to the cache directory (default: ~/.cache/image-create) '-d' distribution release id (default on ubuntu: bionic) '-e' enable complete mandatory access control systems when available (default: permissive) '-f' disable tmpfs for build '-g' build the image for a specific OpenStack Git branch (default: current repository branch) '-h' display help message '-i' is the base OS (default: ubuntu-minimal) '-k' is the kernel meta package name, currently only for ubuntu-minimal base OS (default: linux-image-virtual) '-l' is output logfile (default: none) '-n' disable sshd (default: enabled) '-o' is the output image file name '-p' install amphora-agent from distribution packages (default: disabled) '-r' enable the root account in the generated image (default: disabled) '-s' is the image size to produce in gigabytes (default: 2) '-t' is the image type (default: qcow2) '-v' display the script version '-w' working directory for image building (default: .) '-x' enable tracing for diskimage-builder Building Images for Alternate Branches ====================================== By default, the diskimage-create.sh script will build an amphora image using the Octavia Git branch of the repository. If you need an image for a specific branch, such as "stable/train", you need to specify the "-g" option with the branch name. An example for "stable/train" would be: ..
code-block:: bash diskimage-create.sh -g stable/train Advanced Git Branch/Reference Based Images ------------------------------------------ If you need to build an image from a local repository or with a specific Git reference or branch, you will need to set some environment variables for diskimage-builder. .. note:: These advanced settings will override the "-g" diskimage-create.sh setting. Building From a Local Octavia Repository ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Set the DIB_REPOLOCATION_amphora_agent variable to the location of the Git repository containing the amphora agent: .. code-block:: bash export DIB_REPOLOCATION_amphora_agent=/opt/stack/octavia Building With a Specific Git Reference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Set the DIB_REPOREF_amphora_agent variable to point to the Git branch or reference of the amphora agent: .. code-block:: bash export DIB_REPOREF_amphora_agent=refs/changes/40/674140/7 See the `Environment Variables`_ section below for additional information and examples. Amphora Agent Upper Constraints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You may also need to specify which version of the OpenStack upper-constraints.txt file will be used to build the image. For example, to specify the "stable/train" upper constraints Git branch, set the following environment variable: .. code-block:: bash export DIB_REPOLOCATION_upper_constraints=https://opendev.org/openstack/requirements/raw/branch/stable/train/upper-constraints.txt See `Dependency Management for OpenStack Projects `_ for more information. Environment Variables ===================== These are optional environment variables that can be set to override the script defaults. DIB_REPOLOCATION_amphora_agent - Location of the amphora-agent code that will be installed in the image. - Default: https://opendev.org/openstack/octavia - Example: /tmp/octavia DIB_REPOREF_amphora_agent - The Git reference to check out for the amphora-agent code inside the image. - Default: The current branch - Example: stable/stein - Example: refs/changes/40/674140/7 DIB_REPOLOCATION_upper_constraints - Location of the upper-constraints.txt file used for the image. - Default: The upper-constraints.txt for the current branch - Example: https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt - Example: https://opendev.org/openstack/requirements/raw/branch/stable/train/upper-constraints.txt CLOUD_INIT_DATASOURCES - Comma-separated list of cloud-init datasources - Default: ConfigDrive - Options: NoCloud, ConfigDrive, OVF, MAAS, Ec2, - Reference: https://launchpad.net/cloud-init DIB_DISTRIBUTION_MIRROR - URL to a mirror for the base OS selected - Default: None DIB_ELEMENTS - Override the elements used to build the image - Default: None DIB_LOCAL_ELEMENTS - Elements to add to the build (requires DIB_LOCAL_ELEMENTS_PATH to be specified) - Default: None DIB_LOCAL_ELEMENTS_PATH - Path to the local elements directory - Default: None DIB_REPO_PATH - Directory containing diskimage-builder - Default: /diskimage-builder - Reference: https://github.com/openstack/diskimage-builder OCTAVIA_REPO_PATH - Directory containing octavia - Default: - Reference: https://github.com/openstack/octavia Using distribution packages for amphora agent --------------------------------------------- By default, the amphora agent is installed from the Octavia Git repository. To use distribution packages, use the "-p" option. Note that this needs a base system image with the required repositories enabled (for example RDO repositories for CentOS/Fedora).
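For illustration only (the combination of options shown is an assumed
example, not a tested recommendation; Fedora package-based builds presume
RDO or equivalent repositories are enabled, and Fedora/RHEL family images
need a size of at least 3GB), such a build might be invoked as:

.. code-block:: bash

   $ ./diskimage-create.sh -i fedora -p -s 3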
One of these variables must be set: DIB_LOCAL_IMAGE - Path to the locally downloaded image - Default: None DIB_CLOUD_IMAGES - Directory base URL to download the image from - Default: depends on the distribution RHEL specific variables ------------------------ Building a RHEL-based image requires: - a Red Hat Enterprise Linux KVM Guest Image, manually downloaded from the Red Hat Customer Portal. Set the DIB_LOCAL_IMAGE variable to point to the file. More details at: /elements/rhel - a Red Hat subscription for the matching Red Hat OpenStack Platform repository if you want to install the amphora agent from the official distribution package (requires setting -p option in diskimage-create.sh). Set the needed registration parameters depending on your configuration. More details at: /elements/rhel-common Here is an example with Customer Portal registration and OSP 15 repository: .. code:: bash $ export DIB_LOCAL_IMAGE='/tmp/rhel-server-8.0-x86_64-kvm.qcow2' $ export REG_METHOD='portal' REG_REPOS='rhel-8-server-openstack-15-rpms' $ export REG_USER='' REG_PASSWORD='' REG_AUTO_ATTACH=true This example uses registration via a Satellite (the activation key must enable an OSP repository): .. code:: bash $ export DIB_LOCAL_IMAGE='/tmp/rhel-server-8.1-x86_64-kvm.qcow2' $ export REG_METHOD='satellite' REG_ACTIVATION_KEY="" $ export REG_SAT_URL="" REG_ORG="" Building in a virtualenv with tox --------------------------------- To make use of a virtualenv for Python dependencies you may run ``tox``. Note that you may still need to install binary dependencies on the host for the build to succeed. If you wish to customize your build, modify ``tox.ini`` to pass on relevant environment variables or command line arguments to the ``diskimage-create.sh`` script. .. code:: bash $ tox -e build Container Support ================= The Docker command line required to import a tar file created with this script is: .. code:: bash $ docker import - image:amphora-x64-haproxy < amphora-x64-haproxy.tar References ========== This documentation and script(s) leverage prior work by the OpenStack TripleO and Sahara teams. Thank you to everyone that worked on them for providing a great foundation for creating Octavia Amphora images. * https://opendev.org/openstack/diskimage-builder * https://opendev.org/openstack/tripleo-image-elements * https://opendev.org/openstack/sahara-image-elements Copyright ========= Copyright 2014 Hewlett-Packard Development Company, L.P. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/apache-httpd.rst0000664000175000017500000000175400000000000021430 0ustar00zuulzuul00000000000000 .. Copyright 2017 Intel Corporation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.

=========================
Running Octavia in Apache
=========================

To run Octavia in apache2, copy the ``httpd/octavia-api.conf`` sample
configuration file to the appropriate location for the Apache server. On
Debian/Ubuntu systems it is::

    /etc/apache2/sites-available/octavia-api.conf

Restart Apache to have it start serving Octavia.

====================
Octavia API Auditing
====================

The `keystonemiddleware audit middleware`_ supports delivery of Cloud
Auditing Data Federation (CADF) audit events via the Oslo messaging
notifier capability. Based on the `notification_driver` configuration,
audit events can be routed to messaging infrastructure (notification_driver
= messagingv2) or to a log file (notification_driver = log).

More information about the CADF format can be found on the `DMTF Cloud
Auditing Data Federation website `_.

The audit middleware creates two events per REST API interaction: the first
contains information extracted from the request and the second contains the
request outcome (response).

.. _keystonemiddleware audit middleware: https://docs.openstack.org/keystonemiddleware/latest/audit.html

Configuring Octavia API Auditing
================================

Auditing can be enabled by making the following changes to the Octavia
configuration file on your Octavia API instance(s).

#. Enable auditing::

       [audit]
       ...
       enabled = True

#. Optionally specify the location of the audit map file::

       [audit]
       ...
       audit_map_file = /etc/octavia/octavia_api_audit_map.conf

   The default audit map file location is
   /etc/octavia/octavia_api_audit_map.conf.

#. Copy the audit map file from the octavia/etc/audit directory to the
   location specified in the previous step. A sample file has been provided
   in octavia/etc/audit/octavia_api_audit_map.conf.sample.

#. Optionally specify the REST HTTP methods you do not want to audit::

       [audit]
       ...
       ignore_req_list =

#. Specify the driver to use for sending the audit notifications::

       [audit_middleware_notifications]
       ...
       driver = log

   Driver options are: messaging, messagingv2, routing, log, noop

#. Optionally specify the messaging topic::

       [audit_middleware_notifications]
       ...
       topics =

#. Optionally specify the messaging transport URL::

       [audit_middleware_notifications]
       ...
       transport_url =

#. Restart your Octavia API processes.

Sample Audit Events
===================

Request
-------

..
code-block:: json { "event_type": "audit.http.request", "timestamp": "2018-10-11 22:42:22.721025", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "eventTime": "2018-10-11T22:42:22.720112+0000", "target": { "id": "octavia", "typeURI": "service/load-balancer/loadbalancers", "addresses": [{ "url": "http://10.21.21.53/load-balancer", "name": "admin" }, { "url": "http://10.21.21.53/load-balancer", "name": "private" }, { "url": "http://10.21.21.53/load-balancer", "name": "public" }], "name": "octavia" }, "observer": { "id": "target" }, "tags": ["correlation_id?value=e5b34bc3-4837-54fa-9892-8e65a9a2e73a"], "eventType": "activity", "initiator": { "typeURI": "service/security/account/user", "name": "admin", "credential": { "token": "***", "identity_status": "Confirmed" }, "host": { "agent": "openstacksdk/0.17.2 keystoneauth1/3.11.0 python-requests/2.19.1 CPython/2.7.12", "address": "10.21.21.53" }, "project_id": "90168d185e504b5580884a235ba31612", "id": "2af901396a424d5ca9dffa725226e8c7" }, "action": "read/list", "outcome": "pending", "id": "8cf14af5-246e-5739-a11e-513ca13b7d36", "requestPath": "/load-balancer/v2.0/lbaas/loadbalancers" }, "priority": "INFO", "publisher_id": "uwsgi", "message_id": "63264e0e-e60f-4adc-a656-0d87ab5d6329" } Response -------- .. code-block:: json { "event_type": "audit.http.response", "timestamp": "2018-10-11 22:42:22.853129", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "eventTime": "2018-10-11T22:42:22.720112+0000", "target": { "id": "octavia", "typeURI": "service/load-balancer/loadbalancers", "addresses": [{ "url": "http://10.21.21.53/load-balancer", "name": "admin" }, { "url": "http://10.21.21.53/load-balancer", "name": "private" }, { "url": "http://10.21.21.53/load-balancer", "name": "public" }], "name": "octavia" }, "observer": { "id": "target" }, "tags": ["correlation_id?value=e5b34bc3-4837-54fa-9892-8e65a9a2e73a"], "eventType": "activity", "initiator": { "typeURI": "service/security/account/user", "name": "admin", "credential": { "token": "***", "identity_status": "Confirmed" }, "host": { "agent": "openstacksdk/0.17.2 keystoneauth1/3.11.0 python-requests/2.19.1 CPython/2.7.12", "address": "10.21.21.53" }, "project_id": "90168d185e504b5580884a235ba31612", "id": "2af901396a424d5ca9dffa725226e8c7" }, "reason": { "reasonCode": "200", "reasonType": "HTTP" }, "reporterchain": [{ "reporterTime": "2018-10-11T22:42:22.852613+0000", "role": "modifier", "reporter": { "id": "target" } }], "action": "read/list", "outcome": "success", "id": "8cf14af5-246e-5739-a11e-513ca13b7d36", "requestPath": "/load-balancer/v2.0/lbaas/loadbalancers" }, "priority": "INFO", "publisher_id": "uwsgi", "message_id": "7cd89dce-af6e-40c5-8634-e87d1ed32a3c" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/flavors.rst0000664000175000017500000001427000000000000020541 0ustar00zuulzuul00000000000000.. Copyright 2018 Rackspace, US Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
===============
Octavia Flavors
===============

Octavia flavors are a powerful tool for operators to bring enhanced load
balancing capabilities to their users. An Octavia flavor is a predefined
set of provider configuration options that are created by the operator.
When a user requests a load balancer, they can request that the load
balancer be built with one of the defined flavors. Flavors are defined per
provider driver and expose the unique capabilities of each provider.

This document is intended to explain the flavors capability for operators
who wish to create flavors for their users.

There are three steps to creating a new Octavia flavor:

#. Decide on the provider flavor capabilities that will be configured in
   the flavor.
#. Create the flavor profile with the flavor capabilities.
#. Create the user facing flavor.

Provider Capabilities
=====================

.. _provider driver flavor capabilities: https://docs.openstack.org/api-ref/load-balancer/v2/index.html#show-provider-flavor-capabilities

To start the process of defining a flavor, you will want to look at the
flavor capabilities that the provider driver exposes. To do this you can
use the `provider driver flavor capabilities`_ API or the OpenStack client.

.. code-block:: bash

   openstack loadbalancer provider capability list

With the default RBAC policy, this command is only available to
administrators.

This will list all of the flavor capabilities that the provider supports
and that may be configured via a flavor.

As an example, the amphora provider supports the `loadbalancer_topology`
capability, among many others::

   +-----------------------+---------------------------------------------------+
   | name                  | description                                       |
   +-----------------------+---------------------------------------------------+
   | loadbalancer_topology | The load balancer topology. One of: SINGLE - One  |
   |                       | amphora per load balancer. ACTIVE_STANDBY - Two   |
   |                       | amphora per load balancer.                        |
   | ...                   | ...                                               |
   +-----------------------+---------------------------------------------------+

Flavor Profiles
===============

.. _flavor profile: https://docs.openstack.org/api-ref/load-balancer/v2/index.html#create-flavor-profile

The next step in the process of creating a flavor is to define a flavor
profile. The flavor profile includes the provider and the flavor data. The
flavor capabilities are the supported flavor data settings for a given
provider. A flavor profile can be created using the `flavor profile`_ API
or the OpenStack client. For example, to create a flavor for the amphora
provider, we would create the following flavor profile:

.. code-block:: bash

   openstack loadbalancer flavorprofile create --name amphora-single-profile --provider amphora --flavor-data '{"loadbalancer_topology": "SINGLE"}'

With the default RBAC policy, this command is only available to
administrators.

This will create a flavor profile for the amphora provider that creates a
load balancer with a single amphora. When you create a flavor profile, the
settings are validated with the provider to make sure the provider can
support the capabilities specified.

The output of the command above is::

   +---------------+--------------------------------------+
   | Field         | Value                                |
   +---------------+--------------------------------------+
   | id            | 72b53ac2-b191-48eb-8f73-ed012caca23a |
   | name          | amphora-single-profile               |
   | provider_name | amphora                              |
   | flavor_data   | {"loadbalancer_topology": "SINGLE"}  |
   +---------------+--------------------------------------+

Flavors
=======

..
_flavor: https://docs.openstack.org/api-ref/load-balancer/v2/index.html#create-flavor Finally we will create the user facing Octavia flavor. This defines the information users will see and use to create a load balancer with an Octavia flavor. The name of the flavor is the term users can use when creating a load balancer. We encourage you to include a detailed description for users to clearly understand the capabilities of the flavor you are providing. To continue the example above, to create a flavor with the flavor profile we created in the previous step we call: .. code-block:: bash openstack loadbalancer flavor create --name standalone-lb --flavorprofile amphora-single-profile --description "A non-high availability load balancer for testing." --enable This will create a user visible Octavia flavor that will create a load balancer that uses one amphora and is not highly available. Users can specify this flavor when creating a new load balancer. Disabled flavors are still visible to users, but they will not be able to create a load balancer using the flavor. The output of the command above is:: +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | id | 25cda2d8-f735-4744-b936-d30405c05359 | | name | standalone-lb | | flavor_profile_id | 72b53ac2-b191-48eb-8f73-ed012caca23a | | enabled | True | | description | A non-high availability load | | | balancer for testing. | +-------------------+--------------------------------------+ At this point, the flavor is available for use by users creating new load balancers. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/admin/guides/0000775000175000017500000000000000000000000017607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/guides/certificates.rst0000664000175000017500000002653200000000000023016 0ustar00zuulzuul00000000000000.. Copyright 2018 Rackspace, US Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================================= Octavia Certificate Configuration Guide ======================================= This document is intended for Octavia administrators setting up certificate authorities for the two-way TLS authentication used in Octavia for command and control of :term:`Amphora`. This guide does not apply to the configuration of `TERMINATED_TLS` listeners on load balancers. See the `Load Balancing Cookbook`_ for instructions on creating `TERMINATED_TLS` listeners. .. _Load Balancing Cookbook: ../../user/guides/basic-cookbook.html#deploy-a-tls-terminated-https-load-balancer Two-way TLS Authentication in Octavia ===================================== The Octavia controller processes communicate with the Amphora over a TLS connection much like an HTTPS connection to a website. However, Octavia validates that both sides are trusted by doing a two-way TLS authentication. 
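As an illustrative sketch only (the agent address, port, and file names
below are assumptions for this example, not configuration defaults), such a
mutually authenticated connection can be exercised by hand with the OpenSSL
client:

.. code-block:: bash

   # Connect to an amphora agent, presenting a client certificate and
   # validating the presented server certificate against the server CA.
   openssl s_client -connect 198.51.100.5:9443 \
       -cert client.cert-and-key.pem \
       -key client.cert-and-key.pem \
       -CAfile server_ca.cert.pem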
.. note:: This is a simplification of the full TLS handshake process. See the `TLS 1.3 RFC 8446 `_ for the full handshake. Phase One --------- When a controller process, such as the Octavia worker process, connects to an Amphora, the Amphora will present its `server` certificate to the controller. The controller will then validate it against the `server` Certificate Authority (CA) certificate stored on the controller. If the presented certificate is validated against the `server` CA certificate, the connection goes into phase two of the two-way TLS authentication. Phase Two --------- Once phase one is complete, the controller will present its `client` certificate to the Amphora. The Amphora will then validate the certificate against the `client` CA certificate stored inside the Amphora. If this certificate is successfully validated, the rest of the TLS handshake will continue to establish the secure communication channel between the controller and the Amphora. Certificate Lifecycles ---------------------- The `server` certificates are uniquely generated for each amphora by the controller using the `server` certificate authority certificates and keys. These `server` certificates are automatically rotated by the Octavia housekeeping controller process as they near expiration. The `client` certificates are used for the Octavia controller processes. These are managed by the operator and due to their use on the control plane of the cloud, typically have a long lifetime. See the `Operator Maintenance Guide `_ for more information about the certificate lifecycles. Creating the Certificate Authorities ==================================== As discussed above, this configuration uses two certificate authorities; one for the `server` certificates, and one for the `client` certificates. .. note:: Technically Octavia can be run using just one certificate authority by using it to issue certificates for both roles. However, this weakens the security as a `server` certificate from an amphora could be used to impersonate a controller. We recommend you use two certificate authorities for all deployments outside of testing. For this document we are going to setup simple OpenSSL based certificate authorities. However, any standards compliant certificate authority software can be used to create the required certificates. 1. Create a working directory for the certificate authorities. Make sure to set the proper permissions on this directory such that others cannot access the private keys, random bits, etc. being generated here. .. code-block:: bash $ mkdir certs $ chmod 700 certs $ cd certs 2. Create the OpenSSL configuration file. This can be shared between the two certificate authorities. .. code-block:: bash $ vi openssl.cnf .. literalinclude:: sample-configs/openssl.cnf :language: ini 3. Make any locally required configuration changes to the openssl.cnf. Some settings to consider are: * The default certificate lifetime is 10 years. * The default bit length is 2048. 4. Make directories for the two certificate authorities. .. code-block:: bash $ mkdir client_ca $ mkdir server_ca 5. Starting with the `server` certificate authority, prepare the CA. .. code-block:: bash $ cd server_ca $ mkdir certs crl newcerts private $ chmod 700 private $ touch index.txt $ echo 1000 > serial 6. Create the `server` CA key. * You will need to specify a passphrase to protect the key file. .. code-block:: bash $ openssl genrsa -aes256 -out private/ca.key.pem 4096 $ chmod 400 private/ca.key.pem 7. Create the `server` CA certificate. 
* You will need to specify the passphrase used in step 6. * You will also be asked to provide details for the certificate. These are up to you and should be appropriate for your organization. * You may want to mention this is the `server` CA in the common name field. * Since this is the CA certificate, you might want to give it a very long lifetime, such as twenty years shown in this example command. .. code-block:: bash $ openssl req -config ../openssl.cnf -key private/ca.key.pem -new -x509 -days 7300 -sha256 -extensions v3_ca -out certs/ca.cert.pem 8. Moving to the `client` certificate authority, prepare the CA. .. code-block:: bash $ cd ../client_ca $ mkdir certs crl csr newcerts private $ chmod 700 private $ touch index.txt $ echo 1000 > serial 9. Create the `client` CA key. * You will need to specify a passphrase to protect the key file. .. code-block:: bash $ openssl genrsa -aes256 -out private/ca.key.pem 4096 $ chmod 400 private/ca.key.pem 10. Create the `client` CA certificate. * You will need to specify the passphrase used in step 9. * You will also be asked to provide details for the certificate. These are up to you and should be appropriate for your organization. * You may want to mention this is the `client` CA in the common name field. * Since this is the CA certificate, you might want to give it a very long lifetime, such as twenty years shown in this example command. .. code-block:: bash $ openssl req -config ../openssl.cnf -key private/ca.key.pem -new -x509 -days 7300 -sha256 -extensions v3_ca -out certs/ca.cert.pem 11. Create a key for the `client` certificate to use. * You can create one certificate and key to be used by all of the controllers or you can create a unique certificate and key for each controller. * You will need to specify a passphrase to protect the key file. .. code-block:: bash $ openssl genrsa -aes256 -out private/client.key.pem 2048 12. Create the certificate request for the `client` certificate used on the controllers. * You will need to specify the passphrase used in step 11. * You will also be asked to provide details for the certificate. These are up to you and should be appropriate for your organization. * You must fill in the common name field. * You may want to mention this is the `client` certificate in the common name field, or the individual controller information. .. code-block:: bash $ openssl req -config ../openssl.cnf -new -sha256 -key private/client.key.pem -out csr/client.csr.pem 13. Sign the `client` certificate request. * You will need to specify the CA passphrase used in step 9. * Since this certificate is used on the control plane, you might want to give it a very long lifetime, such as twenty years shown in this example command. .. code-block:: bash $ openssl ca -config ../openssl.cnf -extensions usr_cert -days 7300 -notext -md sha256 -in csr/client.csr.pem -out certs/client.cert.pem 14. Create a concatenated `client` certificate and key file. * You will need to specify the CA passphrase used in step 11. .. code-block:: bash $ openssl rsa -in private/client.key.pem -out private/client.cert-and-key.pem $ cat certs/client.cert.pem >> private/client.cert-and-key.pem Configuring Octavia =================== In this section we will configure Octavia to use the certificates and keys created during the `Creating the Certificate Authorities`_ section. 1. Copy the required files over to your Octavia controllers. * Only the Octavia worker, health manager, and housekeeping processes will need access to these files. 
* The first command should return you to the "certs" directory created in step 1 of the `Creating the Certificate Authorities`_ section. * These commands assume you are running the octavia processes under the "octavia" user. * Note, some of these steps should be run with "sudo" and are indicated by the "#" prefix. .. code-block:: bash $ cd .. # mkdir /etc/octavia/certs # chmod 700 /etc/octavia/certs # cp server_ca/private/ca.key.pem /etc/octavia/certs/server_ca.key.pem # chmod 700 /etc/octavia/certs/server_ca.key.pem # cp server_ca/certs/ca.cert.pem /etc/octavia/certs/server_ca.cert.pem # cp client_ca/certs/ca.cert.pem /etc/octavia/certs/client_ca.cert.pem # cp client_ca/private/client.cert-and-key.pem /etc/octavia/certs/client.cert-and-key.pem # chmod 700 /etc/octavia/certs/client.cert-and-key.pem # chown -R octavia.octavia /etc/octavia/certs 2. Configure the [certificates] section of the octavia.conf file. * Only the Octavia worker, health manager, and housekeeping processes will need these settings. * The "" should be replaced with the passphrase that was used in step 6 of the `Creating the Certificate Authorities`_ section. .. code-block:: ini [certificates] cert_generator = local_cert_generator ca_certificate = /etc/octavia/certs/server_ca.cert.pem ca_private_key = /etc/octavia/certs/server_ca.key.pem ca_private_key_passphrase = 3. Configure the [controller_worker] section of the octavia.conf file. * Only the Octavia worker, health manager, and housekeeping processes will need these settings. .. code-block:: ini [controller_worker] client_ca = /etc/octavia/certs/client_ca.cert.pem 4. Configure the [haproxy_amphora] section of the octavia.conf file. * Only the Octavia worker, health manager, and housekeeping processes will need these settings. .. code-block:: ini [haproxy_amphora] client_cert = /etc/octavia/certs/client.cert-and-key.pem server_ca = /etc/octavia/certs/server_ca.cert.pem 5. Start the controller processes. .. code-block:: bash # systemctl start octavia-worker # systemctl start octavia-healthmanager # systemctl start octavia-housekeeping ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/guides/operator-maintenance.rst0000664000175000017500000004272700000000000024470 0ustar00zuulzuul00000000000000.. Copyright (c) 2017 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================================== Operator Maintenance Guide ====================================== This document is intended for operators. For a developer guide see the :doc:`../../contributor/guides/dev-quick-start` in this documentation repository. For an end-user guide, please see the :doc:`../../user/guides/basic-cookbook` in this documentation repository. Monitoring ========== Monitoring Load Balancer Amphora -------------------------------- Octavia will monitor the load balancing amphorae itself and initiate failovers and/or replacements if they malfunction. 
Therefore, most installations won't need to monitor the amphorae running
the load balancer.

Octavia will log each failover to the corresponding health manager logs. It
is advisable to use log analytics to monitor failover trends to notice
problems in the OpenStack installation early. We have seen neutron
(network) connectivity issues, Denial of Service attacks, and nova
(compute) malfunctions lead to a higher than normal failover rate. In our
experience, monitoring of the other services surfaced those problems as
well, so depending on your overall monitoring strategy this might be
optional.

If additional monitoring is necessary, review the corresponding calls on
the amphora agent REST interface (see
:doc:`../../contributor/api/haproxy-amphora-api`).

Monitoring Pool Members
-----------------------

Octavia will use the health information from the underlying load balancing
application to determine the health of members. This information will be
streamed to the Octavia database and made available via the status tree or
other API methods. For critical applications we recommend polling this
information at regular intervals.

Monitoring Load Balancers
-------------------------

For critical applications, we recommend monitoring access to the
application with a tool which polls the application from various points on
the Internet and measures response times. Alerts should be triggered when
response times become too high.

An additional check might be to monitor the provisioning status of a load
balancer (see `Load Balance Status Codes `_) and alert, depending on the
application, if the provisioning status is not ACTIVE. For some
applications other states might not need to alert: for instance, if an
application is making regular changes to the pool, several PENDING stages
should not alert either.

In most cases, when a load balancer is in states other than ACTIVE it will
still be passing traffic, which is why the response time check mentioned
above is recommended. However, even if the load balancer is still
functioning, it is advisable to investigate and potentially recreate it if
it is stuck in a non-ACTIVE state.

Monitoring load balancer functionality
--------------------------------------

For production sites we recommend using outside monitoring services. They
will use servers distributed around the globe to monitor not only whether
the site is up, but also the parts of the system outside the visibility of
Octavia, such as routers and network connectivity.

.. _Monasca Octavia plugin: https://github.com/openstack/monasca-agent/blob/master/monasca_setup/detection/plugins/octavia.py

Monitoring Octavia Control Plane
--------------------------------

To monitor the Octavia control plane we recommend process monitoring of the
main Octavia processes:

* octavia-api

* octavia-worker

* octavia-health-manager

* octavia-housekeeping

The Monasca project has a plugin for such monitoring (see `Monasca Octavia
plugin`_). Please refer to this project for further information.

Octavia's control plane components are shared-nothing and can be scaled
linearly. For high availability of the control plane we recommend running
at least one set of components in each availability zone. Furthermore, the
octavia-api endpoint could be placed behind a load balancer or other HA
technology. That said, if one or more components fail the system will still
be available (though potentially degraded).
For instance, if you have installed one set of components in three
availability zones, even if you lose a whole zone Octavia will still be
responsive and available - only if you lose the Octavia control plane in
all three zones will the service be unavailable. Please note this only
addresses control plane availability; the availability of the load
balancing function depends highly on the chosen topology and the
anti-affinity settings. See our forthcoming HA guide for more details.

Additionally, we recommend monitoring the Octavia API endpoint(s). There
currently is no special URL to use, so just polling the root URL at regular
intervals is sufficient.

There is a host of information in the log files which can be used for log
analytics. A few examples of what could be monitored are:

* Amphora Build Rate - to determine load of the system

* Amphora Build Time - to determine how long it takes to build an amphora

* Failures/Errors - to be notified of system problems early

.. _rotating_amphora:

Rotating the Amphora Images
===========================

Octavia will start load balancers with a pre-built image which contains the
amphora agent and a load balancing application, and which is seeded with
cryptographic certificates through the config drive at start up.

Rotating the image means making a load balancer amphora that is running
with an old image fail over to an amphora with a new image. This should
happen without any measurable interruption in the load balancing
functionality when using ACTIVE/STANDBY topology. Standalone load balancers
might experience a short outage.

Here are some reasons you might need to rotate the amphora image:

* There has been a (security) update to the underlying operating system

* You want to deploy a new version of the amphora agent or haproxy

* The cryptographic certificates and/or keys on the amphora have been
  compromised.

* Though not related to rotating images, this procedure might be invoked if
  you are switching to a different flavor for the underlying virtual
  machine.

Preparing a New Amphora Image
-----------------------------

To prepare a new amphora image you will need to use diskimage-create.sh as
described in the README in the diskimage-create directory.

For instance, in the ``octavia/diskimage-create`` directory, run:

.. code-block:: bash

   ./diskimage-create.sh

Once you have created a new image you will need to upload it into glance.
The following shows how to do this if you have set the image tag in the
Octavia configuration file. Make sure to use a user with the same tenant as
the Octavia service account:

.. code-block:: bash

   openstack image create --file amphora-x64-haproxy.qcow2 \
   --disk-format qcow2 --tag --private \
   --container-format bare /var/lib/octavia/amphora-x64-haproxy.qcow2

If you didn't configure image tags and instead configured an image id, you
will need to update the Octavia configuration file with the new id and
restart the Octavia services (except octavia-api).

Rotating spare Amphorae
-----------------------

If the spare pool is enabled in Octavia, spare amphorae must be rotated
first, so a new load balancer will use the new amphora image from a newly
spawned spare amphora. To rotate spare amphorae, list the IDs of all
amphorae in ``READY`` status:

.. code-block:: bash

   openstack loadbalancer amphora list -c id -f value --status READY

Then, for each ID, perform the failover on the amphora:

..
code-block:: bash openstack loadbalancer amphora failover Spare amphorae now use the new amphora image, and those spare amphorae will be used when creating a new load balancer or when performing a failover of a load balancer. Generating a List of Load Balancers to Rotate --------------------------------------------- The easiest way to generate a list, is to just list the IDs of all load balancers: .. code-block:: bash openstack loadbalancer list -c id -f value Take note of the IDs. Rotating a Load Balancer ------------------------ Octavia has an API call to initiate the failover of a load balancer: .. code-block:: bash openstack loadbalancer failover You can observe the failover by querying octavia ``openstack load balancer show `` until the load balancer goes ``ACTIVE`` again. .. _best_practice: Best Practices/Optimizations ---------------------------- To speed up the failovers, the spare pool can be temporarily increased to accommodate the rapid failover of the amphora. In this case after the new image has been loaded into glance, shut down or initiate a failover of the amphora in the spare pool. They can be found by listing amphorae in ``READY`` status: .. code-block:: bash openstack loadbalancer amphora list --status READY After you have increased the spare pool size and restarted all Octavia services, failovers will be greatly accelerated. To preserve resources, restore the old settings and restart the Octavia services. Since Octavia won't terminate superfluous spare amphora on its own, they can be left in the system and will automatically be used up as new load balancers are created and/or load balancers in error state are failed over. .. warning:: If you are using the anti-affinity feature please be aware that it is not compatible with spare pools and you are risking both the ACTIVE and BACKUP amphora being scheduled on the same host. It is recommended to not increase the spare pool during fail overs in this case (and not to use the spare pool at all). Since a failover puts significant load on the OpenStack installation by creating new virtual machines and ports, it should either be done at a very slow pace, during a time with little load, or with the right throttling enabled in Octavia. The throttling will make sure to prioritize failovers higher than other operations and depending on how many failovers are initiated this might crowd out other operations. Rotating Cryptographic Certificates =================================== Octavia secures the communication between the amphora agent and the control plane with two-way SSL encryption. To accomplish that, several certificates are distributed in the system: * Control plane: * Amphora certificate authority (CA) certificate: Used to validate amphora certificates if Octavia acts as a Certificate Authority to issue new amphora certificates * Client certificate: Used to authenticate with the amphora * Amphora: * Client CA certificate: Used to validate control plane client certificate * Amphora certificate: Presented to control plane processes to prove amphora identity. The heartbeat UDP packets emitted from the amphora are secured with a symmetric encryption key. This is set by the configuration option `heartbeat_key` in the `health_manager` section. We recommend setting it to a random string of a sufficient length. .. _rotate-amphora-certs: Rotating Amphora Certificates ----------------------------- For the server part Octavia will act as a certificate authority itself to issue amphora certificates to be used by each amphora. 
Octavia will also monitor those certificates and refresh them before they expire. There are three ways to initiate a rotation manually: * Change the expiration date of the certificate in the database. Octavia will then rotate the amphora certificates with newly issued ones. This requires the following: * Client CA certificate hasn't expired or the corresponding client certificate on the control plane hasn't been issued by a different client CA (in case the authority was compromised) * The Amphora CA certificate on the control plane didn't change in any way which jeopardizes validation of the amphora certificate (e.g. the certificate was reissued with a new private/public key) * If the amphora CA changed in a way which jeopardizes validation of the amphora certificate an operator can manually upload newly issued amphora certificates by switching off validation of the old amphora certificate. This requires a client certificate which can be validated by the client CA file on the amphora. Refer to :doc:`../../contributor/api/haproxy-amphora-api` for more details. * If the client certificate on the control plane changed in a way that it can't be validated by the client certificate authority certificate on the amphora, a failover (see :ref:`rotate-amphora-certs`) of all amphorae needs to be initiated. Until the failover is completed the amphorae can't be controlled by the control plane. Rotating the Certificate Authority Certificates ----------------------------------------------- If there is a compromise of the certificate authorities' certificates, or they expired, new ones need to be installed into the system. If Octavia is not acting as the certificate authority only the certificate authority's cert needs to be changed in the system so amphora can be authenticated again. * Issue new certificates (see the script in the bin folder of Octavia if Octavia is acting as the certificate authority) or follow the instructions of the third-party certificate authority. Copy the certificate and the private key (if Octavia acts as a certificate authority) where Octavia can find them. * If the previous certificate files haven't been overridden, adjust the paths to the new certs in the configuration file and restart all Octavia services (except octavia-api). Review :ref:`rotate-amphora-certs` above to determine if and how the amphora certificates needs to be rotated. Rotating Client Certificates ---------------------------- If the client certificates expired new ones need to be issued and installed on the system: * Issue a new client certificate (see the script in the bin folder of Octavia if self signed certificates are used) or use the ones provided to you by your certificate authority. * Copy the new cert where Octavia can find it. * If the previous certificate files haven't been overridden, adjust the paths to the new certs in the configuration file. In all cases restart all Octavia services except octavia-api. If the client CA certificate has been replaced in addition to rotating the client certificate the new client CA certificate needs to be installed in the system. After that initiate a failover of all amphorae to distribute the new client CA cert. Until the failover is completed the amphorae can't be controlled by the control plane. Changing The Heartbeat Encryption Key ------------------------------------- Special caution needs to be taken to replace the heartbeat encryption key. 
Once this is changed Octavia can't read any heartbeats and will assume all amphora are in an error state and initiate an immediate failover. In preparation, read the chapter on :ref:`best_practice` in the Failover section. In particular, it is advisable if the throttling enhancement (available in Pike) doesn't exist to create a sufficient number of spare amphorae to mitigate the stress on the OpenStack installation when Octavia starts to replace all amphora immediately. Given the risks involved with changing this key it should not be changed during routine maintenance but only when a compromise is strongly suspected. .. note:: For future versions of Octavia an "update amphora" API is planned which will allow this key to be changed without failover. At that time there would be a procedure to halt health monitoring while the keys are rotated and then resume health monitoring. Handling a VM Node Failure -------------------------- If a node fails which is running amphora, Octavia will automatically failover the amphora to a different node (capacity permitting). In some cases, the node can be recovered (e.g. through a hard reset) and the hypervisor might bring back the amphora vms. In this case, an operator should manually delete all amphora on this specific node since Octavia assumes they have been deleted as part of the failover and will not touch them again. .. note:: As a safety measure an operator can, prior to deleting, manually check if the VM is in use. First, use the Amphora API to obtain the current list of amphorae, then match the nova instance ID to the compute_id column in the amphora API response (it is not currently possible to filter amphora by compute_id). If there are any matches where the amphora status is not 'DELETED', the amphora is still considered to be in use. Evacuating a Specific Amphora from a Host ----------------------------------------- In some cases an amphora needs to be evacuated either because the host is being shutdown for maintenance or as part of a failover. Octavia has a rich amphora API to do that. First use the amphora API to find the specific amphora. Then, if not already performed, disable scheduling to this host in nova. Lastly, initiate a failover of the specific amphora with the failover command on the amphora API. Alternatively, a live migration might also work if it happens quick enough for Octavia not to notice a stale amphora (the default configuration is 60s). ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/admin/guides/sample-configs/0000775000175000017500000000000000000000000022516 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/guides/sample-configs/openssl.cnf0000664000175000017500000000635500000000000024702 0ustar00zuulzuul00000000000000# OpenSSL root CA configuration file. [ ca ] # `man ca` default_ca = CA_default [ CA_default ] # Directory and file locations. dir = ./ certs = $dir/certs crl_dir = $dir/crl new_certs_dir = $dir/newcerts database = $dir/index.txt serial = $dir/serial RANDFILE = $dir/private/.rand # The root key and root certificate. private_key = $dir/private/ca.key.pem certificate = $dir/certs/ca.cert.pem # For certificate revocation lists. crlnumber = $dir/crlnumber crl = $dir/crl/ca.crl.pem crl_extensions = crl_ext default_crl_days = 30 # SHA-1 is deprecated, so use SHA-2 instead. 
default_md = sha256 name_opt = ca_default cert_opt = ca_default default_days = 3650 preserve = no policy = policy_strict [ policy_strict ] # The root CA should only sign intermediate certificates that match. # See the POLICY FORMAT section of `man ca`. countryName = match stateOrProvinceName = match organizationName = match organizationalUnitName = optional commonName = supplied emailAddress = optional [ req ] # Options for the `req` tool (`man req`). default_bits = 2048 distinguished_name = req_distinguished_name string_mask = utf8only # SHA-1 is deprecated, so use SHA-2 instead. default_md = sha256 # Extension to add when the -x509 option is used. x509_extensions = v3_ca [ req_distinguished_name ] # See . countryName = Country Name (2 letter code) stateOrProvinceName = State or Province Name localityName = Locality Name 0.organizationName = Organization Name organizationalUnitName = Organizational Unit Name commonName = Common Name emailAddress = Email Address # Optionally, specify some defaults. countryName_default = US stateOrProvinceName_default = Oregon localityName_default = 0.organizationName_default = OpenStack organizationalUnitName_default = Octavia emailAddress_default = commonName_default = example.org [ v3_ca ] # Extensions for a typical CA (`man x509v3_config`). subjectKeyIdentifier = hash authorityKeyIdentifier = keyid:always,issuer basicConstraints = critical, CA:true keyUsage = critical, digitalSignature, cRLSign, keyCertSign [ usr_cert ] # Extensions for client certificates (`man x509v3_config`). basicConstraints = CA:FALSE nsCertType = client, email nsComment = "OpenSSL Generated Client Certificate" subjectKeyIdentifier = hash authorityKeyIdentifier = keyid,issuer keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment extendedKeyUsage = clientAuth, emailProtection [ server_cert ] # Extensions for server certificates (`man x509v3_config`). basicConstraints = CA:FALSE nsCertType = server nsComment = "OpenSSL Generated Server Certificate" subjectKeyIdentifier = hash authorityKeyIdentifier = keyid,issuer:always keyUsage = critical, digitalSignature, keyEncipherment extendedKeyUsage = serverAuth [ crl_ext ] # Extension for CRLs (`man x509v3_config`). authorityKeyIdentifier=keyid:always ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/guides/upgrade.rst0000664000175000017500000001052300000000000021771 0ustar00zuulzuul00000000000000.. Copyright 2018 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================== Load Balancing Service Upgrade Guide ==================================== This document outlines steps and notes for operators for reference when upgrading their Load Balancing service from previous versions of OpenStack. Plan the upgrade ================ Before jumping right in to the upgrade process, there are a few considerations operators should observe: * Carefully read the release notes, particularly the upgrade section. 
* Upgrades are only supported between sequential releases. For example,
  upgrading from Pike to Queens is supported while upgrading from Pike to
  Rocky is not.

* It is expected that each Load Balancing provider provides its own upgrade
  documentation. Please refer to it for upgrade instructions.

* The Load Balancing service builds on top of other OpenStack services,
  e.g. Compute, Networking, Image and Identity. On a staging environment,
  upgrade the Load Balancing service and verify it works as expected. For
  example, a good indicator would be a successful run of the `Octavia
  Tempest tests `.

Cold upgrade
============

In a cold upgrade (also known as offline upgrade and non-rolling upgrade),
the Load Balancing service is not available because all the control plane
services have to be taken down. No data plane disruption should result
during the course of upgrading. In the case of the Load Balancing service,
this means no downtime nor reconfiguration of service-managed resources
(e.g. load balancers, listeners, pools and members).

#. Run the :ref:`octavia-status upgrade check ` command to validate that
   Octavia is ready for upgrade.

#. Gracefully stop all Octavia processes. We recommend this order:
   Housekeeping, Health manager, API, Worker.

#. Optional: Make a backup of the database.

#. Upgrade all Octavia control plane nodes to the next release. Remember to
   also upgrade library dependencies (e.g. octavia-lib). If upgrading
   Octavia from distribution packages, your system package manager is
   expected to handle this automatically.

#. Verify that all configuration option names are up-to-date with the
   latest Octavia version. For example, pay special attention to deprecated
   configuration options.

#. Run ``octavia-db-manage upgrade head`` from any Octavia node to upgrade
   the database and run any corresponding database migrations.

#. Start all Octavia processes.

#. Build a new image and upload it to the Image service. Do not forget to
   tag the image. We recommend updating images frequently to include the
   latest bug fixes and security patches for the installed software
   (operating system, amphora agent and its dependencies).

Amphorae upgrade
================

An amphorae upgrade may be required in the event of an API incompatibility
between the running amphora agent (old version) and the Octavia services
(new version). Octavia will automatically recover by failing over amphorae,
so that new amphora instances run the latest amphora agent code. The
drawback in that case is data plane downtime during the failover. API
breakage is a very rare case, and would be highlighted in the release notes
if this scenario occurs.

Upgrade testing
===============

`Grenade `_ is an OpenStack test harness project that validates upgrade
scenarios between releases. It uses DevStack to initially perform a base
OpenStack install and then upgrade to a target version.

Octavia has a `Grenade plugin `_ and a CI gate job that validates cold
upgrades of an OpenStack deployment with Octavia enabled. The plugin
creates load balancing resources and verifies that resources are still
working during and after upgrade.

.. Copyright 2020 Red Hat, Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================= Octavia API Health Monitoring ============================= The Octavia API provides a health monitoring endpoint that can be used by external load balancers to manage the Octavia API pool. When properly configured, the health monitoring endpoint will reflect the full operational status of the Octavia API. The Octavia API health monitoring endpoint extends the `OpenStack Oslo middleware healthcheck `_ library to test the Octavia Pecan API framework and associated services. Oslo Healthcheck Queries ======================== Oslo middleware healthcheck supports HTTP **"GET"** and **"HEAD"** methods. The response from Oslo middleware healthcheck can be customized by specifying the acceptable response type for the request. Oslo middleware healthcheck currently supports the following types: * text/plain * text/html * application/json If the requested type is not one of the above, it defaults to text/plain. .. note:: The content of the response "reasons" will vary based on the backend plugins enabled in Oslo middleware healthcheck. It is a best practice to only rely on the HTTP status code for Octavia API health monitoring. Example Responses ----------------- Example passing output for text/plain with *detailed* False: .. code-block:: bash $ curl -i http://198.51.100.10/load-balancer/healthcheck HTTP/1.1 200 OK Date: Mon, 16 Mar 2020 18:10:27 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: text/plain; charset=UTF-8 Content-Length: 2 x-openstack-request-id: req-9c6f4303-63a7-4f30-8afc-39340658702f Connection: close Vary: Accept-Encoding OK Example failing output for text/plain with *detailed* False: .. code-block:: bash $ curl -i http://198.51.100.10/load-balancer/healthcheck HTTP/1.1 503 Service Unavailable Date: Mon, 16 Mar 2020 18:42:12 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: text/plain; charset=UTF-8 Content-Length: 36 x-openstack-request-id: req-84024269-2dfb-41ad-bfda-b3e1da138bba Connection: close Example passing output for text/html with *detailed* False: .. code-block:: bash $ curl -i -H "Accept: text/html" http://198.51.100.10/load-balancer/healthcheck HTTP/1.1 200 OK Date: Mon, 16 Mar 2020 18:25:11 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: text/html; charset=UTF-8 Content-Length: 239 x-openstack-request-id: req-b212d619-146f-4b50-91a3-5da16051badc Connection: close Vary: Accept-Encoding Healthcheck Status

Result of 1 checks:

Reason
OK

Example failing output for text/html with *detailed* False: .. code-block:: bash $ curl -i -H "Accept: text/html" http://198.51.100.10/load-balancer/healthcheck HTTP/1.1 503 Service Unavailable Date: Mon, 16 Mar 2020 18:42:22 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: text/html; charset=UTF-8 Content-Length: 273 x-openstack-request-id: req-c91dd214-85ca-4d33-9fa3-2db81566d9e5 Connection: close Healthcheck Status

Result of 1 checks:

Reason
The Octavia database is unavailable.

Example passing output for application/json with *detailed* False: .. code-block:: bash $ curl -i -H "Accept: application/json" http://192.51.100.10/load-balancer/healthcheck HTTP/1.1 200 OK Date: Mon, 16 Mar 2020 18:34:42 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: application/json Content-Length: 62 x-openstack-request-id: req-417dc85c-e64e-496e-a461-494a3e6a5479 Connection: close { "detailed": false, "reasons": [ "OK" ] } Example failing output for application/json with *detailed* False: .. code-block:: bash $ curl -i -H "Accept: application/json" http://192.51.100.10/load-balancer/healthcheck HTTP/1.1 503 Service Unavailable Date: Mon, 16 Mar 2020 18:46:28 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: application/json Content-Length: 96 x-openstack-request-id: req-de50b057-6105-4fca-a758-c872ef28bbfa Connection: close { "detailed": false, "reasons": [ "The Octavia database is unavailable." ] } Example Detailed Responses -------------------------- Example passing output for text/plain with *detailed* True: .. code-block:: bash $ curl -i http://198.51.100.10/load-balancer/healthcheck HTTP/1.1 200 OK Date: Mon, 16 Mar 2020 18:10:27 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: text/plain; charset=UTF-8 Content-Length: 2 x-openstack-request-id: req-9c6f4303-63a7-4f30-8afc-39340658702f Connection: close Vary: Accept-Encoding OK Example failing output for text/plain with *detailed* True: .. code-block:: bash $ curl -i http://198.51.100.10/load-balancer/healthcheck HTTP/1.1 503 Service Unavailable Date: Mon, 16 Mar 2020 23:41:23 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: text/plain; charset=UTF-8 Content-Length: 36 x-openstack-request-id: req-2cd046cb-3a6c-45e3-921d-5f4a9e65c63e Connection: close Example passing output for text/html with *detailed* True: .. code-block:: bash $ curl -i -H "Accept: text/html" http://198.51.100.10/load-balancer/healthcheck HTTP/1.1 200 OK Date: Mon, 16 Mar 2020 22:11:54 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: text/html; charset=UTF-8 Content-Length: 9927 x-openstack-request-id: req-ae7404c9-b183-46dc-bb1b-e5f4e4984a57 Connection: close Vary: Accept-Encoding Healthcheck Status

Server status

Server hostname: devstack2
Current time: 2020-03-16 22:11:54.320529
Python version: 3.6.9 (default, Nov  7 2019, 10:44:02) [GCC 8.3.0]
Platform: Linux-4.15.0-88-generic-x86_64-with-Ubuntu-18.04-bionic

Garbage collector:
Counts: (28, 10, 4)
Thresholds: (700, 10, 10)

Result of 1 checks:

Kind                   Reason   Details
OctaviaDBCheckResult   OK

1 greenthread(s) active:
<...>

1 thread(s) active:
<...>
Example failing output for text/html with *detailed* True: .. code-block:: bash $ curl -i -H "Accept: text/html" http://198.51.100.10/load-balancer/healthcheck HTTP/1.1 503 Service Unavailable Date: Mon, 16 Mar 2020 23:43:52 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: text/html; charset=UTF-8 Content-Length: 10211 x-openstack-request-id: req-39b65058-6dc3-4069-a2d5-8a9714dba61d Connection: close Healthcheck Status

Server status

Server hostname: devstack2
Current time: 2020-03-16 23:43:52.411127
Python version: 3.6.9 (default, Nov  7 2019, 10:44:02) [GCC 8.3.0]
Platform: Linux-4.15.0-88-generic-x86_64-with-Ubuntu-18.04-bionic

Garbage collector:
Counts: (578, 10, 4)
Thresholds: (700, 10, 10)

Result of 1 checks:

Kind                   Reason                                 Details
OctaviaDBCheckResult   The Octavia database is unavailable.   Database health check failed due to: (pymysql.err.OperationalError) (2003, "Can't connect to MySQL server on '127.0.0.1' ([Errno 111] Connection refused)") [SQL: SELECT 1] (Background on this error at: http://sqlalche.me/e/e3q8).

1 greenthread(s) active:
<...>

1 thread(s) active:
<...>
Example passing output for application/json with *detailed* True: .. code-block:: bash $ curl -i -H "Accept: application/json" http://192.51.100.10/load-balancer/healthcheck HTTP/1.1 200 OK Date: Mon, 16 Mar 2020 22:05:26 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: application/json Content-Length: 9298 x-openstack-request-id: req-d3913655-6e3f-4086-a252-8bb297ea5fd6 Connection: close { "detailed": true, "gc": { "counts": [ 27, 10, 4 ], "threshold": [ 700, 10, 10 ] }, "greenthreads": [ <...> ], "now": "2020-03-16 22:05:26.431429", "platform": "Linux-4.15.0-88-generic-x86_64-with-Ubuntu-18.04-bionic", "python_version": "3.6.9 (default, Nov 7 2019, 10:44:02) \n[GCC 8.3.0]", "reasons": [ { "class": "OctaviaDBCheckResult", "details": "", "reason": "OK" } ], "threads": [ <...> ] } Example failing output for application/json with *detailed* True: .. code-block:: bash $ curl -i -H "Accept: application/json" http://192.51.100.10/load-balancer/healthcheck HTTP/1.1 503 Service Unavailable Date: Mon, 16 Mar 2020 23:56:43 GMT Server: Apache/2.4.29 (Ubuntu) Content-Type: application/json Content-Length: 9510 x-openstack-request-id: req-3d62ea04-9bdb-4e19-b218-1a81ff7d7337 Connection: close { "detailed": true, "gc": { "counts": [ 178, 0, 5 ], "threshold": [ 700, 10, 10 ] }, "greenthreads": [ <...> ], "now": "2020-03-16 23:58:23.361209", "platform": "Linux-4.15.0-88-generic-x86_64-with-Ubuntu-18.04-bionic", "python_version": "3.6.9 (default, Nov 7 2019, 10:44:02) \n[GCC 8.3.0]", "reasons": [ { "class": "OctaviaDBCheckResult", "details": "(pymysql.err.OperationalError) (2003, \"Can't connect to MySQL server on '127.0.0.1' ([Errno 111] Connection refused)\")\n(Background on this error at: http://sqlalche.me/e/e3q8)", "reason": "The Octavia database is unavailable." } ], "threads": [ <...> ] } Oslo Healthcheck Plugins ======================== The Octavia API health monitoring endpoint, implemented with Oslo middleware healthcheck, is extensible using optional backend plugins. There are currently plugins provided by the Oslo middleware library and plugins provided by Octavia. **Oslo middleware provided plugins** * `disable_by_file `_ * `disable_by_files_ports `_ **Octavia provided plugins** * `octavia_db_check`_ .. warning:: Some plugins may have long timeouts. It is a best practice to configure your healthcheck query to have connection, read, and/or data timeouts. The appropriate values will be unique to each deployment depending on the cloud performance, number of plugins, etc. Enabling Octavia API Health Monitoring ====================================== To enable the Octavia API health monitoring endpoint, the proper configuration file settings need to be updated and the Octavia API processes need to be restarted. Start by enabling the endpoint: .. code-block:: ini [api_settings] healthcheck_enabled = True When the healthcheck_enabled setting is *False*, queries of the /healthcheck will receive an HTTP 404 Not Found response. You will then need to select the desired monitoring backend plugins: .. code-block:: ini [healthcheck] backends = octavia_db_check .. note:: When no plugins are configured, the behavior of Oslo middleware healthcheck changes. Not only does it not run any tests, it will return 204 results instead of 200. The Octavia API health monitoring endpoint does not require a keystone token for access to allow external load balancers to query the endpoint. For this reason we recommend you restrict access to it on your external load balancer to prevent abuse. 
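For example, an external health monitor might probe the endpoint with
explicit client-side timeouts, per the best practice noted earlier (the
address and timeout values below are illustrative only):

.. code-block:: bash

   # Query the endpoint, failing fast if the API is unresponsive.
   curl --connect-timeout 2 --max-time 5 -H "Accept: application/json" \
       http://198.51.100.10/load-balancer/healthcheck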
As an additional protection, the API will cache results for a configurable
period of time. This means that queries to the health monitoring endpoint
will return cached results until the refresh interval has expired, at which
point the health check plugin will rerun the check. By default, the refresh
interval is five seconds. This can be configured by adjusting the
healthcheck_refresh_interval setting in the Octavia configuration file:

.. code-block:: ini

    [api_settings]
    healthcheck_refresh_interval = 5

Optionally, you can enable the "detailed" mode in Oslo middleware
healthcheck. This will cause Oslo middleware healthcheck to return
additional information about the API instance. It will also provide
exception details if one was raised during the health check. This setting is
disabled (False) by default in the Octavia API.

.. code-block:: ini

    [healthcheck]
    detailed = True

.. warning::

   Enabling the 'detailed' setting will expose sensitive details about the
   API process. Do not enable this unless you are sure it will not pose a
   **security risk** to your API instances. We highly recommend you do not
   enable this.

Using Octavia API Health Monitoring
===================================

The Octavia API health monitoring endpoint can be accessed via the
/healthcheck path on the Octavia API endpoint. For example, if your Octavia
(load-balancer) endpoint in keystone is:

.. code-block:: bash

    https://10.21.21.78/load-balancer

You would access the Octavia API health monitoring endpoint via:

.. code-block:: bash

    https://10.21.21.78/load-balancer/healthcheck

A keystone token is not required to access this endpoint.

Octavia Plugins
===============

octavia_db_check
----------------

The octavia_db_check plugin validates that the API instance has a working
connection to the Octavia database. It executes a SQL no-op query,
'SELECT 1;', against the database.

.. note::

   Many OpenStack services and libraries, such as oslo.db and sqlalchemy,
   also use the no-op query, 'SELECT 1;', for health checks.

The possible octavia_db_check results are:

+---------+--------+-------------+--------------------------------------+
| Request | Result | Status Code | "reason" Message                     |
+=========+========+=============+======================================+
| GET     | Pass   | 200         | OK                                   |
+---------+--------+-------------+--------------------------------------+
| HEAD    | Pass   | 204         |                                      |
+---------+--------+-------------+--------------------------------------+
| GET     | Fail   | 503         | The Octavia database is unavailable. |
+---------+--------+-------------+--------------------------------------+
| HEAD    | Fail   | 503         |                                      |
+---------+--------+-------------+--------------------------------------+

When running Oslo middleware healthcheck in "detailed" mode, the "details"
field will have additional information about the error encountered,
including the exception details if they were available.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/index.rst0000664000175000017500000000216500000000000020174 0ustar00zuulzuul00000000000000
======================
Octavia Administration
======================

Getting Started
---------------

.. toctree::
   :maxdepth: 1

   ../reference/introduction.rst
   ../reference/glossary.rst
   ../contributor/guides/dev-quick-start.rst

Installation and Configuration Guides
-------------------------------------
.. toctree::
   :maxdepth: 1

   amphora-image-build
   guides/certificates.rst
   ../configuration/configref.rst
   ../configuration/policy.rst

Optional Installation and Configuration Guides
----------------------------------------------

.. toctree::
   :maxdepth: 1

   providers/index.rst
   log-offloading.rst
   api-audit.rst
   healthcheck.rst
   flavors.rst
   apache-httpd.rst

Maintenance and Operations
--------------------------

.. toctree::
   :maxdepth: 1

   guides/operator-maintenance.rst
   octavia-status
   guides/upgrade.rst

Operator Reference
------------------

.. toctree::
   :maxdepth: 1

   Octavia API Reference <https://docs.openstack.org/api-ref/load-balancer/>
   ../contributor/api/haproxy-amphora-api.rst

.. only:: html

   Indices and Search
   ------------------

   * :ref:`search`
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/log-offloading.rst0000664000175000017500000002543400000000000021750 0ustar00zuulzuul00000000000000
..
   Copyright 2019 Red Hat, Inc. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain a
   copy of the License at http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

==============================
Octavia Amphora Log Offloading
==============================

The default logging configuration stores the logs locally, on the amphora
filesystem, with file rotation.

Octavia Amphorae can offload their log files via the syslog protocol to
syslog receivers via the load balancer management network (lb-mgmt-net).
This allows log aggregation of both administrative logs and tenant traffic
flow logs. The syslog receivers can either be local to the load balancer
management network or routable via the load balancer management network.

By default, any syslog receiver that supports the UDP or TCP syslog protocol
can be used; however, the operator also has the option to create an override
rsyslog configuration template to enable other features or protocols their
Amphora image may support.

This guide will discuss the features of :term:`Amphora` log offloading and
how to configure them.

Administrative Logs
===================

The administrative log offloading feature of the :term:`Amphora` covers all
of the system logging inside the :term:`Amphora` except for the tenant flow
logs. Tenant flow logs can be sent to and processed by the same syslog
receiver used by the administrative logs, but they are configured
separately.

All administrative log messages will be sent using the native log format for
the application sending the message.

Enabling Administrative Log Offloading
--------------------------------------

One or more syslog receiver endpoints must be configured in the Octavia
configuration file to enable administrative log offloading. The first
endpoint will be the primary endpoint to receive the syslog packets. Should
the first endpoint become unavailable, the additional endpoints listed will
be tried one at a time.

.. note::

   Secondary syslog endpoints will only be used if the log_protocol is
   configured for TCP. With the UDP syslog protocol, rsyslog is unable to
   detect if the primary endpoint has failed.
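The syslog receiver itself is outside the scope of Octavia. For reference, a
minimal rsyslog receiver sketch that accepts both protocols on the port used
in the examples below might look like the following (this assumes a recent
rsyslog with the imudp and imtcp modules available; the file path is
hypothetical):

.. code-block::

    # /etc/rsyslog.d/10-octavia-receiver.conf (hypothetical path)
    # Accept syslog from the amphorae over UDP and TCP on port 10514.
    module(load="imudp")
    input(type="imudp" port="10514")
    module(load="imtcp")
    input(type="imtcp" port="10514")

Remember to allow TCP/UDP port 10514 from the lb-mgmt-net in the receiver's
firewall or security group, as the note below also advises.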
To configure administrative log offloading, set the following setting in
your Octavia configuration file for all of the controllers and restart them:

.. code-block:: ini

    [amphora_agent]
    admin_log_targets = 192.0.2.1:10514, 2001:db8:1::10:10514

In this example, the primary syslog receiver will be 192.0.2.1 on port
10514. The backup syslog receiver will be 2001:db8:1::10 on port 10514.

.. note::

   Make sure your syslog receiver endpoints are accessible from the load
   balancer management network and you have configured the required security
   group or firewall rules to allow the traffic. These endpoints can be
   routable addresses from the load balancer management network.

The load balancer related administrative logs will be sent using a
LOG_LOCAL[0-7] facility. The facility number defaults to 1, but is
configurable using the administrative_log_facility setting in the Octavia
configuration file.

To configure the administrative log facility, set the following setting in
your Octavia configuration file for all of the controllers and restart them:

.. code-block:: ini

    [amphora_agent]
    administrative_log_facility = 1

Forwarding All Administrative Logs
----------------------------------

By default, the Amphorae will only forward load balancer related
administrative logs, such as the haproxy admin logs, keepalived, and
:term:`Amphora` agent logs. You can optionally configure the Amphorae to
send all of the administrative logs from the :term:`Amphora`, such as the
kernel, system, and security logs. Even with this setting, the tenant flow
logs will not be included. You can configure tenant flow log forwarding in
the `Tenant Flow Logs`_ section.

The load balancer related administrative logs will be sent using the
LOG_LOCAL[0-7] facility configured using the administrative_log_facility
setting. All other administrative log messages will use their native syslog
facilities.

To configure the Amphorae to forward all administrative logs, set the
following setting in your Octavia configuration file for all of the
controllers and restart them:

.. code-block:: ini

    [amphora_agent]
    forward_all_logs = True

Tenant Flow Logs
================

Enabling Tenant Flow Log Offloading
-----------------------------------

One or more syslog receiver endpoints must be configured in the Octavia
configuration file to enable tenant flow log offloading. The first endpoint
will be the primary endpoint to receive the syslog packets. Should the first
endpoint become unavailable, the additional endpoints listed will be tried
one at a time. The endpoints configured for tenant flow log offloading may
be the same endpoints as the administrative log offloading configuration.

.. warning::

   Tenant flow logging can produce a large number of syslog messages,
   depending on how many connections the load balancers are receiving.
   Tenant flow logging produces one log entry per connection to the load
   balancer. We recommend you monitor, size, and configure your syslog
   receivers appropriately based on the expected number of connections your
   load balancers will be handling.

.. note::

   Secondary syslog endpoints will only be used if the log_protocol is
   configured for TCP. With the UDP syslog protocol, rsyslog is unable to
   detect if the primary endpoint has failed.

To configure tenant flow log offloading, set the following setting in your
Octavia configuration file for all of the controllers and restart them:
.. code-block:: ini

    [amphora_agent]
    tenant_log_targets = 192.0.2.1:10514, 2001:db8:1::10:10514

In this example, the primary syslog receiver will be 192.0.2.1 on port
10514. The backup syslog receiver will be 2001:db8:1::10 on port 10514.

.. note::

   Make sure your syslog receiver endpoints are accessible from the load
   balancer management network and you have configured the required security
   group or firewall rules to allow the traffic. These endpoints can be
   routable addresses from the load balancer management network.

The load balancer related tenant flow logs will be sent using a
LOG_LOCAL[0-7] facility. The facility number defaults to 0, but is
configurable using the user_log_facility setting in the Octavia
configuration file.

To configure the tenant flow log facility, set the following setting in your
Octavia configuration file for all of the controllers and restart them:

.. code-block:: ini

    [amphora_agent]
    user_log_facility = 0

Tenant Flow Log Format
----------------------

The default tenant flow log format is:

.. code-block::

    project_id loadbalancer_id listener_id client_ip client_port date_time
    request_string http_status bytes_read bytes_uploaded
    client_certificate_verify(0 or 1) client_certificate_distinguished_name
    pool_id member_id processing_time(ms) termination_state

Any field that is unknown or not applicable to the connection will have a
'-' character in its place.

An example log entry when using rsyslog as the syslog receiver is:

.. note::

   The prefix[1] in this example comes from the rsyslog receiver and is not
   part of the syslog message from the amphora.

   [1] "Jun 12 00:44:13 amphora-3e0239c3-5496-4215-b76c-6abbe18de573 haproxy[1644]:"

.. code-block::

    Jun 12 00:44:13 amphora-3e0239c3-5496-4215-b76c-6abbe18de573 haproxy[1644]: 5408b89aa45b48c69a53dca1aaec58db fd8f23df-960b-4b12-ba62-2b1dff661ee7 261ecfc2-9e8e-4bba-9ec2-3c903459a895 172.24.4.1 41152 12/Jun/2019:00:44:13.030 "GET / HTTP/1.1" 200 76 73 - "" e37e0e04-68a3-435b-876c-cffe4f2138a4 6f2720b3-27dc-4496-9039-1aafe2fee105 4 --

Custom Tenant Flow Log Format
-----------------------------

You can optionally specify a custom log format for the tenant flow logs.
This string follows the HAProxy log format variables, with the exception of
the "{{ project_id }}" and "{{ lb_id }}" variables that will be replaced by
the Octavia :term:`Amphora` driver. These custom variables are optional. See
the HAProxy documentation for custom log format variable definitions.

To configure a custom log format, set the following setting in your Octavia
configuration file for all of the controllers and restart them:

.. code-block:: ini

    [haproxy_amphora]
    user_log_format = '{{ project_id }} {{ lb_id }} %f %ci %cp %t %{+Q}r %ST %B %U %[ssl_c_verify] %{+Q}[ssl_c_s_dn] %b %s %Tt %tsc'

Disabling Logging
=================

There may be cases where you need to disable logging inside the
:term:`Amphora`, such as complying with regulatory standards. Octavia
provides multiple options for disabling :term:`Amphora` logging.

Disable Local Log Storage
-------------------------

This setting stops log entries from being written to the disk inside the
:term:`Amphora`. Logs can still be sent via :term:`Amphora` log offloading
if log offloading is configured for the Amphorae. Enabling this setting may
provide a performance benefit to the load balancer.

.. warning::

   This feature disables ALL log storage in the :term:`Amphora`, including
   kernel, system, and security logging.
.. note::

   If you enable this setting and are not using :term:`Amphora` log
   offloading, we recommend you also `Disable Tenant Flow Logging`_ to
   improve load balancing performance.

To disable local log storage in the :term:`Amphora`, set the following
setting in your Octavia configuration file for all of the controllers and
restart them:

.. code-block:: ini

    [amphora_agent]
    disable_local_log_storage = True

Disable Tenant Flow Logging
---------------------------

This setting allows you to disable tenant flow logging irrespective of the
other logging configuration settings. It takes precedence over the other
settings. When this setting is enabled, no tenant flow (connection) logs
will be written to the disk inside the :term:`Amphora` or be sent via
:term:`Amphora` log offloading.

.. note::

   Disabling tenant flow logging can also improve the load balancing
   performance of the amphora. Due to the potential performance improvement,
   we recommend you enable this setting when using the `Disable Local Log
   Storage`_ setting.

To disable tenant flow logging, set the following setting in your Octavia
configuration file for all of the controllers and restart them:

.. code-block:: ini

    [haproxy_amphora]
    connection_logging = False
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/octavia-status.rst0000664000175000017500000000365600000000000022032 0ustar00zuulzuul00000000000000
==============
octavia-status
==============

-----------------------------------------
CLI interface for Octavia status commands
-----------------------------------------

Synopsis
========

::

  octavia-status <category> <command> [<args>]

Description
===========

:program:`octavia-status` is a tool that provides routines for checking the
status of an Octavia deployment.

Options
=======

The standard pattern for executing an :program:`octavia-status` command is::

    octavia-status <category> <command> [<args>]

Run without arguments to see a list of available command categories::

    octavia-status

Categories are:

* ``upgrade``

Detailed descriptions are below:

You can also run with a category argument such as ``upgrade`` to see a list
of all commands in that category::

    octavia-status upgrade

These sections describe the available categories and arguments for
:program:`octavia-status`.

Upgrade
~~~~~~~

.. _octavia-status-checks:

``octavia-status upgrade check``
  Performs a release-specific readiness check before restarting services
  with new code. For example, missing or changed configuration options,
  incompatible object states, or other conditions that could lead to
  failures while upgrading.

  **Return Codes**

  .. list-table::
     :widths: 20 80
     :header-rows: 1

     * - Return code
       - Description
     * - 0
       - All upgrade readiness checks passed successfully and there is
         nothing to do.
     * - 1
       - At least one check encountered an issue and requires further
         investigation. This is considered a warning but the upgrade may be
         OK.
     * - 2
       - There was an upgrade status check failure that needs to be
         investigated. This should be considered something that stops an
         upgrade.
     * - 255
       - An unexpected error occurred.

  **History of Checks**

  **4.0.0 (Stein)**

  * Sample check to be filled in with checks as they are added in Stein.
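For example, a passing run on a healthy deployment might look like the
following sketch (the exact checks listed vary by release; the table shape
follows the common oslo.upgradecheck output)::

    $ octavia-status upgrade check
    +--------------------------------+
    | Upgrade Check Results          |
    +--------------------------------+
    | Check: <check name>            |
    | Result: Success                |
    | Details: None                  |
    +--------------------------------+
    $ echo $?
    0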
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/admin/providers/0000775000175000017500000000000000000000000020344 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/providers/a10.rst0000664000175000017500000000167500000000000021470 0ustar00zuulzuul00000000000000
..
   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain a
   copy of the License at http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

A10 Networks OpenStack Octavia Driver
=====================================

A10 Networks Octavia Driver for Thunder, vThunder and AX Series Appliances.

Default provider name: **a10**

The driver source:

    https://github.com/a10networks/a10-octavia/

The documentation:

    https://github.com/a10networks/a10-octavia/

Where to report issues with the driver:

    Contact A10 Networks
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/providers/amphora.rst0000664000175000017500000000215200000000000022525 0ustar00zuulzuul00000000000000
..
   Copyright 2018 Rackspace, US Inc.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain a
   copy of the License at http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

Amphora
=======

This is the reference driver for Octavia, meaning it is used for testing the
Octavia code base. It is an open source, scalable, and highly available load
balancing provider.

Default provider name: **amphora**

The driver package:

    https://pypi.org/project/octavia/

The driver source:

    https://opendev.org/openstack/octavia/

The documentation:

    https://docs.openstack.org/octavia/latest/

Where to report issues with the driver:

    https://storyboard.openstack.org/#!/project/openstack/octavia
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/providers/amphorav2.rst0000664000175000017500000000222700000000000023000 0ustar00zuulzuul00000000000000
..
   Copyright 2020 Mirantis Inc.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain a
   copy of the License at http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

Amphorav2
=========

This is an extension of the reference driver for Octavia.
It adopts the taskflow jobboard feature and saves task states into a
persistence backend, which allows task execution to resume if a controller
was interrupted.

Default provider name: **amphorav2**

The driver package:

    https://pypi.org/project/octavia/

The driver source:

    https://opendev.org/openstack/octavia/

The documentation:

    https://docs.openstack.org/octavia/latest/

Where to report issues with the driver:

    https://storyboard.openstack.org/#!/project/openstack/octavia
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/providers/f5.rst0000664000175000017500000000164700000000000021410 0ustar00zuulzuul00000000000000
..
   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain a
   copy of the License at http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

F5 Networks Provider Driver for OpenStack Octavia by SAP SE
===========================================================

F5 Networks Provider Driver for OpenStack Octavia provided by SAP SE.

Default provider name: **f5**

The driver source:

    https://github.com/sapcc/octavia-f5-provider-driver

Where to report issues with the driver:

    Contact SAP SE
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/providers/index.rst0000664000175000017500000000433100000000000022206 0ustar00zuulzuul00000000000000
..
   Copyright 2018 Rackspace, US Inc.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain a
   copy of the License at http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

==========================
Available Provider Drivers
==========================

Octavia supports enabling multiple provider drivers via the Octavia v2 API.
Drivers, other than the reference Amphora driver, exist outside of the
Octavia repository and are not maintained by the Octavia team. This list is
intended to provide a place for operators to discover and find available
load balancing provider drivers.

This list is maintained on a **"best effort"** basis, so please check with
your favorite load balancer provider to see if they support OpenStack load
balancing. If they don't, make a request for support!

.. Note:: The provider drivers listed here may not be maintained by the
          OpenStack LBaaS (Octavia) team. Please submit bugs for these
          projects through their respective bug tracking systems.

Drivers are installed on all of your Octavia API instances using pip and
automatically integrated with Octavia using `setuptools entry points`_. Once
installed, operators can enable the provider by adding it to the Octavia
configuration file `enabled_provider_drivers`_ setting in the [api_settings]
section, as shown in the sketch below.
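For example, a deployment offering both the reference amphora driver and the
OVN driver might use a configuration similar to this sketch (the description
strings after each colon are free-form and illustrative):

.. code-block:: ini

    [api_settings]
    enabled_provider_drivers = amphora: The Octavia Amphora driver.,
                               ovn: The OVN provider driver.
    default_provider_driver = amphora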
Be sure to install and enable the provider on all of your Octavia API instances. .. _setuptools entry points: http://setuptools.readthedocs.io/en/latest/pkg_resources.html?#entry-points .. _enabled_provider_drivers: https://docs.openstack.org/octavia/latest/configuration/configref.html#api_settings.enabled_provider_drivers .. include:: a10.rst .. include:: amphora.rst .. include:: amphorav2.rst .. include:: f5.rst .. include:: ovn.rst .. include:: radware.rst .. include:: vmware-nsx.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/providers/ovn.rst0000664000175000017500000000221500000000000021700 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. OVN Octavia Provider Driver =========================== OVN provides virtual networking for Open vSwitch and is a component of the Open vSwitch project. This project provides integration between OpenStack Octavia and OVN. Default provider name: **ovn** The driver package: https://pypi.org/project/ovn-octavia-provider/ The driver source: https://opendev.org/openstack/ovn-octavia-provider The documentation: https://docs.openstack.org/ovn-octavia-provider/latest/ Where to report issues with the driver: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/providers/radware.rst0000664000175000017500000000170600000000000022527 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Radware Provider Driver for OpenStack Octavia ============================================= Radware provider driver for OpenStack Octavia. Default provider name: **radware** The driver package: https://pypi.org/project/radware_octavia_rocky_driver/ The documentation: https://pypi.org/project/radware_octavia_rocky_driver/ Where to report issues with the driver: Contact Radware ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/admin/providers/vmware-nsx.rst0000664000175000017500000000153500000000000023211 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. VMware NSX ========== VMware NSX Octavia Driver. Default provider name: **vmwareedge** The driver package: https://pypi.org/project/vmware-nsx/ The driver source: https://opendev.org/x/vmware-nsx Where to report issues with the driver: https://bugs.launchpad.net/vmware-nsx ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/cli/0000775000175000017500000000000000000000000016006 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/cli/index.rst0000664000175000017500000000053600000000000017653 0ustar00zuulzuul00000000000000============================== Octavia Command Line Interface ============================== Octavia has an OpenStack Client plugin available as the native Command Line Interface (CLI). Please see the `python-octaviaclient documentation `_ for documentation on installing and using the CLI. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/conf.py0000664000175000017500000002543300000000000016545 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # # Octavia documentation build configuration file, created by # sphinx-quickstart on Tue May 21 17:43:32 2013. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import datetime import os import sys from pydotplus import graphviz import sadisplay import octavia.db.models as models sys.path.insert(0, os.path.abspath('../..')) sys.path.insert(0, os.path.abspath('.')) from tools import create_flow_docs # Generate our flow diagrams create_flow_docs.generate( 'tools/flow-list.txt', 'doc/source/contributor/devref/flow_diagrams') create_flow_docs.generate( 'tools/flow-list-v2.txt', 'doc/source/contributor/devref/flow_diagrams_v2') # Generate entity relationship diagram desc = sadisplay.describe( [getattr(models, attr) for attr in dir(models)], show_methods=True, show_properties=True, show_indexes=True, ) graph = graphviz.graph_from_dot_data(sadisplay.dot(desc).encode('utf-8')) graph.write('contributor/devref/erd.svg', format='svg') # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinxcontrib.blockdiag', 'sphinxcontrib.actdiag', 'sphinxcontrib.seqdiag', 'sphinxcontrib.nwdiag', 'sphinx.ext.graphviz', 'sphinx_feature_classification.support_matrix', 'openstackdocstheme', 'oslo_config.sphinxext', 'oslo_policy.sphinxpolicygen', 'sphinxcontrib.apidoc', 'sphinxcontrib.rsvgconverter', ] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'2014-2019, OpenStack Octavia Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # Version info from octavia.version import version_info as octavia_version release = octavia_version.release_string() # The short X.Y version. version = octavia_version.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ '_build', 'contributor/specs/skeleton.rst', 'contributor/specs/template.rst' ] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['octavia.'] # -- Options for man page output ---------------------------------------------- man_pages = [] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} html_theme_options = {'show_other_versions': True} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
# html_favicon = None html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Octavia-Specsdoc' # -- Options for LaTeX output ------------------------------------------------- # Fix Unicode character for sphinx_feature_classification # Sphinx default latex engine (pdflatex) doesn't know much unicode latex_preamble = r""" \usepackage{newunicodechar} \newunicodechar{✖}{\sffamily X} \setcounter{tocdepth}{2} \authoraddress{\textcopyright %s OpenStack Foundation} """ % datetime.datetime.now().year latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # openany: Skip blank pages in generated PDFs 'extraclassoptions': 'openany,oneside', 'makeindex': '', 'printindex': '', 'preamble': latex_preamble } # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 # Some distros are missing xindy latex_use_xindy = False # Fix missing apostrophe smartquotes_excludes = {'builders': ['latex']} # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [( 'index', 'doc-octavia.tex', u'Octavia Documentation', u'OpenStack Octavia Team', 'manual' )] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [( 'index', 'Octavia-specs', u'Octavia Design Specs', u'OpenStack Octavia Team', 'octavia-specs', 'Design specifications for the Octavia project.', 'Miscellaneous' )] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Octavia Specs' epub_author = u'OpenStack Octavia Team' epub_publisher = u'OpenStack Octavia Team' epub_copyright = u'2014, OpenStack Octavia Team' # The language of the text. It defaults to the language option # or en if the language is not set. # epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. # epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. # epub_identifier = '' # A unique identification for the text. # epub_uid = '' # A tuple containing the cover image and cover page html template filenames. # epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_post_files = [] # A list of files that should not be packed into the epub file. # epub_exclude_files = [] # The depth of the table of contents in toc.ncx. # epub_tocdepth = 3 # Allow duplicate toc entries. # epub_tocdup = True # RBAC sample policy file generation policy_generator_config_file = '../../etc/policy/octavia-policy-generator.conf' sample_policy_basename = 'configuration/_static/octavia' repository_name = 'openstack/octavia' bug_project = '908' bug_tag = 'docs' apidoc_output_dir = 'contributor/modules' apidoc_module_dir = '../../octavia' apidoc_excluded_paths = [ 'tests', 'db/migration' ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/configuration/0000775000175000017500000000000000000000000020106 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/configuration/configref.rst0000664000175000017500000000152100000000000022601 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Octavia Configuration Options ============================= .. contents:: Table of Contents :depth: 2 .. 
.. show-options::

   keystonemiddleware.auth_token
   octavia
   oslo.db
   oslo.log
   oslo.messaging
   cotyledon
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/configuration/index.rst0000664000175000017500000000017000000000000021745 0ustar00zuulzuul00000000000000
=====================
Octavia Configuration
=====================

.. toctree::
   :maxdepth: 1

   configref
   policy
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/configuration/policy.rst0000664000175000017500000000466400000000000022145 0ustar00zuulzuul00000000000000
================
Octavia Policies
================

The default policy is to not allow access unless the auth_strategy is
'noauth'. Users must be a member of one of the following roles to have
access to the load-balancer API:

.. glossary::

   role:load-balancer_observer
      User has access to load-balancer read-only APIs.

   role:load-balancer_global_observer
      User has access to load-balancer read-only APIs including resources
      owned by others.

   role:load-balancer_member
      User has access to load-balancer read and write APIs.

   role:load-balancer_quota_admin
      User is considered an admin for quota APIs only.

   role:load-balancer_admin
      User is considered an admin for all load-balancer APIs including
      resources owned by others.

   role:admin
      User is admin to all APIs.

.. note::

   'is_admin:True' is a policy rule that takes into account the
   auth_strategy == noauth configuration setting. It is equivalent to
   'rule:context_is_admin or {auth_strategy == noauth}' if that would be
   valid syntax.

An alternate policy file has been provided in octavia/etc/policy called
admin_or_owner-policy.json that removes the load-balancer RBAC role
requirement. Please see the README.rst in that directory for more
information.

Sample File Generation
----------------------

To generate a sample policy.json file from the Octavia defaults, run the
oslo policy generation script::

    oslopolicy-sample-generator --config-file etc/policy/octavia-policy-generator.conf --output-file policy.json.sample

Merged File Generation
----------------------

This will output a policy file which includes all registered policy defaults
and all policies configured with a policy file. This file shows the
effective policy in use by the project::

    oslopolicy-policy-generator --config-file etc/policy/octavia-policy-generator.conf

This tool uses the output_file path from the config-file.

List Redundant Configurations
-----------------------------

This will output a list of matches for policy rules that are defined in a
configuration file where the rule does not differ from a registered default
rule. These are rules that can be removed from the policy file with no
change in effective policy::

    oslopolicy-list-redundant --config-file etc/policy/octavia-policy-generator.conf

Default Octavia Policies
------------------------

..
literalinclude:: _static/octavia.policy.yaml.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/contributor/0000775000175000017500000000000000000000000017611 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/CONSTITUTION.rst0000664000175000017500000000477700000000000022324 0ustar00zuulzuul00000000000000==================== Octavia Constitution ==================== This document defines the guiding principles that project leadership will be following in creating, improving and maintaining the Octavia project. Octavia is an OpenStack project ------------------------------- This means we try to run things the same way other "canonized" OpenStack projects operate from a procedural perspective. This is because we hope that Octavia will eventually become a standard part of any OpenStack deployment. Octavia is as open as OpenStack ------------------------------- Octavia tries to follow the same standards for openness that the OpenStack project also strives to follow: https://wiki.openstack.org/wiki/Open We are committed to open design, development, and community. Octavia is "free" ----------------- We mean that both in the "beer" and in the "speech" sense. That is to say, the reference implementation for Octavia should be made up only of open source components that share the same kind of unencumbered licensing that OpenStack uses. Note that this does not mean we are against having vendors develop products which can replace some of the components within Octavia. (For example, the Octavia VM images might be replaced by a vendor's proprietary VM image.) Rather, it means that: * The reference implementation should always be open source and unencumbered. * We are typically not interested in making design compromises in order to work with a vendor's proprietary product. If a vendor wants to develop a component for Octavia, then the vendor should bend to Octavia's needs, not the other way around. Octavia is a load balancer for large operators ---------------------------------------------- That's not to say that small operators can't use it. (In fact, we expect it to work well for small deployments, too.) But what we mean here is that if in creating, improving or maintaining Octavia we somehow make it unable to meet the needs of a typical large operator (or that operator's users), then we have failed. Octavia follows the best coding and design conventions ------------------------------------------------------ For the most part, Octavia tries to follow the coding standards set forth for the OpenStack project in general: https://docs.openstack.org/hacking/latest More specific additional standards can be found in the HACKING.rst file in the same directory as this constitution. Any exceptions should be well justified and documented. (Comments in or near the breach in coding standards are usually sufficient documentation.) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/HACKING.rst0000664000175000017500000001641500000000000021416 0ustar00zuulzuul00000000000000.. _octavia-style-commandments: Octavia Style Commandments ========================== This project was ultimately spawned from work done on the Neutron project. As such, we tend to follow Neutron conventions regarding coding style. 
- We follow the OpenStack Style Commandments:
  https://docs.openstack.org/hacking/latest

Octavia Specific Commandments
-----------------------------

- [O316] Change assertTrue(isinstance(A, B)) by optimal assert like
  assertIsInstance(A, B).
- [O318] Change assert(Not)Equal(A, None) or assert(Not)Equal(None, A) by
  optimal assert like assertIs(Not)None(A).
- [O319] Validate that debug level logs are not translated.
- [O321] Validate that jsonutils module is used instead of json
- [O322] Don't use author tags
- [O323] Change assertEqual(True, A) or assertEqual(False, A) to the more
  specific assertTrue(A) or assertFalse(A)
- [O324] Method's default argument shouldn't be mutable
- [O338] Change assertEqual(A in B, True), assertEqual(True, A in B),
  assertEqual(A in B, False) or assertEqual(False, A in B) to the more
  specific assertIn/NotIn(A, B)
- [O339] LOG.warn() is not allowed. Use LOG.warning()
- [O340] Don't use xrange()
- [O341] Don't translate logs.
- [O342] Exception messages should be translated
- [O343] Python 3: do not use basestring.
- [O344] Python 3: do not use dict.iteritems.
- [O345] Usage of Python eventlet module not allowed
- [O346] Don't use backslashes for line continuation.
- [O347] Taskflow revert methods must have \*\*kwargs.

Creating Unit Tests
-------------------

For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.

Everything is python
--------------------

Although OpenStack apparently allows either python or C++ code, at this time
we don't envision needing anything other than python (and standard,
supported open source modules) for anything we intend to do in Octavia.

Idempotency
-----------

With as much as is going on inside Octavia, it's likely that certain
messages and commands will be repeatedly processed. It's important that this
doesn't break the functionality of the load balancing service. Therefore, as
much as possible, algorithms and interfaces should be made as idempotent as
possible.

Centralize intelligence, de-centralize workload
-----------------------------------------------

This means that tasks which need to be done relatively infrequently but
require either additional knowledge about the state of other components in
the Octavia system, advanced logic behind decisions, or otherwise a high
degree of intelligence should be done by centralized components (ex.
controllers) within the Octavia system. Examples of this might include:
Also, as a secondary benefit of centralizing intelligence, minor feature additions and bugfixes can often be accomplished in a large operator environment without having to touch every Octavia amphora running in said environment. All APIs are versioned ---------------------- This includes "internal" APIs between Octavia components. Experience coding in the Neutron LBaaS project has taught us that in a large project with many heterogeneous parts, throughout the lifecycle of this project, different parts will evolve at different rates. It is important that these components are allowed to do so without hindering or being hindered by parallel development in other components. It is also likely that in very large deployments, there might be tens- or hundreds-of-thousands of individual instances of a given component deployed (most likely, the Octavia amphorae). It is unreasonable to expect a large operator to update all of these components at once. Therefore it is likely that for a significant amount of time during a roll-out of a new version, both the old and new versions of a given component must be able to be controlled or otherwise interfaced with by the new components. Both of the above considerations can be allowed for if we use versioning of APIs where components interact with each other. Octavia must also keep in mind Neutron LBaaS API versions. Octavia must have the ability to support multiple simultaneous Neutron LBaaS API versions in an effort to allow for Neutron LBaaS API deprecation of URIs. The rationale is that Neutron LBaaS API users should have the ability to transition from one version to the next easily. Scalability and resilience are as important as functionality ------------------------------------------------------------ Octavia is meant to be an *operator scale* load balancer. As such, it's usually not enough just to get something working: It also needs to be scalable. For most components, "scalable" implies horizontally scalable. In any large operational environment, resilience to failures is a necessity. Practically speaking, this means that all components of the system that make up Octavia should be monitored in one way or another, and that where possible automatic recovery from the most common kinds of failures should become a standard feature. Where automatic recovery is not an option, then some form of notification about the failure should be implemented. Avoid premature optimization ---------------------------- Understand that being "high performance" is often not the same thing as being "scalable." First get the thing to work in an intelligent way. Only worry about making it fast if speed becomes an issue. Don't repeat yourself --------------------- Octavia strives to follow DRY principles. There should be one source of truth, and repetition of code should be avoided. Security is not an afterthought ------------------------------- The load balancer is often both the most visible public interface to a given user application, but load balancers themselves often have direct access to sensitive components and data within the application environment. Security bugs will happen, but in general we should not approve designs which have known significant security problems, or which could be made more secure by better design. Octavia should follow industry standards ---------------------------------------- By "industry standards" we either mean RFCs or well-established best practices. 
We are generally not interested in defining new standards if a prior open standard already exists. We should also avoid doing things which directly or indirectly contradict established standards. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/contributor/api/0000775000175000017500000000000000000000000020362 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/api/haproxy-amphora-api.rst0000664000175000017500000006704500000000000025016 0ustar00zuulzuul00000000000000=========================== Octavia HAProxy Amphora API =========================== Introduction ============ This document describes the API interface between the reference haproxy driver and its corresponding haproxy-based amphorae. Octavia reference haproxy amphorae use a web service API for configuration and control. This API should be secured through the use of TLS encryption as well as bi-directional verification of client- and server-side certificates. (The exact process for generating and distributing these certificates should be covered in another document.) In addition to the web service configuration and control interface, the amphorae may use an HMAC-signed UDP protocol for communicating regular, less- vital information to the controller (ex. statistics updates and health checks). Information on this will also be covered in another document. If a given loadbalancer is being serviced by multiple haproxy amphorae at the same time, configuration and control actions should be made on all these amphorae at approximately the same time. (Amphorae do not communicate directly with each other, except in an active-standby topology, and then this communication is limited to fail-over protocols.) .. contents:: Versioning ---------- All Octavia APIs (including internal APIs like this one) are versioned. For the purposes of this document, the initial version of this API shall be 1.0. Response codes -------------- Typical response codes are: * 200 OK - Operation was completed as requested. * 201 Created - Operation successfully resulted in the creation / processing of a file. * 202 Accepted - Command was accepted but is not completed. (Note that this is used for asynchronous processing.) * 400 Bad Request - API handler was unable to complete request. * 401 Unauthorized - Authentication of the client certificate failed. * 404 Not Found - The requested file was not found. * 500 Internal Server Error - Usually indicates a permissions problem * 503 Service Unavailable - Usually indicates a change to a listener was attempted during a transition of amphora topology. A note about storing state -------------------------- In the below API, it will become apparent that at times the amphora will need to be aware of the state of things (topology-wise, or simply in terms running processes on the amphora). When it comes to storing or gathering this data, we should generally prefer to try to resolve these concerns in the following order. Note also that not every kind of state data will use all of the steps in this list: 1. Get state information by querying running processes (ex. parsing haproxy status page or querying iptables counters, etc.) 2. Get state by consulting on-disk cache generated by querying running processes. (In the case where state information is relatively expensive to collect-- eg. package version listings.) 3. 
Get state by consulting stored configuration data as sent by the controller. (ex. amphora topology, haproxy configuration or TLS certificate data) 4. Get state by querying a controller API (not described here). In no case should the amphora assume it ever has direct access to the Octavia database. Also, sensitive data (like TLS certificates) should be stored in a secure way (ex. memory filesystem). API === Get amphora info ---------------- * **URL:** /info * **Method:** GET * **URL params:** none * **Data params:** none * **Success Response:** * Code: 200 * Content: JSON formatted listing of several basic amphora data. * **Error Response:** * none JSON Response attributes: * *hostname* - amphora hostname * *uuid* - amphora UUID * *haproxy_version* - Version of the haproxy installed * *api_version* - Version of haproxy amphora API in use **Notes:** The data in this request is used by the controller for determining the amphora and API version numbers. It's also worth noting that this is the only API command that doesn't have a version string prepended to it. **Examples:** * Success code 200: :: { 'hostname': 'octavia-haproxy-img-00328.local', 'uuid': '6e2bc8a0-2548-4fb7-a5f0-fb1ef4a696ce', 'haproxy_version': '1.5.11', 'api_version': '0.1', } Get amphora details ------------------- * **URL:** /1.0/details * **Method:** GET * **URL params:** none * **Data params:** none * **Success Response:** * Code: 200 * Content: JSON formatted listing of various amphora statistics. * **Error Response:** * none JSON Response attributes: * *hostname* - amphora hostname * *uuid* - amphora UUID * *haproxy_version* - Version of the haproxy installed * *api_version* - Version of haproxy amphora API/agent in use * *network_tx* - Current total outbound bandwidth in bytes/sec (30-second snapshot) * *network_rx* - Current total inbound bandwidth in bytes/sec (30-second snapshot) * *active* - Boolean (is amphora in an "active" role?) * *haproxy_count* - Number of running haproxy processes * *cpu* - list of percent CPU usage broken down into: * total * user * system * soft_irq * *memory* - memory usage in kilobytes broken down into: * total * free * available * buffers * cached * swap_used * shared * slab * committed_as * *disk* - disk usage in kilobytes for root filesystem, listed as: * used * available * *load* - System load (list) * *topology* - One of SINGLE, ACTIVE-STANDBY, ACTIVE-ACTIVE * *topology_status* - One of OK, TOPOLOGY-CHANGE * *listeners* - list of listener UUIDs being serviced by this amphora * *packages* - list of load-balancing related packages installed with versions (eg. OpenSSL, haproxy, nginx, etc.) **Notes:** The data in this request is meant to provide intelligence for an auto-scaling orchestration controller (heat) in order to determine whether additional (or fewer) virtual amphorae are necessary to handle load. As such, we may add additional parameters to the JSON listing above if they prove to be useful for making these decisions. The data in this request is also used by the controller for determining overall health of the amphora, currently-configured topology and role, etc. 
**Examples** * Success code 200: :: { 'hostname': 'octavia-haproxy-img-00328.local', 'uuid': '6e2bc8a0-2548-4fb7-a5f0-fb1ef4a696ce', 'haproxy_version': '1.5.11', 'api_version': '0.1', 'networks': { 'eth0': { 'network_tx': 3300138, 'network_rx': 982001, }} 'active': 'TRUE', 'haproxy_count': 3, 'cpu':{ 'total': 0.43, 'user': 0.30, 'system': 0.05, 'soft_irq': 0.08, }, 'memory':{ 'total': 4087402, 'free': 760656, 'available': 2655901, 'buffers': 90980, 'cached': 1830143, 'swap_used': 943, 'shared': 105792, 'slab': 158819, 'committed_as': 2643480, }, 'disk':{ 'used': 1234567, 'available': 5242880, }, 'load': [0.50, 0.45, 0.47], 'tolopogy': 'SINGLE', 'topology_status': 'OK', 'listeners':[ '02d0da8d-fc65-4bc4-bc46-95cadb2315d2', '98e706a7-d22c-422f-9632-499fd83e12c0', ], 'packages':[ {'haproxy': '1.5.1'}, {'bash': '4.3.23'}, {'lighttpd': '1.4.33-1'}, {'openssl': '1.0.1f'}, ], } Get interface ------------- * **URL:** /1.0/interface/*:ip* * **Method:** GET * **URL params:** * *:ip* = the ip address to find the interface name * **Data params:** none * **Success Response:** * Code: 200 * Content: OK * Content: JSON formatted interface * **Error Response:** * Code: 400 * Content: Bad IP address version * Code: 404 * Content: Error interface not found for IP address * **Response:** | OK | eth1 **Examples:** * Success code 200: :: GET URL: https://octavia-haproxy-img-00328.local/1.0/interface/10.0.0.1 JSON Response: { 'message': 'OK', 'interface': 'eth1' } * Error code 404: :: GET URL: https://octavia-haproxy-img-00328.local/1.0/interface/10.5.0.1 JSON Response: { 'message': 'Error interface not found for IP address', } * Error code 404: :: GET URL: https://octavia-haproxy-img-00328.local/1.0/interface/10.6.0.1.1 JSON Response: { 'message': 'Bad IP address version', } Get all listeners' statuses --------------------------- * **URL:** /1.0/listeners * **Method:** GET * **URL params:** none * **Data params:** none * **Success Response:** * Code: 200 * Content: JSON-formatted listing of each listener's status * **Error Response:** * none JSON Response attributes: Note that the command will return an array of *all* listeners' statuses. Each listener status contains the following attributes: * *status* - One of the operational status: ACTIVE, STOPPED, ERROR - future versions might support provisioning status: PENDING_CREATE, PENDING_UPDATE, PENDING_DELETE, DELETED * *uuid* - Listener UUID * *type* - One of: TCP, HTTP, TERMINATED_HTTPS **Notes:** Note that this returns a status if: the pid file exists, the stats socket exists, or an haproxy configuration is present (not just if there is a valid haproxy configuration). 
**Examples** * Success code 200: :: [{ 'status': 'ACTIVE', 'uuid': 'e2dfddc0-5b9e-11e4-8ed6-0800200c9a66', 'type': 'HTTP', }, { 'status': 'STOPPED', 'uuid': '19d45130-5b9f-11e4-8ed6-0800200c9a66', 'type': 'TERMINATED_HTTPS', }] Start or Stop a load balancer ----------------------------- * **URL:** /1.0/loadbalancer/*:object_id*/*:action* * **Method:** PUT * **URL params:** * *:object_id* = Object UUID * *:action* = One of: start, stop, reload * **Data params:** none * **Success Response:** * Code: 202 * Content: OK * *(Also contains preliminary results of attempt to start / stop / soft \ restart (reload) the haproxy daemon)* * **Error Response:** * Code: 400 * Content: Invalid request * Code: 404 * Content: Listener Not Found * Code: 500 * Content: Error starting / stopping / reload_config haproxy * *(Also contains error output from attempt to start / stop / soft \ restart (reload) haproxy)* * Code: 503 * Content: Topology transition in progress * **Response:** | OK | Configuration file is valid | haproxy daemon for 85e2111b-29c4-44be-94f3-e72045805801 started (pid 32428) **Examples:** * Success code 201: :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/start JSON Response: { 'message': 'OK', 'details': 'Configuration file is valid\nhaproxy daemon for 85e2111b-29c4-44be-94f3-e72045805801 started', } * Error code 400: :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/BAD_TEST_DATA JSON Response: { 'message': 'Invalid Request', 'details': 'Unknown action: BAD_TEST_DATA', } * Error code 404: :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/04bff5c3-5862-4a13-b9e3-9b440d0ed50a/stop JSON Response: { 'message': 'Listener Not Found', 'details': 'No loadbalancer with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', } * Error code 500: :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/stop Response: { 'message': 'Error stopping haproxy', 'details': 'haproxy process with PID 3352 not found', } * Error code 503: :: Response: { 'message': 'Topology transition in progress', } Delete a listener ----------------- * **URL:** /1.0/listeners/*:listener* * **Method:** DELETE * **URL params:** * *:listener* = Listener UUID * **Data params:** none * **Success Response:** * Code: 200 * Content: OK * **Error Response:** * Code: 404 * Content: Not Found * Code: 503 * Content: Topology transition in progress * **Response:** | OK * **Implied actions:** * Stop listener * Delete IPs, iptables accounting rules, etc. from this amphora if they're no longer in use. * Clean up listener configuration directory. * Delete listener's SSL certificates * Clean up logs (ship final logs to logging destination if configured) * Clean up stats socket. 
**Examples** * Success code 200: :: DELETE URL: https://octavia-haproxy-img-00328.local/1.0/listeners/04bff5c3-5862-4a13-b9e3-9b440d0ed50a JSON Response: { 'message': 'OK' } * Error code 404: :: DELETE URL: https://octavia-haproxy-img-00328.local/1.0/listeners/04bff5c3-5862-4a13-b9e3-9b440d0ed50a JSON Response: { 'message': 'Listener Not Found', 'details': 'No listener with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', } * Error code 503: :: Response: { 'message': 'Topology transition in progress', } Upload SSL certificate PEM file ------------------------------- * **URL:** /1.0/loadbalancer/*:loadbalancer_id*/certificates/*:filename.pem* * **Method:** PUT * **URL params:** * *:loadbalancer_id* = Load balancer UUID * *:filename* = PEM filename (see notes below for naming convention) * **Data params:** Certificate data. (PEM file should be a concatenation of unencrypted RSA key, certificate and chain, in that order) * **Success Response:** * Code: 201 * Content: OK * **Error Response:** * Code: 400 * Content: No certificate found * Code: 400 * Content: No RSA key found * Code: 400 * Content: Certificate and key do not match * Code: 404 * Content: Not Found * Code: 503 * Content: Topology transition in progress * **Response:** | OK **Notes:** * filename.pem should match the primary CN for which the certificate is valid. All-caps WILDCARD should be used to replace an asterisk in a wildcard certificate (eg. a CN of '\*.example.com' should have a filename of 'WILDCARD.example.com.pem'). Filenames must also have the .pem extension. * In order for the new certificate to become effective the haproxy needs to be explicitly restarted **Examples:** * Success code 201: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem (Put data should contain the certificate information, concatenated as described above) JSON Response: { 'message': 'OK' } * Error code 400: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem (If PUT data does not contain a certificate) JSON Response: { 'message': 'No certificate found' } * Error code 400: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem (If PUT data does not contain an RSA key) JSON Response: { 'message': 'No RSA key found' } * Error code 400: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem (If the first certificate and the RSA key do not have the same modulus.) 
JSON Response: { 'message': 'Certificate and key do not match' } * Error code 404: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem JSON Response: { 'message': 'Listener Not Found', 'details': 'No loadbalancer with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', } * Error code 503: :: Response: { 'message': 'Topology transition in progress', } Get SSL certificate md5sum -------------------------- * **URL:** /1.0/loadbalancer/*:loadbalancer_id*/certificates/*:filename.pem* * **Method:** GET * **URL params:** * *:loadbalancer_id* = Load balancer UUID * *:filename* = PEM filename (see notes below for naming convention) * **Data params:** none * **Success Response:** * Code: 200 * Content: PEM file md5sum * **Error Response:** * Code: 404 * Content: Not Found * **Response:** | * **Implied actions:** none **Notes:** The md5sum is the sum from the raw certificate data as stored on the amphora (which will usually include the RSA key, certificate and chain concatenated together). Note that we don't return any actual raw certificate data as the controller should already know this information, and unnecessarily disclosing it over the wire from the amphora is a security risk. **Examples:** * Success code 200: :: JSON response: { 'md5sum': 'd8f6629d5e3c6852fa764fb3f04f2ffd', } * Error code 404: :: JSON Response: { 'message': 'Listener Not Found', 'details': 'No loadbalancer with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', } * Error code 404: :: JSON Response: { 'message': 'Certificate Not Found', 'details': 'No certificate with file name: www.example.com.pem', } Delete SSL certificate PEM file ------------------------------- * **URL:** /1.0/loadbalancer/*:loadbalancer_id*/certificates/*:filename.pem* * **Method:** DELETE * **URL params:** * *:loadbalancer_id* = Load balancer UUID * *:filename* = PEM filename (see notes below for naming convention) * **Data params:** none * **Success Response:** * Code: 200 * Content: OK * **Error Response:** * Code: 404 * Content: Not found * Code: 503 * Content: Topology transition in progress * **Implied actions:** * Clean up listener configuration directory if it's now empty. **Examples:** * Success code 200: :: DELETE URL: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem JSON Response: { 'message': 'OK' } * Error code 404: :: DELETE URL: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem JSON Response: { 'message': 'Certificate Not Found', 'details': 'No certificate with file name: www.example.com.pem', } * Error code 503: :: Response: { 'message': 'Topology transition in progress', } Upload load balancer haproxy configuration ------------------------------------------ * **URL:** /1.0/loadbalancer/*:amphora_id*/*:loadbalancer_id*/haproxy * **Method:** PUT * **URL params:** * *:loadbalancer_id* = Load Balancer UUID * *:amphora_id* = Amphora UUID * **Data params:** haproxy configuration file for the listener * **Success Response:** * Code: 201 * Content: OK * **Error Response:** * Code: 400 * Content: Invalid configuration * *(Also includes error output from configuration check command)* * Code: 503 * Content: Topology transition in progress * **Response:** | OK | Configuration file is valid * **Implied actions:** * Do a syntax check on haproxy configuration file prior to an attempt to run it. 
* Add resources needed for stats, logs, and connectivity **Notes:** The uploaded configuration file should be a complete and syntactically-correct haproxy config. The amphora does not have intelligence to generate these itself and has only rudimentary ability to parse certain features out of the configuration file (like bind addresses and ports for purposes of setting up stats, and specially formatted comments meant to indicate pools and members that will be parsed out of the haproxy daemon status interface for tracking health and stats). **Examples:** * Success code 201: :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/d459b1c8-54b0-4030-9bec-4f449e73b1ef/85e2111b-29c4-44be-94f3-e72045805801/haproxy (Upload PUT data should be a raw haproxy.conf file.) JSON Response: { 'message': 'OK' } * Error code 400: :: JSON Response: { 'message': 'Invalid request', 'details': '[ALERT] 300/013045 (28236) : parsing [haproxy.cfg:4]: unknown keyword 'BAD_LINE' out of section.\n[ALERT] 300/013045 (28236) : Error(s) found in configuration file : haproxy.cfg\n[ALERT] 300/013045 (28236) : Fatal errors found in configuration.', } * Error code 503: :: Response: { 'message': 'Topology transition in progress', } Get loadbalancer haproxy configuration -------------------------------------- * **URL:** /1.0/loadbalancer/*:loadbalancer_id*/haproxy * **Method:** GET * **URL params:** * *:loadbalancer_id* = Load balancer UUID * **Data params:** none * **Success Response:** * Code: 200 * Content: haproxy configuration file for the listener * **Error Response:** * Code: 404 * Content: Not found * **Response:** | # Config file for 85e2111b-29c4-44be-94f3-e72045805801 | (cut for brevity) * **Implied actions:** none **Examples:** * Success code 200: :: GET URL: https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/haproxy Response is the raw haproxy.cfg: # Config file for 85e2111b-29c4-44be-94f3-e72045805801 (cut for brevity) * Error code 404: :: JSON Response: { 'message': 'Loadbalancer Not Found', 'details': 'No loadbalancer with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', } Plug VIP -------- * **URL:** /1.0/plug/vip/*:ip* * **Method:** Post * **URL params:** * *:ip* = the vip's ip address * **Data params:** * *subnet_cidr*: The vip subnet in cidr notation * *gateway*: The vip subnet gateway address * *mac_address*: The mac address of the interface to plug * **Success Response:** * Code: 202 * Content: OK * **Error Response:** * Code: 400 * Content: Invalid IP * Content: Invalid subnet information * Code: 404 * Content: No suitable network interface found * Code: 500 * Content: Error plugging VIP * (Also contains error output from the ip up command) * Code: 503 * Content: Topology transition in progress * **Response:** | OK | VIP ip plugged on interface * **Implied actions:** * Look for an interface marked as down (recently added port) * Assign VIP * Bring that interface up **Examples:** * Success code 202: :: POST URL: https://octavia-haproxy-img-00328.local/1.0/plug/vip/203.0.113.2 JSON POST parameters: { 'subnet_cidr': '203.0.113.0/24', 'gateway': '203.0.113.1', 'mac_address': '78:31:c1:ce:0b:3c' } JSON Response: { 'message': 'OK', 'details': 'VIP 203.0.113.2 plugged on interface eth1' } * Error code 400: :: JSON Response: { 'message': 'Invalid VIP', } * Error code 404: :: JSON Response: { 'message': 'No suitable network interface found', } Plug Network ------------ * **URL:** /1.0/plug/network/ * **Method:** POST * **URL params:** none * **Data params:** * 
*mac_address*: The mac address of the interface to plug * **Success Response:** * Code: 202 * Content: OK * **Error Response:** * Code: 404 * Content: No suitable network interface found * Code: 500 * Content: Error plugging Port * (Also contains error output from the ip up command) * Code: 503 * Content: Topology transition in progress * **Response:** | OK | Plugged interface **Examples:** * Success code 202: :: POST URL: https://octavia-haproxy-img-00328.local/1.0/plug/network/ JSON POST parameters: { 'mac_address': '78:31:c1:ce:0b:3c' } JSON Response: { 'message': 'OK', 'details': 'Plugged interface eth1' } * Error code 404: :: JSON Response: { 'message': 'No suitable network interface found', } Upload SSL server certificate PEM file for Controller Communication ------------------------------------------------------------------- * **URL:** /1.0/certificate * **Method:** PUT * **Data params:** Certificate data. (PEM file should be a concatenation of unencrypted RSA key, certificate and chain, in that order) * **Success Response:** * Code: 202 * Content: OK * **Error Response:** * Code: 400 * Content: No certificate found * Code: 400 * Content: No RSA key found * Code: 400 * Content: Certificate and key do not match * **Response:** | OK **Notes:** Since certificates might be valid for a time smaller than the amphora is in existence this add a way to rotate them. Once the certificate is uploaded the agent is being recycled so depending on the implementation the service might not be available for some time. **Examples:** * Success code 202: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/certificate (Put data should contain the certificate information, concatenated as described above) JSON Response: { 'message': 'OK' } * Error code 400: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/certificates (If PUT data does not contain a certificate) JSON Response: { 'message': 'No certificate found' } * Error code 400: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/certificate (If PUT data does not contain an RSA key) JSON Response: { 'message': 'No RSA key found' } * Error code 400: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/certificate (If the first certificate and the RSA key do not have the same modulus.) JSON Response: { 'message': 'Certificate and key do not match' } Upload keepalived configuration ------------------------------- * **URL:** /1.0/vrrp/upload * **Method:** PUT * **URL params:** none * **Data params:** none * **Success Response:** * Code: 200 * Content: OK * **Error Response:** * Code: 500 * Content: Failed to upload keepalived configuration. 
* **Response:** OK **Examples:** * Success code 200: :: PUT URI: https://octavia-haproxy-img-00328.local/1.0/vrrp/upload JSON Response: { 'message': 'OK' } Start, Stop, or Reload keepalived --------------------------------- * **URL:** /1.0/vrrp/*:action* * **Method:** PUT * **URL params:** * *:action* = One of: start, stop, reload * **Data params:** none * **Success Response:** * Code: 202 * Content: OK * **Error Response:** * Code: 400 * Content: Invalid Request * Code: 500 * Content: Failed to start / stop / reload keepalived service: * *(Also contains error output from attempt to start / stop / \ reload keepalived)* * **Response:** | OK | keepalived started **Examples:** * Success code 202: :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/vrrp/start JSON Response: { 'message': 'OK', 'details': 'keepalived started', } * Error code: 400 :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/vrrp/BAD_TEST_DATA JSON Response: { 'message': 'Invalid Request', 'details': 'Unknown action: BAD_TEST_DATA', } * Error code: 500 :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/vrrp/stop JSON Response: { 'message': 'Failed to stop keepalived service: keeepalived process with PID 3352 not found', 'details': 'keeepalived process with PID 3352 not found', } Update the amphora agent configuration -------------------------------------- * **URL:** /1.0/config * **Method:** PUT * **Data params:** A amphora-agent configuration file * **Success Response:** * Code: 202 * Content: OK * **Error Response:** * Code: 500 * message: Unable to update amphora-agent configuration. * details: *(The exception details)* * **Response:** | OK * **Implied actions:** * The running amphora-agent configuration file is mutated. **Notes:** Only options that are marked mutable in the oslo configuration will be updated. **Examples:** * Success code 202: :: PUT URL: https://octavia-haproxy-img-00328.local/1.0/config (Upload PUT data should be a raw amphora-agent.conf file.) JSON Response: { 'message': 'OK' } * Error code 500: :: JSON Response: { 'message': 'Unable to update amphora-agent configuration.', 'details': *(The exception output)*, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/contributing.rst0000664000175000017500000001743600000000000023065 0ustar00zuulzuul00000000000000So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below will cover the more project specific information you need to get started with Octavia. Communication ~~~~~~~~~~~~~ IRC People working on the Octavia project may be found in the ``#openstack-lbaas`` channel on Freenode during working hours in their timezone. The channel is logged, so if you ask a question when no one is around, you can check the log to see if it's been answered: http://eavesdrop.openstack.org/irclogs/%23openstack-lbaas/ Weekly Meeting The Octavia team meets weekly on freenode IRC. 
Please see the OpenStack meetings page for the current meeting details and ICS file: http://eavesdrop.openstack.org/#Octavia_Meeting Meetings are logged: http://eavesdrop.openstack.org/meetings/octavia/ Mailing List We use the openstack-discuss@lists.openstack.org mailing list for asynchronous discussions or to communicate with other OpenStack teams. Use the prefix ``[octavia]`` in your subject line (it's a high-volume list, so most people use email filters). More information about the mailing list, including how to subscribe and read the archives, can be found at: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss Virtual Meet-ups From time to time, the Octavia project will have video meetings to address topics not easily covered by the above methods. These are announced well in advance at the weekly meeting and on the mailing list. Physical Meet-ups The Octavia project usually has a presence at the OpenDev/OpenStack Project Team Gathering that takes place at the beginning of each development cycle. Planning happens on an etherpad whose URL is announced at the weekly meetings and on the mailing list. Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~ The octavia-core team is an active group of contributors who are responsible for directing and maintaining the Octavia project. As a new contributor, your interaction with this group will be mostly through code reviews, because only members of octavia-core can approve a code change to be merged into the code repository. .. note:: Although your contribution will require reviews by members of octavia-core, these aren't the only people whose reviews matter. Anyone with a gerrit account can post reviews, so you can ask other developers you know to review your code ... and you can review theirs. (A good way to learn your way around the codebase is to review other people's patches.) If you're thinking, "I'm new at this, how can I possibly provide a helpful review?", take a look at `How to Review Changes the OpenStack Way `_. There are also some Octavia project specific reviewing guidelines in the :ref:`octavia-style-commandments` section of the Octavia Contributor Guide. You can learn more about the role of core reviewers in the OpenStack governance documentation: https://docs.openstack.org/contributors/common/governance.html#core-reviewer The membership list of octavia-core is maintained in gerrit: https://review.opendev.org/#/admin/groups/370,members You can also find the members of the octavia-core team at the Octavia weekly meetings. New Feature Planning ~~~~~~~~~~~~~~~~~~~~ The Octavia team use both Request For Enhancement (RFE) and Specifications (specs) processes for new features. RFE When a feature being proposed is easy to understand and will have limited scope, the requester will create an RFE in Storyboard. This is a story that includes the tag **[RFE]** in the subject prefix and has the "**rfe**" tag added to the story. Once an RFE story is created, a core reviewer or the Project Team Lead (PTL) will approved the RFE by adding the "**rfe-approved**" tag. This signals that the core team understands the feature being proposed and enough detail has been provided to make sure the core team understands the goal of the change. specs If the new feature is a major change or additon to Octavia that will need a detailed design to be successful, the Octavia team requires a specification (spec) proposal be submitted as a patch. 
Octavia specification documents are stored in the /octavia/specs directory in the main Octavia git repository: https://opendev.org/openstack/octavia/src/branch/master/specs This directory includes a `template.rst `_ file that includes instructions for creating a new Octavia specification. These specification documents are then rendered and included in the `Project Specifications `_ section of the Octavia Contributor Guide. Feel free to ask in ``#openstack-lbaas`` or at the weekly meeting if you have an idea you want to develop and you're not sure whether it requires an RFE or a specification. The Octavia project observes the OpenStack-wide deadlines, for example, final release of non-client libraries (octavia-lib), final release for client libraries (python-octaviaclient), feature freeze, etc. These are noted and explained on the release schedule for the current development cycle available at: https://releases.openstack.org/ Task Tracking ~~~~~~~~~~~~~ We track our tasks in `Storyboard `_. If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag. When you start working on a bug, make sure you assign it to yourself. Otherwise someone else may also start working on it, and we don't want to duplicate efforts. Also, if you find a bug in the code and want to post a fix, make sure you file a bug (and assign it to yourself!) just in case someone else comes across the problem in the meantime. Reporting a Bug ~~~~~~~~~~~~~~~ You found an issue and want to make sure we are aware of it? You can do so on `Storyboard `_. Please remember to include the following information: * The version of Octavia and OpenStack you observed the issue in. * Steps to reproduce. * Expected behavior. * Observed behavior. * The log snippet that contains any error information. Please include the lines directly before the error message(s) as they provide context for the error. Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ The Octavia project policy is that a patch must have two +2s reviews from the core reviewers before it can be merged. Patches for Octavia projects must include unit and functional tests that cover the new code. Octavia projects include the "openstack-tox-cover" testing job to help identify test coverage gaps in a patch. This can also be run locally by running "tox -e cover". In addition, some changes may require a release note. Any patch that changes functionality, adds functionality, or addresses a significant bug should have a release note. Release notes can be created using the "reno" tool by running "reno new ". Keep in mind that the best way to make sure your patches are reviewed in a timely manner is to review other people's patches. We're engaged in a cooperative enterprise here. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ All common PTL duties are enumerated in the `PTL guide `_. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3222165 octavia-6.2.2/doc/source/contributor/design/0000775000175000017500000000000000000000000021062 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3582165 octavia-6.2.2/doc/source/contributor/design/version0.5/0000775000175000017500000000000000000000000022772 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/design/version0.5/component-design.rst0000664000175000017500000005734000000000000027006 0ustar00zuulzuul00000000000000============================= Octavia v0.5 Component Design ============================= Please refer to the following diagram of the Octavia v0.5 components: .. graphviz:: v0.5-component-design.dot This milestone release of Octavia concentrates on making the service delivery scalable (though individual listeners are not horizontally scalable at this stage), getting API and other interfaces between major components correct, without worrying about making the command and control layer scalable. Note that this design is not yet "operator grade" but is a good first step to achieving operator grade (which will occur with version 1 of Octavia). ================ LBaaS Components ================ The entities in this section describe components that are part of the Neutron LBaaS project, with which Octavia interfaces to deliver load balancing services. USER API HANDLER ---------------- This is the front-end that users (and user GUIs or what have you) talk to manipulate load balancing services. **Notes:** * All implementation details are hidden from the user in this interface * Performs a few simple sanity checks on user-supplied data, but otherwise looks to a driver provide more detail around whether what the user is asking for is possible on the driver's implementation. * Any functionality that the user asks for that their back-end flavor / driver doesn't support will be met with an error when the user attempts to configure services this way. (There may be multiple kinds of errors: "incomplete configuration" would be non-fatal and allow DB objects to be created / altered. "incompatible configuration" would be fatal and disallow DB objects from being created / associations made.) Examples of this include: UDP protocol for a listener on a driver/flavor that uses only haproxy as its back-end. * Drivers should also be able to return 'out of resources' or 'some other error occurred' errors (hopefully with helpful error messages). * This interface is stateless, though drivers may keep state information in a database. In any case, this interface should be highly scalable. * Talks some "intermediate driver interface" with the driver. This takes the form of python objects passed directly within the python code to the driver. ========================= LBaaS / Octavia Crossover ========================= The entities in this section are "glue" components which allow Octavia to interface with other services in the OpenStack environment. The idea here is that we want Octavia to be as loosely-coupled as possible with those services with which it must interact in order to keep these interfaces as clean as possible. Initially, all the components in this section will be entirely under the purview of the Octavia project. 
Over time some of these components might be eliminated entirely, or reduced in scope as these third-party services evolve and increase in cleanly-consumable functionality. DRIVER ------ This is the part of the load balancing service that actually interfaces between the (sanitized) user and operator configuration and the back-end load balancing appliances or other "service providing entity." **Notes:** * Configuration of the driver is handled via service profile definitions in association with the Neutron flavor framework. Specifically, a given flavor has service profiles associated with it, and service profiles which specify the Octavia driver will include meta-data (in the form of JSON configuration) which is used by the driver to define implementation specifics (for example, HA configuration and other details). * Driver will be loaded by the daemon that does the user API and operator API. It is not, in and of itself, its own daemon, though a given vendor's back-end may contain its own daemons or other services that the driver interfaces with. * It is thought that the driver front-end should be stateless in order to make it horizontally scalable and to preserves the statelessness of the user and operator API handlers. Note that the driver may interface with back-end components which need not be stateless. * It is also possible for multiple instances of the driver will talk to the same amphora at the same time. Emphasis on the idempotency of the update algorithms used should help minimize the issues this can potentially cause. NETWORK DRIVER -------------- In order to keep Octavia's design more clean as a pure consumer of network services, yet still be able to develop Octavia at a time when it is impossible to provide the kind of load balancing services we need to provide without "going around" the existing Neutron API, we have decided to write a "network driver" component which does those dirty back-end configuration commands via an API we write, until these can become a standard part of Neutron. This component should be as loosely coupled with Octavia as Octavia will be with Neutron and present a standard interface to Octavia for accomplishing network configuration tasks (some of which will simply be a direct correlation with existing Neutron API commands). **Notes:** * This is a daemon or "unofficial extension", presumably living on a Neutron network node which should have "back door" access to all things Neutron and exposes an API that should only be used by Octavia. * Exactly what API will be provided by this driver will be defined as we continue to build out the reference implementation for Octavia. * Obviously, as we discover missing functionality in the Neutron API, we should work with the Neutron core devs to get these added to the API in a timely fashion: We want the Network driver to be as lightweight as possible. ================== Octavia Components ================== Everything from here down are entities that have to do with the Octavia driver and load balancing system. Other vendor drivers are unlikely to have the same components and internal structure. It is planned that Octavia will become the new reference implementation for LBaaS, though it of course doesn't need to be the only one. (In fact, a given operator should be able to use multiple vendors with potentially multiple drivers and multiple driver configurations through the Neutron Flavor framework.) 
OPERATOR API HANDLER -------------------- This is exactly like the USER API HANDLER in function, except that implementation details are exposed to the operator, and certain admin-level features are exposed (ex. listing a given tenant's loadbalancers, & etc.) It's also anticipated that the Operator API needs will vary enough from implementation to implementation that no single Operator API will be sufficient for the needs of all vendor implementations. (And operators will definitely have implementation-specific concerns.) Also, we anticipate that most vendors will already have an operator API or other interface which is controlled and configured outsite the purview of OpenStack in general. As such it makes sense for Octavia to have its own operator API / interface. **Notes:** * This interface is stateless. State should be managed by the controller, and stored in a highly available database. CONTROLLER ---------- This is the component providing all the command and control for the amphorae. On the front end, it takes its commands and controls from the LBaaS driver. It should be noted that in later releases of Octavia, the controller functions will be split across several components. At this stage we are less concerned with how this internal communication will happen, and are most concerned with ensuring communication with amphorae, the amphora LB driver, and the Network driver are all made as perfect as possible. Among the controller's responsibilities are: * Sending configuration and certificate information to an amphora LB driver, which in the reference implementation will be generating configuration files for haproxy and PEM-formatted user certificates and sending these to individual amphorae. Configuration files will be generated from jinja templates kept in an template directory specific to the haproxy driver. * Processing the configuration updates that need to be applied to individual amphorae, as sent by the amphora LB driver. * Interfacing with network driver to plumb additional interfaces on the amphorae as necessary. * Monitoring the health of all amphorae (via a driver interface). * Receiving and routing certain kinds of notifications originating on the amphorae (ex. "member down") * This is a stateful service, and should keep its state in a central, highly available database of some sort. * Respecting colocation / apolocation requirements of loadbalancers as set forth by users. * Receiving notifications, statistics data and other short, regular messages from amphorae and routing them to the appropriate entity. * Responding to requests from amphorae for configuration data. * Responding to requests from the user API or operator API handler driver for data about specific loadbalancers or sub-objects, their status, and statistics. * Amphora lifecycle management, including interfacing with Nova and Neutron to spin up new amphorae as necessary and handle initial configuration and network plumbing for their LB network interface, and cleaning this up when an amphora is destroyed. * Maintaining a pool of spare amphorae (ie. spawning new ones as necessary and deleting ones from the pool when we have too much inventory here.) * Gracefully spinning down "dirty old amphorae" * Loading and calling configured amphora drivers. 
**Notes:** * Almost all the intelligence around putting together and validating loadbalancer configurations will live here-- the Amphora API is meant to be as simple as possible so that minor feature improvements do not necessarily entail pushing out new amphorae across an entire installation. * The size of the spare amphora pool should be determined by the flavor being offered. * The controller also handles spinning up amphorae in the case of a true active/standby topology (ie. where the spares pool is effectively zero.) It should have enough intelligence to communicate to Nova that these amphorae should not be on the same physical host in this topology. * It also handles spinning up new amphorae when one fails in the above topology. * Since spinning up a new amphora is a task that can take a long time, the controller should spawn a job or child process which handles this highly asynchronous request. AMPHORA LOAD BALANCER (LB) DRIVER --------------------------------- This is the abstraction layer that the controller talks to for communicating with the amphorae. Since we want to keep Octavia flexible enough so that certain components (like the amphora) can be replaced by third party products if the operator so desires, it's important to keep many of the implementation-specific details contained within driver layers. An amphora LB driver also gives the operator the ability to have different open-source amphorae with potentially different capabilities (accessed via different flavors) which can be handy for, for example, field-testing a new amphora image. The reference implementation for the amphora LB driver will be for the amphora described below. Responsibilities of the amphora LB driver include: * Generating configuration files for haproxy and PEM-formatted user certificates and sending these to individual amphorae. Configuration files will be generated from jinja templates kept in an template directory specific to the haproxy driver. * Handling all communication to and from amphorae. LB NETWORK ---------- This is the subnet that controllers will use to communicate with amphorae. This means that controllers must have connectivity (either layer 2 or routed) to this subnet in order to function, and vice versa. Since amphorae will be communicating on it, this means the network is not part of the "undercloud." **Notes:** * As certain sensitive data (TLS private keys, for example) will be transmitted over this communication infrastructure, all messages carrying a sensitive payload should be done via encrypted and authenticated means. Further, we recommend that messages to and from amphorae be signed regardless of the sensitivity of their content. AMPHORAE ---------- This is a Nova VM which actually provides the load balancing services as configured by the user. Responsibilities of these entities include: * Actually accomplishing the load balancing services for user-configured loadbalancers using haproxy. * Sending regular heartbeats (which should include some status information). * Responding to specific requests from the controller for very basic loadbalancer or sub-object status data, including statistics. * Doing common high workload, low intelligence tasks that we don't want to burden the controller with. (ex. Shipping listener logs to a swift data store, if configured.) * Sending "edge" notifications (ie. status changes) to the controller when members go up and down, when listeners go up and down, etc. 
**Notes:** * Each amphora will generally need its own dedicated LB network IP address, both so that we don't accidentally bind to any IP:port the user wants to use for loadbalancing services, and so that an amphora that is not yet in use by any loadbalancer service can still communicate on the network and receive commands from its controller. Whether this IP address exists on the same subnet as the loadbalancer services it hosts is immaterial, so long as front-end and back-end interfaces can be plumbed after an amphora is launched. * Since amphorae speak to controllers in a "trusted" way, it's important to ensure that users do not have command-line access to the amphorae. In other words, the amphorae should be a black box from the users' perspective. * Amphorae will be powered using haproxy 1.5 initially. We may decide to use other software (especially for TLS termination) later on. * The "glue scripts" which communicate with the controller should be as lightweight as possible: Intelligence about how to put together an haproxy config, for example, should not live on the amphora. Rather, the amphora should perform simple syntax checks, start / restart haproxy if the checks pass, and report success/failure of the haproxy restart. * With few exceptions, most of the API commands the amphora will ever do should be safely handled synchronously (ie. nothing should take longer than a second or two to complete). * Connection logs, and other things anticipated to generate a potential large amount of data should be communicated by the amphora directly to which ever service is going to consume that data. (for example, if logs are being shunted off to swift on a nightly basis, the amphora should handle this directly and not go through the controller.) INTERNAL HEALTH MONITORS ------------------------ There are actually a few of these, all of which need to be driven by some daemon(s) which periodically check that heartbeats from monitored entities are both current and showing "good" status, if applicable. Specifically: * Controllers need to be able to monitor the availability and overall health of amphorae they control. For active amphorae, this check should happen pretty quickly: About once every 5 seconds. For spare amphorae, the check can happen much more infrequently (say, once per minute). The idea here is that internal health monitors will monitor a periodic heartbeat coming from the amphorae, and take appropriate action (assuming these are down) if they fail to check in with a heartbeat frequently enough. This means that internal health monitors need to take the form of a daemon which is constantly checking for and processing heartbeat requests (and updating controller or amphorae statuses, and triggering other events as appropriate). ====================================================== Some notes on Controller <-> Amphorae communications ====================================================== In order to keep things as scalable as possible, the thought was that short, periodic and arguably less vital messages being emitted by the amphora and associated controller would be done via HMAC-signed UDP, and more vital, more sensitive, and potentially longer transactional messages would be handled via a RESTful API on the controller, accessed via bi-directionally authenticated HTTPS. 
Specifically, we should expect the following to happen over UDP: * heartbeats from the amphora VM to the controller * stats data from the amphora to the controller * "edge" alert notifications (change in status) from the amphora to the controller * Notification of pending tasks in queue from controller to amphora And the following would happen over TCP: * haproxy / tls certificate configuration changes ================================================= Supported Amphora Virtual Appliance Topologies ================================================= Initially, I propose we support two topologies with version 0.5 of Octavia: Option 1: "Single active node + spares pool" -------------------------------------------- * This is similar to what HP is doing right now with Libra: Each amphora is stand-alone with a frequent health-check monitor in place and upon failure, an already-spun-up amphora is moved from the spares pool and configured to take the old one's place. This allows for acceptable recovery times on amphora failure while still remaining efficient, as far as VM resource utilization is concerned. Option 2: "True Active / Standby" --------------------------------- * This is similar to what Blue Box is doing right now where amphorae are deployed in pairs and use corosync / pacemaker to monitor each other's health and automatically take over (usually in less than 5 seconds) if the "active" node fails. This provides for the fastest possible recovery time on hardware failure, but is much less efficient, as far as VM resource utilization is concerned. * In this topology a floating IP address (different from a Neutron floating IP!) is used to determine which amphora is the "active" one at any given time. * In this topology, both amphorae need to be colocated on the same subnet. As such a "spares pool" doesn't make sense for this type of layout, unless all spares are on the same management network with the active nodes. We considered also supporting "Single node" topology, but this turns out to be the same thing as option 1 above with a spares pool size of zero. ============================ Supported Network Topologies ============================ This is actually where things get tricky, as far as amphora plumbing is concerned. And it only grows trickier when we consider that front-end connectivity (ie. to the 'loadbalancer' vip_address) and back-end connectivity (ie. to members of a loadbalancing pool) can be handled in different ways. Having said this, we can break things down into LB network, front-end and back-end topology to discuss the various possible permutations here. LB Network ---------- Each amphora needs to have a connection to a LB network. And each controller needs to have access to this management network (this could be layer-2 or routed connectivity). Command and control will happen via the amphorae's LB network IP. Front-end topologies -------------------- There are generally two ways to handle the amphorae's connection to the front-end IP address (this is the vip_address of the loadbalancer object): **Option 1: Layer-2 connectivity** The amphora can have layer-2 connectivity to the neutron network which is host to the subnet on which the loadbalancer vip_address resides. In this scenario, the amphora would need to send ARP responses to requests for the vip_address, and therefore amphorae need to have interfaces plumbed on said vip_address subnets which participate in ARP. 
Note that this is somewhat problematic for active / standby virtual appliance topologies because the vip_address for a given load balancer effectively becomes a highly-available IP address (a true floating VIP), which means on service failover from active to standby, the active amphora needs to relinquish all the vip_addresses it has, and the standby needs to take them over *and* start up haproxy services. This is OK if a given amphora only has a few load balancers, but can lead to several minutes' down-time during a graceful failover if there are a dozen or more load balancers on the active/standby amphora pair. It's also more risky: The standby node might not be able to start up all the haproxy services during such a failover. What's more, most types of VRRP-like services which handle floating IPs require amphorae to have an additional IP address on the subnet housing the floating vip_address in order for the standby amphora to monitor the active amphora. Also note that in this topology, amphorae need an additional virtual network interface plumbed when new front-end loadbalancer vip_addresses are assigned to them which exist on subnets to which they don't already have access. **Option 2: Routed (layer-3) connectivity** In this layout, static routes are injected into the routing infrastructure (Neutron) which essentially allow traffic destined for any given loadbalancer vip_address to be routed to an IP address which lives on the amphora. (I would recommend this be something other than the LB network IP.) In this topology, it's actually important that the loadbalancer vip_address does *not* exist in any subnet with potential front-end clients because in order for traffic to reach the loadbalancer, it must pass through the routing infrastructure (and in this case, front-end clients would attempt layer-2 connectivity to the vip_address). This topology also works much better for active/standby configurations, because both the active and standby amphorae can bind to the vip_addresses of all their assigned loadbalancer objects on a dummy, non-ARPing interface, both can be running all haproxy services at the same time, and keep the standby server processes from interfering with active loadbalancer traffic through the use of fencing scripts on the amphorae. Static routing is accomplished to a highly available floating "routing IP" (using some VRRP-like service for just this IP) which becomes the trigger for the fencing scripts on the amphora. In this scenario, fail-overs are both much more reliable, and can be accomplished in usually < 5 seconds. Further, in this topology, amphorae do not need any additional virtual interfaces plumbed when new front-end loadbalancer vip_addresses are assigned to them. Back-end topologies ------------------- There are also two ways that amphorae can potentially talk to back-end member IP addresses. Unlike the front-end topologies (where option 1 and option 2 are basically mutually exclusive, if not practically exclusive) both of these types of connectivity can be used on a single amphora, and indeed, within a single loadbalancer configuration. **Option 1: Layer-2 connectivity** This is layer-2 connectivity to back-end members, and is implied when a member object has a subnet_id assigned to it. In this case, the existence of the subnet_id implies amphorae need to have layer-2 connectivity to that subnet, which means they need to have a virtual interface plumbed to it, as well as an IP address on the subnet. 
This type of connectivity is useful for "secure" back-end subnets that exist behind a NATing firewall where PAT is not in use on the firewall. (In this way it effectively bypasses the firewall.) We anticipate this will be the most common form of back-end connectivity in use by most OpenStack users. **Option 2: Routed (layer-3) connectivity** This is routed connectivity to back-end members. This is implied when a member object does not have a subnet_id specified. In this topology, it is assumed that member ip_addresses are reachable through standard neutron routing, and therefore connections to them can be initiated from the amphora's default gateway. No new virtual interfaces need to be plumbed for this type of connectivity to members. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/design/version0.5/v0.5-component-design.dot0000664000175000017500000001664600000000000027456 0ustar00zuulzuul00000000000000/* * Copyright 2014 OpenStack Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ /* * Requires graphviz version 2.37 or later to render correctly */ digraph G { labelloc="t"; label="Octavia component design (v0.5)"; rankdir=LR; splines=false; subgraph cluster_Neutron { label="Neutron"; color="green"; NetworkingServices [shape=rectangle label="Networking Services"]; subgraph cluster_LBaaS { label="LBaaS"; color="yellow"; UserAPIHandler [shape=none margin=0 label= <

User API Handler
Driver
>]; }; }; subgraph cluster_Octavia { label="Octavia"; color="blue"; NetworkingDriver [shape=rectangle label="Networking Driver"]; OperatorAPIHandler [shape=none margin=0 label= <

Operator API Handler
>]; subgraph cluster_Controller { label=""; color=white; Controller [shape=none margin=0 label= <
<table border="0" cellborder="1" cellspacing="0" cellpadding="4">
<tr><td>Controller</td></tr>
<tr><td>Health Monitor</td></tr>
<tr><td>Amphora LB Driver</td></tr>
</table>
>]; Database [shape=cylinder]; }; subgraph cluster_Amphoras2 { label=""; color="white"; id="AmphoraMVs2"; Amphora3 [shape=none margin=0 label= <
<table border="0" cellborder="1" cellspacing="0" cellpadding="4">
<tr><td>Amphora</td></tr>
</table>
>]; Amphora4 [shape=none margin=0 label= <
<table border="0" cellborder="1" cellspacing="0" cellpadding="4">
<tr><td>Amphora</td></tr>
</table>
>]; Amphora5 [shape=none margin=0 label= <
<table border="0" cellborder="1" cellspacing="0" cellpadding="4">
<tr><td>Amphora</td></tr>
</table>
>]; }; subgraph cluster_Network { label="LB Network"; color="gray"; bgcolor="gray"; Stuff [style=invis shape=none margin=0 label= <
<table border="0" cellborder="1" cellspacing="0" cellpadding="4">
<tr><td>Stuff</td></tr>
</table>
>]; LBNetwork [shape=none margin=0 label=""]; Things [style=invis shape=none margin=0 label= <
<table border="0" cellborder="1" cellspacing="0" cellpadding="4">
<tr><td>Things</td></tr>
</table>
>]; }; OperatorAPIHandler -> Controller [dir=none]; Controller -> LBNetwork [style=invis]; LBNetwork -> {Amphora3 Amphora4 Amphora5} [style=invis]; Controller -> {Amphora3 Amphora4 Amphora5} [constraint=false dir=none]; Controller -> NetworkingDriver [constraint=false dir=none]; Controller -> Database [constraint=false dir=none]; }; UserAPIHandler:driver -> Controller [dir=none]; NetworkingServices -> NetworkingDriver [dir=none]; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3622167 octavia-6.2.2/doc/source/contributor/devref/0000775000175000017500000000000000000000000021064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/devref/erd.rst0000664000175000017500000000102300000000000022364 0ustar00zuulzuul00000000000000 =================================== Octavia Entity Relationship Diagram =================================== Below is the current Octavia database data model. * Solid stars are primary key columns. * Hollow stars are foreign key columns. * Items labeled as "PROPERTY" are data model relationships and are not present in the database. .. only:: html Click to enlarge the diagram. .. image:: erd.svg :width: 660px :target: ../../_images/erd.svg .. only:: latex .. image:: erd.svg :width: 660px ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/devref/flows.rst0000664000175000017500000000177400000000000022761 0ustar00zuulzuul00000000000000======================== Octavia Controller Flows ======================== Octavia uses OpenStack TaskFlow to orchestrate the actions the Octavia controller needs to take while managing load balancers. This document is meant as a reference for the key flows used in the Octavia controller. .. toctree:: :maxdepth: 1 flow_diagrams/AmphoraFlows.rst flow_diagrams/HealthMonitorFlows.rst flow_diagrams/L7PolicyFlows.rst flow_diagrams/L7RuleFlows.rst flow_diagrams/ListenerFlows.rst flow_diagrams/LoadBalancerFlows.rst flow_diagrams/MemberFlows.rst flow_diagrams/PoolFlows.rst The following are flow diagrams for the **amphora V2** driver. .. toctree:: :maxdepth: 1 flow_diagrams_v2/AmphoraFlows.rst flow_diagrams_v2/HealthMonitorFlows.rst flow_diagrams_v2/L7PolicyFlows.rst flow_diagrams_v2/L7RuleFlows.rst flow_diagrams_v2/ListenerFlows.rst flow_diagrams_v2/LoadBalancerFlows.rst flow_diagrams_v2/MemberFlows.rst flow_diagrams_v2/PoolFlows.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/devref/gmr.rst0000664000175000017500000000560600000000000022412 0ustar00zuulzuul00000000000000.. Copyright (c) 2015 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Guru Meditation Reports ======================= Octavia contains a mechanism whereby developers and system administrators can generate a report about the state of a running Octavia executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Octavia process with support (see below). The *GMR* will then be outputted as standard error for that particular process. For example, suppose that ``octavia-api`` has process id ``8675``, and was run with ``2>/var/log/octavia/octavia-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/octavia/octavia-api-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information. Threads Shows stack traces and thread ids for each of the threads within this process. Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids). Configuration Lists all the configuration options currently accessible via the CONF object for the current process. Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from octavia import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation under :mod:`oslo.reports` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3622167 octavia-6.2.2/doc/source/contributor/guides/0000775000175000017500000000000000000000000021071 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/guides/dev-quick-start.rst0000664000175000017500000005347700000000000024666 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 IBM Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================================== Developer / Operator Quick Start Guide ====================================== This document is intended for developers and operators. For an end-user guide, please see the end-user quick-start guide and cookbook in this documentation repository. 
Running Octavia in devstack =========================== tl;dr ----- * 8GB RAM minimum * "vmx" or "svm" in ``/proc/cpuinfo`` * Ubuntu 18.04 or later * On that host, copy and run as root: ``octavia/devstack/contrib/new-octavia-devstack.sh`` System requirements ------------------- Octavia in devstack with a default (non-HA) configuration will deploy one amphora VM per loadbalancer deployed. The current default amphora image also requires at least 1GB of RAM to run effectively. As such it is important that your devstack environment has enough resources dedicated to it to run all its necessary components. For most devstack environments, the limiting resource will be RAM. At the present time, we recommend at least 12GB of RAM for the standard devstack defaults, or 8GB of RAM if cinder and swift are disabled. More is recommended if you also want to run a couple of application server VMs (so that Octavia has something to load balance within your devstack environment). Also, because the current implementation of Octavia delivers load balancing services using amphorae that run as Nova virtual machines, it is effectively mandatory to enable nested virtualization. The software will work with software emulated CPUs, but be unusably slow. The idea is to make sure the BIOS of the systems you're running your devstack on have virtualization features enabled (Intel VT-x, AMD-V, etc.), and the virtualization software you're using exposes these features to the guest VM (sometimes called nested virtualization). For more information, see: `Configure DevStack with KVM-based Nested Virtualization `__ The devstack environment we recommend should be running Ubuntu Linux 18.04 or later. These instructions may work for other Linux operating systems or environments. However, most people doing development on Octavia are using Ubuntu for their test environment, so you will probably have the easiest time getting your devstack working with that OS. Deployment ---------- 1. Deploy an Ubuntu 18.04 or later Linux host with at least 8GB of RAM. (This can be a VM, but again, make sure you have nested virtualization features enabled in your BIOS and virtualization software.) 2. Copy ``devstack/contrib/new-octavia-devstack.sh`` from this source repository onto that host. 3. Run new-octavia-devstack.sh as root. 4. Deploy loadbalancers, listeners, etc. Running Octavia in production ============================= Notes ----- Disclaimers ___________ This document is not a definitive guide for deploying Octavia in every production environment. There are many ways to deploy Octavia depending on the specifics and limitations of your situation. For example, in our experience, large production environments often have restrictions, hidden "features" or other elements in the network topology which mean the default Neutron networking stack (with which Octavia was designed to operate) must be modified or replaced with a custom networking solution. This may also mean that for your particular environment, you may need to write your own custom networking driver to plug into Octavia. Obviously, instructions for doing this are beyond the scope of this document. We hope this document provides the cloud operator or distribution creator with a basic understanding of how the Octavia components fit together practically. 
Through this, it should become more obvious how components of Octavia can be divided or duplicated across physical hardware in a production cloud environment to aid in achieving scalability and resiliency for the Octavia load balancing system. In the interest of keeping this guide somewhat high-level and avoiding obsolescence or operator/distribution-specific environment assumptions by specifying exact commands that should be run to accomplish the tasks below, we will instead just describe what needs to be done and leave it to the cloud operator or distribution creator to "do the right thing" to accomplish the task for their environment. If you need guidance on specific commands to run to accomplish the tasks described below, we recommend reading through the plugin.sh script in devstack subdirectory of this project. The devstack plugin exercises all the essential components of Octavia in the right order, and this guide will mostly be an elaboration of this process. Environment Assumptions _______________________ The scope of this guide is to provide a basic overview of setting up all the components of Octavia in a production environment, assuming that the default in-tree drivers and components (including a "standard" Neutron install) are going to be used. For the purposes of this guide, we will therefore assume the following core components have already been set up for your production OpenStack environment: * Nova * Neutron * Glance * Barbican (if TLS offloading functionality is enabled) * Keystone * Rabbit * MySQL Production Deployment Walkthrough --------------------------------- Create Octavia User ___________________ By default Octavia will use the 'octavia' user for keystone authentication, and the admin user for interactions with all other services. You must: * Create 'octavia' user. * Add the 'admin' role to this user. Load Balancer Network Configuration ___________________________________ Octavia makes use of an "LB Network" exclusively as a management network that the controller uses to talk to amphorae and vice versa. All the amphorae that Octavia deploys will have interfaces and IP addresses on this network. Therefore, it's important that the subnet deployed on this network be sufficiently large to allow for the maximum number of amphorae and controllers likely to be deployed throughout the lifespan of the cloud installation. At the present time, though IPv4 subnets are used by default for the LB Network (for example: 172.16.0.0/12), IPv6 subnets can be used for the LB Network. The LB Network is isolated from tenant networks on the amphorae by means of network namespaces on the amphorae. Therefore, operators need not be concerned about overlapping subnet ranges with tenant networks. You must also create a Neutron security group which will be applied to amphorae created on the LB network. It needs to allow amphorae to send UDP heartbeat packets to the health monitor (by default, UDP port 5555), and ingress on the amphora's API (by default, TCP port 9443). It can also be helpful to allow SSH access to the amphorae from the controller for troubleshooting purposes (ie. TCP port 22), though this is not strictly necessary in production environments. Amphorae will send periodic health checks to the controller's health manager. Any firewall protecting the interface on which the health manager listens must allow these packets from amphorae on the LB Network (by default, UDP port 5555). 
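As an illustration only, the security group and rules described above might be created along these lines (a sketch using the standard OpenStack client; the group name is arbitrary, the SSH rule is optional, and the single group shown here collapses what some deployments split into separate amphora and health-manager security groups):

::

    # openstack security group create lb-mgmt-sec-grp
    # openstack security group rule create --protocol udp --dst-port 5555 lb-mgmt-sec-grp
    # openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp
    # openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp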
Finally, you need to add routing or interfaces to this network such that the
Octavia controller (which will be described below) is able to communicate
with hosts on this network. This also implies you should have some idea
where you're going to run the Octavia controller components.

You must:

* Create the 'lb-mgmt-net'.
* Assign the 'lb-mgmt-net' to the admin tenant.
* Create a subnet and assign it to the 'lb-mgmt-net'.
* Create a neutron security group for amphorae created on the 'lb-mgmt-net'
  which allows appropriate access to the amphorae.
* Update firewall rules on the host running the octavia health manager to
  allow health check messages from amphorae.
* Add appropriate routing to / from the 'lb-mgmt-net' such that egress is
  allowed, and the controller (to be created later) can talk to hosts on
  this network.

Create Amphora Image
____________________

Octavia deploys amphorae based on a virtual machine disk image. By default
we use the OpenStack diskimage-builder project for this. Scripts to
accomplish this are within the diskimage-create directory of this
repository.

In addition to creating the disk image, configure a Nova flavor to use for
amphorae, and upload the disk image to glance.

You must:

* Create an amphora disk image using OpenStack diskimage-builder.
* Create a Nova flavor for the amphorae.
* Add the amphora disk image to glance.
* Tag the above glance disk image with 'amphora'.

Install Octavia Controller Software
___________________________________

This seems somewhat obvious, but the important things to note here are that
you should put this somewhere on the network where it will have access to
the database (to be initialized below), the oslo messaging system, and the
LB network. Octavia uses the standard python setuptools, so installation of
the software itself should be straightforward.

Running multiple instances of the individual Octavia controller components
on separate physical hosts is recommended in order to provide scalability
and availability of the controller software.

The Octavia controller presently consists of several components which may be
split across several physical machines. For the 4.0 release of Octavia, the
important (and potentially separable) components are the controller worker,
housekeeper, health manager and API controller. Please see the component
diagrams elsewhere in this repository's documentation for detailed
descriptions of each. Please use the following table for hints on which
controller components need access to outside resources:

+-------------------+----------------------------------------+
| **Component**     | **Resource**                           |
+-------------------+------------+----------+----------------+
|                   | LB Network | Database | OSLO messaging |
+===================+============+==========+================+
| API               | No         | Yes      | Yes            |
+-------------------+------------+----------+----------------+
| controller worker | Yes        | Yes      | Yes            |
+-------------------+------------+----------+----------------+
| health manager    | Yes        | Yes      | No             |
+-------------------+------------+----------+----------------+
| housekeeper       | Yes        | Yes      | No             |
+-------------------+------------+----------+----------------+

In addition to talking to each other via Oslo messaging, various controller
components must also communicate with other OpenStack components, like nova,
neutron, barbican, etc. via their APIs.

You must:

* Pick appropriate host(s) to run the Octavia components.
* Install the dependencies for Octavia.
* Install the Octavia software (one possible approach is sketched below).
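A minimal sketch of an install from source on one such host (this assumes a pip-based install from a git checkout of the project's opendev repository; packaged or containerized installs will differ, and a virtualenv is usually advisable):

::

    # git clone https://opendev.org/openstack/octavia
    # pip install -r octavia/requirements.txt
    # pip install ./octavia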
Create Octavia Keys and Certificates ____________________________________ Octavia presently allows for one method for the controller to communicate with amphorae: The amphora REST API. Both amphora API and Octavia controller do bi-directional certificate-based authentication in order to authenticate and encrypt communication. You must therefore create appropriate TLS certificates which will be used for key signing, authentication, and encryption. There is a detailed :doc:`../../admin/guides/certificates` to guide you through this process. Please note that certificates created with this guide may not meet your organization's security policies, since they are self-signed certificates with arbitrary bit lengths, expiration dates, etc. Operators should obviously follow their own security guidelines in creating these certificates. In addition to the above, it can sometimes be useful for cloud operators to log into running amphorae to troubleshoot problems. The standard method for doing this is to use SSH from the host running the controller worker. In order to do this, you must create an SSH public/private key pair specific to your cloud (for obvious security reasons). You must add this keypair to nova. You must then also update octavia.conf with the keypair name you used when adding it to nova so that amphorae are initialized with it on boot. See the Troubleshooting Tips section below for an example of how an operator can SSH into an amphora. You must: * Create TLS certificates for communicating with the amphorae. * Create SSH keys for communicating with the amphorae. * Add the SSH keypair to nova. Configuring Octavia ___________________ Going into all of the specifics of how Octavia can be configured is actually beyond the scope of this document. For full documentation of this, please see the configuration reference: :doc:`../../configuration/configref` A configuration template can be found in ``etc/octavia.conf`` in this repository. It's also important to note that this configuration file will need to be updated with UUIDs of the LB network, amphora security group, amphora image tag, SSH key path, TLS certificate path, database credentials, etc. At a minimum, the configuration should specify the following, beyond the defaults. 
Your specific environment may require more than this: +-----------------------+-------------------------------+ | Section | Configuration parameter | +=======================+===============================+ | DEFAULT | transport_url | +-----------------------+-------------------------------+ | database | connection | +-----------------------+-------------------------------+ | certificates | ca_certificate | +-----------------------+-------------------------------+ | certificates | ca_private_key | +-----------------------+-------------------------------+ | certificates | ca_private_key_passphrase | +-----------------------+-------------------------------+ | controller_worker | amp_boot_network_list | +-----------------------+-------------------------------+ | controller_worker | amp_flavor_id | +-----------------------+-------------------------------+ | controller_worker | amp_image_owner_id | +-----------------------+-------------------------------+ | controller_worker | amp_image_tag | +-----------------------+-------------------------------+ | controller_worker | amp_secgroup_list | +-----------------------+-------------------------------+ | controller_worker | amp_ssh_key_name [#]_ | +-----------------------+-------------------------------+ | controller_worker | amphora_driver | +-----------------------+-------------------------------+ | controller_worker | compute_driver | +-----------------------+-------------------------------+ | controller_worker | loadbalancer_topology | +-----------------------+-------------------------------+ | controller_worker | network_driver | +-----------------------+-------------------------------+ | haproxy_amphora | client_cert | +-----------------------+-------------------------------+ | haproxy_amphora | server_ca | +-----------------------+-------------------------------+ | health_manager | bind_ip | +-----------------------+-------------------------------+ | health_manager | controller_ip_port_list | +-----------------------+-------------------------------+ | health_manager | heartbeat_key | +-----------------------+-------------------------------+ | house_keeping | spare_amphora_pool_size | +-----------------------+-------------------------------+ | keystone_authtoken | admin_password | +-----------------------+-------------------------------+ | keystone_authtoken | admin_tenant_name | +-----------------------+-------------------------------+ | keystone_authtoken | admin_user | +-----------------------+-------------------------------+ | keystone_authtoken | www_authenticate_uri | +-----------------------+-------------------------------+ | keystone_authtoken | auth_version | +-----------------------+-------------------------------+ | oslo_messaging | topic | +-----------------------+-------------------------------+ | oslo_messaging_rabbit | rabbit_host | +-----------------------+-------------------------------+ | oslo_messaging_rabbit | rabbit_userid | +-----------------------+-------------------------------+ | oslo_messaging_rabbit | rabbit_password | +-----------------------+-------------------------------+ .. [#] This is technically optional, but extremely useful for troubleshooting. You must: * Create or update ``/etc/octavia/octavia.conf`` appropriately. Spares pool considerations ^^^^^^^^^^^^^^^^^^^^^^^^^^ One configuration directive deserves some extra consideration in this document: Depending on the specifics of your production environment, you may decide to run Octavia with a non-empty "spares pool." 
Since the time it takes to spin up a new amphora can be non-trivial in some
cloud environments (and the reliability of such operations can sometimes be
less than ideal), this directive instructs Octavia to attempt to maintain a
certain number of amphorae running in an idle, unconfigured state. These
amphorae will run base amphora health checks and wait for configuration from
the Octavia controller. The overall effect of this is to greatly reduce the
time it takes and increase the reliability of deploying a new load balancing
service on demand. This comes at the cost of having a number of deployed
amphorae which consume resources but are not actively providing load
balancing services, and at the cost of not being able to use Nova
anti-affinity features for ACTIVE-STANDBY load balancer topologies.

Initialize Octavia Database
___________________________

This is controlled through alembic migrations under the octavia/db directory
in this repository. A tool has been created to aid in the initialization of
the octavia database. This should be available under
``/usr/local/bin/octavia-db-manage`` on the host on which the octavia
controller worker is installed. Note that this tool looks at the
``/etc/octavia/octavia.conf`` file for its database credentials, so
initializing the database must happen after Octavia is configured.

It's also important to note here that all of the components of the Octavia
controller will need direct access to the database (including the API
handler), so you must ensure these components are able to communicate with
whichever host is housing your database.

You must:

* Create database credentials for Octavia.
* Add these to the ``/etc/octavia/octavia.conf`` file.
* Run ``/usr/local/bin/octavia-db-manage upgrade head`` on the controller
  worker host to initialize the octavia database.

Launching the Octavia Controller
________________________________

We recommend using upstart / systemd scripts to ensure the components of the
Octavia controller are all started and kept running. It of course doesn't
hurt to first start by running these manually to ensure configuration and
communication are working between all the components.

You must:

* Make sure each Octavia controller component is started appropriately.

Install Octavia extension in Horizon
____________________________________

This isn't strictly necessary for all cloud installations; however, if yours
makes use of the Horizon GUI interface for tenants, it is probably also a
good idea to make sure that it is configured with the Octavia extension.

You may:

* Install the octavia GUI extension in Horizon.

Test deployment
_______________

If all of the above instructions have been followed, it should now be
possible to deploy load balancing services using the OpenStack CLI,
communicating with the Octavia v2 API.

Example:

::

    # openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
    # openstack loadbalancer show lb1
    # openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1

Upon executing the above, log files should indicate that an amphora is
deployed to house the load balancer, and that this load balancer is further
modified to include a listener.
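To confirm that the load balancer actually went ``ACTIVE``, a quick status check along these lines can help (a sketch using standard OpenStack client commands against the example objects above):

::

    # openstack loadbalancer show lb1 -c provisioning_status -c operating_status
    # openstack loadbalancer status show lb1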
The amphora should be visible to the octavia or admin tenant using the ``openstack server list`` command, and the listener should respond on the load balancer's IP on port 80 (with an error 503 in this case, since no pool or members have been defined yet—but this is usually enough to see that the Octavia load balancing system is working). For more information on configuring load balancing services as a tenant, please see the end-user quick-start guide and cookbook. Troubleshooting Tips ==================== The troubleshooting hints in this section are meant primarily for developers or operators troubleshooting underlying Octavia components, rather than end-users or tenants troubleshooting the load balancing service itself. SSH into Amphorae ----------------- If you are using the reference amphora image, it may be helpful to log into running amphorae when troubleshooting service problems. To do this, first discover the ``lb_network_ip`` address of the amphora you would like to SSH into by looking in the ``amphora`` table in the octavia database. Then from the host housing the controller worker, run: :: ssh -i /etc/octavia/.ssh/octavia_ssh_key ubuntu@[lb_network_ip] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/guides/providers.rst0000664000175000017500000033146700000000000023656 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================= Provider Driver Development Guide ================================= This document is intended as a guide for developers creating provider drivers for the Octavia API. This guide is intended to be an up to date version of the `provider driver specification`_ previously approved. .. _provider driver specification: ../specs/version1.1/enable-provider-driver.html How Provider Drivers Integrate ============================== Available drivers will be enabled by entries in the Octavia configuration file. Drivers will be loaded via stevedore and Octavia will communicate with drivers through a standard class interface defined below. Most driver functions will be asynchronous to Octavia, and Octavia will provide a library of functions that give drivers a way to update status and statistics. Functions that are synchronous are noted below. Octavia API functions not listed here will continue to be handled by the Octavia API and will not call into the driver. Examples would be show, list, and quota requests. In addition, drivers may provide a provider agent that the Octavia driver-agent will launch at start up. This is a long-running process that is intended to support the provider driver. Driver Entry Points ------------------- Provider drivers will be loaded via `stevedore `_. Drivers will have an entry point defined in their setup tools configuration using the Octavia driver namespace "octavia.api.drivers". 
This entry point name will be used to enable the driver in the Octavia configuration file and as the "provider" parameter users specify when creating a load balancer. An example for the octavia reference driver would be: .. code-block:: python amphora = octavia.api.drivers.amphora_driver.driver:AmphoraProviderDriver In addition, provider drivers may provide a provider agent also defined by a setup tools entry point. The provider agent namespace is "octavia.driver_agent.provider_agents". This will be called once, at Octavia driver-agent start up, to launch a long-running process. Provider agents must be enabled in the Octavia configuration file. An example provider agent entry point would be: .. code-block:: python amphora_agent = octavia.api.drivers.amphora_driver.agent:AmphoraProviderAgent Stable Provider Driver Interface ================================ Provider drivers should only access the following Octavia APIs. All other Octavia APIs are not considered stable or safe for provider driver use and may change at any time. * octavia_lib.api.drivers.data_models * octavia_lib.api.drivers.driver_lib * octavia_lib.api.drivers.exceptions * octavia_lib.api.drivers.provider_base * octavia_lib.common.constants Octavia Provider Driver API =========================== Provider drivers will be expected to support the full interface described by the Octavia API, currently v2.0. If a driver does not implement an API function, drivers should fail a request by raising a ``NotImplementedError`` exception. If a driver implements a function but does not support a particular option passed in by the caller, the driver should raise an ``UnsupportedOptionError``. It is recommended that drivers use the `jsonschema `_ package or `voluptuous `_ to validate the request against the current driver capabilities. See the `Exception Model`_ below for more details. .. note:: Driver developers should refer to the official `Octavia API reference`_ document for details of the fields and expected outcome of these calls. .. _Octavia API reference: https://docs.openstack.org/api-ref/load-balancer/v2/index.html Load balancer ------------- Create ^^^^^^ Creates a load balancer. Octavia will pass in the load balancer object with all requested settings. The load balancer will be in the ``PENDING_CREATE`` provisioning_status and ``OFFLINE`` operating_status when it is passed to the driver. The driver will be responsible for updating the provisioning status of the load balancer to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. The Octavia API will accept and do basic API validation of the create request from the user. The load balancer python object representing the request body will be passed to the driver create method as it was received and validated with the following exceptions: 1. The provider will be removed as this is used for driver selection. 2. The flavor will be expanded from the provided ID to be the full dictionary representing the flavor metadata. **Load balancer object** As of the writing of this specification the create load balancer object may contain the following: +-------------------+--------+-----------------------------------------------+ | Name | Type | Description | +===================+========+===============================================+ | admin_state_up | bool | Admin state: True if up, False if down. 
| +-------------------+--------+-----------------------------------------------+ | description | string | A human-readable description for the resource.| +-------------------+--------+-----------------------------------------------+ | flavor | dict | The flavor keys and values. | +-------------------+--------+-----------------------------------------------+ | availability_zone | dict | The availability zone keys and values. | +-------------------+--------+-----------------------------------------------+ | listeners | list | A list of `Listener objects`_. | +-------------------+--------+-----------------------------------------------+ | loadbalancer_id | string | ID of load balancer to create. | +-------------------+--------+-----------------------------------------------+ | name | string | Human-readable name of the resource. | +-------------------+--------+-----------------------------------------------+ | pools | list | A list of `Pool object`_. | +-------------------+--------+-----------------------------------------------+ | project_id | string | ID of the project owning this resource. | +-------------------+--------+-----------------------------------------------+ | vip_address | string | The IP address of the Virtual IP (VIP). | +-------------------+--------+-----------------------------------------------+ | vip_network_id | string | The ID of the network for the VIP. | +-------------------+--------+-----------------------------------------------+ | vip_port_id | string | The ID of the VIP port. | +-------------------+--------+-----------------------------------------------+ | vip_qos_policy_id | string | The ID of the qos policy for the VIP. | +-------------------+--------+-----------------------------------------------+ | vip_subnet_id | string | The ID of the subnet for the VIP. | +-------------------+--------+-----------------------------------------------+ The driver is expected to validate that the driver supports the request and raise an exception if the request cannot be accepted. **VIP port creation** Some provider drivers will want to create the Neutron port for the VIP, and others will want Octavia to create the port instead. In order to support both use cases, the create_vip_port() method will ask provider drivers to create a VIP port. If the driver expects Octavia to create the port, the driver will raise a NotImplementedError exception. Octavia will call this function before calling loadbalancer_create() in order to determine if it should create the VIP port. Octavia will call create_vip_port() with a loadbalancer ID and a partially defined VIP dictionary. Provider drivers that support port creation will create the port and return a fully populated VIP dictionary. **VIP dictionary** +-----------------+--------+-----------------------------------------------+ | Name | Type | Description | +=================+========+===============================================+ | project_id | string | ID of the project owning this resource. | +-----------------+--------+-----------------------------------------------+ | vip_address | string | The IP address of the Virtual IP (VIP). | +-----------------+--------+-----------------------------------------------+ | vip_network_id | string | The ID of the network for the VIP. | +-----------------+--------+-----------------------------------------------+ | vip_port_id | string | The ID of the VIP port. 
| +-----------------+--------+-----------------------------------------------+ |vip_qos_policy_id| string | The ID of the qos policy for the VIP. | +-----------------+--------+-----------------------------------------------+ | vip_subnet_id | string | The ID of the subnet for the VIP. | +-----------------+--------+-----------------------------------------------+ **Creating a Fully Populated Load Balancer** If the "listener" option is specified, the provider driver will iterate through the list and create all of the child objects in addition to creating the load balancer instance. Delete ^^^^^^ Removes an existing load balancer. Octavia will pass in the load balancer object and cascade boolean as parameters. The load balancer will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. The API includes an option for cascade delete. When cascade is set to True, the provider driver will delete all child objects of the load balancer. Failover ^^^^^^^^ Performs a failover of a load balancer. Octavia will pass in the load balancer ID as a parameter. The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the load balancer to either ``ACTIVE`` if successfully failed over, or ``ERROR`` if not failed over. Failover can mean different things in the context of a provider driver. For example, the Octavia driver replaces the current amphora(s) with another amphora. For another provider driver, failover may mean failing over from an active system to a standby system. Update ^^^^^^ Modifies an existing load balancer using the values supplied in the load balancer object. Octavia will pass in the original load balancer object which is the baseline for the update, and a load balancer object with the fields to be updated. Fields not updated by the user will contain "Unset" as defined in the data model. As of the writing of this specification the update load balancer object may contain the following: +-----------------+--------+-----------------------------------------------+ | Name | Type | Description | +=================+========+===============================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------+--------+-----------------------------------------------+ | description | string | A human-readable description for the resource.| +-----------------+--------+-----------------------------------------------+ | loadbalancer_id | string | ID of load balancer to update. | +-----------------+--------+-----------------------------------------------+ | name | string | Human-readable name of the resource. | +-----------------+--------+-----------------------------------------------+ |vip_qos_policy_id| string | The ID of the qos policy for the VIP. | +-----------------+--------+-----------------------------------------------+ The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the load balancer to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not successful. The driver is expected to validate that the driver supports the request. 
The method will then return or raise an exception if the request cannot be
accepted.

**Abstract class definition**

.. code-block:: python

    class Driver(object):

        def create_vip_port(self, loadbalancer_id, vip_dictionary):
            """Creates a port for a load balancer VIP.

            If the driver supports creating VIP ports, the driver will create
            a VIP port and return the vip_dictionary populated with the
            vip_port_id. If the driver does not support port creation, the
            driver will raise a NotImplementedError.

            :param: loadbalancer_id (string): ID of loadbalancer.
            :param: vip_dictionary (dict): The VIP dictionary.
            :returns: VIP dictionary with vip_port_id.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: The driver does not support creating
              VIP ports.
            """
            raise NotImplementedError()

        def loadbalancer_create(self, loadbalancer):
            """Creates a new load balancer.

            :param loadbalancer (object): The load balancer object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: The driver does not support create.
            :raises UnsupportedOptionError: The driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()

        def loadbalancer_delete(self, loadbalancer, cascade=False):
            """Deletes a load balancer.

            :param loadbalancer (object): The load balancer object.
            :param cascade (bool): If True, deletes all child objects
              (listeners, pools, etc.) in addition to the load balancer.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def loadbalancer_failover(self, loadbalancer_id):
            """Performs a fail over of a load balancer.

            :param loadbalancer_id (string): ID of the load balancer to
              failover.
            :return: Nothing if the failover request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises: NotImplementedError if driver does not support request.
            """
            raise NotImplementedError()

        def loadbalancer_update(self, old_loadbalancer, new_loadbalancer):
            """Updates a load balancer.

            :param old_loadbalancer (object): The baseline load balancer
              object.
            :param new_loadbalancer (object): The updated load balancer
              object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: The driver does not support request.
            :raises UnsupportedOptionError: The driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()

Listener
--------

Create
^^^^^^

Creates a listener for a load balancer. Octavia will pass in the listener
object with all requested settings.

The listener will be in the ``PENDING_CREATE`` provisioning_status and
``OFFLINE`` operating_status when it is passed to the driver. The driver
will be responsible for updating the provisioning status of the listener to
either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

The Octavia API will accept and do basic API validation of the create
request from the user. The listener python object representing the request
body will be passed to the driver create method as it was received and
validated with the following exceptions:

1. The project_id will be removed, if present, as this field is now
   deprecated. The listener will inherit the project_id from the parent
   load balancer.

2.
The default_tls_container_ref will be expanded and provided to the driver in PEM format. 3. The sni_container_refs will be expanded and provided to the driver in PEM format. .. _Listener objects: **Listener object** As of the writing of this specification the create listener object may contain the following: +------------------------------+--------+-------------------------------------+ | Name | Type | Description | +==============================+========+=====================================+ | admin_state_up | bool | Admin state: True if up, False if | | | | down. | +------------------------------+--------+-------------------------------------+ | client_authentication | string | The TLS client authentication mode. | | | | One of the options ``NONE``, | | | | ``OPTIONAL`` or ``MANDATORY``. | +------------------------------+--------+-------------------------------------+ | client_ca_tls_container_data | string | A PEM encoded certificate. | +------------------------------+--------+-------------------------------------+ | client_ca_tls_container_ref | string | The reference to the secrets | | | | container. | +------------------------------+--------+-------------------------------------+ | client_crl_container_data | string | A PEM encoded CRL file. | +------------------------------+--------+-------------------------------------+ | client_crl_container_ref | string | The reference to the secrets | | | | container. | +------------------------------+--------+-------------------------------------+ | connection_limit | int | The max number of connections | | | | permitted for this listener. Default| | | | is -1, which is infinite | | | | connections. | +------------------------------+--------+-------------------------------------+ | default_pool | object | A `Pool object`_. | +------------------------------+--------+-------------------------------------+ | default_pool_id | string | The ID of the pool used by the | | | | listener if no L7 policies match. | +------------------------------+--------+-------------------------------------+ | default_tls_container_data | dict | A `TLS container`_ dict. | +------------------------------+--------+-------------------------------------+ | default_tls_container_refs | string | The reference to the secrets | | | | container. | +------------------------------+--------+-------------------------------------+ | description | string | A human-readable description for the| | | | listener. | +------------------------------+--------+-------------------------------------+ | insert_headers | dict | A dictionary of optional headers to | | | | insert into the request before it is| | | | sent to the backend member. See | | | | `Supported HTTP Header Insertions`_.| | | | Keys and values are specified as | | | | strings. | +------------------------------+--------+-------------------------------------+ | l7policies | list | A list of `L7policy objects`_. | +------------------------------+--------+-------------------------------------+ | listener_id | string | ID of listener to create. | +------------------------------+--------+-------------------------------------+ | loadbalancer_id | string | ID of load balancer. | +------------------------------+--------+-------------------------------------+ | name | string | Human-readable name of the listener.| +------------------------------+--------+-------------------------------------+ | project_id | string | ID of the project owning this | | | | resource. 
| +------------------------------+--------+-------------------------------------+ | protocol | string | Protocol type: One of HTTP, HTTPS, | | | | TCP, or TERMINATED_HTTPS. | +------------------------------+--------+-------------------------------------+ | protocol_port | int | Protocol port number. | +------------------------------+--------+-------------------------------------+ | sni_container_data | list | A list of `TLS container`_ dict. | +------------------------------+--------+-------------------------------------+ | sni_container_refs | list | A list of references to the SNI | | | | secrets containers. | +------------------------------+--------+-------------------------------------+ | timeout_client_data | int | Frontend client inactivity timeout | | | | in milliseconds. | +------------------------------+--------+-------------------------------------+ | timeout_member_connect | int | Backend member connection timeout in| | | | milliseconds. | +------------------------------+--------+-------------------------------------+ | timeout_member_data | int | Backend member inactivity timeout in| | | | milliseconds. | +------------------------------+--------+-------------------------------------+ | timeout_tcp_inspect | int | Time, in milliseconds, to wait for | | | | additional TCP packets for content | | | | inspection. | +------------------------------+--------+-------------------------------------+ | allowed_cidrs | list | List of IPv4 or IPv6 CIDRs. | +------------------------------+--------+-------------------------------------+ .. _TLS container: As of the writing of this specification the TLS container dictionary contains the following: +---------------+--------+------------------------------------------------+ | Key | Type | Description | +===============+========+================================================+ | certificate | string | The PEM encoded certificate. | +---------------+--------+------------------------------------------------+ | intermediates | List | A list of intermediate PEM certificates. | +---------------+--------+------------------------------------------------+ | passphrase | string | The private_key passphrase. | +---------------+--------+------------------------------------------------+ | primary_cn | string | The primary common name of the certificate. | +---------------+--------+------------------------------------------------+ | private_key | string | The PEM encoded private key. | +---------------+--------+------------------------------------------------+ .. _Supported HTTP Header Insertions: As of the writing of this specification the Supported HTTP Header Insertions are: +-----------------------+--------+--------------------------------------------+ | Key | Type | Description | +=======================+========+============================================+ | X-Forwarded-For | bool | When True a X-Forwarded-For header is | | | | inserted into the request to the backend | | | | member that specifies the client IP | | | | address. | +-----------------------+--------+--------------------------------------------+ | X-Forwarded-Port | int | A X-Forwarded-Port header is inserted into | | | | the request to the backend member that | | | | specifies the integer provided. Typically | | | | this is used to indicate the port the | | | | client connected to on the load balancer. 
|
+-----------------------+--------+--------------------------------------------+
| X-Forwarded-Proto     | bool   | A X-Forwarded-Proto header is inserted into|
|                       |        | the end of request to the backend member.  |
|                       |        | HTTP for the HTTP listener protocol type,  |
|                       |        | HTTPS for the TERMINATED_HTTPS listener    |
|                       |        | protocol type.                             |
+-----------------------+--------+--------------------------------------------+
| X-SSL-Client-Verify   | string | When "``true``" a ``X-SSL-Client-Verify``  |
|                       |        | header is inserted into the request to the |
|                       |        | backend ``member`` that contains 0 if the  |
|                       |        | client authentication was successful, or a |
|                       |        | result error number greater than 0 that    |
|                       |        | aligns to the openssl verify error codes.  |
+-----------------------+--------+--------------------------------------------+
| X-SSL-Client-Has-Cert | string | When "``true``" a ``X-SSL-Client-Has-Cert``|
|                       |        | header is inserted into the request to the |
|                       |        | backend ``member`` that is ''true'' if a   |
|                       |        | client authentication certificate was      |
|                       |        | presented, and ''false'' if not. Does not  |
|                       |        | indicate validity.                         |
+-----------------------+--------+--------------------------------------------+
| X-SSL-Client-DN       | string | When "``true``" a ``X-SSL-Client-DN``      |
|                       |        | header is inserted into the request to the |
|                       |        | backend ``member`` that contains the full  |
|                       |        | Distinguished Name of the certificate      |
|                       |        | presented by the client.                   |
+-----------------------+--------+--------------------------------------------+
| X-SSL-Client-CN       | string | When "``true``" a ``X-SSL-Client-CN``      |
|                       |        | header is inserted into the request to the |
|                       |        | backend ``member`` that contains the Common|
|                       |        | Name from the full Distinguished Name of   |
|                       |        | the certificate presented by the client.   |
+-----------------------+--------+--------------------------------------------+
| X-SSL-Issuer          | string | When "``true``" a ``X-SSL-Issuer`` header  |
|                       |        | is inserted into the request to the backend|
|                       |        | ``member`` that contains the full          |
|                       |        | Distinguished Name of the client           |
|                       |        | certificate issuer.                        |
+-----------------------+--------+--------------------------------------------+
| X-SSL-Client-SHA1     | string | When "``true``" a ``X-SSL-Client-SHA1``    |
|                       |        | header is inserted into the request to the |
|                       |        | backend ``member`` that contains the SHA-1 |
|                       |        | fingerprint of the certificate presented by|
|                       |        | the client in hex string format.           |
+-----------------------+--------+--------------------------------------------+
|X-SSL-Client-Not-Before| string | When "``true``" a                          |
|                       |        | ``X-SSL-Client-Not-Before``                |
|                       |        | header is inserted into the request to the |
|                       |        | backend ``member`` that contains the start |
|                       |        | date presented by the client as a formatted|
|                       |        | string YYMMDDhhmmss[Z].                    |
+-----------------------+--------+--------------------------------------------+
|X-SSL-Client-Not-After | string | When "``true``" a                          |
|                       |        | ``X-SSL-Client-Not-After`` header is       |
|                       |        | inserted into the request to the           |
|                       |        | backend ``member`` that contains the end   |
|                       |        | date presented by the client as a formatted|
|                       |        | string YYMMDDhhmmss[Z].                    |
+-----------------------+--------+--------------------------------------------+

**Creating a Fully Populated Listener**

If the "default_pool" or "l7policies" option is specified, the provider
driver will create all of the child objects in addition to creating the
listener instance.

Delete
^^^^^^

Deletes an existing listener. Octavia will pass the listener object as a
parameter.
The listener will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. Update ^^^^^^ Modifies an existing listener using the values supplied in the listener object. Octavia will pass in the original listener object which is the baseline for the update, and a listener object with the fields to be updated. Fields not updated by the user will contain "Unset" as defined in the data model. As of the writing of this specification the update listener object may contain the following: +----------------------------+--------+-------------------------------------+ | Name | Type | Description | +============================+========+=====================================+ | admin_state_up | bool | Admin state: True if up, False if | | | | down. | +----------------------------+--------+-------------------------------------+ | client_authentication | string | The TLS client authentication mode. | | | | One of the options ``NONE``, | | | | ``OPTIONAL`` or ``MANDATORY``. | +----------------------------+--------+-------------------------------------+ |client_ca_tls_container_data| string | A PEM encoded certificate. | +----------------------------+--------+-------------------------------------+ | client_ca_tls_container_ref| string | The reference to the secrets | | | | container. | +----------------------------+--------+-------------------------------------+ | client_crl_container_data | string | A PEM encoded CRL file. | +----------------------------+--------+-------------------------------------+ | client_crl_container_ref | string | The reference to the secrets | | | | container. | +----------------------------+--------+-------------------------------------+ | connection_limit | int | The max number of connections | | | | permitted for this listener. Default| | | | is -1, which is infinite | | | | connections. | +----------------------------+--------+-------------------------------------+ | default_pool_id | string | The ID of the pool used by the | | | | listener if no L7 policies match. | +----------------------------+--------+-------------------------------------+ | default_tls_container_data | dict | A `TLS container`_ dict. | +----------------------------+--------+-------------------------------------+ | default_tls_container_refs | string | The reference to the secrets | | | | container. | +----------------------------+--------+-------------------------------------+ | description | string | A human-readable description for | | | | the listener. | +----------------------------+--------+-------------------------------------+ | insert_headers | dict | A dictionary of optional headers to | | | | insert into the request before it is| | | | sent to the backend member. See | | | | `Supported HTTP Header Insertions`_.| | | | Keys and values are specified as | | | | strings. | +----------------------------+--------+-------------------------------------+ | listener_id | string | ID of listener to update. | +----------------------------+--------+-------------------------------------+ | name | string | Human-readable name of the listener.| +----------------------------+--------+-------------------------------------+ | sni_container_data | list | A list of `TLS container`_ dict. 
| +----------------------------+--------+-------------------------------------+ | sni_container_refs | list | A list of references to the SNI | | | | secrets containers. | +----------------------------+--------+-------------------------------------+ | timeout_client_data | int | Frontend client inactivity timeout | | | | in milliseconds. | +----------------------------+--------+-------------------------------------+ | timeout_member_connect | int | Backend member connection timeout in| | | | milliseconds. | +----------------------------+--------+-------------------------------------+ | timeout_member_data | int | Backend member inactivity timeout in| | | | milliseconds. | +----------------------------+--------+-------------------------------------+ | timeout_tcp_inspect | int | Time, in milliseconds, to wait for | | | | additional TCP packets for content | | | | inspection. | +----------------------------+--------+-------------------------------------+ | allowed_cidrs | list | List of IPv4 or IPv6 CIDRs. | +----------------------------+--------+-------------------------------------+ The listener will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the listener to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not successful. The driver is expected to validate that the driver supports the request. The method will then return or raise an exception if the request cannot be accepted. **Abstract class definition** .. code-block:: python class Driver(object): def listener_create(self, listener): """Creates a new listener. :param listener (object): The listener object. :return: Nothing if the create request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: if driver does not support request. :raises UnsupportedOptionError: if driver does not support one of the configuration options. """ raise NotImplementedError() def listener_delete(self, listener): """Deletes a listener. :param listener (object): The listener object. :return: Nothing if the delete request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: if driver does not support request. """ raise NotImplementedError() def listener_update(self, old_listener, new_listener): """Updates a listener. :param old_listener (object): The baseline listener object. :param new_listener (object): The updated listener object. :return: Nothing if the update request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: if driver does not support request. :raises UnsupportedOptionError: if driver does not support one of the configuration options. """ raise NotImplementedError() Pool ---- Create ^^^^^^ Creates a pool for a load balancer. Octavia will pass in the pool object with all requested settings. The pool will be in the ``PENDING_CREATE`` provisioning_status and ``OFFLINE`` operating_status when it is passed to the driver. The driver will be responsible for updating the provisioning status of the pool to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. The Octavia API will accept and do basic API validation of the create request from the user. The pool python object representing the request body will be passed to the driver create method as it was received and validated with the following exceptions: 1. 
The project_id will be removed, if present, as this field is now deprecated.
The pool will inherit the project_id from the parent load balancer.

.. _Pool object:

**Pool object**

As of the writing of this specification the create pool object may contain
the following:

+-----------------------+--------+------------------------------------------+
| Name                  | Type   | Description                              |
+=======================+========+==========================================+
| admin_state_up        | bool   | Admin state: True if up, False if down.  |
+-----------------------+--------+------------------------------------------+
| ca_tls_container_data | string | A PEM encoded certificate.               |
+-----------------------+--------+------------------------------------------+
| ca_tls_container_ref  | string | The reference to the secrets container.  |
+-----------------------+--------+------------------------------------------+
| crl_container_data    | string | A PEM encoded CRL file.                  |
+-----------------------+--------+------------------------------------------+
| crl_container_ref     | string | The reference to the secrets container.  |
+-----------------------+--------+------------------------------------------+
| description           | string | A human-readable description for the     |
|                       |        | pool.                                    |
+-----------------------+--------+------------------------------------------+
| healthmonitor         | object | A `Healthmonitor object`_.               |
+-----------------------+--------+------------------------------------------+
| lb_algorithm          | string | Load balancing algorithm: One of         |
|                       |        | ROUND_ROBIN, LEAST_CONNECTIONS,          |
|                       |        | SOURCE_IP or SOURCE_IP_PORT.             |
+-----------------------+--------+------------------------------------------+
| loadbalancer_id       | string | ID of load balancer.                     |
+-----------------------+--------+------------------------------------------+
| listener_id           | string | ID of listener.                          |
+-----------------------+--------+------------------------------------------+
| members               | list   | A list of `Member objects`_.             |
+-----------------------+--------+------------------------------------------+
| name                  | string | Human-readable name of the pool.         |
+-----------------------+--------+------------------------------------------+
| pool_id               | string | ID of pool to create.                    |
+-----------------------+--------+------------------------------------------+
| project_id            | string | ID of the project owning this resource.  |
+-----------------------+--------+------------------------------------------+
| protocol              | string | Protocol type: One of HTTP, HTTPS,       |
|                       |        | PROXY, or TCP.                           |
+-----------------------+--------+------------------------------------------+
| session_persistence   | dict   | Defines session persistence as one of    |
|                       |        | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>}  |
|                       |        | OR                                       |
|                       |        | {'type': 'APP_COOKIE',                   |
|                       |        | 'cookie_name': <cookie name>}            |
+-----------------------+--------+------------------------------------------+
| tls_container_data    | dict   | A `TLS container`_ dict.                 |
+-----------------------+--------+------------------------------------------+
| tls_container_ref     | string | The reference to the secrets container.  |
+-----------------------+--------+------------------------------------------+
| tls_enabled           | bool   | True when backend re-encryption is       |
|                       |        | enabled.                                 |
+-----------------------+--------+------------------------------------------+

Delete
^^^^^^

Removes an existing pool and all of its members. Octavia will pass the pool
object as a parameter.
The pool will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. Update ^^^^^^ Modifies an existing pool using the values supplied in the pool object. Octavia will pass in the original pool object which is the baseline for the update, and a pool object with the fields to be updated. Fields not updated by the user will contain "Unset" as defined in the data model. As of the writing of this specification the update pool object may contain the following: +-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | ca_tls_container_data | string | A PEM encoded certificate. | +-----------------------+--------+------------------------------------------+ | ca_tls_container_ref | string | The reference to the secrets | | | | container. | +-----------------------+--------+------------------------------------------+ | crl_container_data | string | A PEM encoded CRL file. | +-----------------------+--------+------------------------------------------+ | crl_container_ref | string | The reference to the secrets | | | | container. | +-----------------------+--------+------------------------------------------+ | description | string | A human-readable description for the | | | | pool. | +-----------------------+--------+------------------------------------------+ | lb_algorithm | string | Load balancing algorithm: One of | | | | ROUND_ROBIN, LEAST_CONNECTIONS, or | | | | SOURCE_IP. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the pool. | +-----------------------+--------+------------------------------------------+ | pool_id | string | ID of pool to update. | +-----------------------+--------+------------------------------------------+ | session_persistence | dict | Defines session persistence as one of | | | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} | | | | OR | | | | {'type': 'APP_COOKIE', | | | | 'cookie_name': } | +-----------------------+--------+------------------------------------------+ | tls_container_data | dict | A `TLS container`_ dict. | +-----------------------+--------+------------------------------------------+ | tls_container_ref | string | The reference to the secrets | | | | container. | +-----------------------+--------+------------------------------------------+ | tls_enabled | bool | True when backend re-encryption is | | | | enabled. | +-----------------------+--------+------------------------------------------+ The pool will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the pool to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not successful. The driver is expected to validate that the driver supports the request. The method will then return or raise an exception if the request cannot be accepted. **Abstract class definition** .. code-block:: python class Driver(object): def pool_create(self, pool): """Creates a new pool. :param pool (object): The pool object. 
:return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()

        def pool_delete(self, pool):
            """Deletes a pool and its members.

            :param pool (object): The pool object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def pool_update(self, old_pool, new_pool):
            """Updates a pool.

            :param old_pool (object): The baseline pool object.
            :param new_pool (object): The updated pool object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()
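For illustration, the two ``session_persistence`` shapes accepted by the pool
create and update objects above look like the following as plain
dictionaries. The cookie name shown is a hypothetical value:

.. code-block:: python

    # The two accepted shapes for session_persistence (cookie name is
    # illustrative).
    sp_source_ip = {"type": "SOURCE_IP"}
    sp_app_cookie = {"type": "APP_COOKIE", "cookie_name": "JSESSIONID"}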
Member
------

Create
^^^^^^

Creates a member for a pool. Octavia will pass in the member object with all
requested settings.

The member will be in the ``PENDING_CREATE`` provisioning_status and
``OFFLINE`` operating_status when it is passed to the driver. The driver
will be responsible for updating the provisioning status of the member to
either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

The Octavia API will accept and do basic API validation of the create
request from the user. The member python object representing the request
body will be passed to the driver create method as it was received and
validated with the following exceptions:

1. The project_id will be removed, if present, as this field is now
   deprecated. The member will inherit the project_id from the parent load
   balancer.

.. _Member objects:

**Member object**

As of the writing of this specification the create member object may contain
the following:

+-----------------------+--------+------------------------------------------+
| Name                  | Type   | Description                              |
+=======================+========+==========================================+
| address               | string | The IP address of the backend member to  |
|                       |        | receive traffic from the load balancer.  |
+-----------------------+--------+------------------------------------------+
| admin_state_up        | bool   | Admin state: True if up, False if down.  |
+-----------------------+--------+------------------------------------------+
| backup                | bool   | Is the member a backup? Backup members   |
|                       |        | only receive traffic when all non-backup |
|                       |        | members are down.                        |
+-----------------------+--------+------------------------------------------+
| member_id             | string | ID of member to create.                  |
+-----------------------+--------+------------------------------------------+
| monitor_address       | string | An alternate IP address used for health  |
|                       |        | monitoring a backend member.             |
+-----------------------+--------+------------------------------------------+
| monitor_port          | int    | An alternate protocol port used for      |
|                       |        | health monitoring a backend member.      |
+-----------------------+--------+------------------------------------------+
| name                  | string | Human-readable name of the member.       |
+-----------------------+--------+------------------------------------------+
| pool_id               | string | ID of pool.                              |
+-----------------------+--------+------------------------------------------+
| project_id            | string | ID of the project owning this resource.  |
+-----------------------+--------+------------------------------------------+
| protocol_port         | int    | The port on which the backend member     |
|                       |        | listens for traffic.                     |
+-----------------------+--------+------------------------------------------+
| subnet_id             | string | Subnet ID.                               |
+-----------------------+--------+------------------------------------------+
| weight                | int    | The weight of a member determines the    |
|                       |        | portion of requests or connections it    |
|                       |        | services compared to the other members   |
|                       |        | of the pool. For example, a member with  |
|                       |        | a weight of 10 receives five times as    |
|                       |        | many requests as a member with a weight  |
|                       |        | of 2. A value of 0 means the member does |
|                       |        | not receive new connections but          |
|                       |        | continues to service existing            |
|                       |        | connections. A valid value is from 0 to  |
|                       |        | 256. Default is 1.                       |
+-----------------------+--------+------------------------------------------+
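The ``weight`` semantics in the table above lend themselves to a quick sanity
check. This short sketch (illustrative only, not Octavia code) computes the
share of new connections each member would receive; the addresses and weights
are invented:

.. code-block:: python

    # Illustrative only: translating member weights into traffic share.
    members = {"192.0.2.16:80": 10, "192.0.2.17:80": 2}
    total_weight = sum(members.values())
    for address, weight in members.items():
        print(f"{address} receives {weight / total_weight:.0%} "
              "of new connections")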
Delete
^^^^^^

Removes a pool member. Octavia will pass the member object as a parameter.
The member will be in the ``PENDING_DELETE`` provisioning_status when it is
passed to the driver. The driver will notify Octavia that the delete was
successful by setting the provisioning_status to ``DELETED``. If the delete
failed, the driver will update the provisioning_status to ``ERROR``.

Update
^^^^^^

Modifies an existing member using the values supplied in the member object.
Octavia will pass in the original member object which is the baseline for
the update, and a member object with the fields to be updated. Fields not
updated by the user will contain "Unset" as defined in the data model.

As of the writing of this specification the update member object may contain
the following:

+-----------------------+--------+------------------------------------------+
| Name                  | Type   | Description                              |
+=======================+========+==========================================+
| admin_state_up        | bool   | Admin state: True if up, False if down.  |
+-----------------------+--------+------------------------------------------+
| backup                | bool   | Is the member a backup? Backup members   |
|                       |        | only receive traffic when all non-backup |
|                       |        | members are down.                        |
+-----------------------+--------+------------------------------------------+
| member_id             | string | ID of member to update.                  |
+-----------------------+--------+------------------------------------------+
| monitor_address       | string | An alternate IP address used for health  |
|                       |        | monitoring a backend member.             |
+-----------------------+--------+------------------------------------------+
| monitor_port          | int    | An alternate protocol port used for      |
|                       |        | health monitoring a backend member.      |
+-----------------------+--------+------------------------------------------+
| name                  | string | Human-readable name of the member.       |
+-----------------------+--------+------------------------------------------+
| weight                | int    | The weight of a member determines the    |
|                       |        | portion of requests or connections it    |
|                       |        | services compared to the other members   |
|                       |        | of the pool. For example, a member with  |
|                       |        | a weight of 10 receives five times as    |
|                       |        | many requests as a member with a weight  |
|                       |        | of 2. A value of 0 means the member does |
|                       |        | not receive new connections but          |
|                       |        | continues to service existing            |
|                       |        | connections. A valid value is from 0 to  |
|                       |        | 256. Default is 1.                       |
+-----------------------+--------+------------------------------------------+

The member will be in the ``PENDING_UPDATE`` provisioning_status when it is
passed to the driver. The driver will update the provisioning_status of the
member to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the
update was not successful.

The driver is expected to validate that the driver supports the request. The
method will then return or raise an exception if the request cannot be
accepted.

Batch Update
^^^^^^^^^^^^

Set the state of members for a pool in one API call. This may include
creating new members, deleting old members, and updating existing members.
Existing members are matched based on address/port combination.

For example, assume a pool currently has two members. These members have the
following address/port combinations: '192.0.2.15:80' and '192.0.2.16:80'.
Now assume a PUT request is made that includes members with address/port
combinations: '192.0.2.16:80' and '192.0.2.17:80'. The member '192.0.2.15:80'
will be deleted, because it was not in the request. The member
'192.0.2.16:80' will be updated to match the request data for that member,
because it was matched. The member '192.0.2.17:80' will be created, because
no such member existed.

The members will be in the ``PENDING_CREATE``, ``PENDING_UPDATE``, or
``PENDING_DELETE`` provisioning_status when they are passed to the driver.
The driver will update the provisioning_status of the members to either
``ACTIVE`` or ``DELETED`` if successfully updated, or ``ERROR`` if the
update was not successful.

The batch update method will supply a list of `Member objects`_. Existing
members not in this list should be deleted, existing members in the list
should be updated, and members in the list that do not already exist should
be created.

**Abstract class definition**

.. code-block:: python

    class Driver(object):
        def member_create(self, member):
            """Creates a new member for a pool.

            :param member (object): The member object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()

        def member_delete(self, member):
            """Deletes a pool member.

            :param member (object): The member object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def member_update(self, old_member, new_member):
            """Updates a pool member.

            :param old_member (object): The baseline member object.
            :param new_member (object): The updated member object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()

        def member_batch_update(self, pool_id, members):
            """Creates, updates, or deletes a set of pool members.

            :param pool_id (string): The id of the pool to update.
            :param members (list): List of member objects.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()
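A minimal sketch of the address/port matching rule described under Batch
Update, using the same example members (pure illustration, not driver code):

.. code-block:: python

    # Partition a batch request into create/update/delete sets keyed on
    # (address, port), as described above.
    existing = {("192.0.2.15", 80), ("192.0.2.16", 80)}
    requested = {("192.0.2.16", 80), ("192.0.2.17", 80)}

    to_delete = existing - requested   # {("192.0.2.15", 80)}
    to_update = existing & requested   # {("192.0.2.16", 80)}
    to_create = requested - existing   # {("192.0.2.17", 80)}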
Health Monitor
--------------

Create
^^^^^^

Creates a health monitor on a pool. Octavia will pass in the health monitor
object with all requested settings.

The health monitor will be in the ``PENDING_CREATE`` provisioning_status and
``OFFLINE`` operating_status when it is passed to the driver. The driver
will be responsible for updating the provisioning status of the health
monitor to either ``ACTIVE`` if successfully created, or ``ERROR`` if not
created.

The Octavia API will accept and do basic API validation of the create
request from the user. The healthmonitor python object representing the
request body will be passed to the driver create method as it was received
and validated with the following exceptions:

1. The project_id will be removed, if present, as this field is now
   deprecated. The health monitor will inherit the project_id from the
   parent load balancer.

.. _Healthmonitor object:

**Healthmonitor object**

+-----------------------+--------+------------------------------------------+
| Name                  | Type   | Description                              |
+=======================+========+==========================================+
| admin_state_up        | bool   | Admin state: True if up, False if down.  |
+-----------------------+--------+------------------------------------------+
| delay                 | int    | The interval, in seconds, between health |
|                       |        | checks.                                  |
+-----------------------+--------+------------------------------------------+
| domain_name           | string | The domain name to be passed in the host |
|                       |        | header for health monitor checks.        |
+-----------------------+--------+------------------------------------------+
| expected_codes        | string | The expected HTTP status codes to get    |
|                       |        | from a successful health check. This may |
|                       |        | be a single value, a list, or a range.   |
+-----------------------+--------+------------------------------------------+
| healthmonitor_id      | string | ID of health monitor to create.          |
+-----------------------+--------+------------------------------------------+
| http_method           | string | The HTTP method that the health monitor  |
|                       |        | uses for requests. One of CONNECT,       |
|                       |        | DELETE, GET, HEAD, OPTIONS, PATCH, POST, |
|                       |        | PUT, or TRACE.                           |
+-----------------------+--------+------------------------------------------+
| http_version          | float  | The HTTP version to use for health       |
|                       |        | monitor connections. One of '1.0' or     |
|                       |        | '1.1'. Defaults to '1.0'.                |
+-----------------------+--------+------------------------------------------+
| max_retries           | int    | The number of successful checks before   |
|                       |        | changing the operating status of the     |
|                       |        | member to ONLINE.                        |
+-----------------------+--------+------------------------------------------+
| max_retries_down      | int    | The number of allowed check failures     |
|                       |        | before changing the operating status of  |
|                       |        | the member to ERROR. A valid value is    |
|                       |        | from 1 to 10.                            |
+-----------------------+--------+------------------------------------------+
| name                  | string | Human-readable name of the monitor.      |
+-----------------------+--------+------------------------------------------+
| pool_id               | string | The pool to monitor.                     |
+-----------------------+--------+------------------------------------------+
| project_id            | string | ID of the project owning this resource.  |
+-----------------------+--------+------------------------------------------+
| timeout               | int    | The time, in seconds, after which a      |
|                       |        | health check times out. This value must  |
|                       |        | be less than the delay value.            |
+-----------------------+--------+------------------------------------------+
| type                  | string | The type of health monitor. One of HTTP, |
|                       |        | HTTPS, PING, TCP, TLS-HELLO or           |
|                       |        | UDP-CONNECT.                             |
+-----------------------+--------+------------------------------------------+
| url_path              | string | The HTTP URL path of the request sent by |
|                       |        | the monitor to test the health of a      |
|                       |        | backend member. Must be a string that    |
|                       |        | begins with a forward slash (/).         |
+-----------------------+--------+------------------------------------------+
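To make the table concrete, here is a hypothetical HTTP health monitor
expressed as a plain dictionary. All values, including the IDs, are
illustrative:

.. code-block:: python

    # Hypothetical HTTP health monitor; field names from the table above.
    healthmonitor = {
        "healthmonitor_id": "hm-example-id",   # illustrative ID
        "pool_id": "pool-example-id",          # illustrative ID
        "type": "HTTP",
        "http_method": "GET",
        "http_version": 1.1,
        "url_path": "/healthz",                # must begin with a slash
        "expected_codes": "200-204",           # single value, list, or range
        "delay": 5,                            # seconds between checks
        "timeout": 3,                          # must be less than delay
        "max_retries": 3,
        "max_retries_down": 3,
        "admin_state_up": True,
    }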
Delete
^^^^^^

Deletes an existing health monitor. Octavia will pass in the health monitor
object as a parameter. The health monitor will be in the ``PENDING_DELETE``
provisioning_status when it is passed to the driver. The driver will notify
Octavia that the delete was successful by setting the provisioning_status to
``DELETED``. If the delete failed, the driver will update the
provisioning_status to ``ERROR``.

Update
^^^^^^

Modifies an existing health monitor using the values supplied in the health
monitor object. Octavia will pass in the original health monitor object
which is the baseline for the update, and a health monitor object with the
fields to be updated. Fields not updated by the user will contain "Unset"
as defined in the data model.

As of the writing of this specification the update health monitor object may
contain the following:

+-----------------------+--------+------------------------------------------+
| Name                  | Type   | Description                              |
+=======================+========+==========================================+
| admin_state_up        | bool   | Admin state: True if up, False if down.  |
+-----------------------+--------+------------------------------------------+
| delay                 | int    | The interval, in seconds, between health |
|                       |        | checks.                                  |
+-----------------------+--------+------------------------------------------+
| domain_name           | string | The domain name to be passed in the host |
|                       |        | header for health monitor checks.        |
+-----------------------+--------+------------------------------------------+
| expected_codes        | string | The expected HTTP status codes to get    |
|                       |        | from a successful health check. This may |
|                       |        | be a single value, a list, or a range.   |
+-----------------------+--------+------------------------------------------+
| healthmonitor_id      | string | ID of health monitor to update.          |
+-----------------------+--------+------------------------------------------+
| http_method           | string | The HTTP method that the health monitor  |
|                       |        | uses for requests. One of CONNECT,       |
|                       |        | DELETE, GET, HEAD, OPTIONS, PATCH, POST, |
|                       |        | PUT, or TRACE.                           |
+-----------------------+--------+------------------------------------------+
| http_version          | float  | The HTTP version to use for health       |
|                       |        | monitor connections. One of '1.0' or     |
|                       |        | '1.1'. Defaults to '1.0'.                |
+-----------------------+--------+------------------------------------------+
| max_retries           | int    | The number of successful checks before   |
|                       |        | changing the operating status of the     |
|                       |        | member to ONLINE.                        |
+-----------------------+--------+------------------------------------------+
| max_retries_down      | int    | The number of allowed check failures     |
|                       |        | before changing the operating status of  |
|                       |        | the member to ERROR. A valid value is    |
|                       |        | from 1 to 10.                            |
+-----------------------+--------+------------------------------------------+
| name                  | string | Human-readable name of the monitor.      |
+-----------------------+--------+------------------------------------------+
| timeout               | int    | The time, in seconds, after which a      |
|                       |        | health check times out. This value must  |
|                       |        | be less than the delay value.            |
+-----------------------+--------+------------------------------------------+
| url_path              | string | The HTTP URL path of the request sent by |
|                       |        | the monitor to test the health of a      |
|                       |        | backend member. Must be a string that    |
|                       |        | begins with a forward slash (/).         |
+-----------------------+--------+------------------------------------------+

The health monitor will be in the ``PENDING_UPDATE`` provisioning_status
when it is passed to the driver. The driver will update the
provisioning_status of the health monitor to either ``ACTIVE`` if
successfully updated, or ``ERROR`` if the update was not successful.

The driver is expected to validate that the driver supports the request.
The method will then return or raise an exception if the request cannot be
accepted.

**Abstract class definition**

.. code-block:: python

    class Driver(object):
        def health_monitor_create(self, healthmonitor):
            """Creates a new health monitor.

            :param healthmonitor (object): The health monitor object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()

        def health_monitor_delete(self, healthmonitor):
            """Deletes a health monitor.

            :param healthmonitor (object): The health monitor object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
            """Updates a health monitor.

            :param old_healthmonitor (object): The baseline health monitor
              object.
            :param new_healthmonitor (object): The updated health monitor
              object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()

L7 Policy
---------

Create
^^^^^^

Creates an L7 policy. Octavia will pass in the L7 policy object with all
requested settings.

The L7 policy will be in the ``PENDING_CREATE`` provisioning_status and
``OFFLINE`` operating_status when it is passed to the driver. The driver
will be responsible for updating the provisioning status of the L7 policy
to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

The Octavia API will accept and do basic API validation of the create
request from the user. The l7policy python object representing the request
body will be passed to the driver create method as it was received and
validated with the following exceptions:

1. The project_id will be removed, if present, as this field is now
   deprecated. The l7policy will inherit the project_id from the parent
   load balancer.

..
_L7policy objects: **L7policy object** As of the writing of this specification the create l7policy object may contain the following: +-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | action | string | The L7 policy action. One of | | | | REDIRECT_TO_POOL, REDIRECT_TO_URL, or | | | | REJECT. | +-----------------------+--------+------------------------------------------+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | description | string | A human-readable description for the | | | | L7 policy. | +-----------------------+--------+------------------------------------------+ | l7policy_id | string | The ID of the L7 policy. | +-----------------------+--------+------------------------------------------+ | listener_id | string | The ID of the listener. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the L7 policy. | +-----------------------+--------+------------------------------------------+ | position | int | The position of this policy on the | | | | listener. Positions start at 1. | +-----------------------+--------+------------------------------------------+ | project_id | string | ID of the project owning this resource. | +-----------------------+--------+------------------------------------------+ | redirect_http_code | int | The HTTP status code to be returned on | | | | a redirect policy. | +-----------------------+--------+------------------------------------------+ | redirect_pool_id | string | Requests matching this policy will be | | | | redirected to the pool with this ID. | | | | Only valid if action is REDIRECT_TO_POOL.| +-----------------------+--------+------------------------------------------+ | redirect_prefix | string | Requests matching this policy will be | | | | redirected to this Prefix URL. Only | | | | valid if ``action`` is | | | | ``REDIRECT_PREFIX``. | +-----------------------+--------+------------------------------------------+ | redirect_url | string | Requests matching this policy will be | | | | redirected to this URL. Only valid if | | | | action is REDIRECT_TO_URL. | +-----------------------+--------+------------------------------------------+ | rules | list | A list of l7rule objects. | +-----------------------+--------+------------------------------------------+ *Creating a Fully Populated L7 policy* If the "rules" option is specified, the provider driver will create all of the child objects in addition to creating the L7 policy instance. Delete ^^^^^^ Deletes an existing L7 policy. Octavia will pass in the L7 policy object as a parameter. The l7policy will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. Update ^^^^^^ Modifies an existing L7 policy using the values supplied in the l7policy object. Octavia will pass in the original L7 policy object which is the baseline for the update, and an L7 policy object with the fields to be updated. Fields not updated by the user will contain "Unset" as defined in the data model. 
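Since fields a user did not touch arrive as "Unset", a driver typically
filters them out before applying an update. A minimal sketch, under the
assumption that the data model exposes an ``UnsetType`` sentinel; the names
``UnsetType``, ``Unset``, and ``updated_fields`` here are illustrative, not
a stable interface:

.. code-block:: python

    class UnsetType(object):
        """Illustrative stand-in for the data model's Unset sentinel."""

    Unset = UnsetType()

    def updated_fields(new_object):
        # Keep only the fields the user actually set on the update object.
        return {name: value for name, value in vars(new_object).items()
                if not isinstance(value, UnsetType)}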
As of the writing of this specification the update L7 policy object may
contain the following:

+-----------------------+--------+------------------------------------------+
| Name                  | Type   | Description                              |
+=======================+========+==========================================+
| action                | string | The L7 policy action. One of             |
|                       |        | REDIRECT_TO_POOL, REDIRECT_TO_URL, or    |
|                       |        | REJECT.                                  |
+-----------------------+--------+------------------------------------------+
| admin_state_up        | bool   | Admin state: True if up, False if down.  |
+-----------------------+--------+------------------------------------------+
| description           | string | A human-readable description for the     |
|                       |        | L7 policy.                               |
+-----------------------+--------+------------------------------------------+
| l7policy_id           | string | The ID of the L7 policy.                 |
+-----------------------+--------+------------------------------------------+
| name                  | string | Human-readable name of the L7 policy.    |
+-----------------------+--------+------------------------------------------+
| position              | int    | The position of this policy on the       |
|                       |        | listener. Positions start at 1.          |
+-----------------------+--------+------------------------------------------+
| redirect_http_code    | int    | The HTTP status code to be returned on   |
|                       |        | a redirect policy.                       |
+-----------------------+--------+------------------------------------------+
| redirect_pool_id      | string | Requests matching this policy will be    |
|                       |        | redirected to the pool with this ID.     |
|                       |        | Only valid if action is                  |
|                       |        | REDIRECT_TO_POOL.                        |
+-----------------------+--------+------------------------------------------+
| redirect_prefix       | string | Requests matching this policy will be    |
|                       |        | redirected to this Prefix URL. Only      |
|                       |        | valid if ``action`` is                   |
|                       |        | ``REDIRECT_PREFIX``.                     |
+-----------------------+--------+------------------------------------------+
| redirect_url          | string | Requests matching this policy will be    |
|                       |        | redirected to this URL. Only valid if    |
|                       |        | action is REDIRECT_TO_URL.               |
+-----------------------+--------+------------------------------------------+

The L7 policy will be in the ``PENDING_UPDATE`` provisioning_status when it
is passed to the driver. The driver will update the provisioning_status of
the L7 policy to either ``ACTIVE`` if successfully updated, or ``ERROR`` if
the update was not successful.

The driver is expected to validate that the driver supports the request.
The method will then return or raise an exception if the request cannot be
accepted.

**Abstract class definition**

.. code-block:: python

    class Driver(object):
        def l7policy_create(self, l7policy):
            """Creates a new L7 policy.

            :param l7policy (object): The l7policy object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()

        def l7policy_delete(self, l7policy):
            """Deletes an L7 policy.

            :param l7policy (object): The l7policy object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def l7policy_update(self, old_l7policy, new_l7policy):
            """Updates an L7 policy.

            :param old_l7policy (object): The baseline l7policy object.
            :param new_l7policy (object): The updated l7policy object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one
              of the configuration options.
            """
            raise NotImplementedError()
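For illustration, a redirect policy built from the fields in the tables
above might look like the following as a plain dictionary. All values,
including the IDs and the URL, are hypothetical:

.. code-block:: python

    # Hypothetical REDIRECT_TO_URL policy; field names from the tables above.
    l7policy = {
        "l7policy_id": "policy-example-id",      # illustrative ID
        "listener_id": "listener-example-id",    # illustrative ID
        "action": "REDIRECT_TO_URL",
        "redirect_url": "https://www.example.com/maintenance",
        "redirect_http_code": 302,
        "position": 1,
        "admin_state_up": True,
    }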
L7 Rule
-------

Create
^^^^^^

Creates a new L7 rule for an existing L7 policy. Octavia will pass in the
L7 rule object with all requested settings.

The L7 rule will be in the ``PENDING_CREATE`` provisioning_status and
``OFFLINE`` operating_status when it is passed to the driver. The driver
will be responsible for updating the provisioning status of the L7 rule to
either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

The Octavia API will accept and do basic API validation of the create
request from the user. The l7rule python object representing the request
body will be passed to the driver create method as it was received and
validated with the following exceptions:

1. The project_id will be removed, if present, as this field is now
   deprecated. The L7 rule will inherit the project_id from the parent load
   balancer.

.. _L7rule objects:

**L7rule object**

As of the writing of this specification the create l7rule object may contain
the following:

+-----------------------+--------+------------------------------------------+
| Name                  | Type   | Description                              |
+=======================+========+==========================================+
| admin_state_up        | bool   | Admin state: True if up, False if down.  |
+-----------------------+--------+------------------------------------------+
| compare_type          | string | The comparison type for the L7 rule. One |
|                       |        | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, |
|                       |        | or STARTS_WITH.                          |
+-----------------------+--------+------------------------------------------+
| invert                | bool   | When True the logic of the rule is       |
|                       |        | inverted. For example, with invert True, |
|                       |        | equal to would become not equal to.      |
+-----------------------+--------+------------------------------------------+
| key                   | string | The key to use for the comparison. For   |
|                       |        | example, the name of the cookie to       |
|                       |        | evaluate.                                |
+-----------------------+--------+------------------------------------------+
| l7policy_id           | string | The ID of the L7 policy.                 |
+-----------------------+--------+------------------------------------------+
| l7rule_id             | string | The ID of the L7 rule.                   |
+-----------------------+--------+------------------------------------------+
| project_id            | string | ID of the project owning this resource.  |
+-----------------------+--------+------------------------------------------+
| type                  | string | The L7 rule type. One of COOKIE,         |
|                       |        | FILE_TYPE, HEADER, HOST_NAME, or PATH.   |
+-----------------------+--------+------------------------------------------+
| value                 | string | The value to use for the comparison. For |
|                       |        | example, the file type to compare.       |
+-----------------------+--------+------------------------------------------+

Delete
^^^^^^

Deletes an existing L7 rule. Octavia will pass in the L7 rule object as a
parameter. The L7 rule will be in the ``PENDING_DELETE`` provisioning_status
when it is passed to the driver. The driver will notify Octavia that the
delete was successful by setting the provisioning_status to ``DELETED``. If
the delete failed, the driver will update the provisioning_status to
``ERROR``.
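The ``compare_type`` and ``invert`` semantics from the create table above
can be summarized in a few lines. This is an illustrative sketch of the
matching logic, not Octavia's implementation; REGEX is omitted for brevity:

.. code-block:: python

    def rule_matches(compare_type, candidate, rule_value, invert=False):
        # Evaluate an L7 rule the way the compare_type table describes it.
        checks = {
            "CONTAINS": rule_value in candidate,
            "STARTS_WITH": candidate.startswith(rule_value),
            "ENDS_WITH": candidate.endswith(rule_value),
            "EQUAL_TO": candidate == rule_value,
        }
        result = checks[compare_type]
        return not result if invert else result

    # invert=True turns "equal to" into "not equal to", as described above.
    assert rule_matches("EQUAL_TO", "index.html", "index.html")
    assert rule_matches("EQUAL_TO", "index.html", "home.html", invert=True)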
Update ^^^^^^ Modifies an existing L7 rule using the values supplied in the l7rule object. Octavia will pass in the original L7 rule object which is the baseline for the update, and an L7 rule object with the fields to be updated. Fields not updated by the user will contain "Unset" as defined in the data model. As of the writing of this specification the update L7 rule object may contain the following: +-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | compare_type | string | The comparison type for the L7 rule. One | | | | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, | | | | or STARTS_WITH. | +-----------------------+--------+------------------------------------------+ | invert | bool | When True the logic of the rule is | | | | inverted. For example, with invert True, | | | | equal to would become not equal to. | +-----------------------+--------+------------------------------------------+ | key | string | The key to use for the comparison. For | | | | example, the name of the cookie to | | | | evaluate. | +-----------------------+--------+------------------------------------------+ | l7rule_id | string | The ID of the L7 rule. | +-----------------------+--------+------------------------------------------+ | type | string | The L7 rule type. One of COOKIE, | | | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | +-----------------------+--------+------------------------------------------+ | value | string | The value to use for the comparison. For | | | | example, the file type to compare. | +-----------------------+--------+------------------------------------------+ The L7 rule will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the L7 rule to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not successful. The driver is expected to validate that the driver supports the request. The method will then return or raise an exception if the request cannot be accepted. **Abstract class definition** .. code-block:: python class Driver(object): def l7rule_create(self, l7rule): """Creates a new L7 rule. :param l7rule (object): The L7 rule object. :return: Nothing if the create request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: if driver does not support request. :raises UnsupportedOptionError: if driver does not support one of the configuration options. """ raise NotImplementedError() def l7rule_delete(self, l7rule): """Deletes an L7 rule. :param l7rule (object): The L7 rule object. :return: Nothing if the delete request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: if driver does not support request. """ raise NotImplementedError() def l7rule_update(self, old_l7rule, new_l7rule): """Updates an L7 rule. :param old_l7rule (object): The baseline L7 rule object. :param new_l7rule (object): The updated L7 rule object. :return: Nothing if the update request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: if driver does not support request. :raises UnsupportedOptionError: if driver does not support one of the configuration options. 
""" raise NotImplementedError() Flavor ------ Octavia flavors are defined in a separate `flavor specification`_. Support for flavors will be provided through two provider driver interfaces, one to query supported flavor metadata keys and another to validate that a flavor is supported. Both functions are synchronous. .. _flavor specification: ../specs/version1.0/flavors.html get_supported_flavor_metadata ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Retrieves a dictionary of supported flavor keys and their description. For example: .. code-block:: python {"topology": "The load balancer topology for the flavor. One of: SINGLE, ACTIVE_STANDBY", "compute_flavor": "The compute driver flavor to use for the load balancer instances"} validate_flavor ^^^^^^^^^^^^^^^ Validates that the driver supports the flavor metadata dictionary. The validate_flavor method will be passed a flavor metadata dictionary that the driver will validate. This is used when an operator uploads a new flavor that applies to the driver. The validate_flavor method will either return or raise a ``UnsupportedOptionError`` exception. Following are interface definitions for flavor support: .. code-block:: python def get_supported_flavor_metadata(): """Returns a dictionary of flavor metadata keys supported by this driver. The returned dictionary will include key/value pairs, 'name' and 'description.' :returns: The flavor metadata dictionary :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: The driver does not support flavors. """ raise NotImplementedError() .. code-block:: python def validate_flavor(flavor_metadata): """Validates if driver can support flavor as defined in flavor_metadata. :param flavor_metadata (dict): Dictionary with flavor metadata. :return: Nothing if the flavor is valid and supported. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: The driver does not support flavors. :raises UnsupportedOptionError: if driver does not support one of the configuration options. """ raise NotImplementedError() Availability Zone ----------------- Octavia availability zones have no explicit spec, but are modeled closely after the existing `flavor specification`_. Support for availability_zones will be provided through two provider driver interfaces, one to query supported availability zone metadata keys and another to validate that an availability zone is supported. Both functions are synchronous. get_supported_availability_zone_metadata ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Retrieves a dictionary of supported availability zone keys and their description. For example: .. code-block:: python {"compute_zone": "The compute availability zone to use for this loadbalancer.", "management_network": "The management network ID for the loadbalancer.", "valid_vip_networks": "List of network IDs that are allowed for VIP use. This overrides/replaces the list of allowed networks configured in `octavia.conf`."} validate_availability_zone ^^^^^^^^^^^^^^^^^^^^^^^^^^ Validates that the driver supports the availability zone metadata dictionary. The validate_availability_zone method will be passed an availability zone metadata dictionary that the driver will validate. This is used when an operator uploads a new availability zone that applies to the driver. The validate_availability_zone method will either return or raise a ``UnsupportedOptionError`` exception. Following are interface definitions for availability zone support: .. 
Following are interface definitions for availability zone support:

.. code-block:: python

    def get_supported_availability_zone_metadata():
        """Returns a dict of supported availability zone metadata keys.

        The returned dictionary will include key/value pairs, 'name' and
        'description.'

        :returns: The availability zone metadata dictionary
        :raises DriverError: An unexpected error occurred in the driver.
        :raises NotImplementedError: The driver does not support AZs.
        """
        raise NotImplementedError()

.. code-block:: python

    def validate_availability_zone(availability_zone_metadata):
        """Validates if driver can support the availability zone.

        :param availability_zone_metadata: Dictionary with az metadata.
        :type availability_zone_metadata: dict
        :return: Nothing if the availability zone is valid and supported.
        :raises DriverError: An unexpected error occurred in the driver.
        :raises NotImplementedError: The driver does not support
          availability zones.
        :raises UnsupportedOptionError: if driver does not support one of
          the configuration options.
        """
        raise NotImplementedError()

Exception Model
---------------

DriverError
^^^^^^^^^^^

This is a catch-all exception that drivers can return if there is an
unexpected error. An example might be a delete call for a load balancer the
driver does not recognize. This exception includes two strings: the user
fault string and the optional operator fault string. The user fault string,
"user_fault_string", will be provided to the API requester. The operator
fault string, "operator_fault_string", will be logged in the Octavia API
log file for the operator to use when debugging.

.. code-block:: python

    class DriverError(Exception):
        user_fault_string = _("An unknown driver error occurred.")
        operator_fault_string = _("An unknown driver error occurred.")

        def __init__(self, *args, **kwargs):
            self.user_fault_string = kwargs.pop('user_fault_string',
                                                self.user_fault_string)
            self.operator_fault_string = kwargs.pop('operator_fault_string',
                                                    self.operator_fault_string)
            super(DriverError, self).__init__(*args, **kwargs)

NotImplementedError
^^^^^^^^^^^^^^^^^^^

Driver implementations may not support all operations, and are free to
reject a request. If the driver does not implement an API function, the
driver will raise a NotImplementedError exception.

.. code-block:: python

    class NotImplementedError(Exception):
        user_fault_string = _("A feature is not implemented by this driver.")
        operator_fault_string = _("A feature is not implemented by this driver.")

        def __init__(self, *args, **kwargs):
            self.user_fault_string = kwargs.pop('user_fault_string',
                                                self.user_fault_string)
            self.operator_fault_string = kwargs.pop('operator_fault_string',
                                                    self.operator_fault_string)
            super(NotImplementedError, self).__init__(*args, **kwargs)

UnsupportedOptionError
^^^^^^^^^^^^^^^^^^^^^^

Provider drivers will validate that they can complete the request -- that
all options are supported by the driver. If the request fails validation,
drivers will raise an UnsupportedOptionError exception. For example, if a
driver does not support a flavor passed as an option to load balancer
create(), the driver will raise an UnsupportedOptionError and include a
message parameter providing an explanation of the failure.
.. code-block:: python

    class UnsupportedOptionError(Exception):
        user_fault_string = _("A specified option is not supported by this driver.")
        operator_fault_string = _("A specified option is not supported by this driver.")

        def __init__(self, *args, **kwargs):
            self.user_fault_string = kwargs.pop('user_fault_string',
                                                self.user_fault_string)
            self.operator_fault_string = kwargs.pop('operator_fault_string',
                                                    self.operator_fault_string)
            super(UnsupportedOptionError, self).__init__(*args, **kwargs)

Driver Support Library
======================

Provider drivers need support for updating provisioning status, operating
status, and statistics. Drivers will not directly use database operations,
and instead will callback to octavia-lib using a new API.

.. warning::

  The methods listed here are the only callable methods for drivers. All
  other interfaces are not considered stable or safe for drivers to access.
  See `Stable Provider Driver Interface`_ for a list of acceptable APIs for
  provider driver use.

.. warning::

  This library is interim and will be removed when the driver support
  endpoint is made available. At which point drivers will not import any
  code from octavia-lib.

Update Provisioning and Operating Status API
--------------------------------------------

The update status API defined below can be used by provider drivers to
update the provisioning and/or operating status of Octavia resources (load
balancer, listener, pool, member, health monitor, L7 policy, or L7 rule).

For the following status API, valid values for provisioning status and
operating status parameters are as defined by Octavia status codes. If an
existing object is not included in the input parameter, the status remains
unchanged.

.. note::

  If the driver-agent exceeds its configured `status_max_processes` this
  call may block while it waits for a status process slot to become
  available. The operator will be notified if the driver-agent approaches
  or reaches the configured limit.

provisioning_status: status associated with lifecycle of the resource. See
`Octavia Provisioning Status Codes `_.

operating_status: the observed status of the resource. See `Octavia
Operating Status Codes `_.

The dictionary takes this form:

.. code-block:: python

    {"loadbalancers": [{"id": "123",
                        "provisioning_status": "ACTIVE",
                        "operating_status": "ONLINE"},...],
     "healthmonitors": [],
     "l7policies": [],
     "l7rules": [],
     "listeners": [],
     "members": [],
     "pools": []}

.. code-block:: python

    def update_loadbalancer_status(status):
        """Update load balancer status.

        :param status (dict): dictionary defining the provisioning status
          and operating status for load balancer objects, including pools,
          members, listeners, L7 policies, and L7 rules.
        :raises: UpdateStatusError
        :returns: None
        """
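A hedged usage example of the call above: a driver reporting that a load
balancer delete completed. The object ID is illustrative, and resource
types with no updates are passed as empty lists, as shown in the dictionary
form above:

.. code-block:: python

    # Report a completed delete for one load balancer; other resource
    # types carry no updates in this call.
    status = {
        "loadbalancers": [{"id": "123", "provisioning_status": "DELETED"}],
        "healthmonitors": [],
        "l7policies": [],
        "l7rules": [],
        "listeners": [],
        "members": [],
        "pools": [],
    }
    update_loadbalancer_status(status)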
Update Statistics API
---------------------

Provider drivers can update statistics for listeners using the following
API. Similar to the status function above, a single dictionary with
multiple listener statistics is used to update statistics in a single call.
If an existing listener is not included, the statistics for that object
remain unchanged.

.. note::

  If the driver-agent exceeds its configured `stats_max_processes` this
  call may block while it waits for a stats process slot to become
  available. The operator will be notified if the driver-agent approaches
  or reaches the configured limit.

The general form of the input dictionary is a list of listener statistics:

.. code-block:: python

    {"listeners": [{"id": "123",
                    "active_connections": 12,
                    "bytes_in": 238908,
                    "bytes_out": 290234,
                    "request_errors": 0,
                    "total_connections": 3530},...]}

.. code-block:: python

    def update_listener_statistics(statistics):
        """Update listener statistics.

        :param statistics (dict): Statistics for listeners:
          id (string): ID of the listener.
          active_connections (int): Number of currently active connections.
          bytes_in (int): Total bytes received.
          bytes_out (int): Total bytes sent.
          request_errors (int): Total requests not fulfilled.
          total_connections (int): The total connections handled.
        :raises: UpdateStatisticsError
        :returns: None
        """

Get Resource Support
--------------------

Provider drivers may need to get information about an Octavia resource. As
an example of its use, a provider driver may need to sync with Octavia, and
therefore need to fetch all of the Octavia resources it is responsible for
managing. Provider drivers can use the existing Octavia API to get these
resources. See the `Octavia API Reference `_.

API Exception Model
-------------------

The driver support API will include the following exceptions:

* UpdateStatusError
* UpdateStatisticsError
* DriverAgentNotFound
* DriverAgentTimeout

Each exception class will include a message field that describes the error
and references to the failed record if available.

.. code-block:: python

    class UpdateStatusError(Exception):
        fault_string = _("The status update had an unknown error.")
        status_object = None
        status_object_id = None
        status_record = None

        def __init__(self, *args, **kwargs):
            self.fault_string = kwargs.pop('fault_string', self.fault_string)
            self.status_object = kwargs.pop('status_object', None)
            self.status_object_id = kwargs.pop('status_object_id', None)
            self.status_record = kwargs.pop('status_record', None)
            super(UpdateStatusError, self).__init__(self.fault_string,
                                                    *args, **kwargs)

    class UpdateStatisticsError(Exception):
        fault_string = _("The statistics update had an unknown error.")
        stats_object = None
        stats_object_id = None
        stats_record = None

        def __init__(self, *args, **kwargs):
            self.fault_string = kwargs.pop('fault_string', self.fault_string)
            self.stats_object = kwargs.pop('stats_object', None)
            self.stats_object_id = kwargs.pop('stats_object_id', None)
            self.stats_record = kwargs.pop('stats_record', None)
            super(UpdateStatisticsError, self).__init__(self.fault_string,
                                                        *args, **kwargs)

    class DriverAgentNotFound(Exception):
        fault_string = _("The driver-agent process was not found or not ready.")

        def __init__(self, *args, **kwargs):
            self.fault_string = kwargs.pop('fault_string', self.fault_string)
            super(DriverAgentNotFound, self).__init__(self.fault_string,
                                                      *args, **kwargs)

    class DriverAgentTimeout(Exception):
        fault_string = _("The driver-agent timeout.")

        def __init__(self, *args, **kwargs):
            self.fault_string = kwargs.pop('fault_string', self.fault_string)
            super(DriverAgentTimeout, self).__init__(self.fault_string,
                                                     *args, **kwargs)

Provider Agents
===============

Provider agents are long-running processes started by the Octavia
driver-agent process at start up. They are intended to allow provider
drivers a long running process that can handle periodic jobs for the
provider driver or receive events from another provider agent. Provider
agents are optional and not required for a successful Octavia provider
driver.

Provider Agents have access to the same `Stable Provider Driver Interface`_
as the provider driver. A provider agent must not access any other Octavia
code.
Provider Agents
===============

Provider agents are long-running processes started by the Octavia
driver-agent process at start up. They give a provider driver a
long-running process that can handle periodic jobs for the provider driver
or receive events from another provider agent. Provider agents are
optional; they are not required for a successful Octavia provider driver.

Provider agents have access to the same `Stable Provider Driver
Interface`_ as the provider driver. A provider agent must not access any
other Octavia code.

.. warning::

   The methods listed in the `Driver Support Library`_ section are the only
   Octavia callable methods for provider agents. All other interfaces are
   not considered stable or safe for provider agents to access. See `Stable
   Provider Driver Interface`_ for a list of acceptable APIs for provider
   agent use.

Declaring Your Provider Agent
-----------------------------

The Octavia driver-agent will use `stevedore `_ to load enabled provider
agents at start up. Provider agents are enabled in the Octavia
configuration file. Provider agents that are installed, but not enabled,
will not be loaded. An example configuration file entry for a provider
agent is:

.. code-block:: INI

    [driver_agent]
    enabled_provider_agents = amphora_agent, noop_agent

The provider agent name must match the provider agent name declared in
your Python setuptools entry point. For example:

.. code-block:: python

    octavia.driver_agent.provider_agents =
        amphora_agent = octavia.api.drivers.amphora_driver.agent:AmphoraProviderAgent
        noop_agent = octavia.api.drivers.noop_driver.agent:noop_provider_agent

Provider Agent Method Invocation
--------------------------------

On start up of the Octavia driver-agent, the method defined in the entry
point will be launched in its own `multiprocessing Process `_. Your
provider agent method will be passed a `multiprocessing Event `_ that will
be used to signal that the provider agent should shut down. When this
event is "set", the provider agent should shut down gracefully. If the
provider agent fails to exit within the Octavia configuration file setting
"provider_agent_shutdown_timeout" period, the driver-agent will forcefully
shut down the provider agent with a SIGKILL signal.

Example Provider Agent Method
-----------------------------

If, for example, you declared a provider agent as "my_agent":

.. code-block:: python

    octavia.driver_agent.provider_agents =
        my_agent = example_inc.drivers.my_driver.agent:my_provider_agent

The signature of your "my_provider_agent" method would be:

.. code-block:: python

    def my_provider_agent(exit_event):
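A minimal, illustrative body for such a method could look like the sketch
below; the periodic-work helper is hypothetical, and ``exit_event`` is the
``multiprocessing.Event`` passed in by the driver-agent:

.. code-block:: python

    def my_provider_agent(exit_event):
        # Loop until the driver-agent signals shutdown by setting the event.
        while not exit_event.is_set():
            do_periodic_provider_work()  # hypothetical helper
            # Sleep up to 30 seconds, waking early if shutdown is signaled.
            exit_event.wait(30)
        # Returning promptly here avoids the SIGKILL that follows the
        # provider_agent_shutdown_timeout period.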
Documenting the Driver
======================

Octavia provides two documents to let operators and users know about
available drivers and their features.

Available Provider Drivers
--------------------------

The :doc:`../../admin/providers/index` document provides administrators
with a guide to the available Octavia provider drivers. Since provider
drivers are not included in the Octavia source repositories, this guide is
an important tool for administrators to find your provider driver.

You can submit information for your provider driver by submitting a patch
to the Octavia documentation following the normal OpenStack process. See
the `OpenStack Contributor Guide `_ for more information on submitting a
patch to OpenStack.

Octavia Provider Feature Matrix
-------------------------------

The Octavia documentation includes a
:doc:`../../user/feature-classification/index` that informs users about
which Octavia features are supported by each provider driver.

The feature matrices are built using the `Oslo
sphinx-feature-classification `_ library. This allows a simple INI file
format for describing the capabilities of an Octavia provider driver.

Each driver should define a [driver.<driver name>] section and then add a
line to each feature specifying the level of support the provider driver
provides for the feature.

For example, the Amphora driver support for "admin_state_up" would add the
following to the feature-matrix-lb.ini file.

.. code-block:: INI

    [driver.amphora]
    title=Amphora Provider
    link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html

    [operation.admin_state_up]
    ...
    driver.amphora=complete

Valid driver feature support statuses are:

``complete``
  Fully implemented, expected to work at all times.

``partial``
  Implemented, but with caveats about when it will work.

``missing``
  Not implemented at all.

You can also optionally provide additional, provider driver specific notes
for users by defining a "driver-notes.<driver name>" line.

.. code-block:: INI

    [operation.admin_state_up]
    ...
    driver.amphora=complete
    driver-notes.amphora=The Amphora driver fully supports admin_state_up.

Driver notes are highly recommended when a provider driver declares a
``partial`` status.

octavia-6.2.2/doc/source/contributor/guides/remote-debugger.rst

..
   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

======================
Debugging Octavia code
======================

This document describes how to set up and debug Octavia code using your
favorite IDE (e.g. PyCharm, Visual Studio Code).

Prerequisites
=============

* Octavia installed.
* IDE installed and Octavia added as project.

Setup
=====

Ensure your OpenStack and IDE environments have the PyDev or ptvsd library
installed. If you're using PyCharm, you can find it in
*/path/to/pycharm/debug-eggs/pycharm-debug.egg* (Python 2) and
*/path/to/pycharm/debug-eggs/pycharm-debug-py3k.egg* (Python 3). Copy that
file into your OpenStack host and install the library in your Python path:

::

    $ sudo easy_install pycharm-debug.egg

If using Visual Studio Code, simply install ptvsd in both environments:

::

    $ pip install ptvsd

Create a remote debugging configuration in your IDE. In PyCharm, go to
*Run -> Edit Configurations -> Python Remote Debug*. The local host name
refers to the local machine you're running your IDE from and it must be
one reachable by your OpenStack environment. The port can be any available
port (e.g. 5678). If the code on the OpenStack and PyCharm hosts is on
different paths (likely), define a path mapping in the remote debug
configuration.

Invoke the debug configuration (*Run -> Debug... -> (config name)*).
PyCharm will begin listening on the specified host and port.

Export *DEBUGGER_TYPE*, *DEBUGGER_HOST* and *DEBUGGER_PORT* (host and port
of the system running the IDE, respectively), and start the Octavia
service you want to debug. It is recommended to run only one uWSGI
process/controller worker.
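Under the hood, setting these variables causes the service to attach to
the listening IDE at start up. For the pydev case, the attach is roughly
equivalent to this illustrative sketch (the exact wiring inside Octavia
may differ):

.. code-block:: python

    # Illustrative sketch of a pydev attach using the exported variables.
    import os

    import pydevd

    pydevd.settrace(os.environ['DEBUGGER_HOST'],
                    port=int(os.environ['DEBUGGER_PORT']),
                    stdoutToServer=True, stderrToServer=True)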
For example, to debug the Octavia Worker service: :: $ export DEBUGGER_TYPE=pydev $ export DEBUGGER_HOST=192.168.121.1 $ export DEBUGGER_PORT=5678 $ /usr/bin/octavia-worker --config-file /etc/octavia/octavia.conf Another example is debugging the Octavia API service with the ptvsd debugger: :: $ export DEBUGGER_TYPE=ptvsd $ export DEBUGGER_HOST=192.168.121.1 $ export DEBUGGER_PORT=5678 $ /usr/bin/uwsgi --ini /etc/octavia/octavia-uwsgi.ini -p 1 The service will connect to your IDE, at which point remote debugging is active. Resume the program on the debugger to continue with the initialization of the service. At this point, the service should be operational and you can start debugging. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/contributor/index.rst0000664000175000017500000000320500000000000021452 0ustar00zuulzuul00000000000000=================== Octavia Contributor =================== Contributor Guidelines ---------------------- .. toctree:: :glob: :maxdepth: 1 contributing.rst CONSTITUTION.rst HACKING.rst Contributor Reference --------------------- .. toctree:: :glob: :maxdepth: 1 guides/* devref/* Internal APIs ------------- .. toctree:: :glob: :maxdepth: 1 api/* .. Note:: The documents listed below are design documents and specifications created and approved at a previous point in time. The code base and current functionality may deviate from these original documents. Please see the Octavia documentation for the current feature details. Design Documentation -------------------- Version 0.5 (liberty) ````````````````````` .. toctree:: :glob: :maxdepth: 1 design/version0.5/* Project Specifications ---------------------- Version 0.5 (liberty) ````````````````````` .. toctree:: :glob: :maxdepth: 1 specs/version0.5/* Version 0.8 (mitaka) ```````````````````` .. toctree:: :glob: :maxdepth: 1 specs/version0.8/* Version 0.9 (newton) ```````````````````` .. toctree:: :glob: :maxdepth: 1 specs/version0.9/* Version 1.0 (pike) `````````````````` .. toctree:: :glob: :maxdepth: 1 specs/version1.0/* Version 1.1 (queens) ```````````````````` .. toctree:: :glob: :maxdepth: 1 specs/version1.1/* .. only:: latex Module Reference ---------------- .. toctree:: :hidden: modules/modules .. only:: html Indices and Search ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/index.rst0000664000175000017500000000147400000000000017106 0ustar00zuulzuul00000000000000.. octavia documentation master file ===================== Octavia Documentation ===================== Welcome to the OpenStack Octavia project documentation. Octavia brings network load balancing to OpenStack. See :doc:`reference/introduction` for an overview of Octavia. For information on what is new see the `Octavia Release Notes `_. .. only:: html To align with the overall OpenStack documentation, the Octavia documentation is grouped into the following topic areas. .. toctree:: :maxdepth: 1 admin/index.rst Octavia API Reference cli/index.rst configuration/index.rst contributor/index.rst install/index.rst reference/index.rst user/index.rst .. 
only:: html

   :ref:`search`

octavia-6.2.2/doc/source/install/index.rst

====================
Octavia Installation
====================

.. toctree::
   :maxdepth: 1

   Installation overview guide <../contributor/guides/dev-quick-start>
   install.rst

octavia-6.2.2/doc/source/install/install-amphorav2.rst

.. _install-amphorav2:

Additional configuration steps to configure amphorav2 provider
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you would like to use the amphorav2 provider for the load-balancer
service, the following additional steps are required.

Prerequisites
-------------

The amphorav2 provider requires the creation of an additional database,
``octavia_persistence``, to store information about the state of tasks and
the progress of their execution. To monitor the progress of taskflow jobs,
the amphorav2 provider also uses a jobboard. Either Redis or Zookeeper
key-value storage can be used as the jobboard backend; the operator should
choose whichever is preferable for the specific cloud. The default is
Redis.

1. To create the database, complete these steps:

   * Use the database access client to connect to the database server as
     the ``root`` user:

     .. code-block:: console

        # mysql

   * Create the ``octavia_persistence`` database:

     .. code-block:: console

        CREATE DATABASE octavia_persistence;

   * Grant proper access to the ``octavia_persistence`` database:

     .. code-block:: console

        GRANT ALL PRIVILEGES ON octavia_persistence.* TO 'octavia'@'localhost' \
        IDENTIFIED BY 'OCTAVIA_DBPASS';
        GRANT ALL PRIVILEGES ON octavia_persistence.* TO 'octavia'@'%' \
        IDENTIFIED BY 'OCTAVIA_DBPASS';

     Replace OCTAVIA_DBPASS with a suitable password.

2. Install the desired key-value backend (Redis or Zookeeper).

Additional configuration to octavia components
----------------------------------------------

1. Edit the ``/etc/octavia/octavia.conf`` file ``[task_flow]`` section

   * Configure database access for the persistence backend:

     .. code-block:: ini

        [task_flow]
        persistence_connection = mysql+pymysql://octavia:OCTAVIA_DBPASS@controller/octavia_persistence

     Replace OCTAVIA_DBPASS with the password you chose for the Octavia
     databases.

   * Set the desired jobboard backend and its configuration:

     .. code-block:: ini

        [task_flow]
        jobboard_backend_driver = 'redis_taskflow_driver'
        jobboard_backend_hosts = KEYVALUE_HOST_IPS
        jobboard_backend_port = KEYVALUE_PORT
        jobboard_backend_password = OCTAVIA_JOBBOARDPASS
        jobboard_backend_namespace = 'octavia_jobboard'

     Replace OCTAVIA_JOBBOARDPASS with the password you chose for the
     Octavia key-value storage. Replace KEYVALUE_HOST_IPS and KEYVALUE_PORT
     with the IP addresses and port that the chosen key-value storage is
     using.

2. Populate the octavia database:

..
code-block:: console # octavia-db-manage --config-file /etc/octavia/octavia.conf upgrade_persistence ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/install/install-ubuntu.rst0000664000175000017500000004244500000000000022436 0ustar00zuulzuul00000000000000.. _install-ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Load-balancer service for Ubuntu 18.04 (LTS). Prerequisites ------------- Before you install and configure the service, you must create a database, service credentials, and API endpoints. 1. Create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console # mysql * Create the ``octavia`` database: .. code-block:: console CREATE DATABASE octavia; * Grant proper access to the ``octavia`` database: .. code-block:: console GRANT ALL PRIVILEGES ON octavia.* TO 'octavia'@'localhost' \ IDENTIFIED BY 'OCTAVIA_DBPASS'; GRANT ALL PRIVILEGES ON octavia.* TO 'octavia'@'%' \ IDENTIFIED BY 'OCTAVIA_DBPASS'; Replace OCTAVIA_DBPASS with a suitable password. * Exit the database access client. .. code-block:: console exit; 2. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc 3. To create the Octavia service credentials, complete these steps: * Create the ``octavia`` user: .. code-block:: console $ openstack user create --domain default --password-prompt octavia User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | b18ee38e06034b748141beda8fc8bfad | | name | octavia | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ * Add the ``admin`` role to the ``octavia`` user: .. code-block:: console $ openstack role add --project service --user octavia admin .. note:: This command produces no output. .. note:: The Octavia service does not require the full admin role. Details of how to run Octavia without the admin role will come in a future version of this document. * Create the octavia service entities: .. code-block:: console $ openstack service create --name octavia --description "OpenStack Octavia" load-balancer +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Octavia | | enabled | True | | id | d854f6fff0a64f77bda8003c8dedfada | | name | octavia | | type | load-balancer | +-------------+----------------------------------+ 4. Create the Load-balancer service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ load-balancer public http://controller:9876 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 47cf883de46242c39f147c52f2958ebf | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | d854f6fff0a64f77bda8003c8dedfada | | service_name | octavia | | service_type | load-balancer | | url | http://controller:9876 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ load-balancer internal http://controller:9876 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 225aef8465ef4df48a341aaaf2b0a390 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | d854f6fff0a64f77bda8003c8dedfada | | service_name | octavia | | service_type | load-balancer | | url | http://controller:9876 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ load-balancer admin http://controller:9876 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 375eb5057fb546edbdf3ee4866179672 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | d854f6fff0a64f77bda8003c8dedfada | | service_name | octavia | | service_type | load-balancer | | url | http://controller:9876 | +--------------+----------------------------------+ 5. Create octavia-openrc file .. code-block:: console cat << EOF >> $HOME/octavia-openrc export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=service export OS_USERNAME=octavia export OS_PASSWORD=OCTAVIA_PASS export OS_AUTH_URL=http://controller:5000 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2 export OS_VOLUME_API_VERSION=3 EOF Replace OCTAVIA_PASS with the password you chose for the octavia user in the Identity service. 6. Source the ``octavia`` credentials to gain access to octavia CLI commands: .. code-block:: console $ . $HOME/octavia-openrc 7. Create the amphora image For creating amphora image, please refer to the `Building Octavia Amphora Images `_. 8. Upload the amphora image .. code-block:: console $ openstack image create --disk-format qcow2 --container-format bare \ --private --tag amphora \ --file amphora-x64-haproxy 9. Create a flavor for the amphora image .. code-block:: console $ openstack flavor create --id 200 --vcpus 1 --ram 1024 \ --disk 2 "amphora" --private Install and configure components -------------------------------- 1. Install the packages: .. code-block:: console # apt install octavia-api octavia-health-manager octavia-housekeeping \ octavia-worker python3-octavia python3-octaviaclient If octavia-common and octavia-api packages ask you to configure, choose No. 2. Create the certificates .. 
code-block:: console

      $ git clone https://opendev.org/openstack/octavia.git
      $ cd octavia/bin/
      $ source create_dual_intermediate_CA.sh
      $ sudo mkdir -p /etc/octavia/certs/private
      $ sudo chmod 755 /etc/octavia -R
      $ sudo cp -p etc/octavia/certs/server_ca.cert.pem /etc/octavia/certs
      $ sudo cp -p etc/octavia/certs/server_ca-chain.cert.pem /etc/octavia/certs
      $ sudo cp -p etc/octavia/certs/server_ca.key.pem /etc/octavia/certs/private
      $ sudo cp -p etc/octavia/certs/client_ca.cert.pem /etc/octavia/certs
      $ sudo cp -p etc/octavia/certs/client.cert-and-key.pem /etc/octavia/certs/private

   For the production environment, please refer to the `Octavia
   Certificate Configuration Guide `_.

3. Source the ``octavia`` credentials to gain access to octavia CLI
   commands:

   .. code-block:: console

      $ . octavia-openrc

4. Create security groups and their rules

   .. code-block:: console

      $ openstack security group create lb-mgmt-sec-grp
      $ openstack security group rule create --protocol icmp lb-mgmt-sec-grp
      $ openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp
      $ openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp
      $ openstack security group create lb-health-mgr-sec-grp
      $ openstack security group rule create --protocol udp --dst-port 5555 lb-health-mgr-sec-grp

5. Create a key pair for logging in to the amphora instance

   .. code-block:: console

      $ openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey

   .. note::

      Check in advance whether the "~/.ssh/id_rsa.pub" file exists. If it
      does not, run the ssh-keygen command to create it.

6. Create dhclient.conf file for dhclient

   .. code-block:: console

      $ cd $HOME
      $ sudo mkdir -m755 -p /etc/dhcp/octavia
      $ sudo cp octavia/etc/dhcp/dhclient.conf /etc/dhcp/octavia

7. Create a network

   .. note::

      During the execution of the commands below, please save the values
      of BRNAME and MGMT_PORT_MAC in a notepad for further reference.

..
code-block:: console $ OCTAVIA_MGMT_SUBNET=172.16.0.0/12 $ OCTAVIA_MGMT_SUBNET_START=172.16.0.100 $ OCTAVIA_MGMT_SUBNET_END=172.16.31.254 $ OCTAVIA_MGMT_PORT_IP=172.16.0.2 $ openstack network create lb-mgmt-net $ openstack subnet create --subnet-range $OCTAVIA_MGMT_SUBNET --allocation-pool \ start=$OCTAVIA_MGMT_SUBNET_START,end=$OCTAVIA_MGMT_SUBNET_END \ --network lb-mgmt-net lb-mgmt-subnet $ SUBNET_ID=$(openstack subnet show lb-mgmt-subnet -f value -c id) $ PORT_FIXED_IP="--fixed-ip subnet=$SUBNET_ID,ip-address=$OCTAVIA_MGMT_PORT_IP" $ MGMT_PORT_ID=$(openstack port create --security-group \ lb-health-mgr-sec-grp --device-owner Octavia:health-mgr \ --host=$(hostname) -c id -f value --network lb-mgmt-net \ $PORT_FIXED_IP octavia-health-manager-listen-port) $ MGMT_PORT_MAC=$(openstack port show -c mac_address -f value \ $MGMT_PORT_ID) $ MGMT_PORT_IP=$(openstack port show -f yaml -c fixed_ips \ $MGMT_PORT_ID | awk '{FS=",|";gsub(",","");gsub("'\''",""); \ for(line = 1; line <= NF; ++line) {if ($line ~ /^- ip_address:/) \ {split($line, word, " ");if (ENVIRON["IPV6_ENABLED"] == "" && word[3] ~ /\./) \ print word[3];if (ENVIRON["IPV6_ENABLED"] != "" && word[3] ~ /:/) print word[3];} \ else {split($line, word, " ");for(ind in word) {if (word[ind] ~ /^ip_address=/) \ {split(word[ind], token, "=");if (ENVIRON["IPV6_ENABLED"] == "" && token[2] ~ /\./) \ print token[2];if (ENVIRON["IPV6_ENABLED"] != "" && token[2] ~ /:/) print token[2];}}}}}') $ sudo ip link add o-hm0 type veth peer name o-bhm0 $ NETID=$(openstack network show lb-mgmt-net -c id -f value) $ BRNAME=brq$(echo $NETID|cut -c 1-11) $ sudo brctl addif $BRNAME o-bhm0 $ sudo ip link set o-bhm0 up $ sudo ip link set dev o-hm0 address $MGMT_PORT_MAC $ sudo iptables -I INPUT -i o-hm0 -p udp --dport 5555 -j ACCEPT $ sudo dhclient -v o-hm0 -cf /etc/dhcp/octavia 8. Below settings are required to create veth pair after the host reboot Edit the ``/etc/systemd/network/o-hm0.network`` file .. code-block:: ini [Match] Name=o-hm0 [Network] DHCP=yes Edit the ``/etc/systemd/system/octavia-interface.service`` file .. code-block:: ini [Unit] Description=Octavia Interface Creator Requires=neutron-linuxbridge-agent.service After=neutron-linuxbridge-agent.service [Service] Type=oneshot RemainAfterExit=true ExecStart=/opt/octavia-interface.sh start ExecStop=/opt/octavia-interface.sh stop [Install] WantedBy=multi-user.target Edit the ``/opt/octavia-interface.sh`` file .. code-block:: console #!/bin/bash set -ex MAC=$MGMT_PORT_MAC BRNAME=$BRNAME if [ "$1" == "start" ]; then ip link add o-hm0 type veth peer name o-bhm0 brctl addif $BRNAME o-bhm0 ip link set o-bhm0 up ip link set dev o-hm0 address $MAC ip link set o-hm0 up iptables -I INPUT -i o-hm0 -p udp --dport 5555 -j ACCEPT elif [ "$1" == "stop" ]; then ip link del o-hm0 else brctl show $BRNAME ip a s dev o-hm0 fi You need to substitute $MGMT_PORT_MAC and $BRNAME for the values in your environment. 9. Edit the ``/etc/octavia/octavia.conf`` file * In the ``[database]`` section, configure database access: .. code-block:: ini [database] connection = mysql+pymysql://octavia:OCTAVIA_DBPASS@controller/octavia Replace OCTAVIA_DBPASS with the password you chose for the Octavia databases. * In the ``[DEFAULT]`` section, configure the transport url for RabbitMQ message broker. .. code-block:: ini [DEFAULT] transport_url = rabbit://openstack:RABBIT_PASS@controller Replace RABBIT_PASS with the password you chose for the openstack account in RabbitMQ. 
* In the ``[oslo_messaging]`` section, configure the topic name:

  .. code-block:: ini

     [oslo_messaging]
     ...
     topic = octavia_prov

* In the ``[api_settings]`` section, configure the host IP and port to
  bind to:

  .. code-block:: ini

     [api_settings]
     bind_host = 0.0.0.0
     bind_port = 9876

* In the ``[keystone_authtoken]`` section, configure Identity service
  access:

  .. code-block:: ini

     [keystone_authtoken]
     www_authenticate_uri = http://controller:5000
     auth_url = http://controller:5000
     memcached_servers = controller:11211
     auth_type = password
     project_domain_name = Default
     user_domain_name = Default
     project_name = service
     username = octavia
     password = OCTAVIA_PASS

  Replace OCTAVIA_PASS with the password you chose for the octavia user in
  the Identity service.

* In the ``[service_auth]`` section, configure credentials for using other
  OpenStack services:

  .. code-block:: ini

     [service_auth]
     auth_url = http://controller:5000
     memcached_servers = controller:11211
     auth_type = password
     project_domain_name = Default
     user_domain_name = Default
     project_name = service
     username = octavia
     password = OCTAVIA_PASS

  Replace OCTAVIA_PASS with the password you chose for the octavia user in
  the Identity service.

* In the ``[certificates]`` section, configure the absolute path to the CA
  certificate, the private key for signing, and the passphrases:

  .. code-block:: ini

     [certificates]
     ...
     server_certs_key_passphrase = insecure-key-do-not-use-this-key
     ca_private_key_passphrase = not-secure-passphrase
     ca_private_key = /etc/octavia/certs/private/server_ca.key.pem
     ca_certificate = /etc/octavia/certs/server_ca.cert.pem

  .. note::

     The values of ca_private_key_passphrase and
     server_certs_key_passphrase above are defaults and should not be used
     in production. The server_certs_key_passphrase must be a
     base64-compatible string that is 32 characters long.

* In the ``[haproxy_amphora]`` section, configure the client certificate
  and the CA:

  .. code-block:: ini

     [haproxy_amphora]
     ...
     server_ca = /etc/octavia/certs/server_ca-chain.cert.pem
     client_cert = /etc/octavia/certs/private/client.cert-and-key.pem

* In the ``[health_manager]`` section, configure the IP and port number
  for heartbeat:

  .. code-block:: ini

     [health_manager]
     ...
     bind_port = 5555
     bind_ip = 172.16.0.2
     controller_ip_port_list = 172.16.0.2:5555

* In the ``[controller_worker]`` section, configure worker settings:

  .. code-block:: ini

     [controller_worker]
     ...
     amp_image_owner_id = <service project ID>
     amp_image_tag = amphora
     amp_ssh_key_name = mykey
     amp_secgroup_list = <lb-mgmt-sec-grp ID>
     amp_boot_network_list = <lb-mgmt-net ID>
     amp_flavor_id = 200
     network_driver = allowed_address_pairs_driver
     compute_driver = compute_nova_driver
     amphora_driver = amphora_haproxy_rest_driver
     client_ca = /etc/octavia/certs/client_ca.cert.pem

10. Populate the octavia database:

    .. code-block:: console

       # octavia-db-manage --config-file /etc/octavia/octavia.conf upgrade head

Finalize installation
---------------------

Restart the services:

.. code-block:: console

   # systemctl restart octavia-api octavia-health-manager octavia-housekeeping octavia-worker

octavia-6.2.2/doc/source/install/install.rst

..
_install: Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Load-balancer service, code-named Octavia, on the controller node. This section assumes that you already have a working OpenStack environment with at least the following components installed: Identity Service, Image Service, Placement Service, Compute Service, and Networking Service. Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 2 install-ubuntu.rst install-amphorav2.rst ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3622167 octavia-6.2.2/doc/source/reference/0000775000175000017500000000000000000000000017175 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/reference/glossary.rst0000664000175000017500000001766500000000000021611 0ustar00zuulzuul00000000000000================ Octavia Glossary ================ As the Octavia project evolves, it's important that people working on Octavia, users using Octavia, and operators deploying Octavia use a common set of terminology in order to avoid misunderstandings and confusion. To that end, we are providing the following glossary of terms. Note also that many of these terms are expanded upon in design documents in this same repository. What follows is a brief but necessarily incomplete description of these terms. .. glossary:: :sorted: Amphora Virtual machine, container, dedicated hardware, appliance or device that actually performs the task of load balancing in the Octavia system. More specifically, an amphora takes requests from clients on the front-end and distributes these to back-end systems. Amphorae communicate with their controllers over the LB Network through a driver interface on the controller. Amphora Load Balancer Driver Component of the controller that does all the communication with amphorae. Drivers communicate with the controller through a generic base class and associated methods, and translate these into control commands appropriate for whatever type of software is running on the back-end amphora corresponding with the driver. This communication happens over the LB network. Apolocation Term used to describe when two or more amphorae are not colocated on the same physical hardware (which is often essential in HA topologies). May also be used to describe two or more loadbalancers which are not colocated on the same amphora. Controller Daemon with access to both the LB Network and OpenStack components which coordinates and manages the overall activity of the Octavia load balancing system. Controllers will usually use an abstracted driver interface (usually a base class) for communicating with various other components in the OpenStack environment in order to facilitate loose coupling with these other components. These are the "brains" of the Octavia system. HAProxy Load balancing software used in the reference implementation for Octavia. (See http://www.haproxy.org/ ). HAProxy processes run on amphorae and actually accomplish the task of delivering the load balancing service. Health Monitor An object that defines a check method for each member of the pool. The health monitor itself is a pure-db object which describes the method the load balancing software on the amphora should use to monitor the health of back-end members of the pool with which the health monitor is associated. 
L7 Policy Layer 7 Policy Collection of L7 rules that get logically ANDed together as well as a routing policy for any given HTTP or terminated HTTPS client requests which match said rules. An L7 Policy is associated with exactly one HTTP or terminated HTTPS listener. For example, a user could specify an L7 policy that any client request that matches the L7 rule "request URI starts with '/api'" should get routed to the "api" pool. L7 Rule Layer 7 Rule Single logical expression used to match a condition present in a given HTTP or terminated HTTPS request. L7 rules typically match against a specific header or part of the URI and are used in conjunction with L7 policies to accomplish L7 switching. An L7 rule is associated with exactly one L7 policy. For example, a user could specify an L7 rule that matches any request URI path that begins with "/api" L7 Switching Layer 7 Switching This is a load balancing feature specific to HTTP or terminated HTTPS sessions, in which different client requests are routed to different back-end pools depending on one or more layer 7 policies the user might configure. For example, using L7 switching, a user could specify that any requests with a URI path that starts with "/api" get routed to the "api" back-end pool, and that all other requests get routed to the default pool. LB Network Load Balancer Network. The network over which the controller(s) and amphorae communicate. The LB network itself will usually be a nova or neutron network to which both the controller and amphorae have access, but is not associated with any one tenant. The LB Network is generally also *not* part of the undercloud and should not be directly exposed to any OpenStack core components other than the Octavia Controller. Listener Object representing the listening endpoint of a load balanced service. TCP / UDP port, as well as protocol information and other protocol- specific details are attributes of the listener. Notably, though, the IP address is not. Load Balancer Object describing a logical grouping of listeners on one or more VIPs and associated with one or more amphorae. (Our "Loadbalancer" most closely resembles a Virtual IP address in other load balancing implementations.) Whether the load balancer exists on more than one amphora depends on the topology used. The load balancer is also often the root object used in various Octavia APIs. Load Balancing The process of taking client requests on a front-end interface and distributing these to a number of back-end servers according to various rules. Load balancing allows for many servers to participate in delivering some kind TCP or UDP service to clients in an effectively transparent and often highly-available and scalable way (from the client's perspective). Member Object representing a single back-end server or system that is a part of a pool. A member is associated with only one pool. Octavia Octavia is an operator-grade open source load balancing solution. Also known as the Octavia system or Octavia project. The term by itself should be used to refer to the system as a whole and not any individual component within the Octavia load balancing system. Pool Object representing the grouping of members to which the listener forwards client requests. Note that a pool is associated with only one listener, but a listener might refer to several pools (and switch between them using layer 7 policies). 
TLS Termination Transport Layer Security Termination Type of load balancing protocol where HTTPS sessions are terminated (decrypted) on the amphora as opposed to encrypted packets being forwarded on to back-end servers without being decrypted on the amphora. Also known as SSL termination. The main advantages to this type of load balancing are that the payload can be read and / or manipulated by the amphora, and that the expensive tasks of handling the encryption are off-loaded from the back-end servers. This is particularly useful if layer 7 switching is employed in the same listener configuration. VIP Virtual IP Address Single service IP address which is associated with a load balancer. This is similar to what is described here: http://en.wikipedia.org/wiki/Virtual_IP_address In a highly available load balancing topology in Octavia, the VIP might be assigned to several amphorae, and a layer-2 protocol like CARP, VRRP, or HSRP (or something unique to the networking infrastructure) might be used to maintain its availability. In layer-3 (routed) topologies, the VIP address might be assigned to an upstream networking device which routes packets to amphorae, which then load balance requests to back-end members. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/reference/index.rst0000664000175000017500000000051300000000000021035 0ustar00zuulzuul00000000000000================= Octavia Reference ================= .. toctree:: :glob: :maxdepth: 1 * Command Line Interface Reference .. only:: html Indices and Search ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/reference/introduction.rst0000664000175000017500000001712100000000000022452 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 IBM Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================== Introducing Octavia =================== Welcome to Octavia! Octavia is an open source, operator-scale load balancing solution designed to work with OpenStack. Octavia was born out of the Neutron LBaaS project. Its conception influenced the transformation of the Neutron LBaaS project, as Neutron LBaaS moved from version 1 to version 2. Starting with the Liberty release of OpenStack, Octavia has become the reference implementation for Neutron LBaaS version 2. Octavia accomplishes its delivery of load balancing services by managing a fleet of virtual machines, containers, or bare metal servers—collectively known as *amphorae*\— which it spins up on demand. This on-demand, horizontal scaling feature differentiates Octavia from other load balancing solutions, thereby making Octavia truly suited "for the cloud." Where Octavia fits into the OpenStack ecosystem ----------------------------------------------- Load balancing is essential for enabling simple or automatic delivery scaling and availability. 
In turn, application delivery scaling and availability must be considered
vital features of any cloud. Together, these facts imply that load
balancing is a vital feature of any cloud. Therefore, we consider Octavia
to be as essential as Nova, Neutron, Glance or any other "core" project
that enables the essential features of a modern OpenStack cloud.

In accomplishing its role, Octavia makes use of other OpenStack projects:

* **Nova** - For managing amphora lifecycle and spinning up compute
  resources on demand.

* **Neutron** - For network connectivity between amphorae, tenant
  environments, and external networks.

* **Barbican** - For managing TLS certificates and credentials, when TLS
  session termination is configured on the amphorae.

* **Keystone** - For authentication against the Octavia API, and for
  Octavia to authenticate with other OpenStack projects.

* **Glance** - For storing the amphora virtual machine image.

* **Oslo** - For communication between Octavia controller components,
  making Octavia work within the standard OpenStack framework and review
  system, and project code structure.

* **Taskflow** - Is technically part of Oslo; however, Octavia makes
  extensive use of this job flow system when orchestrating back-end
  service configuration and management.

Octavia is designed to interact with the components listed previously. In
each case, we've taken care to define this interaction through a driver
interface. That way, external components can be swapped out with
functionally-equivalent replacements, without having to restructure major
components of Octavia. For example, if you use an SDN solution other than
Neutron in your environment, it should be possible for you to write an
Octavia networking driver for your SDN environment, which can be a drop-in
replacement for the standard Neutron networking driver in Octavia.

As of Pike, it is recommended to run Octavia as a standalone load
balancing solution. Neutron LBaaS is deprecated in the Queens release, and
Octavia is its replacement. Whenever possible, operators are **strongly**
advised to migrate to Octavia. For end-users, this transition should be
relatively seamless, because Octavia supports the Neutron LBaaS v2 API and
it has a similar CLI interface. Alternatively, if end-users cannot migrate
on their side in the foreseeable future, operators could enable the
experimental Octavia proxy plugin in Neutron LBaaS.

It is also possible to use Octavia as a Neutron LBaaS plugin, in the same
way as any other vendor. You can think of Octavia as an "open source
vendor" for Neutron LBaaS. Octavia supports third-party vendor drivers
just like Neutron LBaaS, and fully replaces Neutron LBaaS as the load
balancing solution for OpenStack.

For further information on OpenStack Neutron LBaaS deprecation, please
refer to https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation.

Octavia terminology
-------------------

Before you proceed further in this introduction, please note: Experience
shows that, within the subsegment of the IT industry that creates,
deploys, and uses load balancing devices or services, terminology is often
used inconsistently. To reduce confusion, the Octavia team has created a
glossary of terms as they are defined and used within the context of the
Octavia project and Neutron LBaaS version 2.
This glossary is available here: :doc:`glossary` If you are familiar with Neutron LBaaS version 1 terms and usage, it is especially important for you to understand how the meanings of the terms "VIP," "load balancer," and "load balancing," have changed in Neutron LBaaS version 2. Our use of these terms should remain consistent with the :doc:`glossary` throughout Octavia's documentation, in discussions held by Octavia team members on public mailing lists, in IRC channels, and at conferences. To avoid misunderstandings, it's a good idea to familiarize yourself with these glossary definitions. A 10,000-foot overview of Octavia components -------------------------------------------- .. image:: octavia-component-overview.svg :width: 660px :alt: Octavia Component Overview Octavia version 4.0 consists of the following major components: * **amphorae** - Amphorae are the individual virtual machines, containers, or bare metal servers that accomplish the delivery of load balancing services to tenant application environments. In Octavia version 0.8, the reference implementation of the amphorae image is an Ubuntu virtual machine running HAProxy. * **controller** - The Controller is the "brains" of Octavia. It consists of five sub-components, which are individual daemons. They can be run on separate back-end infrastructure if desired: * **API Controller** - As the name implies, this subcomponent runs Octavia's API. It takes API requests, performs simple sanitizing on them, and ships them off to the controller worker over the Oslo messaging bus. * **Controller Worker** - This subcomponent takes sanitized API commands from the API controller and performs the actions necessary to fulfill the API request. * **Health Manager** - This subcomponent monitors individual amphorae to ensure they are up and running, and otherwise healthy. It also handles failover events if amphorae fail unexpectedly. * **Housekeeping Manager** - This subcomponent cleans up stale (deleted) database records, manages the spares pool, and manages amphora certificate rotation. * **Driver Agent** - The driver agent receives status and statistics updates from provider drivers. * **network** - Octavia cannot accomplish what it does without manipulating the network environment. Amphorae are spun up with a network interface on the "load balancer network," and they may also plug directly into tenant networks to reach back-end pool members, depending on how any given load balancing service is deployed by the tenant. For a more complete description of Octavia's components, please see the :doc:`../contributor/design/version0.5/component-design` document within this documentation repository. 
octavia-6.2.2/doc/source/reference/octavia-component-overview.svg

[Figure: "Octavia Component Overview" diagram. It shows the controller
sub-components (Octavia API, Octavia Worker, Health Manager, Housekeeping
Manager, Driver Agent) and their drivers (Controller Worker Driver,
Certificate Driver, Compute Driver, Network Driver, Amphora Driver)
connected over Oslo Messaging to a database, to the external services
Nova, Neutron, and Barbican / Castellan, and to the amphorae.]

octavia-6.2.2/doc/source/user/feature-classification/feature-matrix-healthmonitor.ini

# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

[driver.amphora]
title=Amphora Provider
link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html

[driver.ovn]
title=OVN Provider
link=https://docs.openstack.org/networking-ovn/latest/admin/loadbalancer.html

# Note: These should be in the order they appear in a create call.

[operation.admin_state_up]
title=admin_state_up
status=mandatory
notes=Enables and disables the health monitor.
cli=openstack loadbalancer healthmonitor create [--enable | --disable]
driver.amphora=complete
driver.ovn=missing

[operation.delay]
title=delay
status=mandatory
notes=The time, in seconds, between sending probes to members.
cli=openstack loadbalancer healthmonitor create --delay driver.amphora=complete driver.ovn=missing [operation.domain_name] title=domain_name status=optional notes=The domain name, which be injected into the HTTP Host Header to the backend server for HTTP health check. cli=openstack loadbalancer healthmonitor create [--domain-name ] driver.amphora=complete driver.ovn=missing [operation.expected_codes] title=expected_codes status=optional notes=The list of HTTP status codes expected in response from the member to declare it healthy. cli=openstack loadbalancer healthmonitor create [--expected-codes ] driver.amphora=complete driver.ovn=missing [operation.http_method] title=http_method status=optional notes=The HTTP method that the health monitor uses for requests. cli=openstack loadbalancer healthmonitor create [--http-method ] driver.amphora=complete driver.ovn=missing [operation.http_version] title=http_version status=optional notes=The HTTP version to use for health checks. cli=openstack loadbalancer healthmonitor create [[--http-version ]] driver.amphora=complete driver.ovn=missing [operation.name] title=name status=optional notes=The name of the health monitor. Provided by the Octavia API service. cli=openstack loadbalancer healthmonitor create [--name ] driver.amphora=complete driver.ovn=missing [operation.max_retries] title=max_retries status=mandatory notes=The number of successful checks before changing the operating status of the member to ONLINE. cli=openstack loadbalancer healthmonitor create --max-retries driver.amphora=complete driver.ovn=missing [operation.max_retries_down] title=max_retries_down status=optional notes=The number of allowed check failures before changing the operating status of the member to ERROR. cli=openstack loadbalancer healthmonitor create [--max-retries-down ] driver.amphora=complete driver.ovn=missing [operation.tags] title=tags status=optional notes=The tags for the health monitor. Provided by the Octavia API service. cli=openstack loadbalancer healthmonitor create [--tag ] driver.amphora=complete driver.ovn=missing [operation.timeout] title=timeout status=mandatory notes=The maximum time, in seconds, that a monitor waits to connect before it times out. cli=openstack loadbalancer healthmonitor create --timeout driver.amphora=complete driver.ovn=missing [operation.type.HTTP] title=type - HTTP status=optional notes=Use HTTP for the health monitor. cli=openstack loadbalancer healthmonitor create --type HTTP driver.amphora=complete driver.ovn=missing [operation.type.HTTPS] title=type - HTTPS status=optional notes=Use HTTPS for the health monitor. cli=openstack loadbalancer healthmonitor create --type HTTPS driver.amphora=complete driver.ovn=missing [operation.type.PING] title=type - PING status=optional notes=Use PING for the health monitor. cli=openstack loadbalancer healthmonitor create --type PING driver.amphora=partial driver-notes.amphora=CentOS 7 based amphora do not support PING health monitors. driver.ovn=missing [operation.type.TCP] title=type - TCP status=optional notes=Use TCP for the health monitor. cli=openstack loadbalancer healthmonitor create --type TCP driver.amphora=complete driver.ovn=missing [operation.type.TLS-HELLO] title=type - TLS-HELLO status=optional notes=Use TLS-HELLO handshake for the health monitor. cli=openstack loadbalancer healthmonitor create --type TLS-HELLO driver.amphora=complete driver.ovn=missing [operation.type.UDP-CONNECT] title=type - UDP-CONNECT status=optional notes=Use UDP-CONNECT for the health monitor. 
cli=openstack loadbalancer healthmonitor create --type UDP-CONNECT driver.amphora=complete driver.ovn=missing [operation.url_path] title=url_path status=optional notes=The HTTP URL path of the request sent by the monitor to test the health of a backend member. cli=openstack loadbalancer healthmonitor create [--url-path ] driver.amphora=complete driver.ovn=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/user/feature-classification/feature-matrix-l7policy.ini0000664000175000017500000001004000000000000030032 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. [driver.amphora] title=Amphora Provider link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html [driver.ovn] title=OVN Provider link=https://docs.openstack.org/networking-ovn/latest/admin/loadbalancer.html # Note: These should be in the order they appear in a create call. [operation.action.REDIRECT_TO_POOL] title=action - REDIRECT_TO_POOL status=optional notes=The L7 policy action REDIRECT_TO_POOL. cli=openstack loadbalancer l7policy create --action REDIRECT_TO_POOL driver.amphora=complete driver.ovn=missing [operation.action.REDIRECT_TO_PREFIX] title=action - REDIRECT_TO_PREFIX status=optional notes=The L7 policy action REDIRECT_TO_PREFIX. cli=openstack loadbalancer l7policy create --action REDIRECT_TO_PREFIX driver.amphora=complete driver.ovn=missing [operation.action.REDIRECT_TO_URL] title=action - REDIRECT_TO_URL status=optional notes=The L7 policy action REDIRECT_TO_URL. cli=openstack loadbalancer l7policy create --action REDIRECT_TO_URL driver.amphora=complete driver.ovn=missing [operation.action.REJECT] title=action - REJECT status=optional notes=The L7 policy action REJECT. cli=openstack loadbalancer l7policy create --action REJECT driver.amphora=complete driver.ovn=missing [operation.admin_state_up] title=admin_state_up status=mandatory notes=Enables and disables the L7 policy. cli=openstack loadbalancer l7policy create [--enable | --disable] driver.amphora=complete driver.ovn=missing [operation.description] title=description status=optional notes=The description of the L7 policy. Provided by the Octavia API service. cli=openstack loadbalancer l7policy create [--description ] driver.amphora=complete driver.ovn=missing [operation.name] title=name status=optional notes=The name of the L7 policy. Provided by the Octavia API service. cli=openstack loadbalancer l7policy create [--name ] driver.amphora=complete driver.ovn=missing [operation.position] title=position status=optional notes=The position of this policy on the listener. cli=openstack loadbalancer l7policy create [--position ] driver.amphora=complete driver.ovn=missing [operation.redirect_http_code] title=redirect_http_code status=optional notes=Requests matching this policy will be redirected to the specified URL or Prefix URL with the HTTP response code. 
cli=openstack loadbalancer l7policy create [--redirect-http-code ] driver.amphora=complete driver.ovn=missing [operation.redirect_pool_id] title=redirect_pool_id status=optional notes=Requests matching this policy will be redirected to the pool with this ID. cli=openstack loadbalancer l7policy create [--redirect-pool ] driver.amphora=complete driver.ovn=missing [operation.redirect_prefix] title=redirect_prefix status=optional notes=Requests matching this policy will be redirected to this Prefix URL. cli=openstack loadbalancer l7policy create [--redirect-prefix ] driver.amphora=complete driver.ovn=missing [operation.redirect_url] title=redirect_url status=optional notes=Requests matching this policy will be redirected to this URL. cli=openstack loadbalancer l7policy create [--redirect-url ] driver.amphora=complete driver.ovn=missing [operation.tags] title=tags status=optional notes=The tags for the L7 policy. Provided by the Octavia API service. cli=openstack loadbalancer l7policy create [--tag ] driver.amphora=complete driver.ovn=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/user/feature-classification/feature-matrix-l7rule.ini0000664000175000017500000001146300000000000027514 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. [driver.amphora] title=Amphora Provider link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html [driver.ovn] title=OVN Provider link=https://docs.openstack.org/networking-ovn/latest/admin/loadbalancer.html # Note: These should be in the order they appear in a create call. [operation.admin_state_up] title=admin_state_up status=mandatory notes=Enables and disables the L7 rule. cli=openstack loadbalancer l7rule create [--enable | --disable] driver.amphora=complete driver.ovn=missing [operation.compare_type.CONTAINS] title=compare_type - CONTAINS status=mandatory notes=The CONTAINS comparison type for the L7 rule. cli=openstack loadbalancer l7rule create --compare-type CONTAINS driver.amphora=complete driver.ovn=missing [operation.compare_type.ENDS_WITH] title=compare_type - ENDS_WITH status=mandatory notes=The ENDS_WITH comparison type for the L7 rule. cli=openstack loadbalancer l7rule create --compare-type ENDS_WITH driver.amphora=complete driver.ovn=missing [operation.compare_type.EQUAL_TO] title=compare_type - EQUAL_TO status=mandatory notes=The EQUAL_TO comparison type for the L7 rule. cli=openstack loadbalancer l7rule create --compare-type EQUAL_TO driver.amphora=complete driver.ovn=missing [operation.compare_type.REGEX] title=compare_type - REGEX status=mandatory notes=The REGEX comparison type for the L7 rule. cli=openstack loadbalancer l7rule create --compare-type REGEX driver.amphora=complete driver.ovn=missing [operation.compare_type.STARTS_WITH] title=compare_type - STARTS_WITH status=mandatory notes=The STARTS_WITH comparison type for the L7 rule. 
cli=openstack loadbalancer l7rule create --compare-type STARTS_WITH driver.amphora=complete driver.ovn=missing [operation.invert] title=invert status=optional notes=When true the logic of the rule is inverted. cli=openstack loadbalancer l7rule create [--invert] driver.amphora=complete driver.ovn=missing [operation.key] title=key status=optional notes=The key to use for the comparison. cli=openstack loadbalancer l7rule create [--key ] driver.amphora=complete driver.ovn=missing [operation.tags] title=tags status=optional notes=The tags for the L7 rule. Provided by the Octavia API service. cli=openstack loadbalancer l7rule create [--tag ] driver.amphora=complete driver.ovn=missing [operation.type.COOKIE] title=type - COOKIE status=optional notes=The COOKIE L7 rule type. cli=openstack loadbalancer l7rule create --type COOKIE driver.amphora=complete driver.ovn=missing [operation.type.FILE_TYPE] title=type - FILE_TYPE status=optional notes=The FILE_TYPE L7 rule type. cli=openstack loadbalancer l7rule create --type FILE_TYPE driver.amphora=complete driver.ovn=missing [operation.type.HEADER] title=type - HEADER status=optional notes=The HEADER L7 rule type. cli=openstack loadbalancer l7rule create --type HEADER driver.amphora=complete driver.ovn=missing [operation.type.HOST_NAME] title=type - HOST_NAME status=optional notes=The HOST_NAME L7 rule type. cli=openstack loadbalancer l7rule create --type HOST_NAME driver.amphora=complete driver.ovn=missing [operation.type.PATH] title=type - PATH status=optional notes=The PATH L7 rule type. cli=openstack loadbalancer l7rule create --type PATH driver.amphora=complete driver.ovn=missing [operation.type.SSL_CONN_HAS_CERT] title=type - SSL_CONN_HAS_CERT status=optional notes=The SSL_CONN_HAS_CERT L7 rule type. cli=openstack loadbalancer l7rule create --type SSL_CONN_HAS_CERT driver.amphora=complete driver.ovn=missing [operation.type.SSL_VERIFY_RESULT] title=type - SSL_VERIFY_RESULT status=optional notes=The SSL_VERIFY_RESULT L7 rule type. cli=openstack loadbalancer l7rule create --type SSL_VERIFY_RESULT driver.amphora=complete driver.ovn=missing [operation.type.SSL_DN_FIELD] title=type - SSL_DN_FIELD status=optional notes=The SSL_DN_FIELD L7 rule type. cli=openstack loadbalancer l7rule create --type SSL_DN_FIELD driver.amphora=complete driver.ovn=missing [operation.value] title=value status=mandatory notes=The value to use for the comparison. cli=openstack loadbalancer l7rule create --value driver.amphora=complete driver.ovn=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/user/feature-classification/feature-matrix-lb.ini0000664000175000017500000000654100000000000026700 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
[driver.amphora] title=Amphora Provider link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html [driver.ovn] title=OVN Provider link=https://docs.openstack.org/networking-ovn/latest/admin/loadbalancer.html # Note: These should be in the order they appear in a create call. [operation.admin_state_up] title=admin_state_up status=mandatory notes=Enables and disables the load balancer. cli=openstack loadbalancer create [--enable | --disable] driver.amphora=complete driver.ovn=complete [operation.description] title=description status=optional notes=The description of the load balancer. Provided by the Octavia API service. cli=openstack loadbalancer create [--description ] driver.amphora=complete driver.ovn=complete [operation.flavor] title=flavor status=optional notes=The flavor of the load balancer. cli=openstack loadbalancer create [--flavor ] driver.amphora=complete driver.ovn=missing [operation.name] title=name status=optional notes=The name of the load balancer. Provided by the Octavia API service. cli=openstack loadbalancer create [--name ] driver.amphora=complete driver.ovn=complete [operation.stats] title=Load Balancer statistics status=mandatory notes=The ability to show statistics for a load balancer. cli=openstack loadbalancer stats show driver.amphora=complete driver.ovn=missing [operation.status] title=Load Balancer status tree status=mandatory notes=The ability to show a status tree for the load balancer. cli=openstack loadbalancer status show driver.amphora=complete driver.ovn=complete [operation.tags] title=tags status=optional notes=The tags for the load balancer. Provided by the Octavia API service. cli=openstack loadbalancer create [--tag ] driver.amphora=complete driver.ovn=complete [operation.vip_address] title=vip_address status=optional cli=openstack loadbalancer create [--vip-address ] driver.amphora=complete driver.ovn=complete [operation.vip_network_id] title=vip_network_id status=optional cli=openstack loadbalancer create [--vip-network-id ] driver.amphora=complete driver.ovn=complete [operation.vip_port_id] title=vip_port_id status=optional cli=openstack loadbalancer create [--vip-port-id ] driver.amphora=complete driver.ovn=complete [operation.vip_qos_policy_id] title=vip_qos_policy_id status=optional cli=openstack loadbalancer create [--vip-qos-policy-id ] driver.amphora=complete driver.ovn=complete [operation.vip_subnet_id] title=vip_subnet_id status=optional cli=openstack loadbalancer create [--vip-subnet-id ] driver.amphora=complete driver.ovn=complete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/user/feature-classification/feature-matrix-listener.ini0000664000175000017500000002651700000000000030135 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
[driver.amphora] title=Amphora Provider link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html [driver.ovn] title=OVN Provider link=https://docs.openstack.org/networking-ovn/latest/admin/loadbalancer.html # Note: These should be in the order they appear in a create call. [operation.admin_state_up] title=admin_state_up status=mandatory notes=Enables and disables the listener. cli=openstack loadbalancer listener create [--enable | --disable] driver.amphora=complete driver.ovn=complete [operation.allowed_cidr] title=allowed_cidr status=optional notes=CIDR to allow access to the listener (can be set multiple times). cli=openstack loadbalancer listener create [--allowed-cidr ] driver.amphora=complete driver.ovn=missing [operation.client_authentication] title=client_authentication status=optional notes=The TLS client authentication mode. cli=openstack loadbalancer listener create [--client-authentication {NONE,OPTIONAL,MANDATORY}] driver.amphora=complete driver.ovn=missing [operation.client_ca_tls_container_ref] title=client_ca_tls_container_ref status=optional notes=The URI of the key manager service secret containing a PEM format client CA certificate bundle for TERMINATED_TLS listeners. cli=openstack loadbalancer listener create [--client-ca-tls-container-ref ] driver.amphora=complete driver.ovn=missing [operation.client_crl_container_ref] title=client_crl_container_ref status=optional notes=The URI of the key manager service secret containing a PEM format CA revocation list file for TERMINATED_TLS listeners. cli=openstack loadbalancer listener create [--client-crl-container-ref ] driver.amphora=complete driver.ovn=missing [operation.connection_limit] title=connection_limit status=optional notes=The maximum number of connections permitted for this listener. cli=openstack loadbalancer listener create [--connection-limit ] driver.amphora=complete driver.ovn=missing [operation.default_tls_container_ref] title=default_tls_container_ref status=optional notes=The URI of the key manager service secret containing a PKCS12 format certificate/key bundle for TERMINATED_TLS listeners. cli=openstack loadbalancer listener create [--default-tls-container-ref ] driver.amphora=complete driver.ovn=missing [operation.description] title=description status=optional notes=The description of the listener. Provided by the Octavia API service. cli=openstack loadbalancer listener create [--description ] driver.amphora=complete driver.ovn=complete [operation.insert_headers.X-Forwarded-For] title=insert_headers - X-Forwarded-For status=optional notes=When “true”, an X-Forwarded-For header is inserted into the request to the backend member that specifies the client IP address. cli=openstack loadbalancer listener create [--insert-headers X-Forwarded-For=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-Forwarded-Port] title=insert_headers - X-Forwarded-Port status=optional notes=When “true”, an X-Forwarded-Port header is inserted into the request to the backend member that specifies the listener port. cli=openstack loadbalancer listener create [--insert-headers X-Forwarded-Port=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-Forwarded-Proto] title=insert_headers - X-Forwarded-Proto status=optional notes=When “true”, an X-Forwarded-Proto header is inserted into the request to the backend member.
cli=openstack loadbalancer listener create [--insert-headers X-Forwarded-Proto=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-SSL-Client-Verify] title=insert_headers - X-SSL-Client-Verify status=optional notes=When “true”, an X-SSL-Client-Verify header is inserted into the request to the backend member that contains 0 if the client authentication was successful, or a result error number greater than 0 that aligns to the OpenSSL verify error codes. cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-Verify=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-SSL-Client-Has-Cert] title=insert_headers - X-SSL-Client-Has-Cert status=optional notes=When “true”, an X-SSL-Client-Has-Cert header is inserted into the request to the backend member that is “true” if a client authentication certificate was presented, and “false” if not. Does not indicate validity. cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-Has-Cert=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-SSL-Client-DN] title=insert_headers - X-SSL-Client-DN status=optional notes=When “true”, an X-SSL-Client-DN header is inserted into the request to the backend member that contains the full Distinguished Name of the certificate presented by the client. cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-DN=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-SSL-Client-CN] title=insert_headers - X-SSL-Client-CN status=optional notes=When “true”, an X-SSL-Client-CN header is inserted into the request to the backend member that contains the Common Name from the full Distinguished Name of the certificate presented by the client. cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-CN=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-SSL-Issuer] title=insert_headers - X-SSL-Issuer status=optional notes=When “true”, an X-SSL-Issuer header is inserted into the request to the backend member that contains the full Distinguished Name of the client certificate issuer. cli=openstack loadbalancer listener create [--insert-headers X-SSL-Issuer=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-SSL-Client-SHA1] title=insert_headers - X-SSL-Client-SHA1 status=optional notes=When “true”, an X-SSL-Client-SHA1 header is inserted into the request to the backend member that contains the SHA-1 fingerprint of the certificate presented by the client in hex string format. cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-SHA1=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-SSL-Client-Not-Before] title=insert_headers - X-SSL-Client-Not-Before status=optional notes=When “true”, an X-SSL-Client-Not-Before header is inserted into the request to the backend member that contains the start date presented by the client as a formatted string YYMMDDhhmmss[Z]. cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-Not-Before=true] driver.amphora=complete driver.ovn=missing [operation.insert_headers.X-SSL-Client-Not-After] title=insert_headers - X-SSL-Client-Not-After status=optional notes=When “true”, an X-SSL-Client-Not-After header is inserted into the request to the backend member that contains the end date presented by the client as a formatted string YYMMDDhhmmss[Z].
cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-Not-After=true] driver.amphora=complete driver.ovn=missing [operation.name] title=name status=optional notes=The name of the load balancer listener. Provided by the Octavia API service. cli=openstack loadbalancer listener create [--name ] driver.amphora=complete driver.ovn=complete [operation.protocol.HTTP] title=protocol - HTTP status=optional notes=HTTP protocol support for the listener. cli=openstack loadbalancer listener create --protocol HTTP driver.amphora=complete driver.ovn=missing [operation.protocol.HTTPS] title=protocol - HTTPS status=optional notes=HTTPS protocol support for the listener. cli=openstack loadbalancer listener create --protocol HTTPS driver.amphora=complete driver.ovn=missing [operation.protocol.TCP] title=protocol - TCP status=optional notes=TCP protocol support for the listener. cli=openstack loadbalancer listener create --protocol TCP driver.amphora=complete driver.ovn=complete [operation.protocol.TERMINATED_HTTPS] title=protocol - TERMINATED_HTTPS status=optional notes=Terminated HTTPS protocol support for the listener. cli=openstack loadbalancer listener create --protocol TERMINATED_HTTPS driver.amphora=complete driver.ovn=missing [operation.protocol.UDP] title=protocol - UDP status=optional notes=UDP protocol support for the listener. cli=openstack loadbalancer listener create --protocol UDP driver.amphora=complete driver.ovn=complete [operation.protocol_port] title=protocol_port status=mandatory notes=The protocol port number for the listener. cli=openstack loadbalancer listener create --protocol-port driver.amphora=complete driver.ovn=complete [operation.sni_container_refs] title=sni_container_refs status=optional notes=A list of URIs to the key manager service secrets containing PKCS12 format certificate/key bundles for TERMINATED_TLS listeners. cli=openstack loadbalancer listener create [--sni-container-refs [ [ ...]]] driver.amphora=complete driver.ovn=missing [operation.stats] title=Listener statistics status=mandatory notes=The ability to show statistics for a listener. cli=openstack loadbalancer listener stats show driver.amphora=complete driver.ovn=missing [operation.tags] title=tags status=optional notes=The tags for the load balancer listener. Provided by the Octavia API service. cli=openstack loadbalancer listener create [--tag ] driver.amphora=complete driver.ovn=complete [operation.timeout_client_data] title=timeout_client_data status=optional notes=Frontend client inactivity timeout in milliseconds. cli=openstack loadbalancer listener create [--timeout-client-data ] driver.amphora=complete driver.ovn=missing [operation.timeout_member_connect] title=timeout_member_connect status=optional notes=Backend member connection timeout in milliseconds. cli=openstack loadbalancer listener create [--timeout-member-connect ] driver.amphora=complete driver.ovn=missing [operation.timeout_member_data] title=timeout_member_data status=optional notes=Backend member inactivity timeout in milliseconds. cli=openstack loadbalancer listener create [--timeout-member-data ] driver.amphora=complete driver.ovn=missing [operation.timeout_tcp_inspect] title=timeout_tcp_inspect status=optional notes=Time, in milliseconds, to wait for additional TCP packets for content inspection.
cli=openstack loadbalancer listener create [--timeout-tcp-inspect ] driver.amphora=complete driver.ovn=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/user/feature-classification/feature-matrix-member.ini0000664000175000017500000000665400000000000027553 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. [driver.amphora] title=Amphora Provider link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html [driver.ovn] title=OVN Provider link=https://docs.openstack.org/networking-ovn/latest/admin/loadbalancer.html # Note: These should be in the order they appear in a create call. [operation.admin_state_up] title=admin_state_up status=mandatory notes=Enables and disables the member. cli=openstack loadbalancer member create [--enable | --disable] driver.amphora=complete driver.ovn=complete [operation.address] title=address status=mandatory notes=The IP address for the member. cli=openstack loadbalancer member create --address driver.amphora=complete driver.ovn=complete [operation.backup] title=backup status=optional notes=True if the member is a backup member server. cli=openstack loadbalancer member create [--enable-backup] driver.amphora=complete driver.ovn=missing [operation.batch] title=Batch update members status=mandatory notes=Ability to update the members of a pool in one API call. driver.amphora=complete driver.ovn=partial driver-notes.ovn=The OVN provider does not support all of the member features. [operation.monitor_address] title=monitor_address status=optional notes=An alternate IP address used for health monitoring a backend member. cli=openstack loadbalancer member create [--monitor-address ] driver.amphora=complete driver.ovn=missing [operation.monitor_port] title=monitor_port status=optional notes=An alternate protocol port used for health monitoring a backend member. cli=openstack loadbalancer member create [--monitor-port ] driver.amphora=complete driver.ovn=missing [operation.name] title=name status=optional notes=The name for the member. Provided by the Octavia API service. cli=openstack loadbalancer member create [--name ] driver.amphora=complete driver.ovn=complete [operation.protocol_port] title=protocol_port status=mandatory notes=The protocol port number to connect with on the member server. cli=openstack loadbalancer member create --protocol-port driver.amphora=complete driver.ovn=complete [operation.subnet_id] title=subnet_id status=optional notes=The subnet ID the member service is accessible from. cli=openstack loadbalancer member create [--subnet-id ] driver.amphora=complete driver.ovn=complete [operation.tags] title=tags status=optional notes=The tags for the member. Provided by the Octavia API service.
cli=openstack loadbalancer member create [--tag ] driver.amphora=complete driver.ovn=complete [operation.weight] title=weight status=optional notes=The weight of a member determines the portion of requests or connections it services compared to the other members of the pool. cli=openstack loadbalancer member create [--weight ] driver.amphora=complete driver.ovn=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/user/feature-classification/feature-matrix-pool.ini0000664000175000017500000001653300000000000027256 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. [driver.amphora] title=Amphora Provider link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html [driver.ovn] title=OVN Provider link=https://docs.openstack.org/networking-ovn/latest/admin/loadbalancer.html # Note: These should be in the order they appear in a create call. [operation.admin_state_up] title=admin_state_up status=mandatory notes=Enables and disables the pool. cli=openstack loadbalancer pool create [--enable | --disable] --listener driver.amphora=complete driver.ovn=complete [operation.ca_tls_container_ref] title=ca_tls_container_ref status=optional notes=The reference of the key manager service secret containing a PEM format CA certificate bundle for tls_enabled pools. cli=openstack loadbalancer pool create [--ca-tls-container-ref ] --listener driver.amphora=complete driver.ovn=missing [operation.crl_container_ref] title=crl_container_ref status=optional notes=The reference of the key manager service secret containing a PEM format CA revocation list file for tls_enabled pools. cli=openstack loadbalancer pool create [--crl-container-ref ] --listener driver.amphora=complete driver.ovn=missing [operation.lb_algorithm.LEAST_CONNECTIONS] title=lb_algorithm - LEAST_CONNECTIONS notes=The pool will direct connections to the member server with the least connections in use. cli=openstack loadbalancer pool create --lb-algorithm LEAST_CONNECTIONS --listener driver.amphora=complete driver.ovn=missing [operation.lb_algorithm.ROUND_ROBIN] title=lb_algorithm - ROUND_ROBIN notes=The pool will direct connections to the next member server, one after the other, rotating through the available member servers. cli=openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener driver.amphora=complete driver.ovn=missing [operation.lb_algorithm.SOURCE_IP] title=lb_algorithm - SOURCE_IP notes=The pool will direct connections to the member server based on a hash of the source IP. cli=openstack loadbalancer pool create --lb-algorithm SOURCE_IP --listener driver.amphora=complete driver.ovn=missing [operation.lb_algorithm.SOURCE_IP_PORT] title=lb_algorithm - SOURCE_IP_PORT notes=The pool will direct connections to the member server based on a hash of the source IP and port.
cli=openstack loadbalancer pool create --lb-algorithm SOURCE_IP_PORT --listener driver.amphora=missing driver.ovn=complete [operation.description] title=description status=optional notes=The description of the pool. Provided by the Octavia API service. cli=openstack loadbalancer pool create [--description ] --listener driver.amphora=complete driver.ovn=complete [operation.name] title=name status=optional notes=The name of the pool. Provided by the Octavia API service. cli=openstack loadbalancer pool create [--name ] --listener driver.amphora=complete driver.ovn=complete [operation.protocol.HTTP] title=protocol - HTTP status=optional notes=HTTP protocol support for the pool. cli=openstack loadbalancer pool create --protocol HTTP --listener driver.amphora=complete driver.ovn=missing [operation.protocol.HTTPS] title=protocol - HTTPS status=optional notes=HTTPS protocol support for the pool. cli=openstack loadbalancer pool create --protocol HTTPS --listener driver.amphora=complete driver.ovn=missing [operation.protocol.PROXY] title=protocol - PROXY status=optional notes=PROXY protocol support for the pool. cli=openstack loadbalancer pool create --protocol PROXY --listener driver.amphora=complete driver.ovn=missing [operation.protocol.TCP] title=protocol - TCP status=optional notes=TCP protocol support for the pool. cli=openstack loadbalancer pool create --protocol TCP --listener driver.amphora=complete driver.ovn=complete [operation.protocol.UDP] title=protocol - UDP status=optional notes=UDP protocol support for the pool. cli=openstack loadbalancer pool create --protocol UDP --listener driver.amphora=complete driver.ovn=complete [operation.session_persistence.APP_COOKIE] title=session_persistence - APP_COOKIE status=optional notes=Session persistence using an application-supplied cookie. cli=openstack loadbalancer pool create --session-persistence type=APP_COOKIE --listener driver.amphora=complete driver.ovn=missing [operation.session_persistence.cookie_name] title=session_persistence - cookie_name status=optional notes=The name of the application cookie to use for session persistence. cli=openstack loadbalancer pool create --session-persistence cookie_name=chocolate --listener driver.amphora=complete driver.ovn=missing [operation.session_persistence.HTTP_COOKIE] title=session_persistence - HTTP_COOKIE status=optional notes=Session persistence using a cookie created by the load balancer. cli=openstack loadbalancer pool create --session-persistence type=HTTP_COOKIE --listener driver.amphora=complete driver.ovn=missing [operation.session_persistence.persistence_timeout] title=session_persistence - persistence_timeout status=optional notes=The timeout, in seconds, after which a UDP flow may be rescheduled to a different member. cli=openstack loadbalancer pool create --session-persistence persistence_timeout=360 --listener driver.amphora=complete driver.ovn=missing [operation.session_persistence.persistence_granularity] title=session_persistence - persistence_granularity status=optional notes=The netmask used to determine UDP SOURCE_IP session persistence. cli=openstack loadbalancer pool create --session-persistence persistence_granularity=255.255.255.255 --listener driver.amphora=complete driver.ovn=missing [operation.session_persistence.SOURCE_IP] title=session_persistence - SOURCE_IP status=optional notes=Session persistence using the source IP address.
cli=openstack loadbalancer pool create --session-persistence type=SOURCE_IP --listener driver.amphora=complete driver.ovn=missing [operation.tags] title=tags status=optional notes=The tags for the pool. Provided by the Octavia API service. cli=openstack loadbalancer pool create [--tag ] --listener driver.amphora=complete driver.ovn=complete [operation.tls_enabled] title=tls_enabled status=optional notes=When true connections to backend member servers will use TLS encryption. cli=openstack loadbalancer pool create [--enable-tls] --listener driver.amphora=complete driver.ovn=missing [operation.tls_container_ref] title=tls_container_ref status=optional notes=The reference to the key manager service secret containing a PKCS12 format certificate/key bundle for tls_enabled pools for TLS client authentication to the member servers. cli=openstack loadbalancer pool create [--tls-container-ref ] --listener driver.amphora=complete driver.ovn=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/user/feature-classification/index.rst0000664000175000017500000000701000000000000024500 0ustar00zuulzuul00000000000000.. Copyright (c) 2019 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================== Octavia Provider Feature Matrix =============================== Load Balancer Features ====================== Provider feature support matrix for an Octavia load balancer. Load Balancer API Features -------------------------- These features are documented in the Octavia API reference `Create a Load Balancer `_ section. .. support_matrix:: feature-matrix-lb.ini Listener Features ================= Provider feature support matrix for an Octavia load balancer listener. Listener API Features --------------------- These features are documented in the Octavia API reference `Create a Listener `_ section. .. support_matrix:: feature-matrix-listener.ini Pool Features ============= Provider feature support matrix for an Octavia load balancer pool. Pool API Features ----------------- These features are documented in the Octavia API reference `Create a Pool `_ section. .. support_matrix:: feature-matrix-pool.ini Member Features =============== Provider feature support matrix for an Octavia load balancer member. Member API Features ------------------- These features are documented in the Octavia API reference `Create a Member `_ section. .. support_matrix:: feature-matrix-member.ini Health Monitor Features ======================= Provider feature support matrix for an Octavia load balancer health monitor. Health Monitor API Features --------------------------- These features are documented in the Octavia API reference `Create a Health Monitor `_ section. .. support_matrix:: feature-matrix-healthmonitor.ini L7 Policy Features ================== Provider feature support matrix for an Octavia load balancer L7 Policies. 
L7 Policy API Features ---------------------- These features are documented in the Octavia API reference `Create an L7 Policy `_ section. .. support_matrix:: feature-matrix-l7policy.ini L7 Rule Features ================ Provider feature support matrix for an Octavia load balancer L7 Rules. L7 Rule API Features -------------------- These features are documented in the Octavia API reference `Create an L7 Rule `_ section. .. support_matrix:: feature-matrix-l7rule.ini ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3622167 octavia-6.2.2/doc/source/user/guides/0000775000175000017500000000000000000000000017475 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/doc/source/user/guides/basic-cookbook.rst0000664000175000017500000014032700000000000023123 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 IBM Copyright 2019 Red Hat, Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================= Basic Load Balancing Cookbook ============================= Introduction ============ This document contains several examples of using basic load balancing services as a tenant or "regular" cloud user. For the purposes of this guide we assume that the neutron and barbican command-line interfaces, via the OpenStack client, are going to be used to configure all features of Octavia. In order to keep these examples short, we also assume that tasks not directly associated with deploying load balancing services have already been accomplished. This might include such things as deploying and configuring web servers, setting up Neutron networks, obtaining TLS certificates from a trusted provider, and so on. A description of the starting conditions is given in each example below. Please also note that this guide assumes you are familiar with the specific load balancer terminology defined in the :doc:`../../reference/glossary`. For a description of load balancing itself and the Octavia project, please see: :doc:`../../reference/introduction`. Examples ======== Deploy a basic HTTP load balancer --------------------------------- While this is technically the simplest complete load balancing solution that can be deployed, we recommend deploying HTTP load balancers with a health monitor to ensure back-end member availability. See :ref:`basic-lb-with-hm` below. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with an HTTP application on TCP port 80. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes web requests to the back-end servers. **Solution**: 1. Create load balancer *lb1* on subnet *public-subnet*. 2. Create listener *listener1*. 3. Create pool *pool1* as *listener1*'s default pool. 4. 
Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 .. _basic-lb-with-hm: Deploy a basic HTTP load balancer with a health monitor ------------------------------------------------------- This is the simplest recommended load balancing solution for HTTP applications. This solution is appropriate for operators with provider networks that are not compatible with Neutron floating-ip functionality (such as IPv6 networks). However, if you need to retain control of the external IP through which a load balancer is accessible, even if the load balancer needs to be destroyed or recreated, it may be more appropriate to deploy your basic load balancer using a floating IP. See :ref:`basic-lb-with-hm-and-fip` below. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with an HTTP application on TCP port 80. * These back-end servers have been configured with a health check at the URL path "/healthcheck". See :ref:`http-health-monitors` below. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes web requests to the back-end servers, and which checks the "/healthcheck" path to ensure back-end member health. **Solution**: 1. Create load balancer *lb1* on subnet *public-subnet*. 2. Create listener *listener1*. 3. Create pool *pool1* as *listener1*'s default pool. 4. Create a health monitor on *pool1* which tests the "/healthcheck" path. 5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path /healthcheck pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 .. _basic-lb-with-hm-and-fip: Deploy a basic HTTP load balancer using a floating IP ----------------------------------------------------- It can be beneficial to use a floating IP when setting up a load balancer's VIP in order to ensure you retain control of the IP that gets assigned as the floating IP in case the load balancer needs to be destroyed, moved, or recreated. Note that this is not possible to do with IPv6 load balancers as floating IPs do not work with IPv6. 
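Because a floating IP is an independent Neutron resource, it outlives any load balancer it is associated with. A minimal sketch of the re-association step this enables (the floating IP address 203.0.113.50 is a hypothetical example; the full deployment commands follow in the example below):

::

    # Look up the VIP port ID of the (re)created load balancer
    NEW_VIP_PORT=$(openstack loadbalancer show lb1 -f value -c vip_port_id)

    # Point the existing floating IP at the new VIP port
    openstack floating ip set --port $NEW_VIP_PORT 203.0.113.50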
**Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with an HTTP application on TCP port 80. * These back-end servers have been configured with a health check at the URL path "/healthcheck". See :ref:`http-health-monitors` below. * Neutron network *public* is a shared external network created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes web requests to the back-end servers, and which checks the "/healthcheck" path to ensure back-end member health. Further, we want to do this using a floating IP. **Solution**: 1. Create load balancer *lb1* on subnet *private-subnet*. 2. Create listener *listener1*. 3. Create pool *pool1* as *listener1*'s default pool. 4. Create a health monitor on *pool1* which tests the "/healthcheck" path. 5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. 6. Create a floating IP address on *public-subnet*. 7. Associate this floating IP with the *lb1*'s VIP port. **CLI commands**: :: openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path /healthcheck pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 openstack floating ip create public # The following IDs should be visible in the output of previous commands openstack floating ip set --port Deploy a basic HTTP load balancer with session persistence ---------------------------------------------------------- **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with an HTTP application on TCP port 80. * The application is written such that web clients should always be directed to the same back-end server throughout their web session, based on an application cookie inserted by the web application named 'PHPSESSIONID'. * These back-end servers have been configured with a health check at the URL path "/healthcheck". See :ref:`http-health-monitors` below. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes web requests to the back-end servers, persists sessions using the PHPSESSIONID as a key, and which checks the "/healthcheck" path to ensure back-end member health. **Solution**: 1. Create load balancer *lb1* on subnet *public-subnet*. 2. Create listener *listener1*. 3. Create pool *pool1* as *listener1*'s default pool which defines session persistence on the 'PHPSESSIONID' cookie. 4. Create a health monitor on *pool1* which tests the "/healthcheck" path. 5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. 
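If the application did not insert a suitable cookie of its own, a cookie generated by the load balancer could be used instead. A minimal sketch of that alternative pool-create step, using the HTTP_COOKIE session persistence type in place of the APP_COOKIE configuration shown in the CLI commands below:

::

    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --session-persistence type=HTTP_COOKIE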
**CLI commands**: :: openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --session-persistence type=APP_COOKIE,cookie_name=PHPSESSIONID openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path /healthcheck pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 Deploy a TCP load balancer -------------------------- This is generally suitable when load balancing a non-HTTP TCP-based service. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with a custom application on TCP port 23456. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes requests to the back-end servers. * We want to employ a TCP health check to ensure that the back-end servers are available. **Solution**: 1. Create load balancer *lb1* on subnet *public-subnet*. 2. Create listener *listener1*. 3. Create pool *pool1* as *listener1*'s default pool. 4. Create a health monitor on *pool1* which probes *pool1*'s members' TCP service port. 5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol TCP --protocol-port 23456 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol TCP openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type TCP pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 23456 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 23456 pool1 Deploy a QoS ruled load balancer -------------------------------- This solution limits the bandwidth available through the load balancer's VIP by applying a Neutron Quality of Service (QoS) policy to the VIP port, so that incoming or outgoing traffic on the VIP is rate limited. .. note:: Before using this feature, please make sure the Neutron QoS extension (qos) is enabled in the running OpenStack environment with the following command .. code-block:: console openstack extension list **Scenario description**: * A QoS policy with a bandwidth limit rule is created by us in Neutron. * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with an HTTP application on TCP port 80. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer and limit the traffic bandwidth when web traffic reaches the VIP. **Solution**: 1.
Create QoS policy *qos-policy-bandwidth* with a *bandwidth_limit* rule in Neutron. 2. Create load balancer *lb1* on subnet *public-subnet* with the ID of *qos-policy-bandwidth*. 3. Create listener *listener1*. 4. Create pool *pool1* as *listener1*'s default pool. 5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openstack network qos policy create qos-policy-bandwidth openstack network qos rule create --type bandwidth_limit --max-kbps 1024 --max-burst-kbits 1024 qos-policy-bandwidth openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --vip-qos-policy-id qos-policy-bandwidth # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 lb1 --protocol HTTP --protocol-port 80 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 Deploy a load balancer with access control list ----------------------------------------------- This solution limits incoming traffic to a listener to a set of allowed source IP addresses. Any other incoming traffic will be rejected. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with a custom application on TCP port 23456. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes requests to the back-end servers. * The application on TCP port 23456 is only accessible from a limited set of source IP addresses (192.0.2.0/24 and 198.51.100.0/24). **Solution**: 1. Create load balancer *lb1* on subnet *public-subnet*. 2. Create listener *listener1* with allowed CIDRs. 3. Create pool *pool1* as *listener1*'s default pool. 4. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol TCP --protocol-port 23456 --allowed-cidr 192.0.2.0/24 --allowed-cidr 198.51.100.0/24 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol TCP openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 23456 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 23456 pool1 Deploy a non-terminated HTTPS load balancer ------------------------------------------- A non-terminated HTTPS load balancer acts effectively like a generic TCP load balancer: the load balancer will forward the raw TCP traffic from the web client to the back-end servers without decrypting it. This means that the back-end servers themselves must be configured to terminate the HTTPS connection with the web clients, and in turn, the load balancer cannot insert headers into the HTTP session indicating the client IP address. (That is, to the back-end server, all web requests will appear to originate from the load balancer.)
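Once the load balancer below is deployed, one quick way to confirm this pass-through behavior is to inspect the certificate served through the VIP; it must be the back-end server's own certificate, because the load balancer never decrypts the stream. A minimal sketch using openssl (the VIP address 203.0.113.50 is a hypothetical example):

::

    # Print the subject of the certificate presented through the VIP; with a
    # non-terminated listener this is the back-end server's own certificate
    openssl s_client -connect 203.0.113.50:443 </dev/null 2>/dev/null | openssl x509 -noout -subject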
Advanced load balancer features (like Layer 7 functionality) also cannot be used with non-terminated HTTPS. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with a TLS-encrypted web application on TCP port 443. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes requests to the back-end servers. * We want to employ a TCP health check to ensure that the back-end servers are available. **Solution**: 1. Create load balancer *lb1* on subnet *public-subnet*. 2. Create listener *listener1*. 3. Create pool *pool1* as *listener1*'s default pool. 4. Create a health monitor on *pool1* which probes *pool1*'s members' TCP service port. 5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol HTTPS --protocol-port 443 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTPS openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type TCP pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 443 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 443 pool1 .. _basic-tls-terminated-listener: Deploy a TLS-terminated HTTPS load balancer ------------------------------------------- With a TLS-terminated HTTPS load balancer, web clients communicate with the load balancer over TLS protocols. The load balancer terminates the TLS session and forwards the decrypted requests to the back-end servers. By terminating the TLS session on the load balancer, we offload the CPU-intensive encryption work to the load balancer, and enable the possibility of using advanced load balancer features, like Layer 7 features and header manipulation. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with a regular HTTP application on TCP port 80. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * A TLS certificate, key, and intermediate certificate chain for www.example.com have been obtained from an external certificate authority. These now exist in the files server.crt, server.key, and ca-chain.crt in the current directory. The key and certificate are PEM-encoded, and the intermediate certificate chain is multiple PEM-encoded certs concatenated together. The key is not encrypted with a passphrase. * We want to configure a TLS-terminated HTTPS load balancer that is accessible from the internet using the key and certificate mentioned above, which distributes requests to the back-end servers over the non-encrypted HTTP protocol. * Octavia is configured to use barbican for key management. **Solution**: 1. Combine the individual cert/key/intermediates to a single PKCS12 file. 2. Create a barbican *secret* resource for the PKCS12 file. We will call this *tls_secret1*. 3. Create load balancer *lb1* on subnet *public-subnet*. 4.
Create listener *listener1* as a TERMINATED_HTTPS listener referencing *tls_secret1* as its default TLS container. 5. Create pool *pool1* as *listener1*'s default pool. 6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12 openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)" openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 Deploy a TLS-terminated HTTPS load balancer with SNI ---------------------------------------------------- This example is exactly like :ref:`basic-tls-terminated-listener`, except that we have multiple TLS certificates that we would like to use on the same listener using Server Name Indication (SNI) technology. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with a regular HTTP application on TCP port 80. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * TLS certificates, keys, and intermediate certificate chains for www.example.com and www2.example.com have been obtained from an external certificate authority. These now exist in the files server.crt, server.key, ca-chain.crt, server2.crt, server2.key, and ca-chain2.crt in the current directory. The keys and certificates are PEM-encoded, and the intermediate certificate chains are multiple PEM-encoded certs concatenated together. Neither key is encrypted with a passphrase. * We want to configure a TLS-terminated HTTPS load balancer that is accessible from the internet using the keys and certificates mentioned above, which distributes requests to the back-end servers over the non-encrypted HTTP protocol. * If a web client connects that is not SNI capable, we want the load balancer to respond with the certificate for www.example.com. **Solution**: 1. Combine the individual cert/key/intermediates to single PKCS12 files. 2. Create barbican *secret* resources for the PKCS12 files. We will call them *tls_secret1* and *tls_secret2*. 3. Create load balancer *lb1* on subnet *public-subnet*. 4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing *tls_secret1* as its default TLS container, and referencing both *tls_secret1* and *tls_secret2* using SNI. 5. Create pool *pool1* as *listener1*'s default pool. 6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
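Once the CLI commands below have completed, you can verify the SNI behavior by requesting each server name and comparing the certificates returned. A minimal sketch using openssl (the VIP address 203.0.113.50 is a hypothetical example):

::

    # Should print the www.example.com certificate subject
    openssl s_client -connect 203.0.113.50:443 -servername www.example.com </dev/null 2>/dev/null | openssl x509 -noout -subject

    # Should print the www2.example.com certificate subject
    openssl s_client -connect 203.0.113.50:443 -servername www2.example.com </dev/null 2>/dev/null | openssl x509 -noout -subject

    # A client that sends no SNI should receive the default certificate,
    # the one for www.example.com
    openssl s_client -connect 203.0.113.50:443 </dev/null 2>/dev/null | openssl x509 -noout -subject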
**CLI commands**: :: openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12 openssl pkcs12 -export -inkey server2.key -in server2.crt -certfile ca-chain2.crt -passout pass: -out server2.p12 openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)" openstack secret store --name='tls_secret2' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server2.p12)" openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') --sni-container-refs $(openstack secret list | awk '/ tls_secret1 / {print $2}') $(openstack secret list | awk '/ tls_secret2 / {print $2}') -- lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 Deploy a TLS-terminated HTTPS load balancer with client authentication ---------------------------------------------------------------------- With a TLS-terminated HTTPS load balancer, web clients communicate with the load balancer over TLS protocols. The load balancer terminates the TLS session and forwards the decrypted requests to the back-end servers. By terminating the TLS session on the load balancer, we offload the CPU-intensive encryption work to the load balancer, and enable the possibility of using advanced load balancer features, like Layer 7 features and header manipulation. Adding client authentication allows users to authenticate connections to the VIP using certificates. This is also known as two-way TLS authentication. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with a regular HTTP application on TCP port 80. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * A TLS certificate, key, and intermediate certificate chain for www.example.com have been obtained from an external certificate authority. These now exist in the files server.crt, server.key, and ca-chain.crt in the current directory. The key and certificate are PEM-encoded, and the intermediate certificate chain is multiple PEM-encoded certificates concatenated together. The key is not encrypted with a passphrase. * A Certificate Authority (CA) certificate chain and optional Certificate Revocation List (CRL) have been obtained from an external certificate authority to authenticate client certificates against. * We want to configure a TLS-terminated HTTPS load balancer that is accessible from the internet using the key and certificate mentioned above, which distributes requests to the back-end servers over the non-encrypted HTTP protocol. * Octavia is configured to use barbican for key management. **Solution**: 1. Combine the individual cert/key/intermediates to a single PKCS12 file. 2. Create a barbican *secret* resource for the PKCS12 file. We will call this *tls_secret1*. 3. Create a barbican *secret* resource for the client CA certificate. 
We will call this *client_ca_cert*. 4. Optionally create a barbican *secret* for the CRL file. We will call this *client_ca_crl*. 5. Create load balancer *lb1* on subnet *public-subnet*. 6. Create listener *listener1* as a TERMINATED_HTTPS listener referencing *tls_secret1* as its default TLS container, client authentication enabled, *client_ca_cert* as the client CA TLS container reference, and *client_ca_crl* as the client CRL container reference. 7. Create pool *pool1* as *listener1*'s default pool. 8. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12 openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)" openstack secret store --name='client_ca_cert' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < client_ca.pem)" openstack secret store --name='client_ca_crl' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < client_ca.crl)" openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') --client-authentication=MANDATORY --client-ca-tls-container-ref=$(openstack secret list | awk '/ client_ca_cert / {print $2}') --client-crl-container=$(openstack secret list | awk '/ client_ca_crl / {print $2}') lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 Deploy HTTP and TLS-terminated HTTPS load balancing on the same IP and backend ------------------------------------------------------------------------------ This example is exactly like :ref:`basic-tls-terminated-listener`, except that we would like to have both an HTTP and TERMINATED_HTTPS listener that use the same back-end pool (and therefore, probably respond with the exact same content regardless of whether the web client uses the HTTP or HTTPS protocol to connect). Please note that if you wish all HTTP requests to be redirected to HTTPS (so that requests are only served via HTTPS, and attempts to access content over HTTP just get redirected to the HTTPS listener), then please see `the example `__ in the :doc:`l7-cookbook`. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with a regular HTTP application on TCP port 80. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * A TLS certificate, key, and intermediate certificate chain for www.example.com have been obtained from an external certificate authority. These now exist in the files server.crt, server.key, and ca-chain.crt in the current directory. The key and certificate are PEM-encoded, and the intermediate certificate chain is multiple PEM-encoded certificates concatenated together. The key is not encrypted with a passphrase.
* We want to configure a TLS-terminated HTTPS load balancer that is accessible from the internet using the key and certificate mentioned above, which distributes requests to the back-end servers over the non-encrypted HTTP protocol. * We also want to configure an HTTP load balancer on the same IP address as the above which serves the exact same content (i.e., forwards to the same back-end pool) as the TERMINATED_HTTPS listener. **Solution**: 1. Combine the individual cert/key/intermediates into a single PKCS12 file. 2. Create a barbican *secret* resource for the PKCS12 file. We will call this *tls_secret1*. 3. Create load balancer *lb1* on subnet *public-subnet*. 4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing *tls_secret1* as its default TLS container. 5. Create pool *pool1* as *listener1*'s default pool. 6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. 7. Create listener *listener2* as an HTTP listener with *pool1* as its default pool. **CLI commands**: :: openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12 openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)" openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1 openstack loadbalancer listener create --protocol-port 80 --protocol HTTP --name listener2 --default-pool pool1 lb1
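Once both listeners are *ACTIVE*, a quick spot check should return the same content over both protocols. This is an optional sketch; replace 203.0.113.10 with the VIP address shown by ``openstack loadbalancer show lb1``, and note that ``--insecure`` is only needed here because the certificate was issued for www.example.com rather than the bare VIP address:

::

    curl http://203.0.113.10/
    curl --insecure https://203.0.113.10/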
Deploy a load balancer with backend re-encryption ------------------------------------------------- This example will demonstrate how to enable TLS encryption from the load balancer to the backend member servers. Typically this is used with TLS termination enabled on the listener, but, to simplify the example, we are going to use an unencrypted HTTP listener. For information on setting up a TLS-terminated listener, see the above section :ref:`basic-tls-terminated-listener`. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with an HTTPS application on TCP port 443. * A Certificate Authority (CA) certificate chain and optional Certificate Revocation List (CRL) have been obtained from an external certificate authority to authenticate member server certificates against. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes web requests to the back-end servers. **Solution**: 1. Create a barbican *secret* resource for the member CA certificate. We will call this *member_ca_cert*. 2. Optionally create a barbican *secret* for the CRL file. We will call this *member_ca_crl*. 3. Create load balancer *lb1* on subnet *public-subnet*. 4. Create listener *listener1*. 5. Create pool *pool1* as *listener1*'s default pool, that is TLS-enabled, with a Certificate Authority (CA) certificate chain *member_ca_cert* to validate the member server certificate, and a Certificate Revocation List (CRL) *member_ca_crl* to check the member server certificate against. 6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openstack secret store --name='member_ca_cert' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member_ca.pem)" openstack secret store --name='member_ca_crl' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member_ca.crl)" openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --enable-tls --ca-tls-container-ref $(openstack secret list | awk '/ member_ca_cert / {print $2}') --crl-container-ref $(openstack secret list | awk '/ member_ca_crl / {print $2}') openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 443 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 443 pool1 Deploy a load balancer with backend re-encryption and client authentication --------------------------------------------------------------------------- This example will demonstrate how to enable TLS encryption from the load balancer to the backend member servers, with the load balancer being authenticated using TLS client authentication. Typically this is used with TLS termination enabled on the listener, but, to simplify the example, we are going to use an unencrypted HTTP listener. For information on setting up a TLS-terminated listener, see the above section :ref:`basic-tls-terminated-listener`. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with an HTTPS application on TCP port 443. * A Certificate Authority (CA) certificate chain and optional Certificate Revocation List (CRL) have been obtained from an external certificate authority to authenticate member server certificates against. * A TLS certificate and key have been obtained from an external Certificate Authority (CA). These now exist in the files member.crt and member.key. The key and certificate are PEM-encoded and the key is not encrypted with a passphrase (for this example). * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes web requests to the back-end servers. **Solution**: 1. Combine the member client authentication certificate and key into a single PKCS12 file. 2. Create a barbican *secret* resource for the PKCS12 file. We will call this *member_secret1*. 3. Create a barbican *secret* resource for the member CA certificate. We will call this *member_ca_cert*. 4. Optionally create a barbican *secret* for the CRL file. We will call this *member_ca_crl*. 5. Create load balancer *lb1* on subnet *public-subnet*. 6. Create listener *listener1*. 7.
Create pool *pool1* as *listener1*'s default pool, that is TLS-enabled, with a TLS container reference for the member client authentication key and certificate PKCS12 bundle, a Certificate Authority (CA) certificate chain *member_ca_cert* to validate the member server certificate, and a Certificate Revocation List (CRL) *member_ca_crl* to check the member server certificate against. 8. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: :: openssl pkcs12 -export -inkey member.key -in member.crt -passout pass: -out member.p12 openstack secret store --name='member_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member.p12)" openstack secret store --name='member_ca_cert' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member_ca.pem)" openstack secret store --name='member_ca_crl' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member_ca.crl)" openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --enable-tls --ca-tls-container-ref $(openstack secret list | awk '/ member_ca_cert / {print $2}') --crl-container-ref $(openstack secret list | awk '/ member_ca_crl / {print $2}') --tls-container-ref $(openstack secret list | awk '/ member_secret1 / {print $2}') openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 443 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 443 pool1 Deploy a UDP load balancer with a health monitor ------------------------------------------------ This is a load balancer solution suitable for UDP-based services. **Scenario description**: * Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have been configured with an application on UDP port 1234. * Subnet *public-subnet* is a shared external subnet created by the cloud operator which is reachable from the internet. * We want to configure a basic load balancer that is accessible from the internet, which distributes requests to the back-end servers. * We want to employ a UDP health check to ensure that the back-end servers are available. UDP health checks may not work correctly if ICMP Destination Unreachable (ICMP type 3) messages are blocked by a security rule (see :ref:`other-health-monitors`). **Solution**: 1. Create load balancer *lb1* on subnet *public-subnet*. 2. Create listener *listener1*. 3. Create pool *pool1* as *listener1*'s default pool. 4. Create a health monitor on *pool1* which connects to the back-end servers. 5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
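Health check results are reflected in each member's ``operating_status``. As an optional check once the resources below have been created, the members and the full status tree can be inspected with:

::

    openstack loadbalancer member list pool1
    openstack loadbalancer status show lb1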
**CLI commands**: :: openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet # Re-run the following until lb1 shows ACTIVE and ONLINE statuses: openstack loadbalancer show lb1 openstack loadbalancer listener create --name listener1 --protocol UDP --protocol-port 1234 lb1 openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol UDP openstack loadbalancer healthmonitor create --delay 3 --max-retries 2 --timeout 2 --type UDP-CONNECT pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 1234 pool1 openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 1234 pool1 .. _health-monitor-best-practices: Health Monitor Best Practices ============================= While it is possible to set up a listener without a health monitor, if a back-end pool member goes down, Octavia will not remove the failed server from the pool until a considerable time has passed. This can lead to service disruption for web clients. Because of this, we recommend always configuring production load balancers to use a health monitor. The health monitor itself is a process that does periodic health checks on each back-end server to pre-emptively detect failed servers and temporarily pull them out of the pool. Since effective health monitors depend as much on back-end application server configuration as on proper load balancer configuration, some additional discussion of best practices is warranted here. See also: `Octavia API Reference `_ Health monitor options ---------------------- All of the health monitors Octavia supports have the following configurable options: * ``delay``: Number of seconds to wait between health checks. * ``timeout``: Number of seconds to wait for any given health check to complete. ``timeout`` should always be smaller than ``delay``. * ``max-retries``: Number of subsequent health checks a given back-end server must fail before it is considered *down*, or that a failed back-end server must pass to be considered *up* again. .. _http-health-monitors: HTTP health monitors -------------------- In general, the application-side component of HTTP health checks is a part of the web application being load balanced. By default, Octavia will probe the "/" path on the application server. However, in many applications this is not appropriate because the "/" path ends up being a cached page, or causes the application server to do more work than is necessary for a basic health check. In addition to the above options, HTTP health monitors also have the following options: * ``url_path``: Path part of the URL that should be retrieved from the back-end server. By default this is "/". * ``http_method``: HTTP method that should be used to retrieve the ``url_path``. By default this is "GET". * ``expected_codes``: List of HTTP status codes that indicate an OK health check. By default this is just "200".
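Putting these options together, an HTTP health monitor for the *pool1* created earlier in this guide might be created as follows. This is only an illustrative sketch; the ``/healthcheck`` path and the timing values are example choices, not requirements:

::

    openstack loadbalancer healthmonitor create --delay 5 --timeout 4 --max-retries 3 --type HTTP --http-method GET --url-path /healthcheck --expected-codes 200 pool1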
Please keep the following best practices in mind when writing the code that generates the health check in your web application: * The health monitor ``url_path`` should not require authentication to load. * By default the health monitor ``url_path`` should return an HTTP 200 OK status code to indicate a healthy server unless you specify alternate ``expected_codes``. * The health check should do enough internal checks to ensure the application is healthy and no more. This may mean ensuring database or other external storage connections are up and running, server load is acceptable, the site is not in maintenance mode, and other tests specific to your application. * The page generated by the health check should be very lightweight: * It should return in under a second. * It should not induce significant load on the application server. * The page generated by the health check should never be cached, though the code running the health check may reference cached data. For example, you may find it useful to run a more extensive health check via cron and store the results of this to disk. The code generating the page at the health monitor ``url_path`` would incorporate the results of this cron job in the tests it performs. * Since Octavia only cares about the HTTP status code returned, and since health checks are run so frequently, it may make sense to use the "HEAD" or "OPTIONS" HTTP methods to cut down on unnecessary processing of a whole page. .. _other-health-monitors: Other health monitors --------------------- Other health monitor types include ``PING``, ``TCP``, ``HTTPS``, ``TLS-HELLO``, and ``UDP-CONNECT``. ``PING`` health monitors send periodic ICMP PING requests to the back-end servers. Obviously, your back-end servers must be configured to allow PINGs in order for these health checks to pass. .. warning:: Health monitors of type ``PING`` only check that the member is reachable and responds to ICMP echo requests. They will not detect whether the application running on that instance is healthy. Most pools should use one of the other health monitor options. ``PING`` should only be used in specific cases where an ICMP echo request is a valid health check. ``TCP`` health monitors open a TCP connection to the back-end server's protocol port. Your custom TCP application should be written to respond OK to the load balancer connecting, opening a TCP connection, and closing it again after the TCP handshake without sending any data. ``HTTPS`` health monitors operate exactly like HTTP health monitors, but with TLS-enabled back-end servers. Unfortunately, this causes problems if the servers are performing client certificate validation, as HAProxy won't have a valid cert. In this case, using ``TLS-HELLO`` type monitoring is an alternative. ``TLS-HELLO`` health monitors simply ensure the back-end server responds to SSLv3 client hello messages. They will not check any other health metrics, like status code or body contents. ``UDP-CONNECT`` health monitors do a basic UDP port connect. Health monitors of this type may not work correctly if Destination Unreachable (ICMP type 3) is not enabled on the member server or is blocked by a security rule. A member server may be marked as operating status ONLINE when it is actually down. Intermediate certificate chains =============================== Some TLS certificates require you to install an intermediate certificate chain in order for web client browsers to trust the certificate. This chain can take several forms, and is a file provided by the organization from whom you obtained your TLS certificate. PEM-encoded chains ------------------ The simplest form of the intermediate chain is a PEM-encoded text file that either contains a sequence of individually-encoded PEM certificates, or one or more PEM-encoded PKCS7 blocks.
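If you are not sure which form you have, the first line of the file is usually enough to tell. This is an optional sketch; ca-chain.crt is the example file name used earlier in this guide:

::

    head -n 1 ca-chain.crt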
If this is the type of intermediate chain you have been provided, the file will contain either ``-----BEGIN PKCS7-----`` or ``-----BEGIN CERTIFICATE-----`` near the top of the file, and one or more blocks of 64-character lines of ASCII text (that will look like gobbledygook to a human). These files are also typically named with a ``.crt`` or ``.pem`` extension. DER-encoded chains ------------------ If the intermediate chain provided to you is a file that contains what appears to be random binary data, it is likely a PKCS7 chain in DER format. These files may also be named with a ``.p7b`` extension. You may use the binary DER file as-is when building your PKCS12 bundle: :: openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.p7b -passout pass: -out server.p12 ... or you can convert it to a series of PEM-encoded certificates: :: openssl pkcs7 -in intermediates-chain.p7b -inform DER -print_certs -out intermediates-chain.crt ... or you can convert it to a PEM-encoded PKCS7 bundle: :: openssl pkcs7 -in intermediates-chain.p7b -inform DER -outform PEM -out intermediates-chain.crt If the file is not a PKCS7 DER bundle, either of the two ``openssl pkcs7`` commands will fail. Further reading =============== For examples of using Layer 7 features for more advanced load balancing, please see: :doc:`l7-cookbook`
octavia-6.2.2/doc/source/user/guides/l7-cookbook.rst
.. Copyright (c) 2016 IBM Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================ Layer 7 Cookbook ================ Introduction ============ This document gives several examples of common L7 load balancer usage. For a description of L7 load balancing see: :doc:`l7` For the purposes of this guide we assume that the OpenStack Client command-line interface is going to be used to configure all features of Octavia with the Octavia driver back-end. Also, in order to keep these examples short, we assume that many non-L7 configuration tasks (such as deploying loadbalancers, listeners, pools, members, healthmonitors, etc.) have already been accomplished. A description of the starting conditions is given in each example below. Examples ======== .. _redirect-http-to-https: Redirect *http://www.example.com/* to *https://www.example.com/* ---------------------------------------------------------------- **Scenario description**: * Load balancer *lb1* has been set up with ``TERMINATED_HTTPS`` listener *tls_listener* on TCP port 443. * *tls_listener* has been populated with a default pool, members, etc. * *tls_listener* is available under the DNS name *https://www.example.com/* * We want any regular HTTP requests to TCP port 80 on *lb1* to be redirected to *tls_listener* on TCP port 443. **Solution**: 1. Create listener *http_listener* as an HTTP listener on *lb1* port 80. 2.
Set up an L7 Policy *policy1* on *http_listener* with action ``REDIRECT_PREFIX`` pointed at the prefix *https://www.example.com/*, so that the original request path is preserved in the redirect. 3. Add an L7 Rule to *policy1* which matches all requests. **CLI commands**: .. code-block:: bash openstack loadbalancer listener create --name http_listener --protocol HTTP --protocol-port 80 lb1 openstack loadbalancer l7policy create --action REDIRECT_PREFIX --redirect-prefix https://www.example.com/ --name policy1 http_listener openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH --value / policy1 .. _send-requests-to-static-pool: Send requests starting with /js or /images to *static_pool* ----------------------------------------------------------- **Scenario description**: * Listener *listener1* on load balancer *lb1* is set up to send all requests to its default_pool *pool1*. * We are introducing static content servers 10.0.0.10 and 10.0.0.11 on subnet *private-subnet*, and want any HTTP requests with a URL that starts with either "/js" or "/images" to be sent to those two servers instead of *pool1*. **Solution**: 1. Create pool *static_pool* on *lb1*. 2. Populate *static_pool* with the new back-end members. 3. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at *static_pool*. 4. Create an L7 Rule on *policy1* which looks for "/js" at the start of the request path. 5. Create L7 Policy *policy2* with action ``REDIRECT_TO_POOL`` pointed at *static_pool*. 6. Create an L7 Rule on *policy2* which looks for "/images" at the start of the request path. **CLI commands**: .. code-block:: bash openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name static_pool --protocol HTTP openstack loadbalancer member create --address 10.0.0.10 --protocol-port 80 --subnet-id private-subnet static_pool openstack loadbalancer member create --address 10.0.0.11 --protocol-port 80 --subnet-id private-subnet static_pool openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool static_pool --name policy1 listener1 openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH --value /js policy1 openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool static_pool --name policy2 listener1 openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH --value /images policy2 **Alternate solution** (using regular expressions): 1. Create pool *static_pool* on *lb1*. 2. Populate *static_pool* with the new back-end members. 3. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at *static_pool*. 4. Create an L7 Rule on *policy1* which uses a regular expression to match either "/js" or "/images" at the start of the request path. **CLI commands**: ..
code-block:: bash openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name static_pool --protocol HTTP openstack loadbalancer member create --address 10.0.0.10 --protocol-port 80 --subnet-id private-subnet static_pool openstack loadbalancer member create --address 10.0.0.11 --protocol-port 80 --subnet-id private-subnet static_pool openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool static_pool --name policy1 listener1 openstack loadbalancer l7rule create --compare-type REGEX --type PATH --value '^/(js|images)' policy1 Send requests for *http://www2.example.com/* to *pool2* ------------------------------------------------------- **Scenario description**: * Listener *listener1* on load balancer *lb1* is set up to send all requests to its default_pool *pool1*. * We have set up a new pool *pool2* on *lb1* and want any requests using the HTTP/1.1 hostname *www2.example.com* to be sent to *pool2* instead. **Solution**: 1. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at *pool2*. 2. Create an L7 Rule on *policy1* which matches the hostname *www2.example.com*. **CLI commands**: .. code-block:: bash openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool2 --name policy1 listener1 openstack loadbalancer l7rule create --compare-type EQUAL_TO --type HOST_NAME --value www2.example.com policy1 Send requests for *\*.example.com* to *pool2* --------------------------------------------- **Scenario description**: * Listener *listener1* on load balancer *lb1* is set up to send all requests to its default_pool *pool1*. * We have set up a new pool *pool2* on *lb1* and want any requests using any HTTP/1.1 hostname like *\*.example.com* to be sent to *pool2* instead. **Solution**: 1. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at *pool2*. 2. Create an L7 Rule on *policy1* which matches any hostname that ends with *example.com*. **CLI commands**: .. code-block:: bash openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool2 --name policy1 listener1 openstack loadbalancer l7rule create --compare-type ENDS_WITH --type HOST_NAME --value example.com policy1 Send unauthenticated users to *login_pool* (scenario 1) ------------------------------------------------------- **Scenario description**: * ``TERMINATED_HTTPS`` listener *listener1* on load balancer *lb1* is set up to send all requests to its default_pool *pool1*. * The site behind *listener1* requires all web users to authenticate, after which a browser cookie *auth_token* will be set. * When web users log out, or if the *auth_token* is invalid, the application servers in *pool1* clear the *auth_token*. * We want to introduce new secure authentication server 10.0.1.10 on Neutron subnet *secure_subnet* (a different Neutron subnet from the default application servers) which handles authenticating web users and sets the *auth_token*. *Note:* Obviously, to have a more secure authentication system that is less vulnerable to attacks like XSS, the new secure authentication server will need to set session variables to which the default_pool servers will have access outside the data path with the web client. There may be other security concerns as well. This example is not meant to address how these are to be accomplished--it's mainly meant to show how L7 application routing can be done based on a browser cookie. **Solution**: 1. Create pool *login_pool* on *lb1*. 2. 
Add member 10.0.1.10 on *secure_subnet* to *login_pool*. 3. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at *login_pool*. 4. Create an L7 Rule on *policy1* which looks for browser cookie *auth_token* (with any value) and matches if it is *NOT* present. **CLI commands**: .. code-block:: bash openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name login_pool --protocol HTTP openstack loadbalancer member create --address 10.0.1.10 --protocol-port 80 --subnet-id secure_subnet login_pool openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool login_pool --name policy1 listener1 openstack loadbalancer l7rule create --compare-type REGEX --key auth_token --type COOKIE --value '.*' --invert policy1 Send unauthenticated users to *login_pool* (scenario 2) -------------------------------------------------------- **Scenario description**: * ``TERMINATED_HTTPS`` listener *listener1* on load balancer *lb1* is set up to send all requests to its default_pool *pool1*. * The site behind *listener1* requires all web users to authenticate, after which a browser cookie *auth_token* will be set. * When web users log out, or if the *auth_token* is invalid, the application servers in *pool1* set *auth_token* to the literal string "INVALID". * We want to introduce new secure authentication server 10.0.1.10 on Neutron subnet *secure_subnet* (a different Neutron subnet from the default application servers) which handles authenticating web users and sets the *auth_token*. *Note:* Obviously, to have a more secure authentication system that is less vulnerable to attacks like XSS, the new secure authentication server will need to set session variables to which the default_pool servers will have access outside the data path with the web client. There may be other security concerns as well. This example is not meant to address how these are to be accomplished-- it's mainly meant to show how L7 application routing can be done based on a browser cookie. **Solution**: 1. Create pool *login_pool* on *lb1*. 2. Add member 10.0.1.10 on *secure_subnet* to *login_pool*. 3. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at *login_pool*. 4. Create an L7 Rule on *policy1* which looks for browser cookie *auth_token* (with any value) and matches if it is *NOT* present. 5. Create L7 Policy *policy2* with action ``REDIRECT_TO_POOL`` pointed at *login_pool*. 6. Create an L7 Rule on *policy2* which looks for browser cookie *auth_token* and matches if it is equal to the literal string "INVALID". **CLI commands**: .. 
code-block:: bash openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name login_pool --protocol HTTP openstack loadbalancer member create --address 10.0.1.10 --protocol-port 80 --subnet-id secure_subnet login_pool openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool login_pool --name policy1 listener1 openstack loadbalancer l7rule create --compare-type REGEX --key auth_token --type COOKIE --value '.*' --invert policy1 openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool login_pool --name policy2 listener1 openstack loadbalancer l7rule create --compare-type EQUAL_TO --key auth_token --type COOKIE --value INVALID policy2 Send requests for *http://api.example.com/api* to *api_pool* ------------------------------------------------------------ **Scenario description**: * Listener *listener1* on load balancer *lb1* is set up to send all requests to its default_pool *pool1*. * We have created pool *api_pool* on *lb1*, however, for legacy business logic reasons, we only want requests sent to this pool if they match the hostname *api.example.com* AND the request path starts with */api*. **Solution**: 1. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at *api_pool*. 2. Create an L7 Rule on *policy1* which matches the hostname *api.example.com*. 3. Create an L7 Rule on *policy1* which matches */api* at the start of the request path. (This rule will be logically ANDed with the previous rule.) **CLI commands**: .. code-block:: bash openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool api_pool --name policy1 listener1 openstack loadbalancer l7rule create --compare-type EQUAL_TO --type HOST_NAME --value api.example.com policy1 openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH --value /api policy1 Set up A/B testing on an existing production site using a cookie ---------------------------------------------------------------- **Scenario description**: * Listener *listener1* on load balancer *lb1* is a production site set up as described under :ref:`send-requests-to-static-pool` (alternate solution) above. Specifically: * HTTP requests with a URL that starts with either "/js" or "/images" are sent to pool *static_pool*. * All other requests are sent to *listener1's* default_pool *pool1*. * We are introducing a "B" version of the production site, complete with its own default_pool and static_pool. We will call these *pool_B* and *static_pool_B* respectively. * The *pool_B* members should be 10.0.0.50 and 10.0.0.51, and the *static_pool_B* members should be 10.0.0.100 and 10.0.0.101 on subnet *private-subnet*. * Web clients which should be routed to the "B" version of the site get a cookie set by the member servers in *pool1*. This cookie is called "site_version" and should have the value "B". **Solution**: 1. Create pool *pool_B* on *lb1*. 2. Populate *pool_B* with its new back-end members. 3. Create pool *static_pool_B* on *lb1*. 4. Populate *static_pool_B* with its new back-end members. 5. Create L7 Policy *policy2* with action ``REDIRECT_TO_POOL`` pointed at *static_pool_B*. This should be inserted at position 1. 6. Create an L7 Rule on *policy2* which uses a regular expression to match either "/js" or "/images" at the start of the request path. 7. Create an L7 Rule on *policy2* which matches the cookie "site_version" to the exact string "B". 8. Create L7 Policy *policy3* with action ``REDIRECT_TO_POOL`` pointed at *pool_B*. 
This should be inserted at position 2. 9. Create an L7 Rule on *policy3* which matches the cookie "site_version" to the exact string "B". *A word about L7 Policy position*: Since L7 Policies are evaluated in order according to their position parameter, and since the first L7 Policy whose L7 Rules all evaluate to True is the one whose action is followed, it is important that L7 Policies with the most specific rules get evaluated first. For example, in this solution, if *policy3* were to appear in the listener's L7 Policy list before *policy2* (that is, if *policy3* were to have a lower position number than *policy2*), then if a web client were to request the URL http://www.example.com/images/a.jpg with the cookie "site_version:B", then *policy3* would match, and the load balancer would send the request to *pool_B*. From the scenario description, this request clearly was meant to be sent to *static_pool_B*, which is why *policy2* needs to be evaluated before *policy3*. **CLI commands**: .. code-block:: bash openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name pool_B --protocol HTTP openstack loadbalancer member create --address 10.0.0.50 --protocol-port 80 --subnet-id private-subnet pool_B openstack loadbalancer member create --address 10.0.0.51 --protocol-port 80 --subnet-id private-subnet pool_B openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name static_pool_B --protocol HTTP openstack loadbalancer member create --address 10.0.0.100 --protocol-port 80 --subnet-id private-subnet static_pool_B openstack loadbalancer member create --address 10.0.0.101 --protocol-port 80 --subnet-id private-subnet static_pool_B openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool static_pool_B --name policy2 --position 1 listener1 openstack loadbalancer l7rule create --compare-type REGEX --type PATH --value '^/(js|images)' policy2 openstack loadbalancer l7rule create --compare-type EQUAL_TO --key site_version --type COOKIE --value B policy2 openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool_B --name policy3 --position 2 listener1 openstack loadbalancer l7rule create --compare-type EQUAL_TO --key site_version --type COOKIE --value B policy3 Redirect requests with an invalid TLS client authentication certificate ----------------------------------------------------------------------- **Scenario description**: * Listener *listener1* on load balancer *lb1* is configured for ``OPTIONAL`` client_authentication. * Web clients that do not present a TLS client authentication certificate should be redirected to a signup page at *http://www.example.com/signup*. **Solution**: 1. Create the load balancer *lb1*. 2. Create a listener *listener1* of type ``TERMINATED_HTTPS`` with a client_ca_tls_container_ref and client_authentication ``OPTIONAL``. 3. Create an L7 Policy *policy1* on *listener1* with action ``REDIRECT_TO_URL`` pointed at the URL *http://www.example.com/signup*. 4. Add an L7 Rule to *policy1* that does not match ``SSL_CONN_HAS_CERT``. **CLI commands**: ..
code-block:: bash openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet openstack loadbalancer listener create --name listener1 --protocol TERMINATED_HTTPS --client-authentication OPTIONAL --protocol-port 443 --default-tls-container-ref http://192.0.2.15:9311/v1/secrets/697c2a6d-ffbe-40b8-be5e-7629fd636bca --client-ca-tls-container-ref http://192.0.2.15:9311/v1/secrets/dba60b77-8dad-4171-8a96-f21e1ca5fb46 lb1 openstack loadbalancer l7policy create --action REDIRECT_TO_URL --redirect-url http://www.example.com/signup --name policy1 listener1 openstack loadbalancer l7rule create --type SSL_CONN_HAS_CERT --invert --compare-type EQUAL_TO --value True policy1 Send users from the finance department to pool2 ----------------------------------------------- **Scenario description**: * Users from the finance department have client certificates with the OU field of the distinguished name set to ``finance``. * Only users with valid finance department client certificates should be able to access ``pool2``. Others will be rejected. **Solution**: 1. Create the load balancer *lb1*. 2. Create a listener *listener1* of type ``TERMINATED_HTTPS`` with a client_ca_tls_container_ref and client_authentication ``MANDATORY``. 3. Create a pool *pool2* on load balancer *lb1*. 4. Create an L7 Policy *policy1* on *listener1* with action ``REDIRECT_TO_POOL`` pointed at *pool2*. 5. Add an L7 Rule to *policy1* that matches ``SSL_CONN_HAS_CERT``. 6. Add an L7 Rule to *policy1* that matches ``SSL_VERIFY_RESULT`` with a value of 0. 7. Add an L7 Rule to *policy1* of type ``SSL_DN_FIELD`` that looks for "finance" in the "OU" field of the client authentication distinguished name. **CLI commands**: .. code-block:: bash openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet openstack loadbalancer listener create --name listener1 --protocol TERMINATED_HTTPS --client-authentication MANDATORY --protocol-port 443 --default-tls-container-ref http://192.0.2.15:9311/v1/secrets/697c2a6d-ffbe-40b8-be5e-7629fd636bca --client-ca-tls-container-ref http://192.0.2.15:9311/v1/secrets/dba60b77-8dad-4171-8a96-f21e1ca5fb46 lb1 openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name pool2 --protocol HTTP openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool2 --name policy1 listener1 openstack loadbalancer l7rule create --type SSL_CONN_HAS_CERT --compare-type EQUAL_TO --value True policy1 openstack loadbalancer l7rule create --type SSL_VERIFY_RESULT --compare-type EQUAL_TO --value 0 policy1 openstack loadbalancer l7rule create --type SSL_DN_FIELD --compare-type EQUAL_TO --key OU --value finance policy1
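To confirm the policy wiring before sending traffic, the created policy and its rules can be listed back. This is an optional sketch using the resource names from this example:

.. code-block:: bash

   openstack loadbalancer l7policy show policy1
   openstack loadbalancer l7rule list policy1

All three rules are ANDed together, so a request that does not satisfy every rule never reaches *pool2*.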
octavia-6.2.2/doc/source/user/guides/l7.rst
.. Copyright (c) 2016 IBM Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================== Layer 7 Load Balancing ====================== What is L7 load balancing? ========================== Layer 7 load balancing takes its name from the OSI model, indicating that the load balancer distributes requests to back-end pools based on layer 7 (application) data. Layer 7 load balancing is also known as "request switching," "application load balancing," "content based routing," "content based switching," and "content based balancing." A layer 7 load balancer consists of a listener that accepts requests on behalf of a number of back-end pools and distributes those requests based on policies that use application data to determine which pools should service any given request. This allows for the application infrastructure to be specifically tuned/optimized to serve specific types of content. For example, one group of back-end servers (pool) can be tuned to serve only images, another for execution of server-side scripting languages like PHP and ASP, and another for static content such as HTML, CSS, and JavaScript. Unlike lower-level load balancing, layer 7 load balancing does not require that all pools behind the load balancing service have the same content. In fact, it is generally expected that the back-end servers in different pools will have different content. Layer 7 load balancers are capable of directing requests based on URI, host, HTTP headers, and other data in the application message. L7 load balancing in Octavia ---------------------------- The layer 7 load balancing capabilities described in this document were added to Neutron LBaaS and Octavia in the Mitaka release cycle (Octavia 0.8). While layer 7 load balancing in general can theoretically be done for any well-defined layer 7 application interface, for the purposes of Octavia, L7 functionality refers only to the HTTP protocol and its semantics. How does it work? ================= Neutron LBaaS and Octavia accomplish the logic of layer 7 load balancing through the use of L7 Rules and L7 Policies. An L7 Rule is a single, simple logical test which evaluates to true or false. An L7 Policy is a collection of L7 rules, as well as a defined action that should be taken if all the rules associated with the policy match. These concepts and their specific details are expanded upon below. L7 Rules -------- An L7 Rule is a single, simple logical test which returns either true or false. It consists of a rule type, a comparison type, a value, and an optional key that gets used depending on the rule type. An L7 rule must always be associated with an L7 policy. See also: `Octavia API Reference `_ Rule types __________ L7 rules have the following types: * ``HOST_NAME``: The rule compares the HTTP/1.1 hostname in the request against the value parameter in the rule. * ``PATH``: The rule compares the path portion of the HTTP URI against the value parameter in the rule. * ``FILE_TYPE``: The rule compares the last portion of the URI against the value parameter in the rule. (e.g., "txt", "jpg", etc.) * ``HEADER``: The rule looks for a header defined in the key parameter and compares it against the value parameter in the rule. * ``COOKIE``: The rule looks for a cookie named by the key parameter and compares it against the value parameter in the rule. * ``SSL_CONN_HAS_CERT``: The rule will match if the client has presented a certificate for TLS client authentication. This does not imply the certificate is valid. * ``SSL_VERIFY_RESULT``: This rule will match the TLS client authentication certificate validation result.
A value of '0' means the certificate was successfully validated. A value greater than '0' means the certificate failed validation. This value follows the `openssl-verify result codes `_. * ``SSL_DN_FIELD``: The rule looks for a Distinguished Name field defined in the key parameter and compares it against the value parameter in the rule. Comparison types ________________ L7 rules of a given type always do comparisons. The types of comparisons we support are listed below. Note that not all rule types support all comparison types: * ``REGEX``: Perl-type regular expression matching * ``STARTS_WITH``: String starts with * ``ENDS_WITH``: String ends with * ``CONTAINS``: String contains * ``EQUAL_TO``: String is equal to Invert ______ In order to more fully express the logic required by some policies, rules may have their result inverted. That is to say, if the invert parameter of a given rule is true, the result of its comparison will be inverted. (For example, an inverted "equal to" rule effectively becomes a "not equal to", and an inverted "regex" rule returns true only if the given regex does not match.) L7 Policies ----------- An L7 Policy is a collection of L7 rules associated with a Listener, and which may also have an association to a back-end pool. Policies describe actions that should be taken by the load balancing software if all of the rules in the policy return true. See also: `Octavia API Reference `_ Policy Logic ____________ Policy logic is very simple: All the rules associated with a given policy are logically ANDed together. A request must match all the policy's rules to match the policy. If you need to express a logical OR operation between rules, then do this by creating multiple policies with the same action (or, possibly, by making a more elaborate regular expression). Policy Actions ______________ If an L7 policy matches a given request, then that policy's action is executed. The following are the actions an L7 Policy may take: * ``REJECT``: The request is denied with an appropriate response code, and not forwarded on to any back-end pool. * ``REDIRECT_TO_URL``: The request is sent an HTTP redirect to the URL defined in the ``redirect_url`` parameter. * ``REDIRECT_TO_POOL``: The request is forwarded to the back-end pool associated with the L7 policy. Policy Position _______________ When multiple L7 Policies are associated with a listener, then the policies' ``position`` parameter becomes important. The ``position`` parameter is used when determining the order in which L7 policies are evaluated. Here are a few notes about how policy position affects listener behavior: * In the reference implementation (haproxy amphorae) of Octavia, haproxy enforces the following ordering regarding policy actions: * ``REJECT`` policies take precedence over all other policies. * ``REDIRECT_TO_URL`` policies take precedence over ``REDIRECT_TO_POOL`` policies. * ``REDIRECT_TO_POOL`` policies are only evaluated after all of the above, and in the order specified by the ``position`` of the policy. * L7 Policies are evaluated in a specific order (as defined by the ``position`` attribute), and the first policy that matches a given request will be the one whose action is followed. * If no policy matches a given request, then the request is routed to the listener's default pool, if it exists. If the listener has no default pool, then a 503 error is returned. * Policy position numbering starts with 1. * If a new policy is created with a position that matches that of an existing policy, then the new policy is inserted at the given position. * If a new policy is created without specifying a position, or specifying a position that is greater than the number of policies already in the list, the new policy will just be appended to the list. * When policies are inserted, deleted, or appended to the list, the policy position values are re-ordered from 1 without skipping numbers. For example, if policies A, B, and C have position values of 1, 2, and 3 respectively, then deleting policy B from the list changes policy C's position to 2.
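As a small illustration of this re-ordering behavior (a sketch only; *listener1* and *pool2* are hypothetical, pre-existing resources), creating a new policy at position 1 pushes each existing policy down one position:

.. code-block:: bash

   openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool2 --position 1 --name policy_urgent listener1
   # The previously existing policies on listener1 now start at position 2:
   openstack loadbalancer l7policy list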
L7 usage examples ================= For a cookbook of common L7 usage examples, please see the :doc:`l7-cookbook` Useful links ============ * `Octavia API Reference `_ * `LBaaS Layer 7 rules `_ * `Using ACLs and fetching samples `_ * `OpenSSL openssl-verify command `_
octavia-6.2.2/doc/source/user/index.rst
============ Octavia User ============ Cookbooks ========= .. toctree:: :glob: :maxdepth: 1 guides/basic-cookbook guides/l7-cookbook Guides ====== .. toctree:: :glob: :maxdepth: 1 guides/l7 feature-classification/index References ========== .. toctree:: :glob: :maxdepth: 1 Octavia API Reference Command Line Interface Reference sdks Videos ====== .. toctree:: :glob: :maxdepth: 1 Introduction to OpenStack Load Balancing (2017 Boston Summit) .. only:: html Indices and Search ------------------ * :ref:`genindex` * :ref:`search`
octavia-6.2.2/doc/source/user/sdks.rst
.. Copyright (c) 2018 Rackspace, US Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================================= Octavia Software Development Kits (SDK) ======================================= Introduction ============ This is a list of known SDKs and language bindings that support OpenStack load balancing via the Octavia API. This list is maintained on a best-effort basis, so please check with your favorite SDK project to see if they support OpenStack load balancing. If not, open a bug for them! .. Note:: The projects listed here may not be maintained by the OpenStack LBaaS team. Please submit bugs for these projects through their respective bug tracking systems.
Go == `Gophercloud `_ Java ==== `OpenStack4j `_ Python ====== `OpenStack SDK `_
octavia-6.2.2/elements/
octavia-6.2.2/elements/amphora-agent/
octavia-6.2.2/elements/amphora-agent/README.rst
Element to install an Octavia Amphora agent. By default, it installs the agent from source. To enable installation from distribution repositories, define the following: export DIB_INSTALLTYPE_amphora_agent=package Note: this requires a system base image modified to include OpenStack repositories
octavia-6.2.2/elements/amphora-agent/element-deps
dib-init-system install-static package-installs pkg-map source-repositories svc-map
octavia-6.2.2/elements/amphora-agent/install.d/
octavia-6.2.2/elements/amphora-agent/install.d/amphora-agent-source-install/
octavia-6.2.2/elements/amphora-agent/install.d/amphora-agent-source-install/75-amphora-agent-install
/etc/init.d/amphora-agent.init ;; *) echo "Unsupported init system" exit 1 ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.conf0000664000175000017500000000062200000000000032500 0ustar00zuulzuul00000000000000description "Start up the Octavia Amphora Agent" start on started certs-ramfs stop on runlevel [!2345] respawn respawn limit 2 2 exec amphora-agent --config-file /etc/octavia/amphora-agent.conf post-start script PID=`status amphora-agent | egrep -oi '([0-9]+)$' | head -n1` echo $PID > /var/run/amphora-agent.pid end script post-stop script rm -f /var/run/amphora-agent.pid end script ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.init0000664000175000017500000000400000000000000032510 0ustar00zuulzuul00000000000000### BEGIN INIT INFO # Provides: amphora-agent # Required-Start: $remote_fs $syslog $network certs-ramfs # Required-Stop: $remote_fs $syslog $network # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Runs the Amphora Agent processes # Description: This script runs Octavia Amphora Agent processes. # This script will start the Amphora Agent services # and kill them. ### END INIT INFO # Using the lsb functions to perform the operations. . /lib/lsb/init-functions # Process name ( For display ) NAME=amphora-agent # Daemon name, where is the actual executable DAEMON=/usr/local/bin/amphora-agent # pid file for the daemon PIDFILE=/var/run/amphora-agent.pid # If the daemon is not there, then exit. test -x $DAEMON || exit 5 case $1 in start) # Checked the PID file exists and check the actual status of process if [ -e $PIDFILE ]; then status_of_proc -p $PIDFILE $DAEMON "$NAME process" && status="0" || status="$?" # If the status is SUCCESS then don't need to start again. if [ $status = "0" ]; then exit # Exit fi fi # Start the daemon. log_daemon_msg "Starting the process" "$NAME" # Start the daemon with the help of start-stop-daemon # Log the message appropriately if start-stop-daemon --start -m --quiet --oknodo --pidfile $PIDFILE --startas $DAEMON -- --config-file /etc/octavia/amphora-agent.conf ; then log_end_msg 0 else log_end_msg 1 fi ;; stop) # Stop the daemon. if [ -e $PIDFILE ]; then status_of_proc -p $PIDFILE $DAEMON "Stopping the $NAME process" && status="0" || status="$?" if [ "$status" = 0 ]; then start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE /bin/rm -rf $PIDFILE fi else log_daemon_msg "$NAME process is not running" log_end_msg 0 fi ;; restart) # Restart the daemon. $0 stop && sleep 2 && $0 start ;; *) # For invalid arguments, print the usage message. 
echo "Usage: $0 {start|stop|restart|reload|status}" exit 2 ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.logrotate0000664000175000017500000000055300000000000033556 0ustar00zuulzuul00000000000000/var/log/amphora-agent.log { daily rotate 10 missingok notifempty compress delaycompress sharedscripts postrotate # Signal name shall not have the SIG prefix in kill command # http://pubs.opengroup.org/onlinepubs/9699919799/utilities/kill.html kill -s USR1 $(cat /var/run/amphora-agent.pid) endscript } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.service0000664000175000017500000000065200000000000033216 0ustar00zuulzuul00000000000000[Unit] Description=OpenStack Octavia Amphora Agent After=network.target syslog.service certs-ramfs.service Requires=certs-ramfs.service Wants=syslog.service [Service] ExecStart=/usr/local/bin/amphora-agent --config-file /etc/octavia/amphora-agent.conf KillMode=mixed Restart=always ExecStartPost=/bin/sh -c "echo $MAINPID > /var/run/amphora-agent.pid" PIDFile=/var/run/amphora-agent.pid [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/package-installs.yaml0000664000175000017500000000154200000000000023641 0ustar00zuulzuul00000000000000amphora-agent: installtype: package build-essential: build-only: True libffi-dev: build-only: True libssl-dev: build-only: True python-dev: build-only: True acl: # Note: Red Hat family does not currently ship acpid for ppc64le. 
# This sets up a pkg-map to exclude it for Red Hat family ppc64le arch acpid-ppc64le: arch: ppc64le, ppc64el acpid: arch: i386, amd64, arm64, s390x apparmor: apt-transport-https: at: bash-completion: cloud-guest-utils: cloud-init: cron: curl: dbus: dkms: dmeventd: ethtool: gawk: ifenslave: ifupdown: iptables: iputils-tracepath: irqbalance: isc-dhcp-client: less: logrotate: lsof: net-tools: netbase: netcat-openbsd: network-scripts: open-vm-tools: arch: i386, amd64 openssh-client: openssh-server: pollinate: psmisc: rsyslog: screen: socat: tcpdump: ubuntu-cloudimage-keyring: ureadahead: uuid-runtime: vim-tiny: vlan: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/pkg-map0000664000175000017500000000217400000000000021014 0ustar00zuulzuul00000000000000{ "release": { "rhel": { "8": { "isc-dhcp-client": "dhcp-client", "python-dev": "platform-python-devel", "vlan": "", "screen": "", "dkms": "" } }, "centos": { "8": { "isc-dhcp-client": "dhcp-client", "python-dev": "platform-python-devel", "vlan": "", "screen": "", "dkms": "" } } }, "family": { "redhat": { "amphora-agent": "openstack-octavia-amphora-agent", "acpid-ppc64le": "", "netcat-openbsd": "nmap-ncat", "netbase": "", "cron": "cronie", "ifenslave": "", "iputils-tracepath": "", "cloud-guest-utils": "cloud-utils-growpart", "apparmor": "", "dmeventd": "", "isc-dhcp-client": "dhclient", "uuid-runtime": "", "ubuntu-cloudimage-keyring": "", "vim-tiny": "", "ureadahead": "", "apt-transport-https": "", "pollinate": "", "ifupdown": "", "network-scripts": "network-scripts" } }, "default": { "amphora-agent": "amphora-agent", "acpid-ppc64le": "acpid", "network-scripts": "" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3622167 octavia-6.2.2/elements/amphora-agent/post-install.d/0000775000175000017500000000000000000000000022404 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/post-install.d/10-enable-network-scripts0000775000175000017500000000030600000000000027151 0ustar00zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail if [[ "$DISTRO_NAME" =~ (rhel|centos) ]] && [ "$DIB_RELEASE" == "8" ]; then chkconfig network on fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/post-install.d/11-enable-amphora-agent-systemd0000775000175000017500000000027400000000000030211 0ustar00zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail if [ "$DIB_INIT_SYSTEM" == "systemd" ]; then systemctl enable $(svc-map amphora-agent) fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/post-install.d/90-remove-build-deps0000775000175000017500000000073000000000000026103 0ustar00zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail [ "${DIB_INSTALLTYPE_amphora_agent:-}" = "package" ] && exit 0 rm -rf /opt/amphora-agent case $DISTRO_NAME in ubuntu | debian ) apt-get --assume-yes purge --auto-remove ;; fedora | centos* | rhel* ) YUM=${YUM:-yum} ${YUM} -v -y autoremove ;; *) echo "ERROR: Unsupported distribution $DISTRO_NAME" exit 1 ;; esac 
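The amphora-agent element and its companions are consumed through diskimage-builder; the Octavia tree also ships a diskimage-create.sh wrapper that assembles the element list for you. As a minimal sketch of a direct invocation (the output name, architecture and element list here are illustrative, not mandated by these files):

    # Build an Ubuntu-based amphora image with the agent installed from source;
    # DIB_INSTALLTYPE_amphora_agent=package switches to distribution packages
    # (see the amphora-agent README above).
    export DIB_INSTALLTYPE_amphora_agent=source
    disk-image-create -a amd64 -o amphora-x64-haproxy.qcow2 \
        ubuntu-minimal vm amphora-agent haproxy-octavia keepalived-octavia \
        no-resolvconf remove-sshd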
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/source-repository-amphora-agent0000664000175000017500000000035200000000000025712 0ustar00zuulzuul00000000000000# This is used for source-based builds amphora-agent git /opt/amphora-agent https://opendev.org/openstack/octavia stable/ussuri upper-constraints file /opt/upper-constraints.txt https://releases.openstack.org/constraints/upper/ussuri ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3262167 octavia-6.2.2/elements/amphora-agent/static/0000775000175000017500000000000000000000000021020 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3262167 octavia-6.2.2/elements/amphora-agent/static/etc/0000775000175000017500000000000000000000000021573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3262167 octavia-6.2.2/elements/amphora-agent/static/etc/NetworkManager/0000775000175000017500000000000000000000000024517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/amphora-agent/static/etc/NetworkManager/conf.d/0000775000175000017500000000000000000000000025666 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/static/etc/NetworkManager/conf.d/no-auto-default.conf0000664000175000017500000000050000000000000031536 0ustar00zuulzuul00000000000000[main] # Disable auto configuration for newly detected devices. # This prevents having temporary addresses and routes in the default namespace # between the detection of a new device and its move to the amphora-haproxy # namespace. # The management interface configuration is triggered by cloud-init. no-auto-default=* ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/amphora-agent/static/etc/rsyslog.d/0000775000175000017500000000000000000000000023517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/static/etc/rsyslog.d/55-octavia-socket.conf0000664000175000017500000000015500000000000027532 0ustar00zuulzuul00000000000000module(load="imuxsock") input(type="imuxsock" Socket="/run/rsyslog/octavia/log" CreatePath="on") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/amphora-agent/svc-map0000664000175000017500000000011000000000000021012 0ustar00zuulzuul00000000000000amphora-agent: default: amphora-agent redhat: octavia-amphora-agent ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/certs-ramfs/0000775000175000017500000000000000000000000017234 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/README.rst0000664000175000017500000000024300000000000020722 0ustar00zuulzuul00000000000000Element to set up an encrypted ramfs to store the TLS certificates and keys.
Enabling this element will mean that the amphora can no longer recover from a reboot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/element-deps0000664000175000017500000000006000000000000021535 0ustar00zuulzuul00000000000000dib-init-system package-installs install-static ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3262167 octavia-6.2.2/elements/certs-ramfs/init-scripts/0000775000175000017500000000000000000000000021664 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/certs-ramfs/init-scripts/systemd/0000775000175000017500000000000000000000000023354 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/init-scripts/systemd/certs-ramfs.service0000664000175000017500000000066400000000000027172 0ustar00zuulzuul00000000000000[Unit] Description=Creates an encrypted ramfs for Octavia certs Before=amphora-agent.service After=cloud-config.target [Service] Type=oneshot ExecStart=/usr/local/bin/certfs-ramfs ExecStop=/bin/sh -c 'certs_path=$$(awk "/base_cert_dir / {printf \\$$3}" /etc/octavia/amphora-agent.conf); umount "$${certs_path}"; cryptsetup luksClose /dev/mapper/certfs-ramfs;' RemainAfterExit=yes TimeoutSec=0 [Install] WantedBy=amphora-agent.service ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/certs-ramfs/init-scripts/sysv/0000775000175000017500000000000000000000000022670 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/init-scripts/sysv/certs-ramfs0000664000175000017500000000211400000000000025037 0ustar00zuulzuul00000000000000### BEGIN INIT INFO # Provides: certs-ramfs # Required-Start: $remote_fs $syslog $network cloud-config # Required-Stop: $remote_fs $syslog $network # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Creates an encrypted ramfs for Octavia certs # Description: Creates an encrypted ramfs for Octavia TLS # certificates and key storage. ### END INIT INFO # Using the lsb functions to perform the operations. . /lib/lsb/init-functions # Process name ( For display ) NAME=certs-ramfs case $1 in start) log_daemon_msg "Starting the process" "$NAME" /usr/local/bin/certfs-ramfs log_end_msg 0 ;; stop) log_daemon_msg "Stopping the process" "$NAME" certs_path=$(awk "/base_cert_dir / {printf \$3}" /etc/octavia/amphora-agent.conf) umount "${certs_path}" cryptsetup luksClose /dev/mapper/certfs-ramfs log_end_msg 0 ;; restart) # Restart the daemon. $0 stop && sleep 2 && $0 start ;; *) # For invalid arguments, print the usage message. 
echo "Usage: $0 {start|stop|restart|reload|status}" exit 2 ;; esac ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/certs-ramfs/init-scripts/upstart/0000775000175000017500000000000000000000000023366 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/init-scripts/upstart/certs-ramfs.conf0000664000175000017500000000056000000000000026464 0ustar00zuulzuul00000000000000description "Creates an encrypted ramfs for Octavia certs" start on started cloud-config stop on runlevel [!2345] pre-start script /usr/local/bin/certfs-ramfs end script post-stop script certs_path=$(awk "/base_cert_dir / {printf \$3}" /etc/octavia/amphora-agent.conf) umount "${certs_path}" cryptsetup luksClose /dev/mapper/certfs-ramfs end script ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/package-installs.yaml0000664000175000017500000000001400000000000023335 0ustar00zuulzuul00000000000000cryptsetup: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/certs-ramfs/post-install.d/0000775000175000017500000000000000000000000022107 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/post-install.d/30-enable-certs-ramfs-service0000775000175000017500000000054100000000000027365 0ustar00zuulzuul00000000000000#!/bin/bash if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then set -x fi set -eu set -o pipefail case "$DIB_INIT_SYSTEM" in upstart|sysv) # nothing to do exit 0 ;; systemd) systemctl enable certs-ramfs.service ;; *) echo "Unsupported init system $DIB_INIT_SYSTEM" exit 1 ;; esac ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3262167 octavia-6.2.2/elements/certs-ramfs/static/0000775000175000017500000000000000000000000020523 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3262167 octavia-6.2.2/elements/certs-ramfs/static/usr/0000775000175000017500000000000000000000000021334 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3262167 octavia-6.2.2/elements/certs-ramfs/static/usr/local/0000775000175000017500000000000000000000000022426 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/certs-ramfs/static/usr/local/bin/0000775000175000017500000000000000000000000023176 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/static/usr/local/bin/certfs-ramfs0000775000175000017500000000103100000000000025513 0ustar00zuulzuul00000000000000#!/bin/bash cryptsetup_args="--type=luks1" # Only 1 block ram device is needed modprobe brd rd_nr=1 passphrase=$(head /dev/urandom | tr -dc "a-zA-Z0-9" | fold -w 32 | head -n 1) certs_path=$(awk "/base_cert_dir / {printf \$3}" /etc/octavia/amphora-agent.conf) mkdir -p "${certs_path}" echo -n "${passphrase}" | cryptsetup $cryptsetup_args luksFormat /dev/ram0 - 
echo -n "${passphrase}" | cryptsetup $cryptsetup_args luksOpen /dev/ram0 certfs-ramfs - mkfs.ext2 /dev/mapper/certfs-ramfs mount /dev/mapper/certfs-ramfs "${certs_path}" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/certs-ramfs/svc-map0000664000175000017500000000004400000000000020523 0ustar00zuulzuul00000000000000certs-ramfs: default: certs-ramfs ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/disable-makecache/0000775000175000017500000000000000000000000020310 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/disable-makecache/README.rst0000664000175000017500000000047400000000000022004 0ustar00zuulzuul00000000000000This element disables the dnf makecache hourly timer. The amphora typically do not have internet access nor access to DNS servers. We want to disable this makecache timer to stop the amphora from attempting to update/download the dnf cache every hour. Without this element it will run and log a failure every hour. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/disable-makecache/post-install.d/0000775000175000017500000000000000000000000023163 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/disable-makecache/post-install.d/80-disable-makecache0000775000175000017500000000045500000000000026644 0ustar00zuulzuul00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail case $DISTRO_NAME in fedora | centos* | rhel* ) systemctl disable dnf-makecache.timer || true ;; *) echo "ERROR: Unsupported distribution $DISTRO_NAME" exit 1 ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/disable-makecache/svc-map0000664000175000017500000000006000000000000021575 0ustar00zuulzuul00000000000000disable-makecache: default: disable-makecache ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/haproxy-octavia/0000775000175000017500000000000000000000000020124 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/README.rst0000664000175000017500000000010100000000000021603 0ustar00zuulzuul00000000000000Element to install an Octavia Amphora with an haproxy backend. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/element-deps0000664000175000017500000000004000000000000022423 0ustar00zuulzuul00000000000000package-installs sysctl pkg-map ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/haproxy-octavia/install.d/0000775000175000017500000000000000000000000022014 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/install.d/76-haproxy0000775000175000017500000000017100000000000023665 0ustar00zuulzuul00000000000000#!/bin/bash set -eux set -o pipefail [ -d /var/lib/haproxy ] || install -d -D -m 0755 -o root -g root /var/lib/haproxy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/package-installs.json0000664000175000017500000000006200000000000024237 0ustar00zuulzuul00000000000000{ "haproxy": null, "iputils-ping": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/pkg-map0000664000175000017500000000045100000000000021403 0ustar00zuulzuul00000000000000{ "distro": { "ubuntu": { "haproxy": "haproxy" } }, "family": { "debian": { "haproxy": "haproxy" }, "redhat": { "iputils-ping": "iputils" } }, "default": { "haproxy": "haproxy" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/haproxy-octavia/post-install.d/0000775000175000017500000000000000000000000022777 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/post-install.d/20-disable-default-haproxy0000775000175000017500000000010000000000000027650 0ustar00zuulzuul00000000000000#!/bin/bash set -eu set -o pipefail systemctl disable haproxy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/post-install.d/20-haproxy-tune-kernel0000775000175000017500000000221100000000000027061 0ustar00zuulzuul00000000000000#!/bin/bash set -eu set -o pipefail sysctl-write-value net.ipv4.tcp_max_tw_buckets 5800000 sysctl-write-value net.ipv4.tcp_max_orphans 5800000 sysctl-write-value net.ipv4.tcp_max_syn_backlog 100000 sysctl-write-value net.ipv4.tcp_keepalive_time 300 sysctl-write-value net.ipv4.tcp_tw_recycle 0 sysctl-write-value net.ipv4.tcp_tw_reuse 1 sysctl-write-value net.core.somaxconn 65534 sysctl-write-value net.ipv4.tcp_synack_retries 3 sysctl-write-value net.core.netdev_max_backlog 100000 # This should allow HAProxy maxconn to be 1,000,000 sysctl-write-value fs.file-max 2600000 sysctl-write-value fs.nr_open 2600000 # It's ok for these to fail if conntrack module isn't loaded sysctl-write-value net.ipv4.netfilter.ip_conntrack_tcp_timeout_time_wait 5 || true sysctl-write-value net.ipv4.netfilter.ip_conntrack_tcp_timeout_fin_wait 5 || true sysctl-write-value net.ipv4.tcp_fin_timeout 5 sysctl-write-value net.ipv4.ip_nonlocal_bind 1 sysctl-write-value net.ipv6.ip_nonlocal_bind 1 sysctl-write-value net.ipv4.tcp_rmem "16384 65536 524288" sysctl-write-value net.ipv4.tcp_wmem "16384 349520 699040" sysctl-write-value 
net.ipv4.ip_local_port_range "1025 65534" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/post-install.d/20-haproxy-user-group-config0000775000175000017500000000054200000000000030210 0ustar00zuulzuul00000000000000#!/bin/bash set -eu set -o pipefail case $DISTRO_NAME in ubuntu | debian ) HAPROXY_USER_GROUP=nogroup ;; fedora | centos* | rhel* ) HAPROXY_USER_GROUP=haproxy ;; *) HAPROXY_USER_GROUP=nogroup ;; esac cat >> /var/lib/octavia/haproxy-default-user-group.conf <> /etc/rsyslog.d/49-haproxy.conf < /var/lib/octavia/ping-wrapper.sh < /dev/null 2>&1 else $ping_cmd -q -n -w 1 -c 1 \$HAPROXY_SERVER_ADDR > /dev/null 2>&1 fi EOF chmod 755 /var/lib/octavia/ping-wrapper.sh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/haproxy-octavia/svc-map0000664000175000017500000000003400000000000021412 0ustar00zuulzuul00000000000000haproxy: default: haproxy ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/ipvsadmin/0000775000175000017500000000000000000000000017000 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/ipvsadmin/README.rst0000664000175000017500000000004000000000000020461 0ustar00zuulzuul00000000000000Element to install ipvsadmin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/ipvsadmin/element-deps0000664000175000017500000000003100000000000021277 0ustar00zuulzuul00000000000000package-installs pkg-map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/ipvsadmin/package-installs.json0000664000175000017500000000003000000000000023106 0ustar00zuulzuul00000000000000{ "ipvsadm": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/ipvsadmin/svc-map0000664000175000017500000000004000000000000020263 0ustar00zuulzuul00000000000000ipvsadmin: default: ipvsadmin ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/keepalived-octavia/0000775000175000017500000000000000000000000020543 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/keepalived-octavia/README.rst0000664000175000017500000000010100000000000022222 0ustar00zuulzuul00000000000000Element to install an Octavia Amphora with keepalived backend. 
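The ping-wrapper.sh written by the haproxy-octavia element above chooses between ping and ping6 based on whether the target address contains a colon; HAPROXY_SERVER_ADDR is one of the environment variables haproxy exports to external check scripts. A sketch of how a backend could wire the wrapper up through haproxy's external-check mechanism (the backend and server entries are illustrative, not taken from these files):

    global
        external-check                # allow external check scripts to run

    backend example-pool
        option external-check
        external-check command /var/lib/octavia/ping-wrapper.sh
        server member1 192.0.2.10:80 check inter 5s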
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/keepalived-octavia/element-deps0000664000175000017500000000003100000000000023042 0ustar00zuulzuul00000000000000package-installs pkg-map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/keepalived-octavia/package-installs.json0000664000175000017500000000003300000000000024654 0ustar00zuulzuul00000000000000{ "keepalived": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/keepalived-octavia/pkg-map0000664000175000017500000000036700000000000022026 0ustar00zuulzuul00000000000000{ "distro": { "ubuntu": { "keepalived": "keepalived" } }, "family": { "debian": { "keepalived": "keepalived" } }, "default": { "keepalived": "keepalived" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/keepalived-octavia/svc-map0000664000175000017500000000004600000000000022034 0ustar00zuulzuul00000000000000vrrp-octavia: default: vrrp-octavia ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/no-resolvconf/0000775000175000017500000000000000000000000017600 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/no-resolvconf/README.rst0000664000175000017500000000115500000000000021271 0ustar00zuulzuul00000000000000This element clears out /etc/resolv.conf and prevents dhclient from populating it with data from DHCP. This means that DNS resolution will not work from the amphora. This is OK because all outbound connections from the amphora will be made using raw IP addresses. In addition, we remove dns from the nsswitch.conf hosts setting. This has the real benefit of speeding up host boot and configuration times. This is especially helpful when running tempest tests in a devstack environment, where DNS resolution from the amphora usually doesn't work anyway; it means that the amphora never waits for DNS timeouts to occur.
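For reference, the net effect of the sed in the finalise script that follows is a one-word change to the hosts database line of nsswitch.conf (spacing illustrative):

    hosts:          files dns      # before
    hosts:          files          # after: dns stripped, so no resolver is consulted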
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/no-resolvconf/finalise.d/0000775000175000017500000000000000000000000021614 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/no-resolvconf/finalise.d/99-disable-resolv-conf0000775000175000017500000000112600000000000025637 0ustar00zuulzuul00000000000000#!/bin/bash echo "" > /etc/resolv.conf echo "" > /etc/resolv.conf.ORIG if [ -d /etc/dhcp/dhclient-enter-hooks.d ] then # Debian/Ubuntu echo "#!/bin/sh make_resolv_conf() { : ; }" > /etc/dhcp/dhclient-enter-hooks.d/noresolvconf chmod +x /etc/dhcp/dhclient-enter-hooks.d/noresolvconf rm -f /etc/dhcp/dhclient-enter-hooks.d/resolvconf else # RHEL/CentOS/Fedora echo "#!/bin/sh make_resolv_conf() { : ; }" > /etc/dhcp/dhclient-enter-hooks chmod +x /etc/dhcp/dhclient-enter-hooks fi if [ -e /etc/nsswitch.conf ]; then sed -i -e "/hosts:/ s/dns//g" /etc/nsswitch.conf fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/rebind-sshd/0000775000175000017500000000000000000000000017210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/rebind-sshd/README.rst0000664000175000017500000000073700000000000020706 0ustar00zuulzuul00000000000000This element adds a post-BOUND script to the dhclient configuration to rebind the ssh daemon to listen only on the management network interface. The reason for doing this is that some use cases require load balancing services on TCP port 22 to work, and if SSH binds to the wildcard address on port 22, then haproxy can't. This also has the secondary benefit of making the amphora slightly more secure as its SSH daemon will only respond to requests on the management network. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/rebind-sshd/finalise.d/0000775000175000017500000000000000000000000021224 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/rebind-sshd/finalise.d/98-rebind-sshd-after-dhcp0000775000175000017500000000114600000000000025627 0ustar00zuulzuul00000000000000#!/bin/bash # isc dhcpd specific section if [[ $DISTRO_NAME = "ubuntu" || $DISTRO_NAME = "debian" ]]; then mkdir -p /etc/dhcp/dhclient-enter-hooks.d echo '#!/bin/sh if [ "$reason" = "BOUND" ]; then if `grep -q "#ListenAddress 0.0.0.0" /etc/ssh/sshd_config`; then /bin/sed -i "s/^#ListenAddress 0.0.0.0.*$/ListenAddress $new_ip_address/g" /etc/ssh/sshd_config if `/bin/ps -ef|/bin/grep -v grep|/bin/grep -q sshd`; then /usr/sbin/service ssh restart fi fi fi' > /etc/dhcp/dhclient-enter-hooks.d/rebind-sshd chmod +x /etc/dhcp/dhclient-enter-hooks.d/rebind-sshd fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/remove-default-ints/0000775000175000017500000000000000000000000020700 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/remove-default-ints/README.rst0000664000175000017500000000047200000000000022372 0ustar00zuulzuul00000000000000This element removes any default network interfaces from the interface configuration in the image. These are not needed in the amphora as cloud-init will create the required default interface configuration files. For Ubuntu this element will remove the network configuration files from /etc/network/interfaces.d. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/remove-default-ints/post-install.d/0000775000175000017500000000000000000000000023553 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/remove-default-ints/post-install.d/91-remove-default-ints0000775000175000017500000000017400000000000027624 0ustar00zuulzuul00000000000000#!/bin/bash set -eu set -o xtrace if [[ "$DISTRO_NAME" == "ubuntu" ]]; then sudo rm -f /etc/network/interfaces.d/* fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/remove-sshd/0000775000175000017500000000000000000000000017242 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/remove-sshd/README.rst0000664000175000017500000000054500000000000020735 0ustar00zuulzuul00000000000000=========== remove-sshd =========== This element ensures that openssh server is uninstalled and will not start. Note ---- Most cloud images come with the openssh server service installed and enabled during boot. However, sometimes this is not appropriate. In these cases, using this element may be helpful to ensure your image will not accessible via SSH. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/remove-sshd/package-installs.yaml0000664000175000017500000000004400000000000023346 0ustar00zuulzuul00000000000000openssh-server: uninstall: True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/root-passwd/0000775000175000017500000000000000000000000017270 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/root-passwd/README.rst0000664000175000017500000000031200000000000020753 0ustar00zuulzuul00000000000000This element assigns a password to the root account in the image and enables password login via ssh. This is useful when booting outside of a cloud environment (e.g. manually via kvm) and for testing. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/root-passwd/post-install.d/0000775000175000017500000000000000000000000022143 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/root-passwd/post-install.d/99-setup0000775000175000017500000000074700000000000023500 0ustar00zuulzuul00000000000000#!/bin/bash if [ -z "$DIB_PASSWORD" ]; then echo "Error: DIB_PASSWORD must be set to configure the root password" exit 1 fi sed -i "s/disable_root: true/disable_root: false/" /etc/cloud/cloud.cfg install-packages augeas-tools openssh-server openssh-client augtool -s set /files/etc/ssh/sshd_config/PasswordAuthentication yes augtool -s set /files/etc/ssh/sshd_config/PermitRootLogin yes augtool -s set /files/etc/ssh/ssh_config/PasswordAuthentication yes echo -e "$DIB_PASSWORD\n$DIB_PASSWORD\n" | passwd ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3662167 octavia-6.2.2/elements/sos/0000775000175000017500000000000000000000000015612 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/sos/README.rst0000664000175000017500000000077400000000000017305 0ustar00zuulzuul00000000000000Element to install sosreport. sosreport is a tool that collects information about a system. The sos plugin for Octavia can gather information about installed packages, and log and configuration files for the Octavia controller components and the amphora agent. The result is a generated report that can be used for troubleshooting. The plugin redacts confidential data such as passwords, certificates and secrets. At present, sos is only installed in Red Hat family images, as the plugin does not support other distributions.
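Because the 99-setup script above exits non-zero when DIB_PASSWORD is unset, the root-passwd element is only usable if the variable is exported before the build. An illustrative invocation (image name and element list are examples only):

    export DIB_PASSWORD='debug-only-password'    # consumed by post-install.d/99-setup
    disk-image-create -o amphora-debug.qcow2 ubuntu-minimal vm amphora-agent root-passwd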
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/sos/element-deps0000664000175000017500000000003100000000000020111 0ustar00zuulzuul00000000000000package-installs pkg-map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/sos/package-installs.yaml0000664000175000017500000000000500000000000021713 0ustar00zuulzuul00000000000000sos: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/elements/sos/pkg-map0000664000175000017500000000014100000000000017065 0ustar00zuulzuul00000000000000{ "family": { "redhat": { "sos": "sos" } }, "default": { "sos": "" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3702166 octavia-6.2.2/etc/0000775000175000017500000000000000000000000013745 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3702166 octavia-6.2.2/etc/audit/0000775000175000017500000000000000000000000015053 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/etc/audit/octavia_api_audit_map.conf.sample0000664000175000017500000000145700000000000023513 0ustar00zuulzuul00000000000000[DEFAULT] # default target endpoint type # should match the endpoint type defined in service catalog target_endpoint_type = load-balancer [custom_actions] failover = failover # possible end path of API requests # path of api requests for CADF target typeURI # Just need to include top resource path to identify class # of resources. Ex: Log audit event for API requests # path containing "nodes" keyword and node uuid. [path_keywords] amphorae = amphora defaults = None failover = None healthmonitors = healthmonitor l7policies = l7policy listeners = listener loadbalancers = loadbalancer members = member pools = pool providers = None quotas = quota rules = rule stats = None status = None # map endpoint type defined in service catalog to CADF typeURI [service_endpoints] load-balancer = service/load-balancer ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3702166 octavia-6.2.2/etc/certificates/0000775000175000017500000000000000000000000016412 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/etc/certificates/openssl.cnf0000664000175000017500000002527100000000000020574 0ustar00zuulzuul00000000000000# # OpenSSL example configuration file. # This is mostly being used for generation of certificate requests. # # This definition stops the following lines choking if HOME isn't # defined. HOME = . RANDFILE = $ENV::HOME/.rnd # Extra OBJECT IDENTIFIER info: #oid_file = $ENV::HOME/.oid oid_section = new_oids # To use this configuration file with the "-extfile" option of the # "openssl x509" utility, name here the section containing the # X.509v3 extensions to use: # extensions = # (Alternatively, use a configuration file that has only # X.509v3 extensions in its main [= default] section.) [ new_oids ] # We can add new OIDs in here for use by 'ca', 'req' and 'ts'. # Add a simple OID like this: # testoid1=1.2.3.4 # Or use config file substitution like this: # testoid2=${testoid1}.5.6 # Policies used by the TSA examples. 
tsa_policy1 = 1.2.3.4.1 tsa_policy2 = 1.2.3.4.5.6 tsa_policy3 = 1.2.3.4.5.7 #################################################################### [ ca ] default_ca = CA_default # The default ca section #################################################################### [ CA_default ] dir = ./ # Where everything is kept certs = $dir/certs # Where the issued certs are kept crl_dir = $dir/crl # Where the issued crl are kept database = $dir/index.txt # database index file. #unique_subject = no # Set to 'no' to allow creation of # several ctificates with same subject. new_certs_dir = $dir/newcerts # default place for new certs. certificate = $dir/ca_01.pem # The CA certificate serial = $dir/serial # The current serial number crlnumber = $dir/crlnumber # the current crl number # must be commented out to leave a V1 CRL crl = $dir/crl.pem # The current CRL private_key = $dir/private/cakey.pem# The private key RANDFILE = $dir/private/.rand # private random number file x509_extensions = usr_cert # The extensions to add to the cert # Comment out the following two lines for the "traditional" # (and highly broken) format. name_opt = ca_default # Subject Name options cert_opt = ca_default # Certificate field options # Extension copying option: use with caution. # copy_extensions = copy # Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs # so this is commented out by default to leave a V1 CRL. # crlnumber must also be commented out to leave a V1 CRL. # crl_extensions = crl_ext default_days = 365 # how long to certify for default_crl_days= 30 # how long before next CRL default_md = default # use public key default MD preserve = no # keep passed DN ordering # A few difference way of specifying how similar the request should look # For type CA, the listed attributes must be the same, and the optional # and supplied fields are just that :-) policy = policy_match # For the CA policy [ policy_match ] countryName = match stateOrProvinceName = match organizationName = match organizationalUnitName = optional commonName = supplied emailAddress = optional # For the 'anything' policy # At this point in time, you must list all acceptable 'object' # types. [ policy_anything ] countryName = optional stateOrProvinceName = optional localityName = optional organizationName = optional organizationalUnitName = optional commonName = supplied emailAddress = optional #################################################################### [ req ] default_bits = 2048 default_keyfile = privkey.pem distinguished_name = req_distinguished_name attributes = req_attributes x509_extensions = v3_ca # The extensions to add to the self signed cert # Passwords for private keys if not present they will be prompted for # input_password = secret # output_password = secret # This sets a mask for permitted string types. There are several options. # default: PrintableString, T61String, BMPString. # pkix : PrintableString, BMPString (PKIX recommendation before 2004) # utf8only: only UTF8Strings (PKIX recommendation after 2004). # nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings). # MASK:XXXX a literal mask value. # WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings. 
string_mask = utf8only # req_extensions = v3_req # The extensions to add to a certificate request [ req_distinguished_name ] countryName = Country Name (2 letter code) countryName_default = AU countryName_min = 2 countryName_max = 2 stateOrProvinceName = State or Province Name (full name) stateOrProvinceName_default = Some-State localityName = Locality Name (eg, city) 0.organizationName = Organization Name (eg, company) 0.organizationName_default = Internet Widgits Pty Ltd # we can do this but it is not needed normally :-) #1.organizationName = Second Organization Name (eg, company) #1.organizationName_default = World Wide Web Pty Ltd organizationalUnitName = Organizational Unit Name (eg, section) #organizationalUnitName_default = commonName = Common Name (e.g. server FQDN or YOUR name) commonName_max = 64 emailAddress = Email Address emailAddress_max = 64 # SET-ex3 = SET extension number 3 [ req_attributes ] challengePassword = A challenge password challengePassword_min = 4 challengePassword_max = 20 unstructuredName = An optional company name [ usr_cert ] # These extensions are added when 'ca' signs a request. # This goes against PKIX guidelines but some CAs do it and some software # requires this to avoid interpreting an end user certificate as a CA. basicConstraints=CA:FALSE # Here are some examples of the usage of nsCertType. If it is omitted # the certificate can be used for anything *except* object signing. # This is OK for an SSL server. # nsCertType = server # For an object signing certificate this would be used. # nsCertType = objsign # For normal client use this is typical # nsCertType = client, email # and for everything including object signing: # nsCertType = client, email, objsign # This is typical in keyUsage for a client certificate. # keyUsage = nonRepudiation, digitalSignature, keyEncipherment # This will be displayed in Netscape's comment listbox. nsComment = "OpenSSL Generated Certificate" # PKIX recommendations harmless if included in all certificates. subjectKeyIdentifier=hash authorityKeyIdentifier=keyid,issuer # This stuff is for subjectAltName and issuerAltname. # Import the email address. # subjectAltName=email:copy # An alternative to produce certificates that aren't # deprecated according to PKIX. # subjectAltName=email:move # Copy subject details # issuerAltName=issuer:copy #nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem #nsBaseUrl #nsRevocationUrl #nsRenewalUrl #nsCaPolicyUrl #nsSslServerName # This is required for TSA certificates. # extendedKeyUsage = critical,timeStamping [ v3_req ] # Extensions to add to a certificate request basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment [ v3_ca ] # Extensions for a typical CA # PKIX recommendation. subjectKeyIdentifier=hash authorityKeyIdentifier=keyid:always,issuer # This is what PKIX recommends but some broken software chokes on critical # extensions. #basicConstraints = critical,CA:true # So we do this instead. basicConstraints = CA:true # Key usage: this is typical for a CA certificate. However since it will # prevent it being used as an test self-signed certificate it is best # left out by default. # keyUsage = cRLSign, keyCertSign # Some might want this also # nsCertType = sslCA, emailCA # Include email address in subject alt name: another PKIX recommendation # subjectAltName=email:copy # Copy issuer details # issuerAltName=issuer:copy # DER hex encoding of an extension: beware experts only! 
# obj=DER:02:03 # Where 'obj' is a standard or added object # You can even override a supported extension: # basicConstraints= critical, DER:30:03:01:01:FF [ crl_ext ] # CRL extensions. # Only issuerAltName and authorityKeyIdentifier make any sense in a CRL. # issuerAltName=issuer:copy authorityKeyIdentifier=keyid:always [ proxy_cert_ext ] # These extensions should be added when creating a proxy certificate # This goes against PKIX guidelines but some CAs do it and some software # requires this to avoid interpreting an end user certificate as a CA. basicConstraints=CA:FALSE # Here are some examples of the usage of nsCertType. If it is omitted # the certificate can be used for anything *except* object signing. # This is OK for an SSL server. # nsCertType = server # For an object signing certificate this would be used. # nsCertType = objsign # For normal client use this is typical # nsCertType = client, email # and for everything including object signing: # nsCertType = client, email, objsign # This is typical in keyUsage for a client certificate. # keyUsage = nonRepudiation, digitalSignature, keyEncipherment # This will be displayed in Netscape's comment listbox. nsComment = "OpenSSL Generated Certificate" # PKIX recommendations harmless if included in all certificates. subjectKeyIdentifier=hash authorityKeyIdentifier=keyid,issuer # This stuff is for subjectAltName and issuerAltname. # Import the email address. # subjectAltName=email:copy # An alternative to produce certificates that aren't # deprecated according to PKIX. # subjectAltName=email:move # Copy subject details # issuerAltName=issuer:copy #nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem #nsBaseUrl #nsRevocationUrl #nsRenewalUrl #nsCaPolicyUrl #nsSslServerName # This really needs to be in place for it to be a proxy certificate. proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo #################################################################### [ tsa ] default_tsa = tsa_config1 # the default TSA section [ tsa_config1 ] # These are used by the TSA reply generation only. dir = ./demoCA # TSA root directory serial = $dir/tsaserial # The current serial number (mandatory) crypto_device = builtin # OpenSSL engine to use for signing signer_cert = $dir/tsacert.pem # The TSA signing certificate # (optional) certs = $dir/cacert.pem # Certificate chain to include in reply # (optional) signer_key = $dir/private/tsakey.pem # The TSA private key (optional) default_policy = tsa_policy1 # Policy if request did not specify it # (optional) other_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional) digests = md5, sha1 # Acceptable message digests (mandatory) accuracy = secs:1, millisecs:500, microsecs:100 # (optional) clock_precision_digits = 0 # number of digits after dot. (optional) ordering = yes # Is ordering defined for timestamps? # (optional, default: no) tsa_name = yes # Must the TSA name be included in the reply? # (optional, default: no) ess_cert_id_chain = no # Must the ESS cert id chain be included? 
# (optional, default: no) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3702166 octavia-6.2.2/etc/dhcp/0000775000175000017500000000000000000000000014663 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/etc/dhcp/dhclient.conf0000664000175000017500000000011700000000000017323 0ustar00zuulzuul00000000000000request subnet-mask,broadcast-address,interface-mtu; do-forward-updates false; ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/etc/octavia.conf0000664000175000017500000006043400000000000016251 0ustar00zuulzuul00000000000000[DEFAULT] # Print debugging output (set logging level to DEBUG instead of default WARNING level). # debug = False # Plugin options are hot_plug_plugin (Hot-pluggable controller plugin) # octavia_plugins = hot_plug_plugin # Hostname to be used by the host machine for services running on it. # The default value is the hostname of the host machine. # host = # AMQP Transport URL # For Single Host, specify one full transport URL: # transport_url = rabbit://:@127.0.0.1:5672/ # For HA, specify queue nodes in cluster, comma delimited: # transport_url = rabbit://:@server01,:@server02/ # transport_url = # How long in seconds to wait for octavia worker to exit before killing them. # graceful_shutdown_timeout = 60 [api_settings] # bind_host = 127.0.0.1 # bind_port = 9876 # How should authentication be handled (keystone, noauth) # auth_strategy = keystone # allow_pagination = True # allow_sorting = True # pagination_max_limit = 1000 # Base URI for the API for use in pagination links. # This will be autodetected from the request if not overridden here. # Example: # api_base_uri = http://localhost:9876 # api_base_uri = # Enable/disable ability for users to create TLS Terminated listeners # allow_tls_terminated_listeners = True # Enable/disable ability for users to create PING type Health Monitors # allow_ping_health_monitors = True # Dictionary of enabled provider driver names and descriptions # A comma separated list of dictionaries of the enabled provider driver names # and descriptions. # enabled_provider_drivers = amphora:The Octavia Amphora driver.,octavia: \ # Deprecated alias of the Octavia Amphora driver. # Default provider driver # default_provider_driver = amphora # The minimum health monitor delay interval for UDP-CONNECT Health Monitor type # udp_connect_min_interval_health_monitor = 3 # Boolean to enable/disable oslo middleware /healthcheck in the Octavia API # healthcheck_enabled = False # The interval healthcheck plugins should cache results, in seconds. # healthcheck_refresh_interval = 5 # Default cipher string for new TLS-terminated listeners # Cipher strings are in OpenSSL format, see https://www.openssl.org/docs/man1.1.1/man1/ciphers.html # This example is the "Broad Compatibility" cipher string from OWASP, # see https://cheatsheetseries.owasp.org/cheatsheets/TLS_Cipher_String_Cheat_Sheet.html # default_listener_ciphers = TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256 # Default cipher string for new TLS-enabled pools, in OpenSSL format. 
# Cipher strings are in OpenSSL format, see https://www.openssl.org/docs/man1.1.1/man1/ciphers.html # This example is the "Broad Compatibility" cipher string from OWASP, # see https://cheatsheetseries.owasp.org/cheatsheets/TLS_Cipher_String_Cheat_Sheet.html # default_pool_ciphers = TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256 [database] # This line MUST be changed to actually run the plugin. # Example: # connection = mysql+pymysql://root:pass@127.0.0.1:3306/octavia # Replace 127.0.0.1 above with the IP address of the database used by the # main octavia server. (Leave it as is if the database runs on this host.) # connection = mysql+pymysql:// # NOTE: In deployment the [database] section and its connection attribute may # be set in the corresponding core plugin '.ini' file. However, it is suggested # to put the [database] section and its connection attribute in this # configuration file. [health_manager] # bind_ip = 127.0.0.1 # bind_port = 5555 # controller_ip_port_list example: 127.0.0.1:5555, 127.0.0.1:5555 # controller_ip_port_list = # failover_threads = 10 # status_update_threads will default to the number of processors on the host. # This setting is deprecated and if you specify health_update_threads and # stats_update_threads, they override this parameter. # status_update_threads = # health_update_threads will default to the number of processors on the host # health_update_threads = # stats_update_threads will default to the number of processors on the host # stats_update_threads = # heartbeat_interval = 10 # heartbeat_key = # heartbeat_timeout = 60 # health_check_interval = 3 # sock_rlimit = 0 # Health/StatsUpdate options are # *_db # *_logger # health_update_driver = health_db # stats_update_driver = stats_db [keystone_authtoken] # This group of config options are imported from keystone middleware. Thus the # option names should match the names declared in the middleware. # The www_authenticate_uri is the public endpoint and is returned in headers on a 401 # www_authenticate_uri = https://localhost:5000/v3 # The auth_url is the admin endpoint actually used for validating tokens # auth_url = https://localhost:5000/v3 # username = octavia # password = password # project_name = service # Domain names must be set, these are *not* default but work for most clouds # project_domain_name = Default # user_domain_name = Default # insecure = False # cafile = [certificates] # Certificate Generator options are local_cert_generator # cert_generator = local_cert_generator # For local certificate signing: # ca_certificate = /etc/ssl/certs/ssl-cert-snakeoil.pem # ca_private_key = /etc/ssl/private/ssl-cert-snakeoil.key # ca_private_key_passphrase = # server_certs_key_passphrase = insecure-key-do-not-use-this-key # signing_digest = sha256 # cert_validity_time = 2592000 # 30 days = 30d * 24h * 60m * 60s = 2592000s # storage_path = /var/lib/octavia/certificates/ # For the TLS management # Certificate Manager options are local_cert_manager # barbican_cert_manager # castellan_cert_manager # cert_manager = barbican_cert_manager # For Barbican authentication (if using any Barbican based cert class) # barbican_auth = barbican_acl_auth # Settings for the key manager endpoint (such as Barbican) # Region in Identity service catalog to use for communication with the key manager service. 
# region_name = # Endpoint type to use for communication with the key manager service. # endpoint_type = publicURL # CA certificates file to verify key manager connections when TLS is enabled # ca_certificates_file = # Disable certificate validation on SSL connections # insecure = False [compute] # The maximum attempts to retry an action with the compute service. # max_retries = 15 # Seconds to wait before retrying an action with the compute service. # retry_interval = 1 # The seconds to backoff retry attempts # retry_backoff = 1 # The maximum interval in seconds between retry attempts # retry_max = 10 [networking] # The maximum attempts to retry an action with the networking service. # max_retries = 15 # Seconds to wait before retrying an action with the networking service. # retry_interval = 1 # The seconds to backoff retry attempts # retry_backoff = 1 # The maximum interval in seconds between retry attempts # retry_max = 10 # The maximum time to wait, in seconds, for a port to detach from an amphora # port_detach_timeout = 300 # Allow/disallow specific network object types when creating VIPs. # allow_vip_network_id = True # allow_vip_subnet_id = True # allow_vip_port_id = True # List of network_ids that are valid for VIP creation. # If this field empty, no validation is performed. # valid_vip_networks = # List of reserved IP addresses that cannot be used for member addresses # The default is the nova metadata service address # reserved_ips = ['169.254.169.254'] # When True, users can use network resources they cannot normally see as VIP # or member subnets. Making this True may allow users to access resources on # subnets they do not normally have access to via neutron RBAC policies. # allow_invisible_resource_usage = False [haproxy_amphora] # base_path = /var/lib/octavia # base_cert_dir = /var/lib/octavia/certs # Absolute path to a custom HAProxy template file # haproxy_template = # connection_logging = True # connection_max_retries = 120 # connection_retry_interval = 5 # build_rate_limit = -1 # build_active_retries = 120 # build_retry_interval = 5 # Maximum number of entries that can fit in the stick table. # The size supports "k", "m", "g" suffixes. # haproxy_stick_size = 10k # REST Driver specific # bind_host = 0.0.0.0 # bind_port = 9443 # # This setting is only needed with IPv6 link-local addresses (fe80::/64) are # used for communication between Octavia and its Amphora, if IPv4 or other IPv6 # addresses are used it can be ignored. # lb_network_interface = o-hm0 # # haproxy_cmd = /usr/sbin/haproxy # respawn_count = 2 # respawn_interval = 2 # client_cert = /etc/octavia/certs/client.pem # server_ca = /etc/octavia/certs/server_ca.pem # # This setting is deprecated. It is now automatically discovered. # use_upstart = True # # rest_request_conn_timeout = 10 # rest_request_read_timeout = 60 # # These "active" timeouts are used once the amphora should already # be fully up and active. These values are lower than the other values to # facilitate "fail fast" scenarios like failovers # active_connection_max_retries = 15 # active_connection_rety_interval = 2 # These "failover" timeouts are used during the failover process to probe # amphorae that are part of the load balancer being failed over. # These values are very low to facilitate "fail fast" should an amphora # not respond in a failure situation. # failover_connection_max_retries = 2 # failover_connection_retry_interval = 5 # The user flow log format for HAProxy. 
[haproxy_amphora]
# base_path = /var/lib/octavia
# base_cert_dir = /var/lib/octavia/certs
# Absolute path to a custom HAProxy template file
# haproxy_template =
# connection_logging = True
# connection_max_retries = 120
# connection_retry_interval = 5
# build_rate_limit = -1
# build_active_retries = 120
# build_retry_interval = 5
# Maximum number of entries that can fit in the stick table.
# The size supports "k", "m", "g" suffixes.
# haproxy_stick_size = 10k
# REST Driver specific
# bind_host = 0.0.0.0
# bind_port = 9443
#
# This setting is only needed when IPv6 link-local addresses (fe80::/64) are
# used for communication between Octavia and its Amphora. If IPv4 or other
# IPv6 addresses are used, it can be ignored.
# lb_network_interface = o-hm0
#
# haproxy_cmd = /usr/sbin/haproxy
# respawn_count = 2
# respawn_interval = 2
# client_cert = /etc/octavia/certs/client.pem
# server_ca = /etc/octavia/certs/server_ca.pem
#
# This setting is deprecated. It is now automatically discovered.
# use_upstart = True
#
# rest_request_conn_timeout = 10
# rest_request_read_timeout = 60
#
# These "active" timeouts are used once the amphora should already
# be fully up and active. These values are lower than the other values to
# facilitate "fail fast" scenarios like failovers.
# active_connection_max_retries = 15
# active_connection_rety_interval = 2
# These "failover" timeouts are used during the failover process to probe
# amphorae that are part of the load balancer being failed over.
# These values are very low to facilitate "fail fast" should an amphora
# not respond in a failure situation.
# failover_connection_max_retries = 2
# failover_connection_retry_interval = 5
# The user flow log format for HAProxy.
# {{ project_id }} and {{ lb_id }} will be automatically substituted by the
# controller when configuring HAProxy if they are present in the string.
# user_log_format = '{{ project_id }} {{ lb_id }} %f %ci %cp %t %{+Q}r %ST %B %U %[ssl_c_verify] %{+Q}[ssl_c_s_dn] %b %s %Tt %tsc'
# API messaging / database commit retries
# This is how many times the controller worker retries waiting for the API to
# complete a database commit for a message received over the queue.
# api_db_commit_retry_attempts = 15
# api_db_commit_retry_initial_delay = 1
# api_db_commit_retry_backoff = 1
# api_db_commit_retry_max = 5
# Default connection_limit for listeners; this value is used when setting
# "-1" or when unsetting "connection_limit" with the listener API.
# default_connection_limit = 50000

[controller_worker]
# workers = 1
# amp_active_retries = 30
# amp_active_wait_sec = 10
# Glance parameters to extract image ID to use for amphora. Only one of
# these parameters is needed. Using tags is the recommended way to refer to
# images.
# amp_image_id =
# amp_image_tag =
# Optional owner ID used to restrict glance images to one owner ID.
# This is a recommended security setting.
# amp_image_owner_id =
# Nova parameters to use when booting amphora
# amp_flavor_id =
# Upload the ssh key as the service_auth user described elsewhere in this
# config. Leaving this variable blank will install no ssh key on the
# amphora.
# amp_ssh_key_name =
# Networks to attach to the Amphorae examples:
# - One primary network
# - - amp_boot_network_list = 22222222-3333-4444-5555-666666666666
# - Multiple networks
# - - amp_boot_network_list = 11111111-2222-3333-4444-555555555555, 22222222-3333-4444-5555-666666666666
# - All networks defined in the list will be attached to each amphora
# amp_boot_network_list =
# amp_secgroup_list =
# client_ca = /etc/octavia/certs/ca_01.pem
# Amphora driver options are amphora_noop_driver,
#                            amphora_haproxy_rest_driver
#
# amphora_driver = amphora_noop_driver
#
# Compute driver options are compute_noop_driver
#                            compute_nova_driver
#
# compute_driver = compute_noop_driver
#
# Network driver options are network_noop_driver
#                            allowed_address_pairs_driver
#
# network_driver = network_noop_driver
# Volume driver options are volume_noop_driver
#                           volume_cinder_driver
#
# volume_driver = volume_noop_driver
#
# Distributor driver options are distributor_noop_driver
#                                single_VIP_amphora
#
# distributor_driver = distributor_noop_driver
#
# Load balancer topology options are SINGLE, ACTIVE_STANDBY
# loadbalancer_topology = SINGLE
# user_data_config_drive = False
# amphora_delete_retries = 5
# amphora_delete_retry_interval = 5
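# Example (illustrative, not defaults): a deployment that boots real amphorae
# instead of the no-op test drivers, in active/standby pairs, using only the
# driver names listed above:
# amphora_driver = amphora_haproxy_rest_driver
# compute_driver = compute_nova_driver
# network_driver = allowed_address_pairs_driver
# loadbalancer_topology = ACTIVE_STANDBY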
[task_flow]
# TaskFlow engine options are:
# - serial: Runs all tasks on a single thread.
# - parallel: Schedules tasks onto different threads to allow
#   for running non-dependent tasks simultaneously
#
# engine = parallel
# max_workers = 5
#
# This setting prevents the controller worker from reverting taskflow flows.
# This will leave resources in an inconsistent state and should only be used
# for debugging purposes.
# disable_revert = False

# Persistence database, which will be used to store tasks states.
# Database connection url with db name (string value)
#persistence_connection = sqlite://

# Jobboard backend driver that will monitor job state. (string value)
# Possible values:
# - redis_taskflow_driver: Driver that will use Redis to store job states.
# - zookeeper_taskflow_driver: Driver that will use Zookeeper to store job
#   states.
#jobboard_backend_driver = redis_taskflow_driver

# Jobboard backend server host(s). (list value)
#jobboard_backend_hosts = 127.0.0.1

# Jobboard backend server port (port value)
# Minimum value: 0
# Maximum value: 65535
#jobboard_backend_port = 6379

# Jobboard backend server password (string value)
#jobboard_backend_password =

# Jobboard name that should be used to store taskflow job id and
# claims for it. (string value)
#jobboard_backend_namespace = octavia_jobboard

# Redis jobboard backend ssl configuration options. (dict value)
# SSL is disabled by default
#jobboard_redis_backend_ssl_options = ssl:False,ssl_ca_certs:None,ssl_cert_reqs:required,ssl_certfile:None,ssl_keyfile:None

# Zookeeper jobboard backend ssl configuration options. (dict value)
# SSL is disabled by default
#jobboard_zookeeper_ssl_options = use_ssl:False,certfile:None,keyfile:None,keyfile_password:None,verify_certs:True

# For backends like redis, claiming a job requires setting an expiry:
# how many seconds the claim should be retained for. (integer value)
#jobboard_expiration_time = 30

# Set this parameter to True if logbook info must be saved for analysis.
# By default the logbook is removed from the persistence backend when a job
# completes. (boolean value)
#jobboard_save_logbook = false
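# Example (illustrative host, password and database URL, not defaults):
# enable the Redis jobboard so in-flight controller tasks can be resumed
# after a controller failure:
#persistence_connection = mysql+pymysql://root:pass@127.0.0.1:3306/octavia_persistence
#jobboard_backend_driver = redis_taskflow_driver
#jobboard_backend_hosts = 192.0.2.50
#jobboard_backend_port = 6379
#jobboard_backend_password = example-redis-password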
[oslo_messaging]
# Queue Consumer Thread Pool Size
# rpc_thread_pool_size = 2
# Topic (i.e. Queue) Name
# topic = octavia_prov

[oslo_middleware]
# HTTPProxyToWSGI middleware enabled
# enable_proxy_headers_parsing = False

[house_keeping]
# Interval in seconds to initiate spare amphora checks
# spare_check_interval = 30
# spare_amphora_pool_size = 0
# Cleanup interval for deleted amphorae
# cleanup_interval = 30
# Amphora expiry age in seconds. Default is 1 week
# amphora_expiry_age = 604800
# Load balancer expiry age in seconds. Default is 1 week
# load_balancer_expiry_age = 604800

[amphora_agent]
# agent_server_ca = /etc/octavia/certs/client_ca.pem
# agent_server_cert = /etc/octavia/certs/server.pem
# Defaults for agent_server_network_dir when not specified here are:
# Ubuntu: /etc/netns/amphora-haproxy/network/interfaces.d/
# Centos/fedora/rhel: /etc/netns/amphora-haproxy/sysconfig/network-scripts/
#
# agent_server_network_dir =
# agent_server_network_file =
# agent_request_read_timeout = 180
# Minimum TLS protocol, eg: TLS, TLSv1.1, TLSv1.2, TLSv1.3 (if available)
# agent_tls_protocol = TLSv1.2
# Amphora default UDP driver is keepalived_lvs
#
# amphora_udp_driver = keepalived_lvs

##### Log offloading
#
# Note: The admin and tenant logs can point to the same endpoints.
#
# List of log server ip and port pairs for Administrative logs.
# Additional hosts are backup to the primary server. If none are
# specified, remote logging is disabled.
# Example: 192.0.2.1:10514, 2001:db8:1::10:10514
#
# admin_log_targets =
#
# List of log server ip and port pairs for tenant traffic logs.
# Additional hosts are backup to the primary server. If none are
# specified, remote logging is disabled.
# Example: 192.0.2.1:10514, 2001:db8:2::15:10514
#
# tenant_log_targets =

# Sets the syslog LOG_LOCAL[0-7] facility number for amphora log offloading.
# user_log_facility will receive the traffic flow logs.
# administrative_log_facility will receive the amphora processes logs.
# Note: Some processes only support LOG_LOCAL, so we are restricted to the
# LOG_LOCAL facilities.
#
# user_log_facility = 0
# administrative_log_facility = 1

# The log forwarding protocol to use. One of TCP or UDP.
# log_protocol = UDP
# The maximum attempts to retry connecting to the logging host.
# log_retry_count = 5
# The time, in seconds, to wait between retries connecting to the logging
# host.
# log_retry_interval = 2
# The queue size (messages) to buffer log messages.
# log_queue_size = 10000
# Controller local path to a custom logging configuration template.
# Currently this is an rsyslog configuration file template.
# logging_template_override =
# When True, the amphora will forward all of the system logs (except tenant
# traffic logs) to the admin log target(s). When False, only amphora specific
# admin logs will be forwarded.
# forward_all_logs = False
# When True, no logs will be written to the amphora filesystem. When False,
# log files will be written to the local filesystem.
# disable_local_log_storage = False
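# Example (illustrative endpoints, reusing the documentation addresses from
# above): send both admin and tenant flow logs to a primary rsyslog server
# with one backup, on separate syslog facilities:
# admin_log_targets = 192.0.2.1:10514, 192.0.2.2:10514
# tenant_log_targets = 192.0.2.1:10514, 192.0.2.2:10514
# user_log_facility = 0
# administrative_log_facility = 1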
[keepalived_vrrp]
# Amphora Role/Priority advertisement interval in seconds
# vrrp_advert_int = 1
# Service health check interval and success/fail count
# vrrp_check_interval = 5
# vrrp_fail_count = 2
# vrrp_success_count = 2
# Amphora MASTER gratuitous ARP refresh settings
# vrrp_garp_refresh_interval = 5
# vrrp_garp_refresh_count = 2

[service_auth]
# memcached_servers =
# cafile = /opt/stack/data/ca-bundle.pem
# project_domain_name = Default
# project_name = admin
# user_domain_name = Default
# password = password
# username = admin
# auth_type = password
# auth_url = http://localhost:5555/

[nova]
# The name of the nova service in the keystone catalog
# service_name =
# Custom nova endpoint if override is necessary
# endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL
# CA certificates file to verify nova connections when TLS is enabled
# ca_certificates_file =
# Disable certificate validation on SSL connections
# insecure = False
# If non-zero, generate a random name of the length provided for each
# amphora, in the format "a[A-Z0-9]*".
# Otherwise, the default name format will be used: "amphora-{UUID}".
# random_amphora_name_length = 0
#
# Availability zone to use for creating Amphorae
# availability_zone =
# Enable anti-affinity in nova
# enable_anti_affinity = False
# Set the anti-affinity policy to what is suitable.
# Nova supports: anti-affinity and soft-anti-affinity
# anti_affinity_policy = anti-affinity

[cinder]
# The name of the cinder service in the keystone catalog
# service_name =
# Custom cinder endpoint if override is necessary
# endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL
# Availability zone to use for creating Volume
# availability_zone =
# CA certificates file to verify cinder connections when TLS is enabled
# insecure = False
# ca_certificates_file =
# Size of the root volume, in GB, for the Amphora instance when using
# Cinder. In some storage backends, such as ScaleIO, the volume size must be
# a multiple of 8.
# volume_size = 16
# Volume type to be used for the Amphora instance root disk.
# If not specified, default_volume_type from cinder.conf will be used
# volume_type =
# Interval time to wait until volume becomes available
# volume_create_retry_interval = 5
# Timeout to wait for volume creation success
# volume_create_timeout = 300
# Maximum number of retries to create volume
# volume_create_max_retries = 5

[glance]
# The name of the glance service in the keystone catalog
# service_name =
# Custom glance endpoint if override is necessary
# endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL
# CA certificates file to verify glance connections when TLS is enabled
# insecure = False
# ca_certificates_file =

[neutron]
# The name of the neutron service in the keystone catalog
# service_name =
# Custom neutron endpoint if override is necessary
# endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL
# CA certificates file to verify neutron connections when TLS is enabled
# insecure = False
# ca_certificates_file =

[quotas]
# default_load_balancer_quota = -1
# default_listener_quota = -1
# default_member_quota = -1
# default_pool_quota = -1
# default_health_monitor_quota = -1
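# Example (illustrative): cap each project at 10 load balancers and 100
# members while leaving the other resources unlimited (-1):
# default_load_balancer_quota = 10
# default_member_quota = 100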
[audit]
# Enable auditing of API requests.
# enabled = False
# Path to audit map file for octavia-api service. Used only
# when API audit is enabled.
# audit_map_file = /etc/octavia/octavia_api_audit_map.conf
# Comma-separated list of REST API HTTP methods to be
# ignored during audit. For example: auditing will not be done
# on any GET or POST requests if this is set to "GET,POST". It
# is used only when API audit is enabled.
# ignore_req_list =

[audit_middleware_notifications]
# Note: This section comes from openstack/keystonemiddleware
# It is included here for documentation convenience and may be out of date
# Indicate whether to use oslo_messaging as the notifier. If set to False,
# the local logger will be used as the notifier. If set to True, the
# oslo_messaging package must also be present. Otherwise, the local logger
# will be used instead.
# use_oslo_messaging = True
# The Driver to handle sending notifications. Possible values are messaging,
# messagingv2, routing, log, test, noop. If not specified, then value from
# oslo_messaging_notifications conf section is used.
# driver =
# List of AMQP topics used for OpenStack notifications. If not specified,
# then value from oslo_messaging_notifications conf section is used.
# topics =
# A URL representing messaging driver to use for notification. If not
# specified, we fall back to the same configuration used for RPC.
# transport_url =

[driver_agent]
# status_socket_path = /var/run/octavia/status.sock
# stats_socket_path = /var/run/octavia/stats.sock
# get_socket_path = /var/run/octavia/get.sock
# Maximum time to wait for a status message before checking for shutdown
# status_request_timeout = 5
# Maximum number of status processes per driver-agent
# status_max_processes = 50
# Maximum time to wait for a stats message before checking for shutdown
# stats_request_timeout = 5
# Maximum number of stats processes per driver-agent
# stats_max_processes = 50
# Percentage of max_processes (both status and stats) in use to start
# logging warning messages about an overloaded driver-agent.
# max_process_warning_percent = .75
# How long in seconds to wait for provider agents to exit before killing
# them.
# provider_agent_shutdown_timeout = 60
# List of enabled provider agents.
# enabled_provider_agents =

[healthcheck]
# WARNING: Enabling the 'detailed' setting will expose sensitive details
#          about the API process. Do not enable this unless you are sure it
#          will not pose a security risk to your API instances.
#          We highly recommend you do not enable this.
# detailed = False
# This is a list of oslo middleware healthcheck backend plugins to enable
# for the oslo middleware health check.
#
# Plugins provided by oslo middleware:
#     disable_by_file
#     disable_by_files_ports
# Plugins provided by Octavia:
#     octavia_db_check
#
# backends =

octavia-6.2.2/etc/policy/README.rst

===========================
Octavia Sample Policy Files
===========================

The sample policy.json files described here can be copied into
/etc/octavia/policy.json to override the default RBAC policy for Octavia.

admin_or_owner-policy.json
--------------------------

This policy file disables the requirement for load-balancer service users to
have one of the load-balancer:* roles. It provides a similar policy to legacy
OpenStack policies where any user or admin has access to load-balancer
resources that they own. Users with the admin role have access to all
load-balancer resources, whether they own them or not.
octavia-6.2.2/etc/policy/admin_or_owner-policy.json

{
    "context_is_admin": "role:admin or role:load-balancer_admin",
    "admin_or_owner": "is_admin:True or project_id:%(project_id)s",
    "load-balancer:read": "rule:admin_or_owner",
    "load-balancer:read-global": "is_admin:True",
    "load-balancer:write": "rule:admin_or_owner",
    "load-balancer:read-quota": "rule:admin_or_owner",
    "load-balancer:read-quota-global": "is_admin:True",
    "load-balancer:write-quota": "is_admin:True"
}

octavia-6.2.2/etc/policy/octavia-policy-generator.conf

[DEFAULT]
format = json
output_file = etc/octavia/policy.json.sample
namespace = octavia

octavia-6.2.2/httpd/octavia-api.conf

Listen 9876

<VirtualHost *:9876>
    WSGIDaemonProcess octavia-wsgi processes=5 threads=1 user=octavia group=octavia display-name=%{GROUP}
    WSGIProcessGroup octavia-wsgi
    WSGIScriptAlias / /usr/local/bin/octavia-wsgi
    WSGIApplicationGroup %{GLOBAL}

    ErrorLog /var/log/apache2/octavia-wsgi.log

    <Directory /usr/local/bin>
        WSGIProcessGroup octavia-wsgi
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>

octavia-6.2.2/octavia/__init__.py

# Copyright 2011-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import gettext

gettext.install('octavia')

octavia-6.2.2/octavia/amphorae/__init__.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3702166 octavia-6.2.2/octavia/amphorae/backends/0000775000175000017500000000000000000000000020166 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/__init__.py0000664000175000017500000000107400000000000022301 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3702166 octavia-6.2.2/octavia/amphorae/backends/agent/0000775000175000017500000000000000000000000021264 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/__init__.py0000664000175000017500000000107400000000000023377 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/agent_jinja_cfg.py0000664000175000017500000000551700000000000024736 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import jinja2 from octavia.common.config import cfg from octavia.common import constants CONF = cfg.CONF TEMPLATES_DIR = (os.path.dirname(os.path.realpath(__file__)) + constants.AGENT_API_TEMPLATES + '/') class AgentJinjaTemplater(object): def __init__(self): template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname( TEMPLATES_DIR)) jinja_env = jinja2.Environment(loader=template_loader, autoescape=True) self.agent_template = jinja_env.get_template( constants.AGENT_CONF_TEMPLATE) def build_agent_config(self, amphora_id, topology): return self.agent_template.render( {'agent_server_ca': CONF.amphora_agent.agent_server_ca, 'agent_server_cert': CONF.amphora_agent.agent_server_cert, 'agent_server_network_dir': CONF.amphora_agent.agent_server_network_dir, 'agent_server_network_file': CONF.amphora_agent.agent_server_network_file, 'agent_request_read_timeout': CONF.amphora_agent.agent_request_read_timeout, 'amphora_id': amphora_id, 'base_cert_dir': CONF.haproxy_amphora.base_cert_dir, 'base_path': CONF.haproxy_amphora.base_path, 'bind_host': CONF.haproxy_amphora.bind_host, 'bind_port': CONF.haproxy_amphora.bind_port, 'controller_list': CONF.health_manager.controller_ip_port_list, 'debug': CONF.debug, 'haproxy_cmd': CONF.haproxy_amphora.haproxy_cmd, 'heartbeat_interval': CONF.health_manager.heartbeat_interval, 'heartbeat_key': CONF.health_manager.heartbeat_key, 'use_upstart': CONF.haproxy_amphora.use_upstart, 'respawn_count': CONF.haproxy_amphora.respawn_count, 'respawn_interval': CONF.haproxy_amphora.respawn_interval, 'amphora_udp_driver': CONF.amphora_agent.amphora_udp_driver, 'agent_tls_protocol': CONF.amphora_agent.agent_tls_protocol, 'topology': topology, 'administrative_log_facility': CONF.amphora_agent.administrative_log_facility, 'user_log_facility': CONF.amphora_agent.user_log_facility}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3702166 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/0000775000175000017500000000000000000000000023423 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/__init__.py0000664000175000017500000000116600000000000025540 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. VERSION = '1.0' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/amphora_info.py0000664000175000017500000001643100000000000026444 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import socket import subprocess import pyroute2 import webob from octavia.amphorae.backends.agent import api_server from octavia.amphorae.backends.agent.api_server import util from octavia.amphorae.backends.utils import network_utils from octavia.common import constants as consts from octavia.common import exceptions class AmphoraInfo(object): def __init__(self, osutils): self._osutils = osutils def compile_amphora_info(self, extend_udp_driver=None): extend_body = {} if extend_udp_driver: extend_body = self._get_extend_body_from_udp_driver( extend_udp_driver) body = {'hostname': socket.gethostname(), 'haproxy_version': self._get_version_of_installed_package('haproxy'), 'api_version': api_server.VERSION} if extend_body: body.update(extend_body) return webob.Response(json=body) def compile_amphora_details(self, extend_udp_driver=None): haproxy_listener_list = sorted(util.get_listeners()) extend_body = {} udp_listener_list = [] if extend_udp_driver: udp_listener_list = util.get_udp_listeners() extend_data = self._get_extend_body_from_udp_driver( extend_udp_driver) udp_count = self._count_udp_listener_processes(extend_udp_driver, udp_listener_list) extend_body['udp_listener_process_count'] = udp_count extend_body.update(extend_data) meminfo = self._get_meminfo() cpu = self._cpu() st = os.statvfs('/') body = {'hostname': socket.gethostname(), 'haproxy_version': self._get_version_of_installed_package('haproxy'), 'api_version': api_server.VERSION, 'networks': self._get_networks(), 'active': True, 'haproxy_count': self._count_haproxy_processes(haproxy_listener_list), 'cpu': { 'total': cpu['total'], 'user': cpu['user'], 'system': cpu['system'], 'soft_irq': cpu['softirq'], }, 'memory': { 'total': meminfo['MemTotal'], 'free': meminfo['MemFree'], 'buffers': meminfo['Buffers'], 'cached': meminfo['Cached'], 'swap_used': meminfo['SwapCached'], 'shared': meminfo['Shmem'], 'slab': meminfo['Slab'], }, 'disk': { 'used': (st.f_blocks - st.f_bfree) * st.f_frsize, 'available': st.f_bavail * st.f_frsize}, 'load': self._load(), 'topology': consts.TOPOLOGY_SINGLE, 'topology_status': consts.TOPOLOGY_STATUS_OK, 'listeners': sorted(list( set(haproxy_listener_list + udp_listener_list))) if udp_listener_list else haproxy_listener_list, 'packages': {}} if extend_body: body.update(extend_body) return webob.Response(json=body) def _get_version_of_installed_package(self, name): cmd = self._osutils.cmd_get_version_of_installed_package(name) version = subprocess.check_output(cmd.split()) return version def _count_haproxy_processes(self, lb_list): num = 0 for lb_id in lb_list: if util.is_lb_running(lb_id): # optional check if it's still running num += 1 return num def _count_udp_listener_processes(self, udp_driver, listener_list): num = 0 for listener_id in listener_list: if udp_driver.is_listener_running(listener_id): # optional check if it's still running num += 1 return num def _get_extend_body_from_udp_driver(self, extend_udp_driver): extend_info = extend_udp_driver.get_subscribed_amp_compile_info() extend_data = {} for extend in extend_info: package_version = 
self._get_version_of_installed_package(extend) extend_data['%s_version' % extend] = package_version return extend_data
def _get_meminfo(self): re_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB') result = dict() with open('/proc/meminfo', 'r') as meminfo: for line in meminfo: match = re_parser.match(line) if not match: continue # skip lines that don't parse key, value = match.groups(['key', 'value']) result[key] = int(value) return result
def _cpu(self): with open('/proc/stat') as f: cpu = f.readline() vals = cpu.split(' ') return { 'user': vals[2], 'nice': vals[3], 'system': vals[4], 'idle': vals[5], 'iowait': vals[6], 'irq': vals[7], 'softirq': vals[8], 'total': sum([int(i) for i in vals[2:]]) }
def _load(self): with open('/proc/loadavg') as f: load = f.readline() vals = load.split(' ') return vals[:3]
def _get_networks(self): networks = dict() with pyroute2.NetNS(consts.AMPHORA_NAMESPACE) as netns: for interface in netns.get_links(): interface_name = None for item in interface['attrs']: if (item[0] == 'IFLA_IFNAME' and not item[1].startswith('eth')): break if item[0] == 'IFLA_IFNAME': interface_name = item[1] if item[0] == 'IFLA_STATS64': networks[interface_name] = dict( network_tx=item[1]['tx_bytes'], network_rx=item[1]['rx_bytes']) return networks
def get_interface(self, ip_addr): try: interface = network_utils.get_interface_name( ip_addr, net_ns=consts.AMPHORA_NAMESPACE) except exceptions.InvalidIPAddress: return webob.Response(json=dict(message="Invalid IP address"), status=400) except exceptions.NotFound: return webob.Response( json=dict(message="Error interface not found for IP address"), status=404) return webob.Response(json=dict(message='OK', interface=interface), status=200)

octavia-6.2.2/octavia/amphorae/backends/agent/api_server/certificate_update.py

# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import os import stat import flask from oslo_config import cfg import webob
BUFFER = 1024 CONF = cfg.CONF
def upload_server_cert(): stream = flask.request.stream file_path = CONF.amphora_agent.agent_server_cert flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC # mode 00600 mode = stat.S_IRUSR | stat.S_IWUSR with os.fdopen(os.open(file_path, flags, mode), 'wb') as crt_file: b = stream.read(BUFFER) while b: crt_file.write(b) b = stream.read(BUFFER) return webob.Response(json={'message': 'OK'}, status=202)
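# Illustrative usage sketch (added for documentation, not part of the
# original module): the same chunked read / restrictive-permission write
# pattern used by upload_server_cert(), shown standalone with an in-memory
# stream. 'write_restricted' and its arguments are hypothetical names.
#
#     import io
#     import os
#     import stat
#
#     def write_restricted(source, dest_path, buffer_size=1024):
#         flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
#         # mode 00600 keeps the written file owner-read/write only
#         mode = stat.S_IRUSR | stat.S_IWUSR
#         with os.fdopen(os.open(dest_path, flags, mode), 'wb') as out:
#             chunk = source.read(buffer_size)
#             while chunk:
#                 out.write(chunk)
#                 chunk = source.read(buffer_size)
#
#     write_restricted(io.BytesIO(b'example PEM data'), '/tmp/server.pem')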
octavia-6.2.2/octavia/amphorae/backends/agent/api_server/haproxy_compatibility.py

# Copyright 2017 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import re import subprocess
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def get_haproxy_versions(): """Get major and minor version number from haproxy :returns major_version: The major version digit :returns minor_version: The minor version digit """ cmd = "haproxy -v" version = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) version_re = re.search(r'.*version (.+?)\.(.+?)(\.|-dev).*', version.decode('utf-8')) major_version = int(version_re.group(1)) minor_version = int(version_re.group(2)) return major_version, minor_version
def process_cfg_for_version_compat(haproxy_cfg): major, minor = get_haproxy_versions() # Versions less than 1.6 do not support external health checks. # Remove those configuration items. if major < 2 and minor < 6: LOG.warning("Found %(major)s.%(minor)s version of haproxy. " "Disabling external checks. Health monitor of type " "PING will revert to TCP.", {'major': major, 'minor': minor}) haproxy_cfg = re.sub(r" * ?.*external-check ?.*\s", "", haproxy_cfg) return haproxy_cfg

octavia-6.2.2/octavia/amphorae/backends/agent/api_server/keepalived.py

# Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import os import stat import subprocess import flask import jinja2 from oslo_config import cfg from oslo_log import log as logging import webob from octavia.amphorae.backends.agent.api_server import loadbalancer from octavia.amphorae.backends.agent.api_server import util from octavia.common import constants as consts BUFFER = 100 CONF = cfg.CONF LOG = logging.getLogger(__name__) j2_env = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader( os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES)) UPSTART_TEMPLATE = j2_env.get_template(consts.KEEPALIVED_JINJA2_UPSTART) SYSVINIT_TEMPLATE = j2_env.get_template(consts.KEEPALIVED_JINJA2_SYSVINIT) SYSTEMD_TEMPLATE = j2_env.get_template(consts.KEEPALIVED_JINJA2_SYSTEMD) check_script_template = j2_env.get_template(consts.CHECK_SCRIPT_CONF) class Keepalived(object): def upload_keepalived_config(self): stream = loadbalancer.Wrapped(flask.request.stream) if not os.path.exists(util.keepalived_dir()): os.makedirs(util.keepalived_dir()) if not os.path.exists(util.keepalived_check_scripts_dir()): os.makedirs(util.keepalived_check_scripts_dir()) conf_file = util.keepalived_cfg_path() flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC # mode 00644 mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH with os.fdopen(os.open(conf_file, flags, mode), 'wb') as f: b = stream.read(BUFFER) while b: f.write(b) b = stream.read(BUFFER) init_system = util.get_os_init_system() file_path = util.keepalived_init_path(init_system) if init_system == consts.INIT_SYSTEMD: template = SYSTEMD_TEMPLATE init_enable_cmd = "systemctl enable octavia-keepalived" # Render and install the network namespace systemd service util.install_netns_systemd_service() util.run_systemctl_command( consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX) elif init_system == consts.INIT_UPSTART: template = UPSTART_TEMPLATE elif init_system == consts.INIT_SYSVINIT: template = SYSVINIT_TEMPLATE init_enable_cmd = "insserv {file}".format(file=file_path) else: raise util.UnknownInitError() if init_system == consts.INIT_SYSTEMD: # mode 00644 mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH else: # mode 00755 mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) if not os.path.exists(file_path): with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file: text = template.render( keepalived_pid=util.keepalived_pid_path(), keepalived_cmd=consts.KEEPALIVED_CMD, keepalived_cfg=util.keepalived_cfg_path(), keepalived_log=util.keepalived_log_path(), amphora_nsname=consts.AMPHORA_NAMESPACE, amphora_netns=consts.AMP_NETNS_SVC_PREFIX, administrative_log_facility=( CONF.amphora_agent.administrative_log_facility), ) text_file.write(text) # Renders the Keepalived check script keepalived_path = util.keepalived_check_script_path() # mode 00755 mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) open_obj = os.open(keepalived_path, flags, mode) with os.fdopen(open_obj, 'w') as text_file: text = check_script_template.render( check_scripts_dir=util.keepalived_check_scripts_dir() ) text_file.write(text) # Configure the monitoring of haproxy util.vrrp_check_script_update(None, consts.AMP_ACTION_START) # Make sure the new service is enabled on boot if init_system != consts.INIT_UPSTART: try: subprocess.check_output(init_enable_cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.debug('Failed to enable octavia-keepalived service: ' '%(err)s %(output)s', {'err': e, 'output': e.output}) return 
webob.Response(json=dict( message="Error enabling octavia-keepalived service", details=e.output), status=500) res = webob.Response(json={'message': 'OK'}, status=200) res.headers['ETag'] = stream.get_md5() return res def manager_keepalived_service(self, action): action = action.lower() if action not in [consts.AMP_ACTION_START, consts.AMP_ACTION_STOP, consts.AMP_ACTION_RELOAD]: return webob.Response(json=dict( message='Invalid Request', details="Unknown action: {0}".format(action)), status=400) if action == consts.AMP_ACTION_START: keepalived_pid_path = util.keepalived_pid_path() try: # Is there a pid file for keepalived? with open(keepalived_pid_path, 'r') as pid_file: pid = int(pid_file.readline()) os.kill(pid, 0) # If we got here, it means the keepalived process is running. # We should reload it instead of trying to start it again. action = consts.AMP_ACTION_RELOAD except (IOError, OSError): pass cmd = ("/usr/sbin/service octavia-keepalived {action}".format( action=action)) try: subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.debug('Failed to %s octavia-keepalived service: %s %s', action, e, e.output) return webob.Response(json=dict( message="Failed to {0} octavia-keepalived service".format( action), details=e.output), status=500) return webob.Response( json=dict(message='OK', details='keepalived {action}ed'.format(action=action)), status=202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/keepalivedlvs.py0000664000175000017500000003465500000000000026650 0ustar00zuulzuul00000000000000# Copyright 2011-2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import re import shutil import stat import subprocess import flask import jinja2 from oslo_config import cfg from oslo_log import log as logging import webob from werkzeug import exceptions from octavia.amphorae.backends.agent.api_server import loadbalancer from octavia.amphorae.backends.agent.api_server import udp_listener_base from octavia.amphorae.backends.agent.api_server import util from octavia.common import constants as consts BUFFER = 100 CHECK_SCRIPT_NAME = 'udp_check.sh' CONF = cfg.CONF KEEPALIVED_CHECK_SCRIPT_NAME = 'lvs_udp_check.sh' LOG = logging.getLogger(__name__) j2_env = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader( os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES)) UPSTART_TEMPLATE = j2_env.get_template(consts.KEEPALIVED_JINJA2_UPSTART) SYSVINIT_TEMPLATE = j2_env.get_template(consts.KEEPALIVED_JINJA2_SYSVINIT) SYSTEMD_TEMPLATE = j2_env.get_template(consts.KEEPALIVED_JINJA2_SYSTEMD) check_script_file_template = j2_env.get_template( consts.KEEPALIVED_CHECK_SCRIPT) class KeepalivedLvs(udp_listener_base.UdpListenerApiServerBase): _SUBSCRIBED_AMP_COMPILE = ['keepalived', 'ipvsadm'] def upload_udp_listener_config(self, listener_id): stream = loadbalancer.Wrapped(flask.request.stream) NEED_CHECK = True if not os.path.exists(util.keepalived_lvs_dir()): os.makedirs(util.keepalived_lvs_dir()) if not os.path.exists(util.keepalived_backend_check_script_dir()): current_file_dir, _ = os.path.split(os.path.abspath(__file__)) try: script_dir = os.path.join(os.path.abspath( os.path.join(current_file_dir, '../..')), 'utils') assert True is os.path.exists(script_dir) assert True is os.path.exists(os.path.join( script_dir, CHECK_SCRIPT_NAME)) except Exception: raise exceptions.Conflict( description='%(file_name)s not Found for ' 'UDP Listener %(listener_id)s' % {'file_name': CHECK_SCRIPT_NAME, 'listener_id': listener_id}) os.makedirs(util.keepalived_backend_check_script_dir()) shutil.copy2(os.path.join(script_dir, CHECK_SCRIPT_NAME), util.keepalived_backend_check_script_path()) os.chmod(util.keepalived_backend_check_script_path(), stat.S_IEXEC) # Based on current topology setting, only the amphora instances in # Active-Standby topology will create the directory below. So for # Single topology, it should not create the directory and the check # scripts for status change. 
if (CONF.controller_worker.loadbalancer_topology != consts.TOPOLOGY_ACTIVE_STANDBY): NEED_CHECK = False conf_file = util.keepalived_lvs_cfg_path(listener_id) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC # mode 00644 mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH with os.fdopen(os.open(conf_file, flags, mode), 'wb') as f: b = stream.read(BUFFER) while b: f.write(b) b = stream.read(BUFFER) init_system = util.get_os_init_system() file_path = util.keepalived_lvs_init_path(init_system, listener_id) if init_system == consts.INIT_SYSTEMD: template = SYSTEMD_TEMPLATE # Render and install the network namespace systemd service util.install_netns_systemd_service() util.run_systemctl_command( consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX) elif init_system == consts.INIT_UPSTART: template = UPSTART_TEMPLATE elif init_system == consts.INIT_SYSVINIT: template = SYSVINIT_TEMPLATE else: raise util.UnknownInitError() # Render and install the keepalivedlvs init script if init_system == consts.INIT_SYSTEMD: # mode 00644 mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH else: # mode 00755 mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path( listener_id) if not os.path.exists(file_path): with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file: text = template.render( keepalived_pid=keepalived_pid, vrrp_pid=vrrp_pid, check_pid=check_pid, keepalived_cmd=consts.KEEPALIVED_CMD, keepalived_cfg=util.keepalived_lvs_cfg_path(listener_id), amphora_nsname=consts.AMPHORA_NAMESPACE, amphora_netns=consts.AMP_NETNS_SVC_PREFIX, administrative_log_facility=( CONF.amphora_agent.administrative_log_facility), ) text_file.write(text) # Make sure the keepalivedlvs service is enabled on boot if init_system == consts.INIT_SYSTEMD: util.run_systemctl_command( consts.ENABLE, "octavia-keepalivedlvs-%s" % str(listener_id)) elif init_system == consts.INIT_SYSVINIT: init_enable_cmd = "insserv {file}".format(file=file_path) try: subprocess.check_output(init_enable_cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.debug('Failed to enable ' 'octavia-keepalivedlvs service: ' '%(err)s', {'err': str(e)}) return webob.Response(json=dict( message="Error enabling " "octavia-keepalivedlvs service", details=e.output), status=500) if NEED_CHECK: # inject the check script for keepalived process script_path = os.path.join(util.keepalived_check_scripts_dir(), KEEPALIVED_CHECK_SCRIPT_NAME) if not os.path.exists(script_path): if not os.path.exists(util.keepalived_check_scripts_dir()): os.makedirs(util.keepalived_check_scripts_dir()) with os.fdopen(os.open(script_path, flags, stat.S_IEXEC), 'w') as script_file: text = check_script_file_template.render( consts=consts, init_system=init_system, keepalived_lvs_pid_dir=util.keepalived_lvs_dir() ) script_file.write(text) util.vrrp_check_script_update(None, consts.AMP_ACTION_START) res = webob.Response(json={'message': 'OK'}, status=200) res.headers['ETag'] = stream.get_md5() return res def _check_udp_listener_exists(self, listener_id): if not os.path.exists(util.keepalived_lvs_cfg_path(listener_id)): raise exceptions.HTTPException( response=webob.Response(json=dict( message='UDP Listener Not Found', details="No UDP listener with UUID: {0}".format( listener_id)), status=404)) def get_udp_listener_config(self, listener_id): """Gets the keepalivedlvs config :param listener_id: the id of the listener """ 
self._check_udp_listener_exists(listener_id) with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as file: cfg = file.read() resp = webob.Response(cfg, content_type='text/plain') return resp
def manage_udp_listener(self, listener_id, action): action = action.lower() if action not in [consts.AMP_ACTION_START, consts.AMP_ACTION_STOP, consts.AMP_ACTION_RELOAD]: return webob.Response(json=dict( message='Invalid Request', details="Unknown action: {0}".format(action)), status=400) # When octavia requests a reload of keepalived, force a restart since # a keepalived reload doesn't restore members in their initial state. # # TODO(gthiemonge) remove this when keepalived>=2.0.14 is widely used if action == consts.AMP_ACTION_RELOAD: action = consts.AMP_ACTION_RESTART self._check_udp_listener_exists(listener_id) if action == consts.AMP_ACTION_RELOAD: if consts.OFFLINE == self._check_udp_listener_status(listener_id): action = consts.AMP_ACTION_START cmd = ("/usr/sbin/service " "octavia-keepalivedlvs-{listener_id} " "{action}".format(listener_id=listener_id, action=action)) try: subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.debug('Failed to %s keepalivedlvs listener %s', listener_id + ' : ' + action, str(e)) return webob.Response(json=dict( message=("Failed to {0} keepalivedlvs listener {1}" .format(action, listener_id)), details=e.output), status=500) return webob.Response( json=dict(message='OK', details='keepalivedlvs listener {listener_id} ' '{action}ed'.format(listener_id=listener_id, action=action)), status=202)
def _check_udp_listener_status(self, listener_id): if os.path.exists(util.keepalived_lvs_pids_path(listener_id)[0]): if os.path.exists(os.path.join( '/proc', util.get_keepalivedlvs_pid(listener_id))): # Check if the listener is disabled with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as file: cfg = file.read() m = re.search('virtual_server', cfg) if m: return consts.ACTIVE return consts.OFFLINE return consts.ERROR return consts.OFFLINE
def get_all_udp_listeners_status(self): """Gets the status of all UDP listeners Gets the status of all UDP listeners on the amphora. """ listeners = list() for udp_listener in util.get_udp_listeners(): status = self._check_udp_listener_status(udp_listener) listeners.append({ 'status': status, 'uuid': udp_listener, 'type': 'UDP', }) return listeners
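# Illustrative sketch (added for documentation, not part of the original
# module): a caller could fold the list returned by
# get_all_udp_listeners_status() into a per-status count, for example
# {'ACTIVE': 2, 'OFFLINE': 1}:
#
#     def summarize_listener_statuses(listeners):
#         summary = {}
#         for listener in listeners:
#             status = listener['status']
#             summary[status] = summary.get(status, 0) + 1
#         return summary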
def delete_udp_listener(self, listener_id): try: self._check_udp_listener_exists(listener_id) except exceptions.HTTPException: return webob.Response(json={'message': 'OK'}) # check if that keepalived is still running and, if so, stop it keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path( listener_id) if os.path.exists(keepalived_pid) and os.path.exists( os.path.join('/proc', util.get_keepalivedlvs_pid(listener_id))): cmd = ("/usr/sbin/service " "octavia-keepalivedlvs-{0} stop".format(listener_id)) try: subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.error("Failed to stop keepalivedlvs service: %s", str(e)) return webob.Response(json=dict( message="Error stopping keepalivedlvs", details=e.output), status=500) # The LVS check script relies on the keepalived pid file to determine # whether keepalived is alive. The previous step stopped the keepalived # process, so make sure its pid files no longer exist. if (os.path.exists(keepalived_pid) or os.path.exists(vrrp_pid) or os.path.exists(check_pid)): for pid in [keepalived_pid, vrrp_pid, check_pid]: os.remove(pid) # disable the service init_system = util.get_os_init_system() init_path = util.keepalived_lvs_init_path(init_system, listener_id) if init_system == consts.INIT_SYSTEMD: util.run_systemctl_command( consts.DISABLE, "octavia-keepalivedlvs-%s" % str(listener_id)) elif init_system == consts.INIT_SYSVINIT: init_disable_cmd = "insserv -r {file}".format(file=init_path) elif init_system != consts.INIT_UPSTART: raise util.UnknownInitError() if init_system == consts.INIT_SYSVINIT: try: subprocess.check_output(init_disable_cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.error("Failed to disable " "octavia-keepalivedlvs-%(list)s service: " "%(err)s", {'list': listener_id, 'err': str(e)}) return webob.Response(json=dict( message=( "Error disabling octavia-keepalivedlvs-" "{0} service".format(listener_id)), details=e.output), status=500) # delete the init script, config file and log file for that listener if os.path.exists(init_path): os.remove(init_path) if os.path.exists(util.keepalived_lvs_cfg_path(listener_id)): os.remove(util.keepalived_lvs_cfg_path(listener_id)) return webob.Response(json={'message': 'OK'})

octavia-6.2.2/octavia/amphorae/backends/agent/api_server/loadbalancer.py

# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import hashlib import io import os import re import shutil import stat import subprocess
import flask import jinja2 from oslo_config import cfg from oslo_log import log as logging import webob from werkzeug import exceptions
from octavia.amphorae.backends.agent.api_server import haproxy_compatibility from octavia.amphorae.backends.agent.api_server import osutils from octavia.amphorae.backends.agent.api_server import util from octavia.common import constants as consts from octavia.common import utils as octavia_utils
LOG = logging.getLogger(__name__) BUFFER = 100 CONF = cfg.CONF
UPSTART_CONF = 'upstart.conf.j2' SYSVINIT_CONF = 'sysvinit.conf.j2' SYSTEMD_CONF = 'systemd.conf.j2'
JINJA_ENV = jinja2.Environment( autoescape=True, loader=jinja2.FileSystemLoader(os.path.dirname( os.path.realpath(__file__) ) + consts.AGENT_API_TEMPLATES)) UPSTART_TEMPLATE = JINJA_ENV.get_template(UPSTART_CONF) SYSVINIT_TEMPLATE = JINJA_ENV.get_template(SYSVINIT_CONF) SYSTEMD_TEMPLATE = JINJA_ENV.get_template(SYSTEMD_CONF)
# Wrap a stream so we can compute the md5 while reading
class Wrapped(object): def __init__(self, stream_): self.stream = stream_ self.hash = hashlib.md5() # nosec def read(self, line): block = self.stream.read(line) if block: self.hash.update(block) return block def get_md5(self): return self.hash.hexdigest() def __getattr__(self, attr): return getattr(self.stream, attr)
class Loadbalancer(object): def __init__(self): self._osutils = osutils.BaseOS.get_os_util()
def get_haproxy_config(self, lb_id): """Gets the haproxy config :param lb_id: the id of the loadbalancer """ self._check_lb_exists(lb_id) with open(util.config_path(lb_id), 'r') as file: cfg = file.read() resp = webob.Response(cfg, content_type='text/plain') resp.headers['ETag'] = ( hashlib.md5(octavia_utils.b(cfg)).hexdigest()) # nosec return resp
def upload_haproxy_config(self, amphora_id, lb_id): """Upload the haproxy config :param amphora_id: The id of the amphora to update :param lb_id: The id of the loadbalancer """ stream = Wrapped(flask.request.stream) # We have to hash here because HAProxy has a string length limitation # in the configuration file "peer <peer_name>" lines peer_name = octavia_utils.base64_sha1_string(amphora_id).rstrip('=') if not os.path.exists(util.haproxy_dir(lb_id)): os.makedirs(util.haproxy_dir(lb_id)) name = os.path.join(util.haproxy_dir(lb_id), 'haproxy.cfg.new') flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC # mode 00600 mode = stat.S_IRUSR | stat.S_IWUSR b = stream.read(BUFFER) s_io = io.StringIO() while b: # Write haproxy configuration to StringIO s_io.write(b.decode('utf8')) b = stream.read(BUFFER) # Since haproxy user_group is now auto-detected by the amphora agent, # remove it from haproxy configuration in case it was provided # by an older Octavia controller. This is needed in order to prevent # a duplicate entry for 'group' in haproxy configuration, which will # result in an error when haproxy starts.
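# Illustration (added comment, not in the original source): given the input
#     "global\n    group nogroup\n    daemon\n"
# the substitution below produces
#     "global\n    daemon\n"
# because r"\s+group\s.+" consumes the newline and indentation, the literal
# 'group' keyword, and the remainder of that line.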
new_config = re.sub(r"\s+group\s.+", "", s_io.getvalue()) # Handle any haproxy version compatibility issues new_config = haproxy_compatibility.process_cfg_for_version_compat( new_config) with os.fdopen(os.open(name, flags, mode), 'w') as file: file.write(new_config) # use haproxy to check the config cmd = "haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format( config_file=name, peer=peer_name, haproxy_ug=consts.HAPROXY_USER_GROUP_CFG) try: subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.error("Failed to verify haproxy file: %s %s", e, e.output) # Save the last config that failed validation for debugging os.rename(name, ''.join([name, '-failed'])) return webob.Response( json=dict(message="Invalid request", details=e.output), status=400) # file ok - move it os.rename(name, util.config_path(lb_id)) try: init_system = util.get_os_init_system() LOG.debug('Found init system: %s', init_system) init_path = util.init_path(lb_id, init_system) if init_system == consts.INIT_SYSTEMD: template = SYSTEMD_TEMPLATE # Render and install the network namespace systemd service util.install_netns_systemd_service() util.run_systemctl_command( consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX + '.service') elif init_system == consts.INIT_UPSTART: template = UPSTART_TEMPLATE elif init_system == consts.INIT_SYSVINIT: template = SYSVINIT_TEMPLATE init_enable_cmd = "insserv {file}".format(file=init_path) else: raise util.UnknownInitError() except util.UnknownInitError: LOG.error("Unknown init system found.") return webob.Response(json=dict( message="Unknown init system in amphora", details="The amphora image is running an unknown init " "system. We can't create the init configuration " "file for the load balancing process."), status=500) if init_system == consts.INIT_SYSTEMD: # mode 00644 mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) else: # mode 00755 mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) hap_major, hap_minor = haproxy_compatibility.get_haproxy_versions() if not os.path.exists(init_path): with os.fdopen(os.open(init_path, flags, mode), 'w') as text_file: text = template.render( peer_name=peer_name, haproxy_pid=util.pid_path(lb_id), haproxy_cmd=util.CONF.haproxy_amphora.haproxy_cmd, haproxy_cfg=util.config_path(lb_id), haproxy_user_group_cfg=consts.HAPROXY_USER_GROUP_CFG, respawn_count=util.CONF.haproxy_amphora.respawn_count, respawn_interval=(util.CONF.haproxy_amphora. 
respawn_interval), amphora_netns=consts.AMP_NETNS_SVC_PREFIX, amphora_nsname=consts.AMPHORA_NAMESPACE, HasIFUPAll=self._osutils.has_ifup_all(), haproxy_major_version=hap_major, haproxy_minor_version=hap_minor ) text_file.write(text) # Make sure the new service is enabled on boot if init_system == consts.INIT_SYSTEMD: util.run_systemctl_command( consts.ENABLE, "haproxy-{lb_id}".format(lb_id=lb_id)) elif init_system == consts.INIT_SYSVINIT: try: subprocess.check_output(init_enable_cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.error("Failed to enable haproxy-%(lb_id)s service: " "%(err)s %(out)s", {'lb_id': lb_id, 'err': e, 'out': e.output}) return webob.Response(json=dict( message="Error enabling haproxy-{0} service".format( lb_id), details=e.output), status=500) res = webob.Response(json={'message': 'OK'}, status=202) res.headers['ETag'] = stream.get_md5() return res def start_stop_lb(self, lb_id, action): action = action.lower() if action not in [consts.AMP_ACTION_START, consts.AMP_ACTION_STOP, consts.AMP_ACTION_RELOAD]: return webob.Response(json=dict( message='Invalid Request', details="Unknown action: {0}".format(action)), status=400) self._check_lb_exists(lb_id) is_vrrp = (CONF.controller_worker.loadbalancer_topology == consts.TOPOLOGY_ACTIVE_STANDBY) if is_vrrp: util.vrrp_check_script_update(lb_id, action) # HAProxy does not start the process when given a reload # so start it if haproxy is not already running if action == consts.AMP_ACTION_RELOAD: if consts.OFFLINE == self._check_haproxy_status(lb_id): action = consts.AMP_ACTION_START cmd = ("/usr/sbin/service haproxy-{lb_id} {action}".format( lb_id=lb_id, action=action)) try: subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: if b'Job is already running' not in e.output: LOG.debug( "Failed to %(action)s haproxy-%(lb_id)s service: %(err)s " "%(out)s", {'action': action, 'lb_id': lb_id, 'err': e, 'out': e.output}) return webob.Response(json=dict( message="Error {0}ing haproxy".format(action), details=e.output), status=500) # If we are not in active/standby we need to send an IP # advertisement (GARP or NA). Keepalived handles this for # active/standby load balancers. 
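# As a rough, hypothetical illustration (the real logic lives in
# util.send_vip_advertisements and may differ): for an IPv4 VIP the
# advertisement is equivalent to emitting gratuitous ARP from inside
# the namespace, e.g.
#     ip netns exec amphora-haproxy arping -U -I eth1 -c 3 203.0.113.10
# while an IPv6 VIP would use an unsolicited neighbor advertisement
# for the same purpose.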
if not is_vrrp and action in [consts.AMP_ACTION_START, consts.AMP_ACTION_RELOAD]: util.send_vip_advertisements(lb_id) if action in [consts.AMP_ACTION_STOP, consts.AMP_ACTION_RELOAD]: return webob.Response(json=dict( message='OK', details='Listener {lb_id} {action}ed'.format( lb_id=lb_id, action=action)), status=202) details = ( 'Configuration file is valid\n' 'haproxy daemon for {0} started'.format(lb_id) ) return webob.Response(json=dict(message='OK', details=details), status=202) def delete_lb(self, lb_id): try: self._check_lb_exists(lb_id) except exceptions.HTTPException: return webob.Response(json={'message': 'OK'}) # check if that haproxy is still running and if so, stop it if os.path.exists(util.pid_path(lb_id)) and os.path.exists( os.path.join('/proc', util.get_haproxy_pid(lb_id))): cmd = "/usr/sbin/service haproxy-{0} stop".format(lb_id) try: subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.error("Failed to stop haproxy-%s service: %s %s", lb_id, e, e.output) return webob.Response(json=dict( message="Error stopping haproxy", details=e.output), status=500) # parse config and delete stats socket try: stats_socket = util.parse_haproxy_file(lb_id)[0] os.remove(stats_socket) except Exception: pass # Since this script should be deleted at LB delete time # we can check for this path to see if VRRP is enabled # on this amphora and not write the file if VRRP is not in use if os.path.exists(util.keepalived_check_script_path()): util.vrrp_check_script_update( lb_id, action=consts.AMP_ACTION_STOP) # delete the ssl files try: shutil.rmtree(self._cert_dir(lb_id)) except Exception: pass # disable the service init_system = util.get_os_init_system() init_path = util.init_path(lb_id, init_system) if init_system == consts.INIT_SYSTEMD: util.run_systemctl_command( consts.DISABLE, "haproxy-{lb_id}".format( lb_id=lb_id)) elif init_system == consts.INIT_SYSVINIT: init_disable_cmd = "insserv -r {file}".format(file=init_path) elif init_system != consts.INIT_UPSTART: raise util.UnknownInitError() if init_system == consts.INIT_SYSVINIT: try: subprocess.check_output(init_disable_cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.error("Failed to disable haproxy-%(lb_id)s service: " "%(err)s %(out)s", {'lb_id': lb_id, 'err': e, 'out': e.output}) return webob.Response(json=dict( message="Error disabling haproxy-{0} service".format( lb_id), details=e.output), status=500) # delete the directory + init script for that listener shutil.rmtree(util.haproxy_dir(lb_id)) if os.path.exists(init_path): os.remove(init_path) return webob.Response(json={'message': 'OK'}) def get_all_listeners_status(self, other_listeners=None): """Gets the status of all listeners This method will not consult the stats socket so a listener might show as ACTIVE but still be in ERROR Currently type==SSL is also not detected """ listeners = list() for lb in util.get_loadbalancers(): stats_socket, listeners_on_lb = util.parse_haproxy_file(lb) for listener_id, listener in listeners_on_lb.items(): listeners.append({ 'status': consts.ACTIVE, 'uuid': listener_id, 'type': listener['mode'], }) if other_listeners: listeners = listeners + other_listeners return webob.Response(json=listeners, content_type='application/json') def upload_certificate(self, lb_id, filename): self._check_ssl_filename_format(filename) # create directory if not already there if not os.path.exists(self._cert_dir(lb_id)): os.makedirs(self._cert_dir(lb_id)) stream =
Wrapped(flask.request.stream) file = self._cert_file_path(lb_id, filename) flags = os.O_WRONLY | os.O_CREAT # mode 00600 mode = stat.S_IRUSR | stat.S_IWUSR with os.fdopen(os.open(file, flags, mode), 'wb') as crt_file: b = stream.read(BUFFER) while b: crt_file.write(b) b = stream.read(BUFFER) resp = webob.Response(json=dict(message='OK')) resp.headers['ETag'] = stream.get_md5() return resp def get_certificate_md5(self, lb_id, filename): self._check_ssl_filename_format(filename) cert_path = self._cert_file_path(lb_id, filename) path_exists = os.path.exists(cert_path) if not path_exists: return webob.Response(json=dict( message='Certificate Not Found', details="No certificate with filename: {f}".format( f=filename)), status=404) with open(cert_path, 'r') as crt_file: cert = crt_file.read() md5 = hashlib.md5(octavia_utils.b(cert)).hexdigest() # nosec resp = webob.Response(json=dict(md5sum=md5)) resp.headers['ETag'] = md5 return resp def delete_certificate(self, lb_id, filename): self._check_ssl_filename_format(filename) if os.path.exists(self._cert_file_path(lb_id, filename)): os.remove(self._cert_file_path(lb_id, filename)) return webob.Response(json=dict(message='OK')) def _get_listeners_on_lb(self, lb_id): if os.path.exists(util.pid_path(lb_id)): if os.path.exists( os.path.join('/proc', util.get_haproxy_pid(lb_id))): # Check if the listener is disabled with open(util.config_path(lb_id), 'r') as file: cfg = file.read() m = re.findall('^frontend (.*)$', cfg, re.MULTILINE) return m or [] else: # pid file but no process... return [] else: return [] def _check_lb_exists(self, lb_id): # check if we know about that lb if lb_id not in util.get_loadbalancers(): raise exceptions.HTTPException( response=webob.Response(json=dict( message='Loadbalancer Not Found', details="No loadbalancer with UUID: {0}".format( lb_id)), status=404)) def _check_ssl_filename_format(self, filename): # check if the format is (xxx.)*xxx.pem if not re.search(r'(\w.)+pem', filename): raise exceptions.HTTPException( response=webob.Response(json=dict( message='Filename has wrong format'), status=400)) def _cert_dir(self, lb_id): return os.path.join(util.CONF.haproxy_amphora.base_cert_dir, lb_id) def _cert_file_path(self, lb_id, filename): return os.path.join(self._cert_dir(lb_id), filename) def _check_haproxy_status(self, lb_id): if os.path.exists(util.pid_path(lb_id)): if os.path.exists( os.path.join('/proc', util.get_haproxy_pid(lb_id))): return consts.ACTIVE return consts.OFFLINE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/osutils.py0000664000175000017500000005777600000000000025525 0ustar00zuulzuul00000000000000# Copyright 2017 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
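# Reader's note: the OS helper classes in this module are selected at
# runtime by matching distro.id() against each BaseOS subclass (see
# BaseOS.get_os_util() below); e.g. an id of 'ubuntu' selects Ubuntu,
# while 'centos' selects CentOS, which inherits from RH. The subclass
# walk in _get_subclasses() is recursive, so grandchildren such as
# CentOS are considered too.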
import errno import ipaddress import os import shutil import stat import subprocess import distro import jinja2 from oslo_config import cfg from oslo_log import log as logging import webob from werkzeug import exceptions from octavia.common import constants as consts from octavia.common import exceptions as octavia_exceptions from octavia.common import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) j2_env = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader( os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES)) class BaseOS(object): def __init__(self, os_name): self.os_name = os_name self.package_name_map = {} @classmethod def _get_subclasses(cls): for subclass in cls.__subclasses__(): for sc in subclass._get_subclasses(): yield sc yield subclass @classmethod def get_os_util(cls): os_name = distro.id() for subclass in cls._get_subclasses(): if subclass.is_os_name(os_name): return subclass(os_name) raise octavia_exceptions.InvalidAmphoraOperatingSystem(os_name=os_name) def _map_package_name(self, package_name): return self.package_name_map.get(package_name, package_name) def get_network_interface_file(self, interface): if CONF.amphora_agent.agent_server_network_file: return CONF.amphora_agent.agent_server_network_file if CONF.amphora_agent.agent_server_network_dir: return os.path.join(CONF.amphora_agent.agent_server_network_dir, interface) network_dir = consts.UBUNTU_AMP_NET_DIR_TEMPLATE.format( netns=consts.AMPHORA_NAMESPACE) return os.path.join(network_dir, interface) def create_netns_dir(self, network_dir, netns_network_dir, ignore=None): # We need to setup the netns network directory so that the ifup # commands used here and in the startup scripts "sees" the right # interfaces and scripts. try: os.makedirs('/etc/netns/' + consts.AMPHORA_NAMESPACE) shutil.copytree( network_dir, '/etc/netns/{netns}/{net_dir}'.format( netns=consts.AMPHORA_NAMESPACE, net_dir=netns_network_dir), symlinks=True, ignore=ignore) except OSError as e: # Raise the error if it's not "File exists" otherwise pass if e.errno != errno.EEXIST: raise def write_vip_interface_file(self, interface_file_path, primary_interface, vip, ip, broadcast, netmask, gateway, mtu, vrrp_ip, vrrp_version, render_host_routes, template_vip): # write interface file mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH # If we are using a consolidated interfaces file, just append # otherwise clear the per interface file as we are rewriting it # TODO(johnsom): We need a way to clean out old interfaces records if CONF.amphora_agent.agent_server_network_file: flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND else: flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC with os.fdopen(os.open(interface_file_path, flags, mode), 'w') as text_file: text = template_vip.render( consts=consts, interface=primary_interface, vip=vip, vip_ipv6=ip.version == 6, # For ipv6 the netmask is already the prefix prefix=(netmask if ip.version == 6 else utils.netmask_to_prefix(netmask)), broadcast=broadcast, netmask=netmask, gateway=gateway, network=utils.ip_netmask_to_cidr(vip, netmask), mtu=mtu, vrrp_ip=vrrp_ip, vrrp_ipv6=vrrp_version == 6, host_routes=render_host_routes, topology=CONF.controller_worker.loadbalancer_topology, ) text_file.write(text) def write_port_interface_file(self, netns_interface, fixed_ips, mtu, interface_file_path, template_port): # write interface file # If we are using a consolidated interfaces file, just append # otherwise clear the per interface file as we are rewriting it # TODO(johnsom): We 
need a way to clean out old interfaces records if CONF.amphora_agent.agent_server_network_file: flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND else: flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC # mode 00644 mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH with os.fdopen(os.open(interface_file_path, flags, mode), 'w') as text_file: text = self._generate_network_file_text( netns_interface, fixed_ips, mtu, template_port) text_file.write(text) @classmethod def _generate_network_file_text(cls, netns_interface, fixed_ips, mtu, template_port): text = '' if fixed_ips is None: text = template_port.render(interface=netns_interface) else: for index, fixed_ip in enumerate(fixed_ips, -1): try: ip_addr = fixed_ip['ip_address'] cidr = fixed_ip['subnet_cidr'] ip = ipaddress.ip_address(ip_addr) network = ipaddress.ip_network(cidr) broadcast = network.broadcast_address.exploded netmask = (network.prefixlen if ip.version == 6 else network.netmask.exploded) host_routes = cls.get_host_routes(fixed_ip) except ValueError: return webob.Response( json=dict(message="Invalid network IP"), status=400) new_text = template_port.render(interface=netns_interface, ipv6=ip.version == 6, ip_address=ip.exploded, broadcast=broadcast, netmask=netmask, mtu=mtu, host_routes=host_routes) text = '\n'.join([text, new_text]) return text @classmethod def get_host_routes(cls, fixed_ip): host_routes = [] for hr in fixed_ip.get('host_routes', []): network = ipaddress.ip_network(hr['destination']) host_routes.append({'network': network, 'gw': hr['nexthop']}) return host_routes @classmethod def _bring_if_up(cls, interface, what, flush=True): # Note, we are not using pyroute2 for this as it is not /etc/netns # aware. # Work around for bug: # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845121 int_up = "ip netns exec {ns} ip link set {int} up".format( ns=consts.AMPHORA_NAMESPACE, int=interface) addr_flush = "ip netns exec {ns} ip addr flush {int}".format( ns=consts.AMPHORA_NAMESPACE, int=interface) cmd = ("ip netns exec {ns} ifup {params}".format( ns=consts.AMPHORA_NAMESPACE, params=interface)) try: out = subprocess.check_output(int_up.split(), stderr=subprocess.STDOUT) LOG.debug(out) if flush: out = subprocess.check_output(addr_flush.split(), stderr=subprocess.STDOUT) LOG.debug(out) out = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) LOG.debug(out) except subprocess.CalledProcessError as e: LOG.error('Failed to ifup %s due to error: %s %s', interface, e, e.output) raise exceptions.HTTPException( response=webob.Response(json=dict( message='Error plugging {0}'.format(what), details=e.output), status=500)) @classmethod def _bring_if_down(cls, interface): # Note, we are not using pyroute2 for this as it is not /etc/netns # aware. 
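# (With the default namespace name this renders to, e.g.,
#     ip netns exec amphora-haproxy ifdown eth1
# where the interface name is illustrative only.)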
cmd = ("ip netns exec {ns} ifdown {params}".format( ns=consts.AMPHORA_NAMESPACE, params=interface)) try: subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.info('Ignoring failure to ifdown %s due to error: %s %s', interface, e, e.output) @classmethod def bring_interfaces_up(cls, ip, primary_interface, secondary_interface): cls._bring_if_down(primary_interface) if secondary_interface: cls._bring_if_down(secondary_interface) cls._bring_if_up(primary_interface, 'VIP') if secondary_interface: cls._bring_if_up(secondary_interface, 'VIP', flush=False) def has_ifup_all(self): return True class Ubuntu(BaseOS): ETH_X_PORT_CONF = 'plug_port_ethX.conf.j2' ETH_X_VIP_CONF = 'plug_vip_ethX.conf.j2' @classmethod def is_os_name(cls, os_name): return os_name in ['ubuntu'] def cmd_get_version_of_installed_package(self, package_name): name = self._map_package_name(package_name) return "dpkg-query -W -f=${{Version}} {name}".format(name=name) def get_network_interface_file(self, interface): if CONF.amphora_agent.agent_server_network_file: return CONF.amphora_agent.agent_server_network_file if CONF.amphora_agent.agent_server_network_dir: return os.path.join(CONF.amphora_agent.agent_server_network_dir, interface + '.cfg') network_dir = consts.UBUNTU_AMP_NET_DIR_TEMPLATE.format( netns=consts.AMPHORA_NAMESPACE) return os.path.join(network_dir, interface + '.cfg') def get_network_path(self): return '/etc/network' def get_netns_network_dir(self): network_dir = self.get_network_path() return os.path.basename(network_dir) def create_netns_dir( self, network_dir=None, netns_network_dir=None, ignore=None): if not netns_network_dir: netns_network_dir = self.get_netns_network_dir() if not network_dir: network_dir = self.get_network_path() if not ignore: ignore = shutil.ignore_patterns('eth0*', 'openssh*') super(Ubuntu, self).create_netns_dir( network_dir, netns_network_dir, ignore) def write_interfaces_file(self): name = '/etc/netns/{}/network/interfaces'.format( consts.AMPHORA_NAMESPACE) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC # mode 00644 mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH with os.fdopen(os.open(name, flags, mode), 'w') as int_file: int_file.write('auto lo\n') int_file.write('iface lo inet loopback\n') if not CONF.amphora_agent.agent_server_network_file: int_file.write('source /etc/netns/{}/network/' 'interfaces.d/*.cfg\n'.format( consts.AMPHORA_NAMESPACE)) def write_vip_interface_file(self, interface_file_path, primary_interface, vip, ip, broadcast, netmask, gateway, mtu, vrrp_ip, vrrp_version, render_host_routes, template_vip=None): if not template_vip: template_vip = j2_env.get_template(self.ETH_X_VIP_CONF) super(Ubuntu, self).write_vip_interface_file( interface_file_path, primary_interface, vip, ip, broadcast, netmask, gateway, mtu, vrrp_ip, vrrp_version, render_host_routes, template_vip) def write_port_interface_file(self, netns_interface, fixed_ips, mtu, interface_file_path=None, template_port=None): if not interface_file_path: interface_file_path = self.get_network_interface_file( netns_interface) if not template_port: template_port = j2_env.get_template(self.ETH_X_PORT_CONF) super(Ubuntu, self).write_port_interface_file( netns_interface, fixed_ips, mtu, interface_file_path, template_port) def has_ifup_all(self): return True class RH(BaseOS): ETH_X_PORT_CONF = 'rh_plug_port_ethX.conf.j2' ETH_X_VIP_CONF = 'rh_plug_vip_ethX.conf.j2' ETH_X_ALIAS_VIP_CONF = 'rh_plug_vip_ethX_alias.conf.j2' ROUTE_ETH_X_CONF = 
'rh_route_ethX.conf.j2' RULE_ETH_X_CONF = 'rh_rule_ethX.conf.j2' # The reason of make them as jinja templates is the current scripts force # to add the iptables, so leave it now for future extending if possible. ETH_IFUP_LOCAL_SCRIPT = 'rh_plug_port_eth_ifup_local.conf.j2' ETH_IFDOWN_LOCAL_SCRIPT = 'rh_plug_port_eth_ifdown_local.conf.j2' @classmethod def is_os_name(cls, os_name): return os_name in ['fedora', 'rhel'] def cmd_get_version_of_installed_package(self, package_name): name = self._map_package_name(package_name) return "rpm -q --queryformat %{{VERSION}} {name}".format(name=name) @staticmethod def _get_network_interface_file(prefix, interface): if CONF.amphora_agent.agent_server_network_file: return CONF.amphora_agent.agent_server_network_file if CONF.amphora_agent.agent_server_network_dir: network_dir = CONF.amphora_agent.agent_server_network_dir else: network_dir = consts.RH_AMP_NET_DIR_TEMPLATE.format( netns=consts.AMPHORA_NAMESPACE) return os.path.join(network_dir, prefix + interface) def get_network_interface_file(self, interface): return self._get_network_interface_file('ifcfg-', interface) def get_alias_network_interface_file(self, interface): return self.get_network_interface_file(interface + ':0') def get_static_routes_interface_file(self, interface, version): route = 'route6-' if version == 6 else 'route-' return self._get_network_interface_file(route, interface) def get_route_rules_interface_file(self, interface, version): rule = 'rule6-' if version == 6 else 'rule-' return self._get_network_interface_file(rule, interface) def get_network_path(self): return '/etc/sysconfig/network-scripts' def get_netns_network_dir(self): network_full_path = self.get_network_path() network_basename = os.path.basename(network_full_path) network_dirname = os.path.dirname(network_full_path) network_prefixdir = os.path.basename(network_dirname) return os.path.join(network_prefixdir, network_basename) def create_netns_dir( self, network_dir=None, netns_network_dir=None, ignore=None): if not netns_network_dir: netns_network_dir = self.get_netns_network_dir() if not network_dir: network_dir = self.get_network_path() if not ignore: ignore = shutil.ignore_patterns('ifcfg-eth0*') super(RH, self).create_netns_dir( network_dir, netns_network_dir, ignore) # Copy /etc/sysconfig/network file src = '/etc/sysconfig/network' dst = '/etc/netns/{netns}/sysconfig'.format( netns=consts.AMPHORA_NAMESPACE) shutil.copy2(src, dst) def write_interfaces_file(self): # No interfaces file in RH based flavors return def write_vip_interface_file(self, interface_file_path, primary_interface, vip, ip, broadcast, netmask, gateway, mtu, vrrp_ip, vrrp_version, render_host_routes, template_vip=None): if not template_vip: template_vip = j2_env.get_template(self.ETH_X_VIP_CONF) super(RH, self).write_vip_interface_file( interface_file_path, primary_interface, vip, ip, broadcast, netmask, gateway, mtu, vrrp_ip, vrrp_version, render_host_routes, template_vip) # keepalived will handle the VIP if we are on active/standby if (ip.version == 4 and CONF.controller_worker.loadbalancer_topology == consts.TOPOLOGY_SINGLE): # Create an IPv4 alias interface, needed in RH based flavors alias_interface_file_path = self.get_alias_network_interface_file( primary_interface) template_vip_alias = j2_env.get_template(self.ETH_X_ALIAS_VIP_CONF) super(RH, self).write_vip_interface_file( alias_interface_file_path, primary_interface, vip, ip, broadcast, netmask, gateway, mtu, vrrp_ip, vrrp_version, render_host_routes, template_vip_alias) 
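# The rendered alias file (ifcfg-eth1:0 by default) carries the VIP
# address itself, while the primary ifcfg file holds the VRRP/primary
# address. With hypothetical values it looks roughly like:
#     DEVICE="eth1:0"
#     BOOTPROTO="static"
#     IPADDR="203.0.113.10"
#     NETMASK="255.255.255.0"
# (see rh_plug_vip_ethX_alias.conf.j2 later in this tree for the
# authoritative template)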
routes_interface_file_path = ( self.get_static_routes_interface_file(primary_interface, ip.version)) template_routes = j2_env.get_template(self.ROUTE_ETH_X_CONF) self.write_static_routes_interface_file( routes_interface_file_path, primary_interface, render_host_routes, template_routes, gateway, vip, netmask) # keepalived will handle the rule(s) if we are on active/standby if (CONF.controller_worker.loadbalancer_topology == consts.TOPOLOGY_SINGLE): route_rules_interface_file_path = ( self.get_route_rules_interface_file(primary_interface, ip.version)) template_rules = j2_env.get_template(self.RULE_ETH_X_CONF) self.write_static_routes_interface_file( route_rules_interface_file_path, primary_interface, render_host_routes, template_rules, gateway, vip, netmask) self._write_ifup_ifdown_local_scripts_if_possible() def write_static_routes_interface_file(self, interface_file_path, interface, host_routes, template_routes, gateway, vip, netmask): # write static routes interface file mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH # TODO(johnsom): We need a way to clean out old interfaces records if CONF.amphora_agent.agent_server_network_file: flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND else: flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC with os.fdopen(os.open(interface_file_path, flags, mode), 'w') as text_file: text = template_routes.render( consts=consts, interface=interface, host_routes=host_routes, gateway=gateway, network=utils.ip_netmask_to_cidr(vip, netmask), vip=vip, topology=CONF.controller_worker.loadbalancer_topology, ) text_file.write(text) def write_port_interface_file(self, netns_interface, fixed_ips, mtu, interface_file_path=None, template_port=None): if not interface_file_path: interface_file_path = self.get_network_interface_file( netns_interface) if not template_port: template_port = j2_env.get_template(self.ETH_X_PORT_CONF) super(RH, self).write_port_interface_file( netns_interface, fixed_ips, mtu, interface_file_path, template_port) if fixed_ips: host_routes = [] host_routes_ipv6 = [] for fixed_ip in fixed_ips: ip_addr = fixed_ip['ip_address'] ip = ipaddress.ip_address(ip_addr) if ip.version == 6: host_routes_ipv6.extend(self.get_host_routes(fixed_ip)) else: host_routes.extend(self.get_host_routes(fixed_ip)) routes_interface_file_path = ( self.get_static_routes_interface_file(netns_interface, 4)) template_routes = j2_env.get_template(self.ROUTE_ETH_X_CONF) self.write_static_routes_interface_file( routes_interface_file_path, netns_interface, host_routes, template_routes, None, None, None) routes_interface_file_path_ipv6 = ( self.get_static_routes_interface_file(netns_interface, 6)) template_routes = j2_env.get_template(self.ROUTE_ETH_X_CONF) self.write_static_routes_interface_file( routes_interface_file_path_ipv6, netns_interface, host_routes_ipv6, template_routes, None, None, None) self._write_ifup_ifdown_local_scripts_if_possible() @classmethod def bring_interfaces_up(cls, ip, primary_interface, secondary_interface): if ip.version == 4: super(RH, cls).bring_interfaces_up( ip, primary_interface, secondary_interface) else: # Secondary interface is not present in IPv6 configuration cls._bring_if_down(primary_interface) cls._bring_if_up(primary_interface, 'VIP') def has_ifup_all(self): return False def _write_ifup_ifdown_local_scripts_if_possible(self): if self._check_ifup_ifdown_local_scripts_exists(): template_ifup_local = j2_env.get_template( self.ETH_IFUP_LOCAL_SCRIPT) self.write_port_interface_if_local_scripts(template_ifup_local) template_ifdown_local =
j2_env.get_template( self.ETH_IFDOWN_LOCAL_SCRIPT) self.write_port_interface_if_local_scripts(template_ifdown_local, ifup=False) def _check_ifup_ifdown_local_scripts_exists(self): file_names = ['ifup-local', 'ifdown-local'] target_dir = '/sbin/' res = [] for file_name in file_names: if os.path.exists(os.path.join(target_dir, file_name)): res.append(True) else: res.append(False) # This means we only add the scripts when neither of them exists return not any(res) def write_port_interface_if_local_scripts( self, template_script, ifup=True): file_name = 'ifup' + '-local' mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC if not ifup: file_name = 'ifdown' + '-local' with os.fdopen( os.open(os.path.join( '/sbin/', file_name), flags, mode), 'w') as text_file: text = template_script.render() text_file.write(text) os.chmod(os.path.join('/sbin/', file_name), stat.S_IEXEC) class CentOS(RH): def __init__(self, os_name): super(CentOS, self).__init__(os_name) if distro.version() == '7': self.package_name_map.update({'haproxy': 'haproxy18'}) @classmethod def is_os_name(cls, os_name): return os_name in ['centos'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/plug.py0000664000175000017500000002465200000000000024755 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import ipaddress import os import socket import stat import subprocess from oslo_config import cfg from oslo_log import log as logging import pyroute2 import webob from werkzeug import exceptions from octavia.common import constants as consts CONF = cfg.CONF ETH_X_VIP_CONF = 'plug_vip_ethX.conf.j2' ETH_X_PORT_CONF = 'plug_port_ethX.conf.j2' LOG = logging.getLogger(__name__) class Plug(object): def __init__(self, osutils): self._osutils = osutils def plug_vip(self, vip, subnet_cidr, gateway, mac_address, mtu=None, vrrp_ip=None, host_routes=None): # Validate vip and subnet_cidr, calculate broadcast address and netmask try: render_host_routes = [] ip = ipaddress.ip_address(vip) network = ipaddress.ip_network(subnet_cidr) vip = ip.exploded broadcast = network.broadcast_address.exploded netmask = (network.prefixlen if ip.version == 6 else network.netmask.exploded) vrrp_version = None if vrrp_ip: vrrp_ip_obj = ipaddress.ip_address(vrrp_ip) vrrp_version = vrrp_ip_obj.version if host_routes: for hr in host_routes: network = ipaddress.ip_network(hr['destination']) render_host_routes.append({'network': network, 'gw': hr['nexthop']}) except ValueError: return webob.Response(json=dict(message="Invalid VIP"), status=400) # Check if the interface is already in the network namespace # Do not attempt to re-plug the VIP if it is already in the # network namespace if self._netns_interface_exists(mac_address): return webob.Response( json=dict(message="Interface already exists"), status=409) # Check that the interface has been fully plugged self._interface_by_mac(mac_address) # Always put the VIP interface as eth1 primary_interface = consts.NETNS_PRIMARY_INTERFACE secondary_interface = "{interface}:0".format( interface=primary_interface) interface_file_path = self._osutils.get_network_interface_file( primary_interface) self._osutils.create_netns_dir() self._osutils.write_interfaces_file() self._osutils.write_vip_interface_file( interface_file_path=interface_file_path, primary_interface=primary_interface, vip=vip, ip=ip, broadcast=broadcast, netmask=netmask, gateway=gateway, mtu=mtu, vrrp_ip=vrrp_ip, vrrp_version=vrrp_version, render_host_routes=render_host_routes) # Update the list of interfaces to add to the namespace # This is used in the amphora reboot case to re-establish the namespace self._update_plugged_interfaces_file(primary_interface, mac_address) # Create the namespace netns = pyroute2.NetNS(consts.AMPHORA_NAMESPACE, flags=os.O_CREAT) netns.close() # Load sysctl in new namespace sysctl = pyroute2.NSPopen(consts.AMPHORA_NAMESPACE, [consts.SYSCTL_CMD, '--system'], stdout=subprocess.PIPE) sysctl.communicate() sysctl.wait() sysctl.release() cmd_list = [['modprobe', 'ip_vs'], [consts.SYSCTL_CMD, '-w', 'net.ipv4.vs.conntrack=1']] if ip.version == 4: # For lvs function, enable ip_vs kernel module, enable ip_forward # conntrack in amphora network namespace. 
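# For clarity, the commands queued in cmd_list end up equivalent to
# running the following inside the amphora namespace:
#     modprobe ip_vs
#     sysctl -w net.ipv4.vs.conntrack=1
#     sysctl -w net.ipv4.ip_forward=1           (IPv4 VIP)
#     sysctl -w net.ipv6.conf.all.forwarding=1  (IPv6 VIP)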
cmd_list.append([consts.SYSCTL_CMD, '-w', 'net.ipv4.ip_forward=1']) elif ip.version == 6: cmd_list.append([consts.SYSCTL_CMD, '-w', 'net.ipv6.conf.all.forwarding=1']) for cmd in cmd_list: ns_exec = pyroute2.NSPopen(consts.AMPHORA_NAMESPACE, cmd, stdout=subprocess.PIPE) ns_exec.wait() ns_exec.release() with pyroute2.IPRoute() as ipr: # Move the interfaces into the namespace idx = ipr.link_lookup(address=mac_address)[0] ipr.link('set', index=idx, net_ns_fd=consts.AMPHORA_NAMESPACE, IFLA_IFNAME=primary_interface) # In an ha amphora, keepalived should bring the VIP interface up if (CONF.controller_worker.loadbalancer_topology == consts.TOPOLOGY_ACTIVE_STANDBY): secondary_interface = None # bring interfaces up self._osutils.bring_interfaces_up( ip, primary_interface, secondary_interface) return webob.Response(json=dict( message="OK", details="VIP {vip} plugged on interface {interface}".format( vip=vip, interface=primary_interface)), status=202) def _check_ip_addresses(self, fixed_ips): if fixed_ips: for ip in fixed_ips: try: socket.inet_pton(socket.AF_INET, ip.get('ip_address')) except socket.error: socket.inet_pton(socket.AF_INET6, ip.get('ip_address')) def plug_network(self, mac_address, fixed_ips, mtu=None): # Check if the interface is already in the network namespace # Do not attempt to re-plug the network if it is already in the # network namespace if self._netns_interface_exists(mac_address): return webob.Response(json=dict( message="Interface already exists"), status=409) # This is the interface as it was initially plugged into the # default network namespace, this will likely always be eth1 try: self._check_ip_addresses(fixed_ips=fixed_ips) except socket.error: return webob.Response(json=dict( message="Invalid network port"), status=400) default_netns_interface = self._interface_by_mac(mac_address) # We need to determine the interface name when inside the namespace # to avoid name conflicts with pyroute2.NetNS(consts.AMPHORA_NAMESPACE, flags=os.O_CREAT) as netns: # 1 means just loopback, but we should already have a VIP. This # works for the add/delete/add case as we don't delete interfaces # Note, eth0 is skipped because that is the VIP interface netns_interface = 'eth{0}'.format(len(netns.get_links())) LOG.info('Plugged interface %s will become %s in the namespace %s', default_netns_interface, netns_interface, consts.AMPHORA_NAMESPACE) interface_file_path = self._osutils.get_network_interface_file( netns_interface) self._osutils.write_port_interface_file( netns_interface=netns_interface, fixed_ips=fixed_ips, mtu=mtu, interface_file_path=interface_file_path) # Update the list of interfaces to add to the namespace self._update_plugged_interfaces_file(netns_interface, mac_address) with pyroute2.IPRoute() as ipr: # Move the interfaces into the namespace idx = ipr.link_lookup(address=mac_address)[0] ipr.link('set', index=idx, net_ns_fd=consts.AMPHORA_NAMESPACE, IFLA_IFNAME=netns_interface) self._osutils._bring_if_down(netns_interface) self._osutils._bring_if_up(netns_interface, 'network') return webob.Response(json=dict( message="OK", details="Plugged on interface {interface}".format( interface=netns_interface)), status=202) def _interface_by_mac(self, mac): try: with pyroute2.IPRoute() as ipr: idx = ipr.link_lookup(address=mac)[0] addr = ipr.get_links(idx)[0] for attr in addr['attrs']: if attr[0] == 'IFLA_IFNAME': return attr[1] except Exception as e: LOG.info('Unable to find interface with MAC: %s, rescanning ' 'and returning 404. 
Reported error: %s', mac, str(e)) # Poke the kernel to re-enumerate the PCI bus. # We have had cases where nova hot plugs the interface but # the kernel doesn't get the memo. filename = '/sys/bus/pci/rescan' flags = os.O_WRONLY if os.path.isfile(filename): with os.fdopen(os.open(filename, flags), 'w') as rescan_file: rescan_file.write('1') raise exceptions.HTTPException( response=webob.Response(json=dict( details="No suitable network interface found"), status=404)) def _update_plugged_interfaces_file(self, interface, mac_address): # write interfaces to plugged_interfaces file and prevent duplicates plug_inf_file = consts.PLUGGED_INTERFACES flags = os.O_RDWR | os.O_CREAT # mode 0644 mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH with os.fdopen(os.open(plug_inf_file, flags, mode), 'r+') as text_file: inf_list = [inf.split()[0].rstrip() for inf in text_file] if mac_address not in inf_list: text_file.write("{mac_address} {interface}\n".format( mac_address=mac_address, interface=interface)) def _netns_interface_exists(self, mac_address): with pyroute2.NetNS(consts.AMPHORA_NAMESPACE, flags=os.O_CREAT) as netns: for link in netns.get_links(): for attr in link['attrs']: if attr[0] == 'IFLA_ADDRESS' and attr[1] == mac_address: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/server.py0000664000175000017500000002550100000000000025306 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
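# The Server class below wires the agent's REST endpoints to the
# handler modules. As a hypothetical sketch of how a controller-side
# client could drive one of these endpoints (host, port, IDs and TLS
# material are made up; the version prefix is assumed to render as
# '1.0'):
#
#     import requests
#     resp = requests.put(
#         'https://198.51.100.5:9443/1.0/loadbalancer/'
#         'AMPHORA_ID/LB_ID/haproxy',
#         data=haproxy_cfg_bytes,
#         verify='/path/to/server_ca.pem',
#         cert=('/path/to/client.pem', '/path/to/client.key'))
#     assert resp.status_code == 202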
import os import stat import flask from oslo_config import cfg from oslo_log import log as logging import webob from werkzeug import exceptions from octavia.amphorae.backends.agent import api_server from octavia.amphorae.backends.agent.api_server import amphora_info from octavia.amphorae.backends.agent.api_server import certificate_update from octavia.amphorae.backends.agent.api_server import keepalived from octavia.amphorae.backends.agent.api_server import loadbalancer from octavia.amphorae.backends.agent.api_server import osutils from octavia.amphorae.backends.agent.api_server import plug from octavia.amphorae.backends.agent.api_server import udp_listener_base from octavia.amphorae.backends.agent.api_server import util BUFFER = 1024 CONF = cfg.CONF PATH_PREFIX = '/' + api_server.VERSION LOG = logging.getLogger(__name__) # make the error pages all json def make_json_error(ex): code = ex.code if isinstance(ex, exceptions.HTTPException) else 500 response = webob.Response(json={'error': str(ex), 'http_code': code}) response.status_code = code return response def register_app_error_handler(app): for code in exceptions.default_exceptions: app.register_error_handler(code, make_json_error) class Server(object): def __init__(self): self.app = flask.Flask(__name__) self._osutils = osutils.BaseOS.get_os_util() self._keepalived = keepalived.Keepalived() self._loadbalancer = loadbalancer.Loadbalancer() self._udp_listener = (udp_listener_base.UdpListenerApiServerBase. get_server_driver()) self._plug = plug.Plug(self._osutils) self._amphora_info = amphora_info.AmphoraInfo(self._osutils) register_app_error_handler(self.app) self.app.add_url_rule(rule='/', view_func=self.version_discovery, methods=['GET']) self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/<amphora_id>/<lb_id>/haproxy', view_func=self.upload_haproxy_config, methods=['PUT']) self.app.add_url_rule(rule=PATH_PREFIX + '/listeners/<amphora_id>/<listener_id>' '/udp_listener', view_func=self.upload_udp_listener_config, methods=['PUT']) self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/<lb_id>/haproxy', view_func=self.get_haproxy_config, methods=['GET']) self.app.add_url_rule(rule=PATH_PREFIX + '/listeners/<listener_id>/udp_listener', view_func=self.get_udp_listener_config, methods=['GET']) self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/<object_id>/<action>', view_func=self.start_stop_lb_object, methods=['PUT']) self.app.add_url_rule(rule=PATH_PREFIX + '/listeners/<object_id>', view_func=self.delete_lb_object, methods=['DELETE']) self.app.add_url_rule(rule=PATH_PREFIX + '/config', view_func=self.upload_config, methods=['PUT']) self.app.add_url_rule(rule=PATH_PREFIX + '/details', view_func=self.get_details, methods=['GET']) self.app.add_url_rule(rule=PATH_PREFIX + '/info', view_func=self.get_info, methods=['GET']) self.app.add_url_rule(rule=PATH_PREFIX + '/listeners', view_func=self.get_all_listeners_status, methods=['GET']) self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/' '<lb_id>/certificates/<filename>', view_func=self.upload_certificate, methods=['PUT']) self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/' '<lb_id>/certificates/<filename>', view_func=self.get_certificate_md5, methods=['GET']) self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/' '<lb_id>/certificates/<filename>', view_func=self.delete_certificate, methods=['DELETE']) self.app.add_url_rule(rule=PATH_PREFIX + '/plug/vip/<vip>', view_func=self.plug_vip, methods=['POST']) self.app.add_url_rule(rule=PATH_PREFIX + '/plug/network', view_func=self.plug_network, methods=['POST']) self.app.add_url_rule(rule=PATH_PREFIX + '/certificate', view_func=self.upload_cert, methods=['PUT'])
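# Flask maps URL path parameters such as <lb_id> and <action> directly
# to the keyword arguments of the registered view functions; e.g.,
# assuming the '1.0' version prefix, PUT /1.0/vrrp/start invokes
# manage_service_vrrp(action='start').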
self.app.add_url_rule(rule=PATH_PREFIX + '/vrrp/upload', view_func=self.upload_vrrp_config, methods=['PUT']) self.app.add_url_rule(rule=PATH_PREFIX + '/vrrp/<action>', view_func=self.manage_service_vrrp, methods=['PUT']) self.app.add_url_rule(rule=PATH_PREFIX + '/interface/<ip_addr>', view_func=self.get_interface, methods=['GET']) def upload_haproxy_config(self, amphora_id, lb_id): return self._loadbalancer.upload_haproxy_config(amphora_id, lb_id) def upload_udp_listener_config(self, amphora_id, listener_id): return self._udp_listener.upload_udp_listener_config(listener_id) def get_haproxy_config(self, lb_id): return self._loadbalancer.get_haproxy_config(lb_id) def get_udp_listener_config(self, listener_id): return self._udp_listener.get_udp_listener_config(listener_id) def start_stop_lb_object(self, object_id, action): protocol = util.get_protocol_for_lb_object(object_id) if protocol == 'UDP': return self._udp_listener.manage_udp_listener( listener_id=object_id, action=action) return self._loadbalancer.start_stop_lb(lb_id=object_id, action=action) def delete_lb_object(self, object_id): protocol = util.get_protocol_for_lb_object(object_id) if protocol == 'UDP': return self._udp_listener.delete_udp_listener(object_id) return self._loadbalancer.delete_lb(object_id) def get_details(self): return self._amphora_info.compile_amphora_details( extend_udp_driver=self._udp_listener) def get_info(self): return self._amphora_info.compile_amphora_info( extend_udp_driver=self._udp_listener) def get_all_listeners_status(self): udp_listeners = self._udp_listener.get_all_udp_listeners_status() return self._loadbalancer.get_all_listeners_status( other_listeners=udp_listeners) def upload_certificate(self, lb_id, filename): return self._loadbalancer.upload_certificate(lb_id, filename) def get_certificate_md5(self, lb_id, filename): return self._loadbalancer.get_certificate_md5(lb_id, filename) def delete_certificate(self, lb_id, filename): return self._loadbalancer.delete_certificate(lb_id, filename) def plug_vip(self, vip): # Catch any issues with the subnet info json try: net_info = flask.request.get_json() assert type(net_info) is dict assert 'subnet_cidr' in net_info assert 'gateway' in net_info assert 'mac_address' in net_info except Exception: raise exceptions.BadRequest( description='Invalid subnet information') return self._plug.plug_vip(vip, net_info['subnet_cidr'], net_info['gateway'], net_info['mac_address'], net_info.get('mtu'), net_info.get('vrrp_ip'), net_info.get('host_routes')) def plug_network(self): try: port_info = flask.request.get_json() assert type(port_info) is dict assert 'mac_address' in port_info except Exception: raise exceptions.BadRequest(description='Invalid port information') return self._plug.plug_network(port_info['mac_address'], port_info.get('fixed_ips'), port_info.get('mtu')) def upload_cert(self): return certificate_update.upload_server_cert() def upload_vrrp_config(self): return self._keepalived.upload_keepalived_config() def manage_service_vrrp(self, action): return self._keepalived.manager_keepalived_service(action) def get_interface(self, ip_addr): return self._amphora_info.get_interface(ip_addr) def upload_config(self): try: stream = flask.request.stream file_path = cfg.find_config_files(project=CONF.project, prog=CONF.prog)[0] flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC # mode 00600 mode = stat.S_IRUSR | stat.S_IWUSR with os.fdopen(os.open(file_path, flags, mode), 'wb') as cfg_file: b = stream.read(BUFFER) while b: cfg_file.write(b) b = stream.read(BUFFER)
CONF.mutate_config_files() except Exception as e: LOG.error("Unable to update amphora-agent configuration: %s", str(e)) return webob.Response(json=dict( message="Unable to update amphora-agent configuration.", details=str(e)), status=500) return webob.Response(json={'message': 'OK'}, status=202) def version_discovery(self): return webob.Response(json={'api_version': api_server.VERSION}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/0000775000175000017500000000000000000000000025421 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j20000664000175000017500000000410100000000000032115 0ustar00zuulzuul00000000000000[Unit] Description=Configure {{ amphora_nsname }} network namespace StopWhenUnneeded=true [Service] Type=oneshot RemainAfterExit=yes # Re-add the namespace ExecStart=-/sbin/ip netns add {{ amphora_nsname }} # Load the system sysctl into the new namespace ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl --system # Set nf_conntrack_buckets sysctl in the main namespace (nf_conntrack_buckets # cannot be set in another net namespace, but its value is inherited from the # main namespace) ExecStart=-/sbin/sysctl -w net.netfilter.nf_conntrack_buckets=125000 # Enable kernel module ip_vs for lvs function in amphora network namespace ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} modprobe ip_vs # Enable ip_forward and conntrack kernel configuration ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv4.ip_forward=1 ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv4.vs.conntrack=1 ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv6.conf.all.forwarding=1 # We need the plugged_interfaces file sorted to join the host interfaces ExecStart=-/bin/sh -c '/usr/bin/sort -k 1 /var/lib/octavia/plugged_interfaces > /var/lib/octavia/plugged_interfaces.sorted' # Assign the interfaces into the namespace with the appropriate name ExecStart=-/bin/sh -c '/sbin/ip link | awk \'{getline n; print $0,n}\' | awk \'{sub(":","",$2)} { for(i=1;i<=NF;i++) if ($i == "link/ether") {print $(i+1) " " $2} }\' | sort -k 1 | join -j 1 - /var/lib/octavia/plugged_interfaces.sorted | awk \'{system("ip link set "$2" netns {{ amphora_nsname }} name "$3"")}\'' # Bring up all of the namespace interfaces {%- if HasIFUPAll %} # Ubuntu seems to not correctly set up the lo iface when calling ifup -a # Disable it first, before setting it up. 
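# (This branch is rendered when the osutils helper reports
# has_ifup_all(), i.e. on Ubuntu-based images; RH-family images render
# the per-interface ifup loop in the branch below instead.)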
ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} ifdown lo ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} ifup -a {%- else %} ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} ifup lo ExecStart=-/bin/awk '{system("/sbin/ip netns exec {{ amphora_nsname }} ifup " $2)}' /var/lib/octavia/plugged_interfaces {%- endif %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/keepalived.systemd.j20000664000175000017500000000152700000000000031463 0ustar00zuulzuul00000000000000[Unit] Description=Keepalive Daemon (LVS and VRRP) After=network-online.target {{ amphora_netns }}.service Wants=network-online.target Requires={{ amphora_netns }}.service [Service] # Force context as we start keepalived under "ip netns exec" SELinuxContext=system_u:system_r:keepalived_t:s0 Type=forking KillMode=process {% if vrrp_pid and check_pid %} ExecStart=/sbin/ip netns exec {{ amphora_nsname }} {{ keepalived_cmd }} --log-facility={{ administrative_log_facility }} -f {{ keepalived_cfg }} -p {{ keepalived_pid }} -r {{ vrrp_pid }} -c {{ check_pid }} {% else %} ExecStart=/sbin/ip netns exec {{ amphora_nsname }} {{ keepalived_cmd }} --log-facility={{ administrative_log_facility }} -f {{ keepalived_cfg }} -p {{ keepalived_pid }} {% endif %} ExecReload=/bin/kill -HUP $MAINPID PIDFile={{ keepalived_pid }} [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/keepalived.sysvinit.j20000664000175000017500000000457300000000000031667 0ustar00zuulzuul00000000000000#!/bin/sh # # keepalived LVS cluster monitor daemon. # # Written by Andres Salomon # ### BEGIN INIT INFO # Provides: keepalived # Required-Start: $syslog $network $remote_fs # Required-Stop: $syslog $network $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Starts keepalived # Description: Starts keepalived lvs loadbalancer ### END INIT INFO PATH=/sbin:/bin:/usr/sbin:/usr/bin DAEMON="ip netns exec {{ amphora_nsname }} {{ keepalived_cmd }}" NAME=octavia-keepalived DESC=octavia-keepalived TMPFILES="/tmp/.vrrp /tmp/.healthcheckers" {% if vrrp_pid and check_pid %} DAEMON_ARGS="--log-facility={{ administrative_log_facility }} -f {{ keepalived_cfg }} -p {{ keepalived_pid }} -r {{ vrrp_pid }} -c {{ check_pid }}" {% else %} DAEMON_ARGS="-log-facility={{ administrative_log_facility }} -f {{ keepalived_cfg }} -p {{ keepalived_pid }}" {% endif %} #includes lsb functions . /lib/lsb/init-functions test -f $DAEMON || exit 0 # Read configuration variable file if it is present [ -r /etc/default/$NAME ] && . /etc/default/$NAME case "$1" in start) log_daemon_msg "Starting $DESC" "$NAME" for file in $TMPFILES do test -e $file && test ! -L $file && rm $file done if start-stop-daemon --start --quiet --pidfile {{ keepalived_pid }} \ --exec $DAEMON -- $DAEMON_ARGS; then log_end_msg 0 else log_end_msg 1 fi ;; stop) log_daemon_msg "Stopping $DESC" "$NAME" if start-stop-daemon --oknodo --stop --quiet --pidfile {{ keepalived_pid }} \ --exec $DAEMON; then log_end_msg 0 else log_end_msg 1 fi ;; reload|force-reload) log_action_begin_msg "Reloading $DESC configuration..." 
if start-stop-daemon --stop --quiet --signal 1 --pidfile \ {{ keepalived_pid }} --exec $DAEMON; then log_end_msg 0 else log_action_end_msg 1 fi ;; restart) log_action_begin_msg "Restarting $DESC" "$NAME" start-stop-daemon --stop --quiet --pidfile \ {{ keepalived_pid }} --exec $DAEMON || true sleep 1 if start-stop-daemon --start --quiet --pidfile \ {{ keepalived_pid }} --exec $DAEMON -- $DAEMON_ARGS; then log_end_msg 0 else log_end_msg 1 fi ;; *) echo "Usage: /etc/init.d/$NAME {start|stop|restart|reload|force-reload}" >&2 exit 1 ;; esac exit 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/keepalived.upstart.j20000664000175000017500000000224400000000000031472 0ustar00zuulzuul00000000000000{# # Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # #} description "Octavia keepalived" start on runlevel [2345] stop on runlevel [!2345] respawn {% if vrrp_pid and check_pid %} exec /sbin/ip netns exec {{ amphora_nsname }} {{ keepalived_cmd }} -n --log-facility={{ administrative_log_facility }} -f {{ keepalived_cfg }} -p {{ keepalived_pid }} -r {{ vrrp_pid }} -c {{ check_pid }} {% else %} exec /sbin/ip netns exec {{ amphora_nsname }} {{ keepalived_cmd }} -n --log-facility={{ administrative_log_facility }} -f {{ keepalived_cfg }} -p {{ keepalived_pid }} {% endif %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/keepalived_check_script.conf.j20000664000175000017500000000155000000000000033435 0ustar00zuulzuul00000000000000{# # Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # -#} #!/bin/bash # Don't try to run the directory when it is empty shopt -s nullglob status=0 for file in {{ check_scripts_dir }}/* do echo "Running check script: " $file bash $file status=$(( $status + $? 
)) done exit $status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/keepalived_lvs_check_script.sh.j20000664000175000017500000000100300000000000033777 0ustar00zuulzuul00000000000000#!/bin/bash # Don't try to run the directory when it is empty shopt -s nullglob status=0 for file in {{ keepalived_lvs_pid_dir }}/* do file_ext=${file#*.} case $file_ext in pid) echo "Check keepalived pid file: " $file;; *) continue;; esac {% if init_system == consts.INIT_SYSTEMD %} systemctl status $(basename $file .pid) > /dev/null {% elif init_system in (consts.INIT_UPSTART, consts.INIT_SYSVINIT) %} kill -0 `cat $file` {% endif %} status=$(( $status + $? )) done exit $status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/plug_port_ethX.conf.j20000664000175000017500000000323100000000000031604 0ustar00zuulzuul00000000000000{# # Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #} # Generated by Octavia agent auto {{ interface }} {%- if ip_address %} iface {{ interface }} inet{{ '6' if ipv6 }} static address {{ ip_address }} broadcast {{ broadcast }} netmask {{ netmask }} {%- if mtu %} mtu {{ mtu }} {%- endif %} {%- for hr in host_routes %} {%- if ((hr.network.version == 4 and hr.network.prefixlen == 32) or (hr.network.version == 6 and hr.network.prefixlen == 128)) %} up route add -host {{ hr.network }} gw {{ hr.gw }} dev {{ interface }} down route del -host {{ hr.network }} gw {{ hr.gw }} dev {{ interface }} {%- else %} up route add -net {{ hr.network }} gw {{ hr.gw }} dev {{ interface }} down route del -net {{ hr.network }} gw {{ hr.gw }} dev {{ interface }} {%- endif %} {%- endfor %} post-up /sbin/ip{{ '6' if ipv6 }}tables -t nat -A POSTROUTING -p udp -o {{ interface }} -j MASQUERADE post-down /sbin/ip{{ '6' if ipv6 }}tables -t nat -D POSTROUTING -p udp -o {{ interface }} -j MASQUERADE {%- else %} iface {{ interface }} inet dhcp auto {{ interface }}:0 iface {{ interface }}:0 inet6 auto {%- endif %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/plug_vip_ethX.conf.j20000664000175000017500000000650600000000000031426 0ustar00zuulzuul00000000000000{# # Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. #} # Generated by Octavia agent {%- if topology == consts.TOPOLOGY_SINGLE %} auto {{ interface }} {{ interface }}:0 {%- else %} auto {{ interface }} {%- endif %} {%- if vrrp_ip %} iface {{ interface }} inet{{ '6' if vrrp_ipv6 }} static address {{ vrrp_ip }} broadcast {{ broadcast }} netmask {{ netmask }} {%- if gateway %} gateway {{ gateway }} {%- endif %} {%- if mtu %} mtu {{ mtu }} {%- endif %} {%- for hr in host_routes %} {%- if ((hr.network.version == 4 and hr.network.prefixlen == 32) or (hr.network.version == 6 and hr.network.prefixlen == 128)) %} up route add -host {{ hr.network }} gw {{ hr.gw }} dev {{ interface }} down route del -host {{ hr.network }} gw {{ hr.gw }} dev {{ interface }} {%- else %} up route add -net {{ hr.network }} gw {{ hr.gw }} dev {{ interface }} down route del -net {{ hr.network }} gw {{ hr.gw }} dev {{ interface }} {%- endif %} {%- endfor %} {%- else %} iface {{ interface }} inet{{ '6' if vip_ipv6 }} {{ 'auto' if vip_ipv6 else 'dhcp' }} {%- endif %} {%- if topology == consts.TOPOLOGY_SINGLE %} iface {{ interface }}:0 inet{{ '6' if vip_ipv6 }} static address {{ vip }} broadcast {{ broadcast }} netmask {{ netmask }} {%- endif %} # Add a source routing table to allow members to access the VIP {%- if gateway %} post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}route add default via {{ gateway }} dev {{ interface }} onlink table 1 post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}route del default via {{ gateway }} dev {{ interface }} onlink table 1 {# Keepalived will insert and remove this route in active/standby #} {%- if topology == consts.TOPOLOGY_SINGLE %} post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}route add {{ network }} dev {{ interface }} src {{ vip }} scope link table 1 post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}route del {{ network }} dev {{ interface }} src {{ vip }} scope link table 1 {%- endif %} {%- endif %} {%- for hr in host_routes %} post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}route add {{ hr.network }} via {{ hr.gw }} dev {{ interface }} onlink table 1 post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}route del {{ hr.network }} via {{ hr.gw }} dev {{ interface }} onlink table 1 {%- endfor %} {# Keepalived will insert and remove this rule in active/standby #} {%- if topology == consts.TOPOLOGY_SINGLE %} post-up /sbin/ip {{ '-6 ' if vip_ipv6 }}rule add from {{ vip }}/{{ '128' if vip_ipv6 else '32' }} table 1 priority 100 post-down /sbin/ip {{ '-6 ' if vip_ipv6 }}rule del from {{ vip }}/{{ '128' if vip_ipv6 else '32' }} table 1 priority 100 {%- endif %} post-up /sbin/ip{{ '6' if vip_ipv6 }}tables -t nat -A POSTROUTING -p udp -o {{ interface }} -j MASQUERADE post-down /sbin/ip{{ '6' if vip_ipv6 }}tables -t nat -D POSTROUTING -p udp -o {{ interface }} -j MASQUERADE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_plug_port_ethX.conf.j20000664000175000017500000000217200000000000032300 0ustar00zuulzuul00000000000000{# # Copyright 2017 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #} # Generated by Octavia agent NM_CONTROLLED="no" DEVICE="{{ interface }}" ONBOOT="yes" TYPE="Ethernet" USERCTL="yes" {%- if ipv6 %} IPV6INIT="yes" {%- if mtu %} IPV6_MTU="{{ mtu }}" {%- endif %} {%- if ip_address %} IPV6_AUTOCONF="no" IPV6ADDR="{{ ip_address }}" {%- else %} IPV6_AUTOCONF="yes" {%- endif %} {%- else %} IPV6INIT="no" {%- if mtu %} MTU="{{ mtu }}" {%- endif %} {%- if ip_address %} BOOTPROTO="static" IPADDR="{{ ip_address }}" NETMASK="{{ netmask }}" {%- else %} BOOTPROTO="dhcp" PERSISTENT_DHCLIENT="1" {%- endif %} {%- endif %} ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_plug_port_eth_ifdown_local.conf.j2 22 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_plug_port_eth_ifdown_local.con0000664000175000017500000000135600000000000034213 0ustar00zuulzuul00000000000000{# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #} # Generated by Octavia agent #!/bin/bash if [[ "$1" != "lo" ]] then /sbin/iptables -t nat -D POSTROUTING -o $1 -p udp -j MASQUERADE /sbin/ip6tables -t nat -D POSTROUTING -o $1 -p udp -j MASQUERADE fi ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_plug_port_eth_ifup_local.conf.j2 22 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_plug_port_eth_ifup_local.conf.0000664000175000017500000000135600000000000034114 0ustar00zuulzuul00000000000000{# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
#} # Generated by Octavia agent #!/bin/bash if [[ "$1" != "lo" ]] then /sbin/iptables -t nat -A POSTROUTING -o $1 -p udp -j MASQUERADE /sbin/ip6tables -t nat -A POSTROUTING -o $1 -p udp -j MASQUERADE fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_plug_vip_ethX.conf.j20000664000175000017500000000276700000000000032124 0ustar00zuulzuul00000000000000{# # Copyright 2017 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #} # Generated by Octavia agent NM_CONTROLLED="no" DEVICE="{{ interface }}" ONBOOT="yes" TYPE="Ethernet" USERCTL="yes" {%- if vrrp_ip %} {%- if vrrp_ipv6 %} IPV6INIT="yes" IPV6_DEFROUTE="yes" IPV6_AUTOCONF="no" IPV6ADDR="{{ vrrp_ip }}/{{ prefix }}" {%- if gateway %} IPV6_DEFAULTGW="{{ gateway }}" {%- endif %} {%- if mtu %} IPV6_MTU="{{ mtu }}" {%- endif %} {%- else %} {# not vrrp_ipv6 #} BOOTPROTO="static" IPADDR="{{ vrrp_ip }}" NETMASK="{{ netmask }}" {%- if gateway %} GATEWAY="{{ gateway }}" {%- endif %} MTU="{{ mtu }}" {%- endif %} {# end if vrrp_ipv6 #} {%- else %} {# not vrrp_ip #} {%- if vip_ipv6 %} IPV6INIT="yes" IPV6_DEFROUTE="yes" IPV6_AUTOCONF="yes" {%- else %} BOOTPROTO="dhcp" PERSISTENT_DHCLIENT="1" {%- endif %} {# end if vip_ipv6 #} {%- endif %} {# end if vrrp_ip #} {%- if topology == consts.TOPOLOGY_SINGLE -%} {%- if vip_ipv6 %} IPV6ADDR_SECONDARIES="{{ vip }}/{{ prefix }}" {%- endif %} {%- endif %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_plug_vip_ethX_alias.conf.j20000664000175000017500000000151700000000000033265 0ustar00zuulzuul00000000000000{# # Copyright 2017 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #} # Generated by Octavia agent NM_CONTROLLED="no" DEVICE="{{ interface }}:0" NAME="{{ interface }}:0" ONBOOT="yes" ARPCHECK="no" IPV6INIT="no" {%- if mtu %} MTU="{{ mtu }}" {%- endif %} BOOTPROTO="static" IPADDR="{{ vip }}" NETMASK="{{ netmask }}" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_route_ethX.conf.j20000664000175000017500000000211700000000000031422 0ustar00zuulzuul00000000000000{# # Copyright 2017 Red Hat, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #} # Generated by Octavia agent {%- for hr in host_routes %} {{ hr.network }} via {{ hr.gw }} dev {{ interface }} {%- endfor %} # Add a source routing table to allow members to access the VIP {%- if gateway %} {%- if topology == consts.TOPOLOGY_SINGLE %} {{ network }} dev {{ interface }} src {{ vip }} scope link table 1 {%- endif %} default table 1 via {{ gateway }} dev {{ interface }} {%- endif %} {%- for hr in host_routes %} {{ hr.network }} table 1 via {{ hr.gw }} dev {{ interface }} {%- endfor %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/rh_rule_ethX.conf.j20000664000175000017500000000117400000000000031235 0ustar00zuulzuul00000000000000{# # Copyright 2017 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
#} # Generated by Octavia agent from {{ vip }} table 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/systemd.conf.j20000664000175000017500000000210100000000000030264 0ustar00zuulzuul00000000000000[Unit] Description=HAProxy Load Balancer After=network.target syslog.service {{ amphora_netns }}.service Before=octavia-keepalived.service Wants=syslog.service Requires={{ amphora_netns }}.service [Service] # Force context as we start haproxy under "ip netns exec" SELinuxContext=system_u:system_r:haproxy_t:s0 Environment="CONFIG={{ haproxy_cfg }}" "USERCONFIG={{ haproxy_user_group_cfg }}" "PIDFILE={{ haproxy_pid }}" ExecStartPre={{ haproxy_cmd }} -f $CONFIG -f $USERCONFIG -c -q -L {{ peer_name }} ExecReload={{ haproxy_cmd }} -c -f $CONFIG -f $USERCONFIG -L {{ peer_name }} ExecReload=/bin/kill -USR2 $MAINPID {%- if haproxy_major_version < 2 and haproxy_minor_version < 8 %} ExecStart=/sbin/ip netns exec {{ amphora_nsname }} {{ haproxy_cmd }}-systemd-wrapper -f $CONFIG -f $USERCONFIG -p $PIDFILE -L {{ peer_name }} {%- else %} ExecStart=/sbin/ip netns exec {{ amphora_nsname }} {{ haproxy_cmd }} -Ws -f $CONFIG -f $USERCONFIG -p $PIDFILE -L {{ peer_name }} Type=notify {%- endif %} KillMode=mixed Restart=always LimitNOFILE=2600000 [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/sysvinit.conf.j20000664000175000017500000001262300000000000030476 0ustar00zuulzuul00000000000000#!/bin/sh {#- # Copyright 2015 Rackspace. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Inspired by https://gist.github.com/gfrey/8472007 #} ### BEGIN INIT INFO # Provides: octavia-amp-{{ haproxy_pid }} # Required-Start: $local_fs $network # Required-Stop: $local_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: fast and reliable load balancing reverse proxy # Description: This file should be used to start and stop haproxy. ### END INIT INFO PATH=/sbin:/usr/sbin:/bin:/usr/bin PIDFILE={{ haproxy_pid }} CONFIG={{ haproxy_cfg }} PEER_NAME={{ peer_name }} HAPROXY={{ haproxy_cmd }} USER_GROUP_CONF_PATH={{ haproxy_user_group_cfg }} EXTRAOPTS= ENABLED=1 test -x $HAPROXY || exit 0 test -f "$CONFIG" -f "$USER_GROUP_CONF_PATH" || exit 0 if [ -e /etc/default/haproxy ]; then . /etc/default/haproxy fi test "$ENABLED" != "0" || exit 0 [ -f /etc/default/rcS ] && . /etc/default/rcS . 
/lib/lsb/init-functions HAPROXY="ip netns exec {{ amphora_nsname }} $HAPROXY" haproxy_start() { # Re-add the namespace ip netns add {{ amphora_nsname }} || true # Load the system sysctl into the new namespace ip netns exec {{ amphora_nsname }} sysctl --system || true # We need the plugged_interfaces file sorted to join the host interfaces sort -k 1 /var/lib/octavia/plugged_interfaces > /var/lib/octavia/plugged_interfaces.sorted || true # Assign the interfaces into the namespace with the appropriate name ip link | awk '{getline n; print $0,n}' | awk '{sub(":","",$2)} { for(i=1;i<=NF;i++) if ($i == "link/ether") {print $(i+1) " " $2} }' | sort -k 1 | join -j 1 - /var/lib/octavia/plugged_interfaces.sorted | awk '{system("ip link set "$2" netns {{ amphora_nsname }} name "$3"")}' || true # Bring up all of the namespace interfaces {%- if HasIFUPAll %} ip netns exec {{ amphora_nsname }} ifup -a || true {%- else %} awk '{system("/sbin/ip netns exec {{ amphora_nsname }} ifup " $2)}' /var/lib/octavia/plugged_interfaces || true {%- endif %} start-stop-daemon --start --pidfile "$PIDFILE" \ --exec $HAPROXY -- -f "$CONFIG" -f "$USER_GROUP_CONF_PATH" -L "$PEER_NAME" -D -p "$PIDFILE" \ $EXTRAOPTS || return 2 return 0 } haproxy_stop() { if [ ! -f $PIDFILE ] ; then # This is a success according to LSB return 0 fi for pid in $(cat $PIDFILE) ; do /bin/kill $pid || return 4 done rm -f $PIDFILE return 0 } haproxy_reload() { $HAPROXY -f "$CONFIG" -f "$USER_GROUP_CONF_PATH" -L "$PEER_NAME" -p $PIDFILE -D $EXTRAOPTS -sf $(cat $PIDFILE) \ || return 2 return 0 } haproxy_checkconf() { rcode=0 $HAPROXY -c -f "$CONFIG" -f "$USER_GROUP_CONF_PATH" -L "$PEER_NAME" if [ $? -ne 0 ]; then rcode=1 fi return $rcode } haproxy_status() { if [ ! -f $PIDFILE ] ; then # program not running return 3 fi for pid in $(cat $PIDFILE) ; do if ! ps --no-headers p "$pid" | grep haproxy > /dev/null ; then # program running, bogus pidfile return 1 fi done return 0 } case "$1" in checkconf) haproxy_checkconf exit $? ;; start) log_daemon_msg "Starting haproxy" "haproxy" haproxy_start ret=$? case "$ret" in 0) log_end_msg 0 ;; 1) log_end_msg 1 echo "pid file '$PIDFILE' found, haproxy not started." ;; 2) log_end_msg 1 ;; esac exit $ret ;; stop) log_daemon_msg "Stopping haproxy" "haproxy" haproxy_stop ret=$? case "$ret" in 0|1) log_end_msg 0 ;; 2) log_end_msg 1 ;; esac exit $ret ;; reload|force-reload) echo "Checking HAProxy configuration first" haproxy_checkconf case "$?" in 0) echo "Everything looks fine" ;; 1) echo "Errors..." exit 1 ;; esac log_daemon_msg "Reloading haproxy" "haproxy" haproxy_reload case "$?" in 0|1) log_end_msg 0 ;; 2) log_end_msg 1 ;; esac ;; restart) echo "Checking HAProxy configuration first" haproxy_checkconf case "$?" in 0) echo "Everything looks fine" ;; 1) echo "Errors..." exit 1 ;; esac log_daemon_msg "Restarting haproxy" "haproxy" haproxy_stop haproxy_start case "$?" in 0) log_end_msg 0 ;; 1) log_end_msg 1 ;; 2) log_end_msg 1 ;; esac ;; status) haproxy_status ret=$? case "$ret" in 0) echo "haproxy is running." ;; 1) echo "haproxy dead, but $PIDFILE exists." ;; *) echo "haproxy not running." 
;; esac exit $ret ;; *) echo "Usage: /etc/init.d/haproxy {start|stop|reload|restart|status|checkconf}" exit 2 ;; esac : ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/agent/api_server/templates/upstart.conf.j20000664000175000017500000000551200000000000030307 0ustar00zuulzuul00000000000000{# # Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Inspired by https://gist.github.com/gfrey/8472007 #} description "Properly handle haproxy" start on runlevel [2345] stop on runlevel [!2345] env PID_PATH={{ haproxy_pid }} env BIN_PATH={{ haproxy_cmd }} env CONF_PATH={{ haproxy_cfg }} env USER_GROUP_CONF_PATH={{ haproxy_user_group_cfg }} env PEER_NAME={{ peer_name }} respawn respawn limit {{ respawn_count }} {{respawn_interval}} pre-start script [ -r $CONF_PATH ] # Re-add the namespace ip netns add {{ amphora_nsname }} || true # Load the system sysctl into the new namespace ip netns exec {{ amphora_nsname }} sysctl --system || true # We need the plugged_interfaces file sorted to join with the host # interfaces sort -k 1 /var/lib/octavia/plugged_interfaces > /var/lib/octavia/plugged_interfaces.sorted || true # Assign the interfaces into the namespace with the appropriate name ip link | awk '{getline n; print $0,n}' | awk '{sub(":","",$2)} { for(i=1;i<=NF;i++) if ($i == "link/ether") {print $(i+1) " " $2} }' | sort -k 1 | join -j 1 - /var/lib/octavia/plugged_interfaces.sorted | awk '{system("ip link set "$2" netns {{ amphora_nsname }} name "$3"")}' || true # Bring up all of the namespace interfaces {%- if HasIFUPAll %} ip netns exec {{ amphora_nsname }} ifup -a || true {%- else %} awk '{system("/sbin/ip netns exec {{ amphora_nsname }} ifup " $2)}' /var/lib/octavia/plugged_interfaces || true {%- endif %} end script script exec /bin/bash <", # "seq": 67, # "listeners": { # "": { # "status": "OPEN", # "stats": { # "tx": 0, # "rx": 0, # "conns": 0, # "totconns": 0, # "ereq": 0 # } # } # }, # "pools": { # ":": { # "status": "UP", # "members": { # "": "no check" # } # } # }, # "ver": 2 # } global SEQ msg = {'id': CONF.amphora_agent.amphora_id, 'seq': SEQ, 'listeners': {}, 'pools': {}, 'ver': MSG_VER} SEQ += 1 stat_sock_files = list_sock_stat_files() # TODO(rm_work) There should only be one of these in the new config system for lb_id, stat_sock_file in stat_sock_files.items(): if util.is_lb_running(lb_id): (stats, pool_status) = get_stats(stat_sock_file) for row in stats: if row['svname'] == 'FRONTEND': listener_id = row['pxname'] msg['listeners'][listener_id] = { 'status': row['status'], 'stats': {'tx': int(row['bout']), 'rx': int(row['bin']), 'conns': int(row['scur']), 'totconns': int(row['stot']), 'ereq': int(row['ereq'])}} for pool_id, pool in pool_status.items(): msg['pools'][pool_id] = {"status": pool['status'], "members": pool['members']} # UDP listener part udp_listener_ids = util.get_udp_listeners() if udp_listener_ids: listeners_stats = 
keepalivedlvs_query.get_udp_listeners_stats() if listeners_stats: for listener_id, listener_stats in listeners_stats.items(): pool_status = keepalivedlvs_query.get_udp_listener_pool_status( listener_id) udp_listener_dict = dict() udp_listener_dict['status'] = listener_stats['status'] udp_listener_dict['stats'] = { 'tx': listener_stats['stats']['bout'], 'rx': listener_stats['stats']['bin'], 'conns': listener_stats['stats']['scur'], 'totconns': listener_stats['stats']['stot'], 'ereq': listener_stats['stats']['ereq'] } if pool_status: pool_id = pool_status['lvs']['uuid'] msg['pools'][pool_id] = { "status": pool_status['lvs']['status'], "members": pool_status['lvs']['members'] } msg['listeners'][listener_id] = udp_listener_dict return msg ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/health_daemon/health_sender.py0000664000175000017500000000665400000000000026150 0ustar00zuulzuul00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from oslo_config import cfg from oslo_log import log as logging from octavia.amphorae.backends.health_daemon import status_message CONF = cfg.CONF LOG = logging.getLogger(__name__) def round_robin_addr(addrinfo_list): if not addrinfo_list: return None addrinfo = addrinfo_list.pop(0) addrinfo_list.append(addrinfo) return addrinfo class UDPStatusSender(object): def __init__(self): self._update_dests() self.v4sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.v6sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) def update(self, dest, port): addrlist = socket.getaddrinfo(dest, port, 0, socket.SOCK_DGRAM) # addrlist = [(family, socktype, proto, canonname, sockaddr) ...] # e.g. 4 = sockaddr - what we actually need for addr in addrlist: self.dests.append(addr) # Just grab the first match break def _send_msg(self, dest, msg): # Note: heartbeat_key is mutable and must be looked up for each call envelope_str = status_message.wrap_envelope( msg, str(CONF.health_manager.heartbeat_key)) # dest = (family, socktype, proto, canonname, sockaddr) # e.g. 0 = sock family, 4 = sockaddr - what we actually need try: if dest[0] == socket.AF_INET: self.v4sock.sendto(envelope_str, dest[4]) elif dest[0] == socket.AF_INET6: self.v6sock.sendto(envelope_str, dest[4]) except socket.error: # Pass here as on amp boot it will get one or more # error: [Errno 101] Network is unreachable # while the networks are coming up # No harm in trying to send as it will still failover # if the message isn't received pass # The controller_ip_port_list configuration has mutated, reload it. 
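    # (Editor's illustration, not part of the upstream module.) _update_dests()
    # below rebuilds self.dests from that option, and round_robin_addr() then
    # rotates the rebuilt list so successive heartbeats are spread across all
    # configured controllers. A minimal sketch of the rotation, with
    # hypothetical endpoint names:
    #
    #     dests = ['ctrl-a', 'ctrl-b', 'ctrl-c']
    #     round_robin_addr(dests)  # -> 'ctrl-a'; dests is now
    #                              #    ['ctrl-b', 'ctrl-c', 'ctrl-a']
    #     round_robin_addr(dests)  # -> 'ctrl-b'; dests is now
    #                              #    ['ctrl-c', 'ctrl-a', 'ctrl-b']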
def _update_dests(self): self.dests = [] for ipport in CONF.health_manager.controller_ip_port_list: try: ip, port = ipport.rsplit(':', 1) except ValueError: LOG.error("Invalid ip and port '%s' in health_manager " "controller_ip_port_list", ipport) break self.update(ip, port) self.current_controller_ip_port_list = ( CONF.health_manager.controller_ip_port_list) def dosend(self, obj): # Check for controller_ip_port_list mutation if not (self.current_controller_ip_port_list == CONF.health_manager.controller_ip_port_list): self._update_dests() dest = round_robin_addr(self.dests) if dest is None: LOG.error('No controller address found. Unable to send heartbeat.') return self._send_msg(dest, obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/health_daemon/status_message.py0000664000175000017500000000563700000000000026372 0ustar00zuulzuul00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import binascii import hashlib import hmac import zlib from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import secretutils from octavia.common import exceptions LOG = logging.getLogger(__name__) hash_algo = hashlib.sha256 hash_len = 32 hex_hash_len = 64 def to_hex(byte_array): return binascii.hexlify(byte_array).decode() def encode_obj(obj): json_bytes = jsonutils.dumps(obj).encode('utf-8') binary_array = zlib.compress(json_bytes, 9) return binary_array def decode_obj(binary_array): json_str = zlib.decompress(binary_array).decode('utf-8') obj = jsonutils.loads(json_str) return obj def wrap_envelope(obj, key, hex=True): payload = encode_obj(obj) hmc = get_hmac(payload, key, hex=hex) envelope = payload + hmc return envelope def unwrap_envelope(envelope, key): """A backward-compatible way to get data. We may still receive package from amphorae that are using digest() instead of hexdigest() """ try: return get_payload(envelope, key, hex=True) except Exception: return get_payload(envelope, key, hex=False) def get_payload(envelope, key, hex=True): len = hex_hash_len if hex else hash_len payload = envelope[:-len] expected_hmc = envelope[-len:] calculated_hmc = get_hmac(payload, key, hex=hex) if not secretutils.constant_time_compare(expected_hmc, calculated_hmc): LOG.warning( 'calculated hmac(hex=%(hex)s): %(s1)s not equal to msg hmac: ' '%(s2)s dropping packet', { 'hex': hex, 's1': to_hex(calculated_hmc), 's2': to_hex(expected_hmc) } ) fmt = 'calculated hmac: {0} not equal to msg hmac: {1} dropping packet' raise exceptions.InvalidHMACException(fmt.format( to_hex(calculated_hmc), to_hex(expected_hmc))) obj = decode_obj(payload) return obj def get_hmac(payload, key, hex=True): """Get digest for the payload. The hex param is for backward compatibility, so the package data sent from the existing amphorae can still be checked in the previous approach. 
""" hmc = hmac.new(key.encode("utf-8"), payload, hashlib.sha256) return hmc.hexdigest().encode("utf-8") if hex else hmc.digest() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/backends/utils/0000775000175000017500000000000000000000000021326 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/utils/__init__.py0000664000175000017500000000107400000000000023441 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/utils/haproxy_query.py0000664000175000017500000001117200000000000024621 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import csv import socket from octavia.common import constants as consts from octavia.common import utils as octavia_utils from octavia.i18n import _ class HAProxyQuery(object): """Class used for querying the HAProxy statistics socket. The CSV output is defined in the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/configuration-1.4.html#9 """ def __init__(self, stats_socket): """stats_socket Path to the HAProxy statistics socket file. """ self.socket = stats_socket def _query(self, query): """Send the given query to the haproxy statistics socket. :returns: the output of a successful query as a string with trailing newlines removed, or raise an Exception if the query fails. """ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: sock.connect(self.socket) except socket.error: raise Exception(_("HAProxy '{0}' query failed.").format(query)) try: sock.send(octavia_utils.b(query + '\n')) data = u'' while True: x = sock.recv(1024) if not x: break data += x.decode('ascii') if ( isinstance(x, bytes)) else x return data.rstrip() finally: sock.close() def show_info(self): """Get and parse output from 'show info' command.""" results = self._query('show info') dict_results = dict() for r in results.split('\n'): vals = r.split(":", 1) dict_results[vals[0].strip()] = vals[1].strip() return dict_results def show_stat(self, proxy_iid=-1, object_type=-1, server_id=-1): """Get and parse output from 'show stat' command. :param proxy_iid: Proxy ID (column 27 in CSV output). -1 for all. 
:param object_type: Select the type of dumpable object. Values can be ORed. -1 - everything 1 - frontends 2 - backends 4 - servers :param server_id: Server ID (column 28 in CSV output?), or -1 for everything. :returns: stats (split into an array by newline) """ results = self._query( 'show stat {proxy_iid} {object_type} {server_id}'.format( proxy_iid=proxy_iid, object_type=object_type, server_id=server_id)) list_results = results[2:].split('\n') csv_reader = csv.DictReader(list_results) return list(csv_reader) def get_pool_status(self): """Get status for each server and the pool as a whole. :returns: pool data structure {: { 'uuid': , 'status': 'UP'|'DOWN', 'members': [: 'UP'|'DOWN'|'DRAIN'|'no check'] }} """ results = self.show_stat(object_type=6) # servers + pool final_results = {} for line in results: # pxname: pool, svname: server_name, status: status # Due to a bug in some versions of HAProxy, DRAIN mode isn't # calculated correctly, but we can spoof the correct value here. if line['status'] == consts.UP and line['weight'] == 0: line['status'] = consts.DRAIN if line['pxname'] not in final_results: final_results[line['pxname']] = dict(members={}) if line['svname'] == 'BACKEND': pool_id, listener_id = line['pxname'].split(':') final_results[line['pxname']]['pool_uuid'] = pool_id final_results[line['pxname']]['listener_uuid'] = listener_id final_results[line['pxname']]['status'] = line['status'] else: final_results[line['pxname']]['members'][line['svname']] = ( line['status']) return final_results ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/utils/ip_advertisement.py0000664000175000017500000001556700000000000025260 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fcntl import socket from struct import pack from struct import unpack from oslo_log import log as logging from octavia.amphorae.backends.utils import network_namespace from octavia.common import constants from octavia.common import utils as common_utils LOG = logging.getLogger(__name__) def garp(interface, ip_address, net_ns=None): """Sends a gratuitous ARP for ip_address on the interface. :param interface: The interface name to send the GARP on. :param ip_address: The IP address to advertise in the GARP. :param net_ns: The network namespace to send the GARP from. 
:returns: None """ ARP_ETHERTYPE = 0x0806 BROADCAST_MAC = b'\xff\xff\xff\xff\xff\xff' # Get a socket, optionally inside a network namespace garp_socket = None if net_ns: with network_namespace.NetworkNamespace(net_ns): garp_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW) else: garp_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW) # Bind the socket with the ARP ethertype protocol garp_socket.bind((interface, ARP_ETHERTYPE)) # Get the MAC address of the interface source_mac = garp_socket.getsockname()[4] garp_msg = [ pack('!h', 1), # Hardware type ethernet pack('!h', 0x0800), # Protocol type IPv4 pack('!B', 6), # Hardware size pack('!B', 4), # Protocol size pack('!h', 1), # Opcode request source_mac, # Sender MAC address socket.inet_aton(ip_address), # Sender IP address BROADCAST_MAC, # Target MAC address socket.inet_aton(ip_address)] # Target IP address garp_ethernet = [ BROADCAST_MAC, # Ethernet destination source_mac, # Ethernet source pack('!h', ARP_ETHERTYPE), # Ethernet type b''.join(garp_msg)] # The GARP message garp_socket.send(b''.join(garp_ethernet)) garp_socket.close() def calculate_icmpv6_checksum(packet): """Calculate the ICMPv6 checksum for a packet. :param packet: The packet bytes to checksum. :returns: The checksum integer. """ total = 0 # Add up 16-bit words num_words = len(packet) // 2 for chunk in unpack("!%sH" % num_words, packet[0:num_words * 2]): total += chunk # Add any left-over byte if len(packet) % 2: total += packet[-1] << 8 # Fold 32-bits into 16-bits total = (total >> 16) + (total & 0xffff) total += total >> 16 return ~total + 0x10000 & 0xffff def neighbor_advertisement(interface, ip_address, net_ns=None): """Sends an unsolicited neighbor advertisement for an IP on the interface. :param interface: The interface name to send the advertisement on. :param ip_address: The IP address to advertise. :param net_ns: The network namespace to send the advertisement from.
:returns: None """ ALL_NODES_ADDR = 'ff02::1' SIOCGIFHWADDR = 0x8927 # Get a socket, optionally inside a network namespace na_socket = None if net_ns: with network_namespace.NetworkNamespace(net_ns): na_socket = socket.socket( socket.AF_INET6, socket.SOCK_RAW, socket.getprotobyname(constants.IPV6_ICMP)) else: na_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.getprotobyname(constants.IPV6_ICMP)) # Per RFC 4861 section 4.4, the hop limit should be 255 na_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) # Bind the socket with the source address na_socket.bind((ip_address, 0)) # Get the byte representation of the MAC address of the interface # Note: You can't use getsockname() to get the MAC on this type of socket source_mac = fcntl.ioctl(na_socket.fileno(), SIOCGIFHWADDR, pack('256s', bytes(interface, 'utf-8')))[18:24] # Get the byte representation of the source IP address source_ip_bytes = socket.inet_pton(socket.AF_INET6, ip_address) icmpv6_na_msg_prefix = [ pack('!B', 136), # ICMP Type Neighbor Advertisement pack('!B', 0)] # ICMP Code icmpv6_na_msg_postfix = [ pack('!I', 0xa0000000), # Flags (Router, Override) source_ip_bytes, # Target address pack('!B', 2), # ICMPv6 option type target link-layer address pack('!B', 1), # ICMPv6 option length source_mac] # ICMPv6 option link-layer address # Calculate the ICMPv6 checksum icmpv6_pseudo_header = [ source_ip_bytes, # Source IP address socket.inet_pton(socket.AF_INET6, ALL_NODES_ADDR), # Destination IP pack('!I', 58), # IPv6 next header (ICMPv6) pack('!h', 32)] # IPv6 payload length icmpv6_tmp_chksum = pack('!H', 0) # Checksum is zero for the calculation tmp_chksum_msg = b''.join(icmpv6_pseudo_header + icmpv6_na_msg_prefix + [icmpv6_tmp_chksum] + icmpv6_na_msg_postfix) checksum = pack('!H', calculate_icmpv6_checksum(tmp_chksum_msg)) # Build the ICMPv6 unsolicited neighbor advertisement icmpv6_msg = b''.join(icmpv6_na_msg_prefix + [checksum] + icmpv6_na_msg_postfix) na_socket.sendto(icmpv6_msg, (ALL_NODES_ADDR, 0, 0, 0)) na_socket.close() def send_ip_advertisement(interface, ip_address, net_ns=None): """Send an address advertisement. This method will send either GARP (IPv4) or neighbor advertisements (IPv6) for the IP address specified. :param interface: The interface name to send the advertisement on. :param ip_address: The IP address to advertise. :param net_ns: The network namespace to send the advertisement from. :returns: None """ try: if common_utils.is_ipv4(ip_address): garp(interface, ip_address, net_ns) elif common_utils.is_ipv6(ip_address): neighbor_advertisement(interface, ip_address, net_ns) else: LOG.error('Unknown IP version for address: "%s". Skipping', ip_address) except Exception as e: LOG.warning('Unable to send address advertisement for address: "%s", ' 'error: %s. Skipping', ip_address, str(e)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/utils/keepalivedlvs_query.py0000664000175000017500000004575000000000000025776 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import ipaddress import os import re import subprocess from oslo_log import log as logging from octavia.amphorae.backends.agent.api_server import util from octavia.common import constants LOG = logging.getLogger(__name__) KERNEL_LVS_PATH = '/proc/net/ip_vs' KERNEL_LVS_STATS_PATH = '/proc/net/ip_vs_stats' LVS_KEY_REGEX = re.compile(r"RemoteAddress:Port\s+(.*$)") V4_RS_VALUE_REGEX = re.compile(r"(\w{8}:\w{4})\s+(.*$)") V4_HEX_IP_REGEX = re.compile(r"(\w{2})(\w{2})(\w{2})(\w{2})") V6_RS_VALUE_REGEX = re.compile(r"(\[[[\w{4}:]+\b\]:\w{4})\s+(.*$)") NS_REGEX = re.compile(r"net_namespace\s(\w+-\w+)") V4_VS_REGEX = re.compile(r"virtual_server\s([\d+\.]+\b)\s(\d{1,5})") V4_RS_REGEX = re.compile(r"real_server\s([\d+\.]+\b)\s(\d{1,5})") V6_VS_REGEX = re.compile(r"virtual_server\s([\w*:]+\b)\s(\d{1,5})") V6_RS_REGEX = re.compile(r"real_server\s([\w*:]+\b)\s(\d{1,5})") CONFIG_COMMENT_REGEX = re.compile( r"#\sConfiguration\sfor\s(\w+)\s(\w{8}-\w{4}-\w{4}-\w{4}-\w{12})") DISABLED_CONFIG_COMMENT_REGEX = re.compile( r"#\s(\w+)\s(\w{8}-\w{4}-\w{4}-\w{4}-\w{12}) is disabled") CHECKER_REGEX = re.compile(r"(MISC_CHECK|HTTP_GET|TCP_CHECK)") def read_kernel_file(ns_name, file_path): cmd = ("ip netns exec {ns} cat {lvs_stat_path}".format( ns=ns_name, lvs_stat_path=file_path)) try: output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.error("Failed to get kernel lvs status in ns %(ns_name)s " "%(kernel_lvs_path)s: %(err)s %(out)s", {'ns_name': ns_name, 'kernel_lvs_path': file_path, 'err': e, 'out': e.output}) raise e # Python 3 treats the output as bytes. if isinstance(output, bytes): output = output.decode('utf-8') return output def get_listener_realserver_mapping(ns_name, listener_ip_port, health_monitor_enabled): # returned result: # actual_member_result = {'rs_ip:listened_port': { # 'status': 'UP', # 'Forward': forward_type, # 'Weight': 5, # 'ActiveConn': 0, # 'InActConn': 0 # }} listener_ip, listener_port = listener_ip_port.rsplit(':', 1) ip_obj = ipaddress.ip_address(listener_ip.strip('[]')) output = read_kernel_file(ns_name, KERNEL_LVS_PATH).split('\n') if ip_obj.version == 4: ip_to_hex_format = "%.8X" % ip_obj._ip else: ip_to_hex_format = r'\[' + ip_obj.exploded + r'\]' port_hex_format = "%.4X" % int(listener_port) hex_ipport = ip_to_hex_format + ':' + port_hex_format if health_monitor_enabled: member_status = constants.UP else: member_status = constants.NO_CHECK actual_member_result = {} find_target_block = False result_keys = [] for line in output: if 'RemoteAddress:Port' in line: result_keys = re.split(r'\s+', LVS_KEY_REGEX.findall(line)[0].strip()) elif line.startswith('UDP') and find_target_block: break elif line.startswith('UDP') and re.match(r'^UDP\s+%s\s+\w+' % hex_ipport, line): find_target_block = True elif find_target_block and line: rs_is_ipv4 = True all_values = V4_RS_VALUE_REGEX.findall(line) # If we cannot match the values with the IPv4 regex, this line must be
if not all_values: all_values = V6_RS_VALUE_REGEX.findall(line) rs_is_ipv4 = False all_values = all_values[0] ip_port = all_values[0] result_values = re.split(r"\s+", all_values[1].strip()) member_ip, member_port = ip_port.rsplit(':', 1) port_string = str(int(member_port, 16)) if rs_is_ipv4: ip_string = ipaddress.ip_address(int(member_ip, 16)).compressed member_ip_port_string = ip_string + ':' + port_string else: ip_string = ipaddress.ip_address( member_ip.strip('[]')).compressed member_ip_port_string = '[' + ip_string + ']:' + port_string result_key_count = len(result_keys) for index in range(result_key_count): if member_ip_port_string not in actual_member_result: actual_member_result[ member_ip_port_string] = {'status': member_status, result_keys[index]: result_values[index]} else: # The other values include the weight actual_member_result[ member_ip_port_string][ result_keys[index]] = result_values[index] continue return find_target_block, actual_member_result def get_udp_listener_resource_ipports_nsname(listener_id): # resource_ipport_mapping = {'Listener': {'id': listener-id, # 'ipport': ipport}, # 'Pool': {'id': pool-id}, # 'Members': [{'id': member-id-1, # 'ipport': ipport}, # {'id': member-id-2, # 'ipport': ipport}], # 'HealthMonitor': {'id': healthmonitor-id}} resource_ipport_mapping = {} with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as f: cfg = f.read() ns_name = NS_REGEX.findall(cfg)[0] listener_ip_port = V4_VS_REGEX.findall(cfg) if not listener_ip_port: listener_ip_port = V6_VS_REGEX.findall(cfg) listener_ip_port = listener_ip_port[0] if listener_ip_port else [] disabled_resource_ids = DISABLED_CONFIG_COMMENT_REGEX.findall(cfg) listener_disabled = any(True for resource in disabled_resource_ids if resource[0] == 'Listener') if listener_disabled: return None, ns_name if not listener_ip_port: # If not get listener_ip_port from the lvs config file, # that means the udp listener's default pool have no enabled member # yet. 
But at this moment, we can get listener_id and ns_name, so # for this function, we will just return ns_name return resource_ipport_mapping, ns_name cfg_line = cfg.split('\n') rs_ip_port_list = [] for line in cfg_line: if 'real_server' in line: res = V4_RS_REGEX.findall(line) if not res: res = V6_RS_REGEX.findall(line) rs_ip_port_list.append(res[0]) resource_type_ids = CONFIG_COMMENT_REGEX.findall(cfg) for resource_type, resource_id in resource_type_ids: value = {'id': resource_id} if resource_type == 'Member': resource_type = '%ss' % resource_type if resource_type not in resource_ipport_mapping: value = [value] if resource_type not in resource_ipport_mapping: resource_ipport_mapping[resource_type] = value elif resource_type == 'Members': resource_ipport_mapping[resource_type].append(value) disabled_member_ids = [ resource[1] for resource in disabled_resource_ids if resource[0] == 'Member' ] resource_type = 'Members' for member_id in disabled_member_ids: value = {'id': member_id, 'ipport': None} if resource_type not in resource_ipport_mapping: resource_ipport_mapping[resource_type] = [] resource_ipport_mapping[resource_type].append(value) if rs_ip_port_list: rs_ip_port_count = len(rs_ip_port_list) for index in range(rs_ip_port_count): member_ip = ipaddress.ip_address( rs_ip_port_list[index][0]) if member_ip.version == 6: rs_ip_port_list[index] = ( '[' + member_ip.compressed + ']', rs_ip_port_list[index][1]) resource_ipport_mapping['Members'][index]['ipport'] = ( rs_ip_port_list[index][0] + ':' + rs_ip_port_list[index][1]) listener_ip = ipaddress.ip_address(listener_ip_port[0]) if listener_ip.version == 6: listener_ip_port = ( '[' + listener_ip.compressed + ']', listener_ip_port[1]) resource_ipport_mapping['Listener']['ipport'] = ( listener_ip_port[0] + ':' + listener_ip_port[1]) return resource_ipport_mapping, ns_name def get_udp_listener_pool_status(listener_id): (resource_ipport_mapping, ns_name) = get_udp_listener_resource_ipports_nsname(listener_id) if 'Pool' not in resource_ipport_mapping: return {} if 'Members' not in resource_ipport_mapping: return {'lvs': { 'uuid': resource_ipport_mapping['Pool']['id'], 'status': constants.UP, 'members': {} }} config_path = util.keepalived_lvs_cfg_path(listener_id) pids_pathes = util.keepalived_lvs_pids_path(listener_id) config_stat = os.stat(config_path) check_pid_stat = os.stat(pids_pathes[2]) # Indicates that keepalived configuration has been updated but the service # has yet to be restarted. # NOTE: It only works if we are doing a RESTART on configuration change, # Iaa34db6cb1dfed98e96a585c5d105e263c7efa65 forces a RESTART instead of a # RELOAD, we need to be careful if we want to switch back to RELOAD after # updating to a recent keepalived release. 
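    # (Editor's sketch with hypothetical timestamps.) The comparison below is
    # a pure mtime heuristic: if the keepalived config file was written after
    # the health-check pid file was created, keepalived has not yet been
    # restarted with the new configuration, e.g.
    #
    #     config_stat.st_mtime    = 1637052700.0  # config rewritten
    #     check_pid_stat.st_mtime = 1637052600.0  # pid file from old start
    #     restarting              = True          # 1637052700.0 > 1637052600.0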
restarting = config_stat.st_mtime > check_pid_stat.st_mtime with open(util.keepalived_lvs_cfg_path(listener_id), 'r') as f: cfg = f.read() hm_enabled = len(CHECKER_REGEX.findall(cfg)) > 0 _, realserver_result = get_listener_realserver_mapping( ns_name, resource_ipport_mapping['Listener']['ipport'], hm_enabled) pool_status = constants.UP member_results = {} if realserver_result: member_ip_port_list = [ member['ipport'] for member in resource_ipport_mapping['Members']] down_member_ip_port_set = set( member_ip_port_list) - set(list(realserver_result.keys())) for member_ip_port in member_ip_port_list: member_id = None for member in resource_ipport_mapping['Members']: if member['ipport'] == member_ip_port: member_id = member['id'] if member_ip_port is None: status = constants.MAINT elif member_ip_port in down_member_ip_port_set: status = ( constants.RESTARTING if restarting else constants.DOWN) elif int(realserver_result[member_ip_port]['Weight']) == 0: status = constants.DRAIN else: status = realserver_result[member_ip_port]['status'] if member_id: member_results[member_id] = status else: if hm_enabled: pool_status = constants.DOWN for member in resource_ipport_mapping['Members']: if member['ipport'] is None: member_results[member['id']] = constants.MAINT elif hm_enabled: member_results[member['id']] = ( constants.RESTARTING if restarting else constants.DOWN) else: member_results[member['id']] = constants.NO_CHECK return { 'lvs': { 'uuid': resource_ipport_mapping['Pool']['id'], 'status': pool_status, 'members': member_results } } def get_ipvsadm_info(ns_name, is_stats_cmd=False): cmd_list = ['ip', 'netns', 'exec', ns_name, 'ipvsadm', '-Ln'] # Use --exact to ensure the output contains integers only if is_stats_cmd: cmd_list += ['--stats', '--exact'] output = subprocess.check_output(cmd_list, stderr=subprocess.STDOUT) if isinstance(output, bytes): output = output.decode('utf-8') output = output.split('\n') fields = [] # mapping = {'listeneripport': {'Listener': vs_values, # 'Members': [rs_values1, rs_values2]}} last_key = None value_mapping = dict() output_line_num = len(output) def split_line(line): return re.sub(r'\s+', ' ', line.strip()).split(' ') for line_num in range(output_line_num): # ipvsadm -Ln if 'Flags' in output[line_num]: fields = split_line(output[line_num]) elif fields and 'Flags' in fields and fields.index('Flags') == len( fields) - 1: fields.extend(split_line(output[line_num])) # ipvsadm -Ln --stats elif 'Prot' in output[line_num]: fields = split_line(output[line_num]) elif 'RemoteAddress' in output[line_num]: start = fields.index('LocalAddress:Port') + 1 temp_fields = fields[start:] fields.extend(split_line(output[line_num])) fields.extend(temp_fields) # Here we have all of the fields elif constants.PROTOCOL_UDP in output[line_num]: # A UDP/TCP protocol line starts a VS configuration block.
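        # (Editor's illustration; the lines below are typical `ipvsadm -Ln`
        # output inside the namespace, with hypothetical addresses.)
        #
        #     Prot LocalAddress:Port Scheduler Flags
        #       -> RemoteAddress:Port        Forward Weight ActiveConn InActConn
        #     UDP  192.0.2.10:53 rr
        #       -> 198.51.100.5:53           Masq    1      0          0
        #
        # A UDP/TCP line opens a virtual-server block, and each following
        # '->' line is one of its real servers.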
vs_values = split_line(output[line_num]) for value in vs_values: if ':' in value: value_mapping[value] = {'Listener': vs_values, 'Members': []} last_key = value break # Here the line must be an RS that belongs to a VS elif '->' in output[line_num] and last_key: rs_values = split_line(output[line_num]) rs_values.remove('->') value_mapping[last_key]['Members'].append(rs_values) index = fields.index('->') vs_fields = fields[:index] if 'Flags' in vs_fields: vs_fields.remove('Flags') rs_fields = fields[index + 1:] for key in list(value_mapping.keys()): value_mapping[key]['Listener'] = list( zip(vs_fields, value_mapping[key]['Listener'])) member_res = [] for member_value in value_mapping[key]['Members']: member_res.append(list(zip(rs_fields, member_value))) value_mapping[key]['Members'] = member_res return value_mapping def get_udp_listeners_stats(): udp_listener_ids = util.get_udp_listeners() need_check_listener_ids = [ listener_id for listener_id in udp_listener_ids if util.is_udp_listener_running(listener_id)] ipport_mapping = dict() listener_stats_res = dict() for check_listener_id in need_check_listener_ids: # resource_ipport_mapping = {'Listener': {'id': listener-id, # 'ipport': ipport}, # 'Pool': {'id': pool-id}, # 'Members': [{'id': member-id-1, # 'ipport': ipport}, # {'id': member-id-2, # 'ipport': ipport}], # 'HealthMonitor': {'id': healthmonitor-id}} (resource_ipport_mapping, ns_name) = get_udp_listener_resource_ipports_nsname(check_listener_id) # Listener is disabled, we don't need to send an update if resource_ipport_mapping is None: continue # Since we found the keepalived running, acknowledge the listener # in the heartbeat. If this listener has a pool and members, # the stats will be updated later in the code flow. listener_stats_res.update({ check_listener_id: { 'stats': { 'bout': 0, 'bin': 0, 'scur': 0, 'stot': 0, 'ereq': 0}, 'status': constants.OPEN}}) # If we cannot read the LVS configuration from the file, the pool # of this listener may have zero enabled members even though the # keepalived process is running, so we need to skip it. if not resource_ipport_mapping: continue ipport_mapping.update({check_listener_id: resource_ipport_mapping}) # If we did not collect any ipport_mapping, # there is nothing to do, just return if not ipport_mapping: return listener_stats_res # contains bout, bin, scur, stot, ereq, status # bout(OutBytes), bin(InBytes), stot(Conns) from cmd ipvsadm -Ln --stats # scur(ActiveConn) from cmd ipvsadm -Ln # status: the configuration is visible in either command, treat it as OPEN # ereq stays 0, as the UDP case does not support it. scur_res = get_ipvsadm_info(constants.AMPHORA_NAMESPACE) stats_res = get_ipvsadm_info(constants.AMPHORA_NAMESPACE, is_stats_cmd=True) for listener_id, ipport in ipport_mapping.items(): listener_ipport = ipport['Listener']['ipport'] # The listener would be in ERROR here; wait for the next loop to sync # it. This also skips the case of a UDP listener with no enabled # members, so we don't check it for failover.
if listener_ipport not in scur_res or listener_ipport not in stats_res: continue scur, bout, bin, stot, ereq = 0, 0, 0, 0, 0 # As all results contain this listener, so its status should be OPEN status = constants.OPEN # Get scur for m in scur_res[listener_ipport]['Members']: for item in m: if item[0] == 'ActiveConn': scur += int(item[1]) # Get bout, bin, stot for item in stats_res[listener_ipport]['Listener']: if item[0] == 'Conns': stot = int(item[1]) elif item[0] == 'OutBytes': bout = int(item[1]) elif item[0] == 'InBytes': bin = int(item[1]) listener_stats_res.update({ listener_id: { 'stats': { 'bout': bout, 'bin': bin, 'scur': scur, 'stot': stot, 'ereq': ereq}, 'status': status}}) return listener_stats_res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/utils/network_namespace.py0000664000175000017500000000351300000000000025407 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ctypes import os class NetworkNamespace(object): """A network namespace context manager. Runs wrapped code inside the specified network namespace. :param netns: The network namespace name to enter. """ # from linux/sched.h - We want to enter a network namespace CLONE_NEWNET = 0x40000000 @staticmethod def _error_handler(result, func, arguments): if result == -1: errno = ctypes.get_errno() raise OSError(errno, os.strerror(errno)) def __init__(self, netns): self.current_netns = '/proc/{pid}/ns/net'.format(pid=os.getpid()) self.target_netns = '/var/run/netns/{netns}'.format(netns=netns) # reference: man setns(2) self.set_netns = ctypes.CDLL('libc.so.6', use_errno=True).setns self.set_netns.errcheck = self._error_handler def __enter__(self): # Save the current network namespace self.current_netns_fd = open(self.current_netns) with open(self.target_netns) as fd: self.set_netns(fd.fileno(), self.CLONE_NEWNET) def __exit__(self, *args): # Return to the previous network namespace self.set_netns(self.current_netns_fd.fileno(), self.CLONE_NEWNET) self.current_netns_fd.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/utils/network_utils.py0000664000175000017500000000723100000000000024614 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
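# (Editor's note.) A minimal usage sketch for this module; the namespace and
# address below are hypothetical:
#
#     from octavia.amphorae.backends.utils import network_utils
#
#     # Find which interface inside the amphora-haproxy namespace carries
#     # the VIP; raises exceptions.NotFound when no interface matches.
#     ifname = network_utils.get_interface_name(
#         '192.0.2.10', net_ns='amphora-haproxy')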
import ipaddress import pyroute2 from octavia.common import exceptions def _find_interface(ip_address, rtnl_api, normalized_addr): """Find the interface using a routing netlink API. :param ip_address: The IP address to search with. :param rtnl_api: A pyroute2 rtnl_api instance. (IPRoute, NetNS, etc.) :returns: The interface name if found, None if not found. :raises exceptions.InvalidIPAddress: Invalid IP address provided. """ for addr in rtnl_api.get_addr(address=ip_address): # Save the interface index as IPv6 records don't list a textual # interface interface_idx = addr['index'] # Search through the attributes of each address record for attr in addr['attrs']: # Look for the attribute name/value pair for the address if attr[0] == 'IFA_ADDRESS': # Compare the normalized address with the address we are # looking for. Since we have matched the name above, attr[1] # is the address value if normalized_addr == ipaddress.ip_address(attr[1]).compressed: # Lookup the matching interface name by getting the # interface with the index we found in the above address # search lookup_int = rtnl_api.get_links(interface_idx) # Search through the attributes of the matching interface # record for int_attr in lookup_int[0]['attrs']: # Look for the attribute name/value pair that includes # the interface name if int_attr[0] == 'IFLA_IFNAME': # Return the matching interface name that is in # int_attr[1] for the matching interface attribute # name return int_attr[1] # We didn't find an interface with that IP address. return None def get_interface_name(ip_address, net_ns=None): """Gets the interface name from an IP address. :param ip_address: The IP address to lookup. :param net_ns: The network namespace to find the interface in. :returns: The interface name. :raises exceptions.InvalidIPAddress: Invalid IP address provided. :raises octavia.common.exceptions.NotFound: No interface was found. """ # We need to normalize the address as IPv6 has multiple representations # fe80:0000:0000:0000:f816:3eff:fef2:2058 == fe80::f816:3eff:fef2:2058 try: normalized_addr = ipaddress.ip_address(ip_address).compressed except ValueError: raise exceptions.InvalidIPAddress(ip_addr=ip_address) if net_ns: with pyroute2.NetNS(net_ns) as rtnl_api: interface = _find_interface(ip_address, rtnl_api, normalized_addr) else: with pyroute2.IPRoute() as rtnl_api: interface = _find_interface(ip_address, rtnl_api, normalized_addr) if interface is not None: return interface raise exceptions.NotFound(resource='IP address', id=ip_address) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/backends/utils/udp_check.sh0000664000175000017500000000041200000000000023604 0ustar00zuulzuul00000000000000#!/bin/bash nc_cmd=`which nc` nc_flavor=$($nc_cmd --version 2>&1 | grep -o nmap) case "$nc_flavor" in nmap) nc_flavor_opts="-i1" ;; *) # default, probably openbsd nc_flavor_opts="-w1" ;; esac $nc_cmd -uzv $nc_flavor_opts $1 $2 > /dev/null exit $? 
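udp_check.sh above shells out to whichever netcat flavor the image provides. For readers who want the same best-effort probe without netcat, here is a self-contained Python sketch of the idea; the host and port arguments are hypothetical, and the open-versus-filtered ambiguity is inherent to UDP, exactly as with `nc -uzv`.

import socket
import sys


def udp_probe(host, port, timeout=1.0):
    """Best-effort UDP reachability probe, mirroring `nc -uzv host port`.

    Returns 0 when the port replied or silently absorbed the datagram,
    1 when the kernel reported it unreachable (an ICMP port-unreachable
    surfaces as ConnectionRefusedError on a connected UDP socket).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    try:
        sock.connect((host, port))
        sock.send(b'')
        try:
            sock.recv(1)  # Any reply at all means something is listening.
        except socket.timeout:
            pass  # No reply: open or filtered, count it as reachable.
        return 0
    except OSError:
        return 1
    finally:
        sock.close()


if __name__ == '__main__':
    sys.exit(udp_probe(sys.argv[1], int(sys.argv[2])))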
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/driver_exceptions/0000775000175000017500000000000000000000000022150 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/driver_exceptions/__init__.py0000664000175000017500000000107400000000000024263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/driver_exceptions/exceptions.py0000664000175000017500000000634200000000000024710 0ustar00zuulzuul00000000000000# Copyright 2011-2014 OpenStack Foundation,author: Min Wang,German Eichberger # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import excutils from octavia.i18n import _ class AmphoraDriverError(Exception): message = _("A super class for all other exceptions and the catch.") def __init__(self, **kwargs): try: super(AmphoraDriverError, self).__init__(self.message % kwargs) self.msg = self.message % kwargs except Exception: with excutils.save_and_reraise_exception() as ctxt: if not self.use_fatal_exceptions(): ctxt.reraise = False # at least get the core message out if something happened super(AmphoraDriverError, self).__init__(self.message) def __unicode__(self): return self.msg @staticmethod def use_fatal_exceptions(): """Return True if use fatal exceptions by raising them.""" return False class NotFoundError(AmphoraDriverError): message = _('this amphora couldn\'t be found') class InfoException(AmphoraDriverError): message = _('gathering information about this amphora failed') class MetricsException(AmphoraDriverError): message = _('gathering metrics failed') class UnauthorizedException(AmphoraDriverError): message = _('the driver can\'t access the amphora') class StatisticsException(AmphoraDriverError): message = _('gathering statistics failed') class TimeOutException(AmphoraDriverError): message = _('contacting the amphora timed out') class DeleteFailed(AmphoraDriverError): message = _('this load balancer couldn\'t be deleted') class SuspendFailed(AmphoraDriverError): message = _('this load balancer couldn\'t be suspended') class EnableFailed(AmphoraDriverError): message = _('this load balancer couldn\'t be enabled') class ArchiveException(AmphoraDriverError): message = _('couldn\'t archive the logs') class ProvisioningErrors(AmphoraDriverError): message = _('Super class for provisioning amphora errors') class ListenerProvisioningError(ProvisioningErrors): message = _('couldn\'t provision Listener') class LoadBalancerProvisoningError(ProvisioningErrors): message = _('couldn\'t provision LoadBalancer') class HealthMonitorProvisioningError(ProvisioningErrors): message = _('couldn\'t provision HealthMonitor') class NodeProvisioningError(ProvisioningErrors): message = _('couldn\'t provision Node') class AmpDriverNotImplementedError(AmphoraDriverError): message = _('Amphora does not implement this feature.') class AmpConnectionRetry(AmphoraDriverError): message = _('Could not connect to amphora, exception caught: ' '%(exception)s') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/drivers/0000775000175000017500000000000000000000000020072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/__init__.py0000664000175000017500000000107400000000000022205 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
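# Illustrative sketch (not part of the original package): the driver
# exceptions defined in driver_exceptions/exceptions.py above interpolate
# keyword arguments into their message templates, so a caller can raise and
# catch them like:
#
#     from octavia.amphorae.driver_exceptions import exceptions as driver_except
#
#     try:
#         raise driver_except.AmpConnectionRetry(exception='connect timed out')
#     except driver_except.AmphoraDriverError as e:
#         # e.msg == 'Could not connect to amphora, exception caught: '
#         #          'connect timed out'
#         print(e.msg)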
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/driver_base.py0000664000175000017500000003223600000000000022737 0ustar00zuulzuul00000000000000# Copyright 2011-2014 OpenStack Foundation,author: Min Wang,German Eichberger # Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta): @abc.abstractmethod def update_amphora_listeners(self, loadbalancer, amphora, timeout_dict): """Update the amphora with a new configuration. :param loadbalancer: List of listeners to update. :type loadbalancer: list(octavia.db.models.Listener) :param amphora: The index of the specific amphora to update :type amphora: octavia.db.models.Amphora :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval :type timeout_dict: dict :returns: None Builds a new configuration, pushes it to the amphora, and reloads the listener on one amphora. """ @abc.abstractmethod def update(self, loadbalancer): """Update the amphora with a new configuration. :param loadbalancer: loadbalancer object, need to use its vip.ip_address property :type loadbalancer: octavia.db.models.LoadBalancer :returns: None At this moment, we just build the basic structure for testing, will add more function along with the development. """ @abc.abstractmethod def start(self, loadbalancer, amphora, timeout_dict=None): """Start the listeners on the amphora. :param loadbalancer: loadbalancer object to start listeners :type loadbalancer: octavia.db.models.LoadBalancer :param amphora: Amphora to start. If None, start on all amphora :type amphora: octavia.db.models.Amphora :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval :type timeout_dict: dict :returns: return a value list (listener, vip, status flag--enable) At this moment, we just build the basic structure for testing, will add more function along with the development. """ @abc.abstractmethod def reload(self, loadbalancer, amphora, timeout_dict=None): """Reload the listeners on the amphora. :param loadbalancer: loadbalancer object to reload listeners :type loadbalancer: octavia.db.models.LoadBalancer :param amphora: Amphora to start. If None, reload on all amphora :type amphora: octavia.db.models.Amphora :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval :type timeout_dict: dict :returns: return a value list (listener, vip, status flag--enable) At this moment, we just build the basic structure for testing, will add more function along with the development. """ @abc.abstractmethod def delete(self, listener): """Delete the listener on the vip. 
:param listener: listener object, need to use its protocol_port property :type listener: octavia.db.models.Listener :returns: return a value list (listener, vip, status flag--delete) At this moment, we just build the basic structure for testing, will add more function along with the development. """ @abc.abstractmethod def get_info(self, amphora, raise_retry_exception=False): """Returns information about the amphora. :param amphora: amphora object, need to use its id property :type amphora: octavia.db.models.Amphora :param raise_retry_exception: Flag if outside task should be retried (False by default) :type raise_retry_exception: bool :returns: return a value list (amphora.id, status flag--'info') At this moment, we just build the basic structure for testing, will add more function along with the development, eventually, we want it to return information as: {"Rest Interface": "1.0", "Amphorae": "1.0", "packages":{"ha proxy":"1.5"}} some information might come from querying the amphora """ @abc.abstractmethod def get_diagnostics(self, amphora): """Return ceilometer ready diagnostic data. :param amphora: amphora object, need to use its id property :type amphora: octavia.db.models.Amphora :returns: return a value list (amphora.id, status flag--'get_diagnostics') At this moment, we just build the basic structure for testing, will add more function along with the development. Eventually, we want it to run some expensive self tests to determine if the amphora and the load balancers are healthy. The idea is that those tests are triggered less frequently than the health gathering. """ @abc.abstractmethod def finalize_amphora(self, amphora): """Finalize the amphora before any listeners are configured. :param amphora: amphora object, need to use its id property :type amphora: octavia.db.models.Amphora :returns: None At this moment, we just build the basic structure for testing, will add more function along with the development. This is a hook for drivers who need to do additional work before an amphora becomes ready to accept listeners. Please keep in mind that the amphora might be kept in an offline pool after this call. """ def post_vip_plug(self, amphora, load_balancer, amphorae_network_config, vrrp_port=None, vip_subnet=None): """Called after network driver has allocated and plugged the VIP :param amphora: The amphora that had its VIP plugged. :type amphora: octavia.db.models.Amphora :param load_balancer: A load balancer that just had its vip allocated and plugged in the network driver. :type load_balancer: octavia.common.data_models.LoadBalancer :param amphorae_network_config: A data model containing information about the subnets and ports that an amphora owns. :param vrrp_port: VRRP port associated with the load balancer :type vrrp_port: octavia.network.data_models.Port :param vip_subnet: VIP subnet associated with the load balancer :type vip_subnet: octavia.network.data_models.Subnet :returns: None This is to do any additional work needed on the amphorae to plug the vip, such as bringing up interfaces. """ def post_network_plug(self, amphora, port): """Called after amphora added to network :param amphora: amphora object, needs id and network ip(s) :type amphora: octavia.db.models.Amphora :param port: contains information of the plugged port :type port: octavia.network.data_models.Port This method is optional to implement. After adding an amphora to a network, there may be steps necessary on the amphora to allow it to access said network.
Ex: creating an interface on an amphora for a neutron network to utilize. """ def start_health_check(self, health_mixin): """Start health checks. :param health_mixin: health mixin object :type health_mixin: HealthMixin Starts listener process and calls HealthMixin to update database information. """ def stop_health_check(self): """Stop health checks. Stops listener process and calls HealthMixin to update database information. """ def upload_cert_amp(self, amphora, pem_file): """Upload cert info to the amphora. :param amphora: amphora object, needs id and network ip(s) :type amphora: octavia.db.models.Amphora :param pem_file: a certificate file :type pem_file: file object Upload cert file to amphora for Controller Communication. """ def update_amphora_agent_config(self, amphora, agent_config): """Upload and update the amphora agent configuration. :param amphora: amphora object, needs id and network ip(s) :type amphora: octavia.db.models.Amphora :param agent_config: The new amphora agent configuration file. :type agent_config: string """ @abc.abstractmethod def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): """Get the interface name from an IP address. :param amphora: The amphora to query. :type amphora: octavia.db.models.Amphora :param ip_address: The IP address to lookup. (IPv4 or IPv6) :type ip_address: string :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval :type timeout_dict: dict """ class HealthMixin(object, metaclass=abc.ABCMeta): @abc.abstractmethod def update_health(self, health): """Return ceilometer ready health :param health: health information emitted from the amphora :type health: dict :returns: return health At this moment, we just build the basic structure for testing, will add more function along with the development. Eventually, we want it to return: map: {"amphora-status":HEALTHY, loadbalancers: {"loadbalancer-id": {"loadbalancer-status": HEALTHY, "listeners":{"listener-id":{"listener-status":HEALTHY, "nodes":{"node-id":HEALTHY, ...}}, ...}, ...}} only items whose health has changed need to be submitted awesome update code """ class StatsMixin(object, metaclass=abc.ABCMeta): @abc.abstractmethod def update_stats(self, stats): """Return ceilometer ready stats :param stats: statistic information emitted from the amphora :type stats: string :returns: return stats At this moment, we just build the basic structure for testing, will add more function along with the development. Eventually, we want it to return a map like: {"loadbalancer-id":{"listener-id": {"bytes-in": 123, "bytes_out":123, "active_connections":123, "total_connections": 123}, ...} elements are named to keep it extensible for future versions awesome update code and code to send to ceilometer """ class VRRPDriverMixin(object, metaclass=abc.ABCMeta): """Abstract mixin class for VRRP support in loadbalancer amphorae Usage: To plug VRRP support in another service driver XYZ, use: @plug_mixin(XYZ) class XYZ: ... """ @abc.abstractmethod def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora, timeout_dict=None): """Update amphorae of the loadbalancer with a new VRRP configuration :param loadbalancer: loadbalancer object :param amphorae_network_config: amphorae network configurations :param amphora: The amphora object to update. :param timeout_dict: Dictionary of timeout values for calls to the amphora.
May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval """ @abc.abstractmethod def stop_vrrp_service(self, loadbalancer): """Stop the vrrp services running on the loadbalancer's amphorae :param loadbalancer: loadbalancer object """ @abc.abstractmethod def start_vrrp_service(self, amphora, timeout_dict=None): """Start the VRRP services on the amphora :param amphora: The amphora object to start the service on. :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval """ @abc.abstractmethod def reload_vrrp_service(self, loadbalancer): """Reload the VRRP services of all amphorae of the loadbalancer :param loadbalancer: loadbalancer object """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/drivers/haproxy/0000775000175000017500000000000000000000000021564 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/haproxy/__init__.py0000664000175000017500000000107400000000000023677 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/haproxy/data_models.py0000664000175000017500000000651000000000000024414 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
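# Illustrative sketch (the class name NoopVRRPMixin is hypothetical): a
# concrete driver opts in to VRRP support by subclassing the VRRPDriverMixin
# defined in driver_base.py above and implementing all four abstract methods:
#
#     from octavia.amphorae.drivers import driver_base
#
#     class NoopVRRPMixin(driver_base.VRRPDriverMixin):
#         def update_vrrp_conf(self, loadbalancer, amphorae_network_config,
#                              amphora, timeout_dict=None):
#             pass
#
#         def stop_vrrp_service(self, loadbalancer):
#             pass
#
#         def start_vrrp_service(self, amphora, timeout_dict=None):
#             pass
#
#         def reload_vrrp_service(self, loadbalancer):
#             pass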
import octavia.common.data_models as models class Topology(models.BaseDataModel): def __init__(self, hostname=None, uuid=None, topology=None, role=None, ip=None, ha_ip=None): self.hostname = hostname self.uuid = uuid self.topology = topology self.role = role self.ip = ip self.ha_ip = ha_ip class Info(models.BaseDataModel): def __init__(self, hostname=None, uuid=None, version=None, api_version=None): self.hostname = hostname self.uuid = uuid self.version = version self.api_version = api_version class Details(models.BaseDataModel): def __init__(self, hostname=None, uuid=None, version=None, api_version=None, network_tx=None, network_rx=None, active=None, haproxy_count=None, cpu=None, memory=None, disk=None, load=None, listeners=None, packages=None): self.hostname = hostname self.uuid = uuid self.version = version self.api_version = api_version self.network_tx = network_tx self.network_rx = network_rx self.active = active self.haproxy_count = haproxy_count self.cpu = cpu self.memory = memory self.disk = disk self.load = load or [] self.listeners = listeners or [] self.packages = packages or [] class CPU(models.BaseDataModel): def __init__(self, total=None, user=None, system=None, soft_irq=None): self.total = total self.user = user self.system = system self.soft_irq = soft_irq class Memory(models.BaseDataModel): def __init__(self, total=None, free=None, available=None, buffers=None, cached=None, swap_used=None, shared=None, slab=None, committed_as=None): self.total = total self.free = free self.available = available self.buffers = buffers self.cached = cached self.swap_used = swap_used self.shared = shared self.slab = slab self.committed_as = committed_as class Disk(models.BaseDataModel): def __init__(self, used=None, available=None): self.used = used self.available = available class ListenerStatus(models.BaseDataModel): def __init__(self, status=None, uuid=None, provisioning_status=None, type=None, pools=None): self.status = status self.uuid = uuid self.provisioning_status = provisioning_status self.type = type self.pools = pools or [] class Pool(models.BaseDataModel): def __init__(self, uuid=None, status=None, members=None): self.uuid = uuid self.status = status self.members = members or [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/haproxy/exceptions.py0000664000175000017500000000445400000000000024326 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
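# Illustrative usage (field values are hypothetical): the data models
# defined in data_models.py above are simple value holders whose
# list-valued attributes default to empty lists:
#
#     from octavia.amphorae.drivers.haproxy import data_models
#
#     details = data_models.Details(hostname='amp-host-1', version='6.2.2')
#     assert details.listeners == []  # defaulted in __init__
#     assert details.packages == []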
from webob import exc from oslo_log import log as logging LOG = logging.getLogger(__name__) def check_exception(response, ignore=tuple(), log_error=True): status_code = response.status_code responses = { 400: InvalidRequest, 401: Unauthorized, 403: Forbidden, 404: NotFound, 405: InvalidRequest, 409: Conflict, 500: InternalServerError, 503: ServiceUnavailable } if (status_code not in ignore) and (status_code in responses): try: if log_error: LOG.error('Amphora agent returned unexpected result code %s ' 'with response %s', status_code, response.json()) except Exception: # Handle the odd case where there is no response body # like when using requests_mock which doesn't support has_body pass raise responses[status_code]() return response class APIException(exc.HTTPClientError): msg = "Something unknown went wrong" code = 500 def __init__(self, **kwargs): self.msg = self.msg % kwargs super(APIException, self).__init__(detail=self.msg) class InvalidRequest(APIException): msg = "Invalid request" code = 400 class Unauthorized(APIException): msg = "Unauthorized" code = 401 class Forbidden(APIException): msg = "Forbidden" code = 403 class NotFound(APIException): msg = "Not Found" code = 404 class Conflict(APIException): msg = "Conflict" code = 409 class InternalServerError(APIException): msg = "Internal Server Error" code = 500 class ServiceUnavailable(APIException): msg = "Service Unavailable" code = 503 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/haproxy/rest_api_driver.py0000664000175000017500000012614600000000000025331 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
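# Illustrative sketch ('response' is a hypothetical requests.Response): the
# check_exception() helper in haproxy/exceptions.py above converts amphora
# agent HTTP error status codes into typed exceptions unless the code is
# explicitly ignored:
#
#     from octavia.amphorae.drivers.haproxy import exceptions as exc
#
#     exc.check_exception(response, ignore=(404,))  # a 404 passes through
#     exc.check_exception(response)  # a 404 raises exc.NotFound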
import functools import hashlib import os import ssl import time import warnings from oslo_context import context as oslo_context from oslo_log import log as logging import requests import simplejson from stevedore import driver as stevedore_driver from octavia.amphorae.driver_exceptions import exceptions as driver_except from octavia.amphorae.drivers import driver_base from octavia.amphorae.drivers.haproxy import exceptions as exc from octavia.amphorae.drivers.keepalived import vrrp_rest_driver from octavia.common.config import cfg from octavia.common import constants as consts import octavia.common.jinja.haproxy.combined_listeners.jinja_cfg as jinja_combo import octavia.common.jinja.haproxy.split_listeners.jinja_cfg as jinja_split from octavia.common.jinja.lvs import jinja_cfg as jinja_udp_cfg from octavia.common.tls_utils import cert_parser from octavia.common import utils from octavia.db import api as db_apis from octavia.db import repositories as repo LOG = logging.getLogger(__name__) API_VERSION = consts.API_VERSION OCTAVIA_API_CLIENT = ( "Octavia HaProxy Rest Client/{version} " "(https://wiki.openstack.org/wiki/Octavia)").format(version=API_VERSION) CONF = cfg.CONF class HaproxyAmphoraLoadBalancerDriver( driver_base.AmphoraLoadBalancerDriver, vrrp_rest_driver.KeepalivedAmphoraDriverMixin): def __init__(self): super(HaproxyAmphoraLoadBalancerDriver, self).__init__() self.clients = { 'base': AmphoraAPIClientBase(), '0.5': AmphoraAPIClient0_5(), '1.0': AmphoraAPIClient1_0(), } self.cert_manager = stevedore_driver.DriverManager( namespace='octavia.cert_manager', name=CONF.certificates.cert_manager, invoke_on_load=True, ).driver self.jinja_combo = jinja_combo.JinjaTemplater( base_amp_path=CONF.haproxy_amphora.base_path, base_crt_dir=CONF.haproxy_amphora.base_cert_dir, haproxy_template=CONF.haproxy_amphora.haproxy_template, connection_logging=CONF.haproxy_amphora.connection_logging) self.jinja_split = jinja_split.JinjaTemplater( base_amp_path=CONF.haproxy_amphora.base_path, base_crt_dir=CONF.haproxy_amphora.base_cert_dir, haproxy_template=CONF.haproxy_amphora.haproxy_template, connection_logging=CONF.haproxy_amphora.connection_logging) self.udp_jinja = jinja_udp_cfg.LvsJinjaTemplater() def _get_haproxy_versions(self, amphora): """Get major and minor version number from haproxy Example: ['1', '6'] :returns version_list: A list with the major and minor numbers """ self._populate_amphora_api_version(amphora) amp_info = self.clients[amphora.api_version].get_info(amphora) haproxy_version_string = amp_info['haproxy_version'] return haproxy_version_string.split('.')[:2] def _populate_amphora_api_version(self, amphora, timeout_dict=None, raise_retry_exception=False): """Populate the amphora object with the api_version This will query the amphora for version discovery and populate the api_version string attribute on the amphora object. :returns: None """ if not getattr(amphora, 'api_version', None): try: amphora.api_version = self.clients['base'].get_api_version( amphora, timeout_dict=timeout_dict, raise_retry_exception=raise_retry_exception)['api_version'] except exc.NotFound: # Amphora is too old for version discovery, default to 0.5 amphora.api_version = '0.5' LOG.debug('Amphora %s has API version %s', amphora.id, amphora.api_version) return list(map(int, amphora.api_version.split('.'))) def update_amphora_listeners(self, loadbalancer, amphora, timeout_dict=None): """Update the amphora with a new configuration. 
:param loadbalancer: The load balancer to update :type loadbalancer: object :param amphora: The amphora to update :type amphora: object :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval :returns: None Updates the configuration of the listeners on a single amphora. """ # if the amphora does not yet have listeners, no need to update them. if not loadbalancer.listeners: LOG.debug('No listeners found to update.') return if amphora is None or amphora.status == consts.DELETED: return # Check which HAProxy version is on the amp haproxy_versions = self._get_haproxy_versions(amphora) # Check which config style to use api_version = self._populate_amphora_api_version(amphora) if api_version[0] == 0 and api_version[1] <= 5: # 0.5 or earlier split_config = True LOG.warning( 'Amphora %s for loadbalancer %s needs upgrade to single ' 'process mode.', amphora.id, loadbalancer.id) else: split_config = False LOG.debug('Amphora %s for loadbalancer %s is already in single ' 'process mode.', amphora.id, loadbalancer.id) has_tcp = False certs = {} listeners_to_update = [] for listener in loadbalancer.listeners: LOG.debug("%s updating listener %s on amphora %s", self.__class__.__name__, listener.id, amphora.id) if listener.protocol == 'UDP': # Generate Keepalived LVS configuration from listener object config = self.udp_jinja.build_config(listener=listener) self.clients[amphora.api_version].upload_udp_config( amphora, listener.id, config, timeout_dict=timeout_dict) self.clients[amphora.api_version].reload_listener( amphora, listener.id, timeout_dict=timeout_dict) else: has_tcp = True if split_config: obj_id = listener.id else: obj_id = loadbalancer.id try: certs.update({ listener.tls_certificate_id: self._process_tls_certificates( listener, amphora, obj_id)['tls_cert']}) certs.update({listener.client_ca_tls_certificate_id: self._process_secret( listener, listener.client_ca_tls_certificate_id, amphora, obj_id)}) certs.update({listener.client_crl_container_id: self._process_secret( listener, listener.client_crl_container_id, amphora, obj_id)}) certs.update(self._process_listener_pool_certs( listener, amphora, obj_id)) if split_config: config = self.jinja_split.build_config( host_amphora=amphora, listener=listener, haproxy_versions=haproxy_versions, client_ca_filename=certs[ listener.client_ca_tls_certificate_id], client_crl=certs[listener.client_crl_container_id], pool_tls_certs=certs) self.clients[amphora.api_version].upload_config( amphora, listener.id, config, timeout_dict=timeout_dict) self.clients[amphora.api_version].reload_listener( amphora, listener.id, timeout_dict=timeout_dict) else: listeners_to_update.append(listener) except Exception as e: LOG.exception('Unable to update listener %s due to ' '"%s". 
Skipping this listener.', listener.id, str(e)) listener_repo = repo.ListenerRepository() listener_repo.update(db_apis.get_session(), listener.id, provisioning_status=consts.ERROR, operating_status=consts.ERROR) if has_tcp and not split_config: if listeners_to_update: # Generate HaProxy configuration from listener object config = self.jinja_combo.build_config( host_amphora=amphora, listeners=listeners_to_update, tls_certs=certs, haproxy_versions=haproxy_versions) self.clients[amphora.api_version].upload_config( amphora, loadbalancer.id, config, timeout_dict=timeout_dict) self.clients[amphora.api_version].reload_listener( amphora, loadbalancer.id, timeout_dict=timeout_dict) else: # If we aren't updating any listeners, make sure there are # no listeners hanging around. For example if this update # was called from a listener delete. self.clients[amphora.api_version].delete_listener( amphora, loadbalancer.id) def _udp_update(self, listener, vip): LOG.debug("Amphora %s keepalivedlvs, updating " "listener %s, vip %s", self.__class__.__name__, listener.protocol_port, vip.ip_address) for amp in listener.load_balancer.amphorae: if amp.status != consts.DELETED: # Generate Keepalived LVS configuration from listener object self._populate_amphora_api_version(amp) config = self.udp_jinja.build_config(listener=listener) self.clients[amp.api_version].upload_udp_config( amp, listener.id, config) self.clients[amp.api_version].reload_listener( amp, listener.id) def update(self, loadbalancer): for amphora in loadbalancer.amphorae: if amphora.status != consts.DELETED: self.update_amphora_listeners(loadbalancer, amphora) def upload_cert_amp(self, amp, pem): LOG.debug("Amphora %s updating cert in REST driver " "with amphora id %s,", self.__class__.__name__, amp.id) self._populate_amphora_api_version(amp) self.clients[amp.api_version].update_cert_for_rotation(amp, pem) def _apply(self, func_name, loadbalancer, amphora=None, *args): if amphora is None: amphorae = loadbalancer.amphorae else: amphorae = [amphora] for amp in amphorae: if amp.status != consts.DELETED: api_version = self._populate_amphora_api_version(amp) # Check which config style to use if api_version[0] == 0 and api_version[1] <= 5: # 0.5 or earlier LOG.warning( 'Amphora %s for loadbalancer %s needs upgrade to ' 'single process mode.', amp.id, loadbalancer.id) for listener in loadbalancer.listeners: getattr(self.clients[amp.api_version], func_name)( amp, listener.id, *args) else: LOG.debug( 'Amphora %s for loadbalancer %s is already in single ' 'process mode.', amp.id, loadbalancer.id) has_tcp = False for listener in loadbalancer.listeners: if listener.protocol == consts.PROTOCOL_UDP: getattr(self.clients[amp.api_version], func_name)( amp, listener.id, *args) else: has_tcp = True if has_tcp: getattr(self.clients[amp.api_version], func_name)( amp, loadbalancer.id, *args) def reload(self, loadbalancer, amphora=None, timeout_dict=None): self._apply('reload_listener', loadbalancer, amphora, timeout_dict) def start(self, loadbalancer, amphora=None, timeout_dict=None): self._apply('start_listener', loadbalancer, amphora, timeout_dict) def delete(self, listener): # Delete any UDP listeners the old way (we didn't update the way they # are configured) loadbalancer = listener.load_balancer if listener.protocol == consts.PROTOCOL_UDP: for amp in loadbalancer.amphorae: if amp.status != consts.DELETED: self._populate_amphora_api_version(amp) self.clients[amp.api_version].delete_listener( amp, listener.id) return # In case the listener is not UDP, things get more 
complicated. # We need to do this individually for each amphora in case some are # using split config and others are using combined config. for amp in loadbalancer.amphorae: if amp.status != consts.DELETED: api_version = self._populate_amphora_api_version(amp) # Check which config style to use if api_version[0] == 0 and api_version[1] <= 5: # 0.5 or earlier LOG.warning( 'Amphora %s for loadbalancer %s needs upgrade to ' 'single process mode.', amp.id, loadbalancer.id) self.clients[amp.api_version].delete_listener( amp, listener.id) else: LOG.debug( 'Amphora %s for loadbalancer %s is already in single ' 'process mode.', amp.id, loadbalancer.id) self._combined_config_delete(amp, listener) def _combined_config_delete(self, amphora, listener): # Remove the listener from the listener list on the LB before # passing the whole thing over to update (so it'll actually delete) # In case of amphorae in ACTIVE_STANDBY topology, ensure that we don't # remove an already removed listener. if listener in listener.load_balancer.listeners: listener.load_balancer.listeners.remove(listener) # Check if there's any certs that we need to delete certs = self._process_tls_certificates(listener) certs_to_delete = set() if certs['tls_cert']: certs_to_delete.add(certs['tls_cert'].id) for sni_cert in certs['sni_certs']: certs_to_delete.add(sni_cert.id) # Delete them (they'll be recreated before the reload if they are # needed for other listeners anyway) self._populate_amphora_api_version(amphora) for cert_id in certs_to_delete: self.clients[amphora.api_version].delete_cert_pem( amphora, listener.load_balancer.id, '{id}.pem'.format(id=cert_id)) # See how many non-UDP listeners we have left non_udp_listener_count = len([ 1 for li in listener.load_balancer.listeners if li.protocol != consts.PROTOCOL_UDP]) if non_udp_listener_count > 0: # We have other listeners, so just update is fine. # TODO(rm_work): This is a little inefficient since this duplicates # a lot of the detection logic that has already been done, but it # is probably safer to re-use the existing code-path. self.update_amphora_listeners(listener.load_balancer, amphora) else: # Deleting the last listener, so really do the delete self.clients[amphora.api_version].delete_listener( amphora, listener.load_balancer.id) def get_info(self, amphora, raise_retry_exception=False): self._populate_amphora_api_version( amphora, raise_retry_exception=raise_retry_exception) return self.clients[amphora.api_version].get_info( amphora, raise_retry_exception=raise_retry_exception) def get_diagnostics(self, amphora): pass def finalize_amphora(self, amphora): pass def post_vip_plug(self, amphora, load_balancer, amphorae_network_config, vrrp_port=None, vip_subnet=None): if amphora.status != consts.DELETED: self._populate_amphora_api_version(amphora) if vip_subnet is None: subnet = amphorae_network_config.get(amphora.id).vip_subnet else: subnet = vip_subnet # NOTE(blogan): using the vrrp port here because that # is what the allowed address pairs network driver sets # this particular port to. This does expose a bit of # tight coupling between the network driver and amphora # driver. We will need to revisit this to try and remove # this tight coupling. # NOTE (johnsom): I am loading the vrrp_ip into the # net_info structure here so that I don't break # compatibility with old amphora agent versions. 
if vrrp_port is None: port = amphorae_network_config.get(amphora.id).vrrp_port mtu = port.network.mtu else: port = vrrp_port mtu = port.network['mtu'] LOG.debug("Post-VIP-Plugging with vrrp_ip %s vrrp_port %s", amphora.vrrp_ip, port.id) host_routes = [{'nexthop': hr.nexthop, 'destination': hr.destination} for hr in subnet.host_routes] net_info = {'subnet_cidr': subnet.cidr, 'gateway': subnet.gateway_ip, 'mac_address': port.mac_address, 'vrrp_ip': amphora.vrrp_ip, 'mtu': mtu, 'host_routes': host_routes} try: self.clients[amphora.api_version].plug_vip( amphora, load_balancer.vip.ip_address, net_info) except exc.Conflict: LOG.warning('VIP with MAC %(mac)s already exists on amphora, ' 'skipping post_vip_plug', {'mac': port.mac_address}) def post_network_plug(self, amphora, port): fixed_ips = [] for fixed_ip in port.fixed_ips: host_routes = [{'nexthop': hr.nexthop, 'destination': hr.destination} for hr in fixed_ip.subnet.host_routes] ip = {'ip_address': fixed_ip.ip_address, 'subnet_cidr': fixed_ip.subnet.cidr, 'host_routes': host_routes} fixed_ips.append(ip) port_info = {'mac_address': port.mac_address, 'fixed_ips': fixed_ips, 'mtu': port.network.mtu} try: self._populate_amphora_api_version(amphora) self.clients[amphora.api_version].plug_network(amphora, port_info) except exc.Conflict: LOG.warning('Network with MAC %(mac)s already exists on amphora, ' 'skipping post_network_plug', {'mac': port.mac_address}) def _process_tls_certificates(self, listener, amphora=None, obj_id=None): """Processes TLS data from the listener. Converts and uploads PEM data to the Amphora API return TLS_CERT and SNI_CERTS """ tls_cert = None sni_certs = [] certs = [] cert_filename_list = [] data = cert_parser.load_certificates_data( self.cert_manager, listener) if data['tls_cert'] is not None: tls_cert = data['tls_cert'] # Note, the first cert is the TLS default cert certs.append(tls_cert) if data['sni_certs']: sni_certs = data['sni_certs'] certs.extend(sni_certs) if amphora and obj_id: for cert in certs: pem = cert_parser.build_pem(cert) md5 = hashlib.md5(pem).hexdigest() # nosec name = '{id}.pem'.format(id=cert.id) cert_filename_list.append( os.path.join( CONF.haproxy_amphora.base_cert_dir, obj_id, name)) self._upload_cert(amphora, obj_id, pem, md5, name) if certs: # Build and upload the crt-list file for haproxy crt_list = "\n".join(cert_filename_list).encode('utf-8') md5 = hashlib.md5(crt_list).hexdigest() # nosec name = '{id}.pem'.format(id=listener.id) self._upload_cert(amphora, obj_id, crt_list, md5, name) return {'tls_cert': tls_cert, 'sni_certs': sni_certs} def _process_secret(self, listener, secret_ref, amphora=None, obj_id=None): """Get the secret from the cert manager and upload it to the amp. :returns: The filename of the secret in the amp. 
""" if not secret_ref: return None context = oslo_context.RequestContext(project_id=listener.project_id) secret = self.cert_manager.get_secret(context, secret_ref) try: secret = secret.encode('utf-8') except AttributeError: pass md5 = hashlib.md5(secret).hexdigest() # nosec id = hashlib.sha1(secret).hexdigest() # nosec name = '{id}.pem'.format(id=id) if amphora and obj_id: self._upload_cert( amphora, obj_id, pem=secret, md5=md5, name=name) return name def _process_listener_pool_certs(self, listener, amphora, obj_id): # {'POOL-ID': { # 'client_cert': client_full_filename, # 'ca_cert': ca_cert_full_filename, # 'crl': crl_full_filename}} pool_certs_dict = dict() for pool in listener.pools: if pool.id not in pool_certs_dict: pool_certs_dict[pool.id] = self._process_pool_certs( listener, pool, amphora, obj_id) for l7policy in listener.l7policies: if (l7policy.redirect_pool and l7policy.redirect_pool.id not in pool_certs_dict): pool_certs_dict[l7policy.redirect_pool.id] = ( self._process_pool_certs(listener, l7policy.redirect_pool, amphora, obj_id)) return pool_certs_dict def _process_pool_certs(self, listener, pool, amphora, obj_id): pool_cert_dict = dict() # Handle the client cert(s) and key if pool.tls_certificate_id: data = cert_parser.load_certificates_data(self.cert_manager, pool) tls_cert = data['tls_cert'] pem = cert_parser.build_pem(tls_cert) try: pem = pem.encode('utf-8') except AttributeError: pass md5 = hashlib.md5(pem).hexdigest() # nosec name = '{id}.pem'.format(id=tls_cert.id) if amphora and obj_id: self._upload_cert(amphora, obj_id, pem=pem, md5=md5, name=name) pool_cert_dict['client_cert'] = os.path.join( CONF.haproxy_amphora.base_cert_dir, obj_id, name) if pool.ca_tls_certificate_id: name = self._process_secret(listener, pool.ca_tls_certificate_id, amphora, obj_id) pool_cert_dict['ca_cert'] = os.path.join( CONF.haproxy_amphora.base_cert_dir, obj_id, name) if pool.crl_container_id: name = self._process_secret(listener, pool.crl_container_id, amphora, obj_id) pool_cert_dict['crl'] = os.path.join( CONF.haproxy_amphora.base_cert_dir, obj_id, name) return pool_cert_dict def _upload_cert(self, amp, listener_id, pem, md5, name): try: if self.clients[amp.api_version].get_cert_md5sum( amp, listener_id, name, ignore=(404,)) == md5: return except exc.NotFound: pass self.clients[amp.api_version].upload_cert_pem( amp, listener_id, name, pem) def update_amphora_agent_config(self, amphora, agent_config, timeout_dict=None): """Update the amphora agent configuration file. :param amphora: The amphora to update. :type amphora: object :param agent_config: The new amphora agent configuration. :type agent_config: string :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval :returns: None Note: This will mutate the amphora agent config and adopt the new values. """ try: self._populate_amphora_api_version(amphora) self.clients[amphora.api_version].update_agent_config( amphora, agent_config, timeout_dict=timeout_dict) except exc.NotFound: LOG.debug('Amphora %s does not support the update_agent_config ' 'API.', amphora.id) raise driver_except.AmpDriverNotImplementedError() def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): """Get the interface name for an IP address. :param amphora: The amphora to query. :type amphora: octavia.db.models.Amphora :param ip_address: The IP address to lookup. 
(IPv4 or IPv6) :type ip_address: string :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval :type timeout_dict: dict :returns: None if not found, the interface name string if found. """ try: self._populate_amphora_api_version(amphora, timeout_dict) response_json = self.clients[amphora.api_version].get_interface( amphora, ip_address, timeout_dict, log_error=False) return response_json.get('interface', None) except (exc.NotFound, driver_except.TimeOutException): return None # Check a custom hostname class CustomHostNameCheckingAdapter(requests.adapters.HTTPAdapter): def cert_verify(self, conn, url, verify, cert): conn.assert_hostname = self.uuid return super(CustomHostNameCheckingAdapter, self).cert_verify(conn, url, verify, cert) def init_poolmanager(self, *pool_args, **pool_kwargs): proto = CONF.amphora_agent.agent_tls_protocol.replace('.', '_') pool_kwargs['ssl_version'] = getattr(ssl, "PROTOCOL_%s" % proto) return super(CustomHostNameCheckingAdapter, self).init_poolmanager(*pool_args, **pool_kwargs) class AmphoraAPIClientBase(object): def __init__(self): super(AmphoraAPIClientBase, self).__init__() self.get = functools.partial(self.request, 'get') self.post = functools.partial(self.request, 'post') self.put = functools.partial(self.request, 'put') self.delete = functools.partial(self.request, 'delete') self.head = functools.partial(self.request, 'head') self.session = requests.Session() self.session.cert = CONF.haproxy_amphora.client_cert self.ssl_adapter = CustomHostNameCheckingAdapter() self.session.mount('https://', self.ssl_adapter) def _base_url(self, ip, api_version=None): if utils.is_ipv6_lla(ip): ip = '[{ip}%{interface}]'.format( ip=ip, interface=CONF.haproxy_amphora.lb_network_interface) elif utils.is_ipv6(ip): ip = '[{ip}]'.format(ip=ip) if api_version: return "https://{ip}:{port}/{version}/".format( ip=ip, port=CONF.haproxy_amphora.bind_port, version=api_version) return "https://{ip}:{port}/".format( ip=ip, port=CONF.haproxy_amphora.bind_port) def request(self, method, amp, path='/', timeout_dict=None, retry_404=True, raise_retry_exception=False, **kwargs): cfg_ha_amp = CONF.haproxy_amphora if timeout_dict is None: timeout_dict = {} req_conn_timeout = timeout_dict.get( consts.REQ_CONN_TIMEOUT, cfg_ha_amp.rest_request_conn_timeout) req_read_timeout = timeout_dict.get( consts.REQ_READ_TIMEOUT, cfg_ha_amp.rest_request_read_timeout) conn_max_retries = timeout_dict.get( consts.CONN_MAX_RETRIES, cfg_ha_amp.connection_max_retries) conn_retry_interval = timeout_dict.get( consts.CONN_RETRY_INTERVAL, cfg_ha_amp.connection_retry_interval) LOG.debug("request url %s", path) _request = getattr(self.session, method.lower()) _url = self._base_url(amp.lb_network_ip, amp.api_version) + path LOG.debug("request url %s", _url) reqargs = { 'verify': CONF.haproxy_amphora.server_ca, 'url': _url, 'timeout': (req_conn_timeout, req_read_timeout), } reqargs.update(kwargs) headers = reqargs.setdefault('headers', {}) headers['User-Agent'] = OCTAVIA_API_CLIENT self.ssl_adapter.uuid = amp.id exception = None # Keep retrying for dummy in range(conn_max_retries): try: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="A true SSLContext object is not available" ) r = _request(**reqargs) LOG.debug('Connected to amphora. 
Response: %(resp)s', {'resp': r}) content_type = r.headers.get('content-type', '') # Check the 404 to see if it is just that the network in the # amphora is not yet up, in which case retry. # Otherwise return the response quickly. if r.status_code == 404: if not retry_404: raise exc.NotFound() LOG.debug('Got a 404 (content-type: %(content_type)s) -- ' 'connection data: %(content)s', {'content_type': content_type, 'content': r.content}) if content_type.find("application/json") == -1: LOG.debug("Amphora agent not ready.") raise requests.ConnectionError try: json_data = r.json().get('details', '') if 'No suitable network interface found' in json_data: LOG.debug("Amphora network interface not found.") raise requests.ConnectionError except simplejson.JSONDecodeError: # if r.json() fails pass # TODO(rm_work) Should we do something? return r except (requests.ConnectionError, requests.Timeout) as e: exception = e LOG.warning("Could not connect to instance. Retrying.") time.sleep(conn_retry_interval) if raise_retry_exception: # For taskflow persistence, the cause attribute should # be serializable to JSON. Pass None, as the cause exception # is described in the exception message. raise driver_except.AmpConnectionRetry( exception=str(e)) from None LOG.error("Connection retries (currently set to %(max_retries)s) " "exhausted. The amphora is unavailable. Reason: " "%(exception)s", {'max_retries': conn_max_retries, 'exception': exception}) raise driver_except.TimeOutException() def get_api_version(self, amp, timeout_dict=None, raise_retry_exception=False): amp.api_version = None r = self.get(amp, retry_404=False, timeout_dict=timeout_dict, raise_retry_exception=raise_retry_exception) # Handle 404 specially as we don't want to log an ERROR on 404 exc.check_exception(r, (404,)) if r.status_code == 404: raise exc.NotFound() return r.json() class AmphoraAPIClient0_5(AmphoraAPIClientBase): def __init__(self): super(AmphoraAPIClient0_5, self).__init__() self.start_listener = functools.partial(self._action, consts.AMP_ACTION_START) self.reload_listener = functools.partial(self._action, consts.AMP_ACTION_RELOAD) self.start_vrrp = functools.partial(self._vrrp_action, consts.AMP_ACTION_START) self.stop_vrrp = functools.partial(self._vrrp_action, consts.AMP_ACTION_STOP) self.reload_vrrp = functools.partial(self._vrrp_action, consts.AMP_ACTION_RELOAD) def upload_config(self, amp, listener_id, config, timeout_dict=None): r = self.put( amp, 'listeners/{amphora_id}/{listener_id}/haproxy'.format( amphora_id=amp.id, listener_id=listener_id), timeout_dict, data=config) return exc.check_exception(r) def _action(self, action, amp, listener_id, timeout_dict=None): r = self.put(amp, 'listeners/{listener_id}/{action}'.format( listener_id=listener_id, action=action), timeout_dict=timeout_dict) return exc.check_exception(r) def upload_cert_pem(self, amp, listener_id, pem_filename, pem_file): r = self.put( amp, 'listeners/{listener_id}/certificates/{filename}'.format( listener_id=listener_id, filename=pem_filename), data=pem_file) return exc.check_exception(r) def get_cert_md5sum(self, amp, listener_id, pem_filename, ignore=tuple()): r = self.get( amp, 'listeners/{listener_id}/certificates/{filename}'.format( listener_id=listener_id, filename=pem_filename)) if exc.check_exception(r, ignore): return r.json().get("md5sum") return None def delete_cert_pem(self, amp, listener_id, pem_filename): r = self.delete( amp, 'listeners/{listener_id}/certificates/{filename}'.format( listener_id=listener_id, filename=pem_filename)) return
exc.check_exception(r, (404,)) def update_cert_for_rotation(self, amp, pem_file): r = self.put(amp, 'certificate', data=pem_file) return exc.check_exception(r) def delete_listener(self, amp, listener_id): r = self.delete( amp, 'listeners/{listener_id}'.format(listener_id=listener_id)) return exc.check_exception(r, (404,)) def get_info(self, amp, raise_retry_exception=False): r = self.get(amp, "info", raise_retry_exception=raise_retry_exception) if exc.check_exception(r): return r.json() return None def get_details(self, amp): r = self.get(amp, "details") if exc.check_exception(r): return r.json() return None def get_all_listeners(self, amp): r = self.get(amp, "listeners") if exc.check_exception(r): return r.json() return None def plug_network(self, amp, port): r = self.post(amp, 'plug/network', json=port) return exc.check_exception(r) def plug_vip(self, amp, vip, net_info): r = self.post(amp, 'plug/vip/{vip}'.format(vip=vip), json=net_info) return exc.check_exception(r) def upload_vrrp_config(self, amp, config): r = self.put(amp, 'vrrp/upload', data=config) return exc.check_exception(r) def _vrrp_action(self, action, amp, timeout_dict=None): r = self.put(amp, 'vrrp/{action}'.format(action=action), timeout_dict=timeout_dict) return exc.check_exception(r) def get_interface(self, amp, ip_addr, timeout_dict=None, log_error=True): r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr), timeout_dict=timeout_dict) return exc.check_exception(r, log_error=log_error).json() def upload_udp_config(self, amp, listener_id, config, timeout_dict=None): r = self.put( amp, 'listeners/{amphora_id}/{listener_id}/udp_listener'.format( amphora_id=amp.id, listener_id=listener_id), timeout_dict, data=config) return exc.check_exception(r) def update_agent_config(self, amp, agent_config, timeout_dict=None): r = self.put(amp, 'config', timeout_dict, data=agent_config) return exc.check_exception(r) class AmphoraAPIClient1_0(AmphoraAPIClientBase): def __init__(self): super(AmphoraAPIClient1_0, self).__init__() self.start_listener = functools.partial(self._action, consts.AMP_ACTION_START) self.reload_listener = functools.partial(self._action, consts.AMP_ACTION_RELOAD) self.start_vrrp = functools.partial(self._vrrp_action, consts.AMP_ACTION_START) self.stop_vrrp = functools.partial(self._vrrp_action, consts.AMP_ACTION_STOP) self.reload_vrrp = functools.partial(self._vrrp_action, consts.AMP_ACTION_RELOAD) def upload_config(self, amp, loadbalancer_id, config, timeout_dict=None): r = self.put( amp, 'loadbalancer/{amphora_id}/{loadbalancer_id}/haproxy'.format( amphora_id=amp.id, loadbalancer_id=loadbalancer_id), timeout_dict, data=config) return exc.check_exception(r) def get_listener_status(self, amp, listener_id): r = self.get( amp, 'listeners/{listener_id}'.format(listener_id=listener_id)) if exc.check_exception(r): return r.json() return None def _action(self, action, amp, object_id, timeout_dict=None): r = self.put( amp, 'loadbalancer/{object_id}/{action}'.format( object_id=object_id, action=action), timeout_dict=timeout_dict) return exc.check_exception(r) def upload_cert_pem(self, amp, loadbalancer_id, pem_filename, pem_file): r = self.put( amp, 'loadbalancer/{loadbalancer_id}/certificates/{filename}'.format( loadbalancer_id=loadbalancer_id, filename=pem_filename), data=pem_file) return exc.check_exception(r) def get_cert_md5sum(self, amp, loadbalancer_id, pem_filename, ignore=tuple()): r = self.get( amp, 'loadbalancer/{loadbalancer_id}/certificates/{filename}'.format( loadbalancer_id=loadbalancer_id, 
filename=pem_filename)) if exc.check_exception(r, ignore): return r.json().get("md5sum") return None def delete_cert_pem(self, amp, loadbalancer_id, pem_filename): r = self.delete( amp, 'loadbalancer/{loadbalancer_id}/certificates/{filename}'.format( loadbalancer_id=loadbalancer_id, filename=pem_filename)) return exc.check_exception(r, (404,)) def update_cert_for_rotation(self, amp, pem_file): r = self.put(amp, 'certificate', data=pem_file) return exc.check_exception(r) def delete_listener(self, amp, object_id): r = self.delete( amp, 'listeners/{object_id}'.format(object_id=object_id)) return exc.check_exception(r, (404,)) def get_info(self, amp, raise_retry_exception=False): r = self.get(amp, "info", raise_retry_exception=raise_retry_exception) if exc.check_exception(r): return r.json() return None def get_details(self, amp): r = self.get(amp, "details") if exc.check_exception(r): return r.json() return None def get_all_listeners(self, amp): r = self.get(amp, "listeners") if exc.check_exception(r): return r.json() return None def plug_network(self, amp, port): r = self.post(amp, 'plug/network', json=port) return exc.check_exception(r) def plug_vip(self, amp, vip, net_info): r = self.post(amp, 'plug/vip/{vip}'.format(vip=vip), json=net_info) return exc.check_exception(r) def upload_vrrp_config(self, amp, config): r = self.put(amp, 'vrrp/upload', data=config) return exc.check_exception(r) def _vrrp_action(self, action, amp, timeout_dict=None): r = self.put(amp, 'vrrp/{action}'.format(action=action), timeout_dict=timeout_dict) return exc.check_exception(r) def get_interface(self, amp, ip_addr, timeout_dict=None, log_error=True): r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr), timeout_dict=timeout_dict) return exc.check_exception(r, log_error=log_error).json() def upload_udp_config(self, amp, listener_id, config, timeout_dict=None): r = self.put( amp, 'listeners/{amphora_id}/{listener_id}/udp_listener'.format( amphora_id=amp.id, listener_id=listener_id), timeout_dict, data=config) return exc.check_exception(r) def update_agent_config(self, amp, agent_config, timeout_dict=None): r = self.put(amp, 'config', timeout_dict, data=agent_config) return exc.check_exception(r) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/drivers/health/0000775000175000017500000000000000000000000021337 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/health/__init__.py0000664000175000017500000000107400000000000023452 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
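# Illustrative note (addresses, port, and interface name are hypothetical):
# the REST clients in rest_api_driver.py above build per-amphora base URLs
# with AmphoraAPIClientBase._base_url(), which brackets IPv6 addresses and
# scopes link-local ones to the configured lb_network_interface, producing
# URLs such as:
#
#     https://192.0.2.10:9443/1.0/
#     https://[fe80::f816:3eff:fef2:2058%o-hm0]:9443/1.0/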
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/health/heartbeat_udp.py0000664000175000017500000001673200000000000024531 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from concurrent import futures import socket import time from oslo_config import cfg from oslo_log import log as logging from stevedore import driver as stevedore_driver from octavia.amphorae.backends.health_daemon import status_message from octavia.common import exceptions from octavia.db import repositories UDP_MAX_SIZE = 64 * 1024 CONF = cfg.CONF LOG = logging.getLogger(__name__) def update_health(obj, srcaddr): handler = stevedore_driver.DriverManager( namespace='octavia.amphora.health_update_drivers', name=CONF.health_manager.health_update_driver, invoke_on_load=True ).driver handler.update_health(obj, srcaddr) def update_stats(obj, srcaddr): handler = stevedore_driver.DriverManager( namespace='octavia.amphora.stats_update_drivers', name=CONF.health_manager.stats_update_driver, invoke_on_load=True ).driver handler.update_stats(obj, srcaddr) class UDPStatusGetter(object): """This class defines methods that will gather heartbeats The heartbeats are transmitted via UDP and this class will bind to a port and absorb them """ def __init__(self): self.key = cfg.CONF.health_manager.heartbeat_key self.ip = cfg.CONF.health_manager.bind_ip self.port = cfg.CONF.health_manager.bind_port self.sockaddr = None LOG.info('attempting to listen on %(ip)s port %(port)s', {'ip': self.ip, 'port': self.port}) self.sock = None self.update(self.key, self.ip, self.port) self.health_executor = futures.ProcessPoolExecutor( max_workers=CONF.health_manager.health_update_threads) self.stats_executor = futures.ProcessPoolExecutor( max_workers=CONF.health_manager.stats_update_threads) self.repo = repositories.Repositories().amphorahealth def update(self, key, ip, port): """Update the running config for the UDP socket server :param key: The HMAC key used to verify the UDP packets. String :param ip: The IP address the UDP server will read from :param port: The port the UDP server will read from :return: None """ self.key = key for addrinfo in socket.getaddrinfo(ip, port, 0, socket.SOCK_DGRAM): ai_family = addrinfo[0] self.sockaddr = addrinfo[4] if self.sock is not None: self.sock.close() self.sock = socket.socket(ai_family, socket.SOCK_DGRAM) self.sock.settimeout(1) self.sock.bind(self.sockaddr) if cfg.CONF.health_manager.sock_rlimit > 0: rlimit = cfg.CONF.health_manager.sock_rlimit LOG.info("setting sock rlimit to %s", rlimit) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, rlimit) break # just use the first addr that getaddrinfo finds if self.sock is None: raise exceptions.NetworkConfig("unable to find suitable socket") def dorecv(self, *args, **kw): """Waits for a UDP heartbeat to be sent. :return: Returns the unwrapped payload and addr that sent the heartbeat.
The format of the obj from the UDP sender can be seen below. Note that listener_1 has no pools and listener_4 has no members. Example:: { "listeners": { "listener_uuid_1": { "pools": {}, "status": "OPEN", "stats": { "conns": 0, "rx": 0, "tx": 0 } }, "listener_uuid_2": { "pools": { "pool_uuid_1": { "members": [{ "member_uuid_1": "DOWN" }, { "member_uuid_2": "DOWN" }, { "member_uuid_3": "DOWN" }, { "member_uuid_4": "DOWN" } ] } }, "status": "OPEN", "stats": { "conns": 0, "rx": 0, "tx": 0 } }, "listener_uuid_3": { "pools": { "pool_uuid_2": { "members": [{ "member_uuid_5": "DOWN" }, { "member_uuid_6": "DOWN" }, { "member_uuid_7": "DOWN" }, { "member_uuid_8": "DOWN" } ] } }, "status": "OPEN", "stats": { "conns": 0, "rx": 0, "tx": 0 } }, "listener_uuid_4": { "pools": { "pool_uuid_3": { "members": [] } }, "status": "OPEN", "stats": { "conns": 0, "rx": 0, "tx": 0 } } }, "id": "amphora_uuid", "seq": 1033 } """ (data, srcaddr) = self.sock.recvfrom(UDP_MAX_SIZE) LOG.debug('Received packet from %s', srcaddr) try: obj = status_message.unwrap_envelope(data, self.key) except Exception as e: LOG.warning('Health Manager experienced an exception processing a ' 'heartbeat message from %s. Ignoring this packet. ' 'Exception: %s', srcaddr, str(e)) raise exceptions.InvalidHMACException() obj['recv_time'] = time.time() return obj, srcaddr[0] def check(self): try: obj, srcaddr = self.dorecv() except socket.timeout: # Pass here as this is an expected cycling of the listen socket pass except exceptions.InvalidHMACException: # Pass here as the packet was dropped and logged already pass except Exception as e: LOG.warning('Health Manager experienced an exception processing a ' 'heartbeat packet. Ignoring this packet. ' 'Exception: %s', str(e)) else: self.health_executor.submit(update_health, obj, srcaddr) self.stats_executor.submit(update_stats, obj, srcaddr) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/drivers/keepalived/0000775000175000017500000000000000000000000022203 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/keepalived/__init__.py0000664000175000017500000000107400000000000024316 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/drivers/keepalived/jinja/0000775000175000017500000000000000000000000023276 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/keepalived/jinja/__init__.py0000664000175000017500000000107400000000000025411 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py0000664000175000017500000001130100000000000025566 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ipaddress import os import jinja2 from oslo_config import cfg from octavia.amphorae.backends.agent.api_server import util from octavia.common import constants KEEPALIVED_TEMPLATE = os.path.abspath( os.path.join(os.path.dirname(__file__), 'templates/keepalived_base.template')) CONF = cfg.CONF class KeepalivedJinjaTemplater(object): def __init__(self, keepalived_template=None): """Keepalived configuration generation :param keepalived_template: Absolute path to keepalived Jinja template """ super(KeepalivedJinjaTemplater, self).__init__() self.keepalived_template = (keepalived_template if keepalived_template else KEEPALIVED_TEMPLATE) self._jinja_env = None def get_template(self, template_file): """Returns the specified Jinja configuration template.""" if not self._jinja_env: template_loader = jinja2.FileSystemLoader( searchpath=os.path.dirname(template_file)) self._jinja_env = jinja2.Environment( autoescape=True, loader=template_loader, trim_blocks=True, lstrip_blocks=True) return self._jinja_env.get_template(os.path.basename(template_file)) def build_keepalived_config(self, loadbalancer, amphora, vip_cidr): """Renders the loadbalancer keepalived configuration for Active/Standby :param loadbalancer: A loadbalancer object :param amphora: An amphora object :param vip_cidr: The VIP subnet cidr """ # Note on keepalived configuration: The current base configuration # enforces master election whenever a high priority VRRP instance # starts advertising its presence. Accordingly, the fallback behavior # described in the blueprint is the default behavior. # Although this is a stable behavior, it can be undesirable for # several backend services. To disable the fallback behavior, we need # to add the "nopreempt" flag in the backup instance section.
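# (The keepalived_base.template bundled alongside this module does set
# "nopreempt" in its vrrp_instance block for exactly this reason.)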
peers_ips = [] # Validate the VIP address and see if it is IPv6 vip = loadbalancer.vip.ip_address vip_addr = ipaddress.ip_address(vip) vip_ipv6 = vip_addr.version == 6 # Normalize and validate the VIP subnet CIDR vip_network_cidr = None if vip_ipv6: vip_network_cidr = ipaddress.IPv6Network(vip_cidr).with_prefixlen else: vip_network_cidr = ipaddress.IPv4Network(vip_cidr).with_prefixlen for amp in filter( lambda amp: amp.status == constants.AMPHORA_ALLOCATED, loadbalancer.amphorae): if amp.vrrp_ip != amphora.vrrp_ip: peers_ips.append(amp.vrrp_ip) return self.get_template(self.keepalived_template).render( {'vrrp_group_name': loadbalancer.vrrp_group.vrrp_group_name, 'amp_role': amphora.role, 'amp_intf': amphora.vrrp_interface, 'amp_vrrp_id': amphora.vrrp_id, 'amp_priority': amphora.vrrp_priority, 'vrrp_garp_refresh': CONF.keepalived_vrrp.vrrp_garp_refresh_interval, 'vrrp_garp_refresh_repeat': CONF.keepalived_vrrp.vrrp_garp_refresh_count, 'vrrp_auth_type': loadbalancer.vrrp_group.vrrp_auth_type, 'vrrp_auth_pass': loadbalancer.vrrp_group.vrrp_auth_pass, 'amp_vrrp_ip': amphora.vrrp_ip, 'peers_vrrp_ips': peers_ips, 'vip_ip_address': vip, 'advert_int': loadbalancer.vrrp_group.advert_int, 'check_script_path': util.keepalived_check_script_path(), 'vrrp_check_interval': CONF.keepalived_vrrp.vrrp_check_interval, 'vrrp_fail_count': CONF.keepalived_vrrp.vrrp_fail_count, 'vrrp_success_count': CONF.keepalived_vrrp.vrrp_success_count, 'vip_network_cidr': vip_network_cidr, 'vip_ipv6': vip_ipv6}, constants=constants) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/drivers/keepalived/jinja/templates/0000775000175000017500000000000000000000000025274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template0000664000175000017500000000313500000000000032136 0ustar00zuulzuul00000000000000{# # Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
#} vrrp_script check_script { script {{ check_script_path }} interval {{ vrrp_check_interval }} fall {{ vrrp_fail_count }} rise {{ vrrp_success_count }} } vrrp_instance {{ vrrp_group_name }} { state {{ amp_role }} interface {{ amp_intf }} virtual_router_id {{ amp_vrrp_id }} priority {{ amp_priority }} nopreempt accept garp_master_refresh {{ vrrp_garp_refresh }} garp_master_refresh_repeat {{ vrrp_garp_refresh_repeat }} advert_int {{ advert_int }} authentication { auth_type {{ vrrp_auth_type }} auth_pass {{ vrrp_auth_pass }} } unicast_src_ip {{ amp_vrrp_ip }} unicast_peer { {% for amp_vrrp_ip in peers_vrrp_ips %} {{ amp_vrrp_ip }} {% endfor %} } virtual_ipaddress { {{ vip_ip_address }} } virtual_routes { {{ vip_network_cidr }} dev {{ amp_intf }} src {{ vip_ip_address }} scope link table 1 } virtual_rules { from {{ vip_ip_address }}/{{ '128' if vip_ipv6 else '32' }} table 1 priority 100 } track_script { check_script } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py0000664000175000017500000001112100000000000026152 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from octavia.amphorae.drivers import driver_base from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg from octavia.common import constants LOG = logging.getLogger(__name__) API_VERSION = constants.API_VERSION class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin): def __init__(self): super(KeepalivedAmphoraDriverMixin, self).__init__() # The class using this mixin must define a self.clients mapping of # AmphoraAPIClient objects, keyed by amphora API version def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora, timeout_dict=None): """Update amphora of the loadbalancer with a new VRRP configuration :param loadbalancer: loadbalancer object :param amphorae_network_config: amphorae network configurations :param amphora: The amphora object to update. :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval """ if amphora.status != constants.AMPHORA_ALLOCATED: LOG.debug('update_vrrp_conf called for un-allocated amphora %s.
' 'Ignoring.', amphora.id) return templater = jinja_cfg.KeepalivedJinjaTemplater() LOG.debug("Update amphora %s VRRP configuration.", amphora.id) self._populate_amphora_api_version(amphora) # Get the VIP subnet prefix for the amphora # For amphorav2, amphorae_network_config will be a dict of dicts # keyed by amphora id try: vip_cidr = amphorae_network_config[amphora.id].vip_subnet.cidr except AttributeError: vip_cidr = amphorae_network_config[amphora.id][ constants.VIP_SUBNET][constants.CIDR] # Generate Keepalived configuration from loadbalancer object config = templater.build_keepalived_config( loadbalancer, amphora, vip_cidr) self.clients[amphora.api_version].upload_vrrp_config(amphora, config) def stop_vrrp_service(self, loadbalancer): """Stop the VRRP services running on the loadbalancer's amphorae :param loadbalancer: loadbalancer object """ LOG.info("Stop loadbalancer %s amphora VRRP Service.", loadbalancer.id) for amp in filter( lambda amp: amp.status == constants.AMPHORA_ALLOCATED, loadbalancer.amphorae): self._populate_amphora_api_version(amp) self.clients[amp.api_version].stop_vrrp(amp) def start_vrrp_service(self, amphora, timeout_dict=None): """Start the VRRP services on an amphora. :param amphora: amphora object :param timeout_dict: Dictionary of timeout values for calls to the amphora. May contain: req_conn_timeout, req_read_timeout, conn_max_retries, conn_retry_interval """ if amphora.status != constants.AMPHORA_ALLOCATED: LOG.debug('start_vrrp_service called for un-allocated amphora %s. ' 'Ignoring.', amphora.id) return LOG.info("Start amphora %s VRRP Service.", amphora.id) self._populate_amphora_api_version(amphora) self.clients[amphora.api_version].start_vrrp(amphora, timeout_dict=timeout_dict) def reload_vrrp_service(self, loadbalancer): """Reload the VRRP services of all amphorae of the loadbalancer :param loadbalancer: loadbalancer object """ LOG.info("Reload loadbalancer %s amphora VRRP Service.", loadbalancer.id) for amp in filter( lambda amp: amp.status == constants.AMPHORA_ALLOCATED, loadbalancer.amphorae): self._populate_amphora_api_version(amp) self.clients[amp.api_version].reload_vrrp(amp) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/amphorae/drivers/noop_driver/0000775000175000017500000000000000000000000022420 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/noop_driver/__init__.py0000664000175000017500000000107400000000000024533 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/amphorae/drivers/noop_driver/driver.py0000664000175000017500000002036600000000000024274 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from octavia.amphorae.drivers import driver_base LOG = logging.getLogger(__name__) class LoggingUpdate(object): def update_stats(self, stats): LOG.debug("Amphora %s no-op, update stats %s", self.__class__.__name__, stats) self.stats = stats def update_health(self, health): LOG.debug("Amphora %s no-op, update health %s", self.__class__.__name__, health) self.health = health class NoopManager(object): def __init__(self): super(NoopManager, self).__init__() self.amphoraconfig = {} def update_amphora_listeners(self, loadbalancer, amphora, timeout_dict): amphora_id = amphora.id for listener in loadbalancer.listeners: LOG.debug("Amphora noop driver update_amphora_listeners, " "listener %s, amphora %s, timeouts %s", listener.id, amphora_id, timeout_dict) self.amphoraconfig[(listener.id, amphora_id)] = ( listener, amphora_id, timeout_dict, "update_amp") def update(self, loadbalancer): LOG.debug("Amphora %s no-op, update listener %s, vip %s", self.__class__.__name__, tuple(li.protocol_port for li in loadbalancer.listeners), loadbalancer.vip.ip_address) self.amphoraconfig[ (tuple(li.protocol_port for li in loadbalancer.listeners), loadbalancer.vip.ip_address)] = (loadbalancer.listeners, loadbalancer.vip, 'active') def start(self, loadbalancer, amphora=None, timeout_dict=None): LOG.debug("Amphora %s no-op, start listeners, lb %s, amp %s, " "timeouts %s", self.__class__.__name__, loadbalancer.id, amphora, timeout_dict) self.amphoraconfig[ (loadbalancer.id, amphora.id)] = (loadbalancer, amphora, 'start') def reload(self, loadbalancer, amphora=None, timeout_dict=None): LOG.debug("Amphora %s no-op, reload listeners, lb %s, amp %s, " "timeouts %s", self.__class__.__name__, loadbalancer.id, amphora, timeout_dict) self.amphoraconfig[ (loadbalancer.id, amphora.id)] = (loadbalancer, amphora, 'reload') def delete(self, listener): LOG.debug("Amphora %s no-op, delete listener %s, vip %s", self.__class__.__name__, listener.protocol_port, listener.load_balancer.vip.ip_address) self.amphoraconfig[(listener.protocol_port, listener.load_balancer.vip.ip_address)] = ( listener, listener.load_balancer.vip, 'delete') def get_info(self, amphora, raise_retry_exception=False): LOG.debug("Amphora %s no-op, info amphora %s", self.__class__.__name__, amphora.id) self.amphoraconfig[amphora.id] = (amphora.id, 'get_info') def get_diagnostics(self, amphora): LOG.debug("Amphora %s no-op, get diagnostics amphora %s", self.__class__.__name__, amphora.id) self.amphoraconfig[amphora.id] = (amphora.id, 'get_diagnostics') def finalize_amphora(self, amphora): LOG.debug("Amphora %s no-op, finalize amphora %s", self.__class__.__name__, amphora.id) self.amphoraconfig[amphora.id] = (amphora.id, 'finalize amphora') def post_network_plug(self, amphora, port): LOG.debug("Amphora %s no-op, post network plug amphora %s, port %s", self.__class__.__name__, amphora.id, port.id) self.amphoraconfig[amphora.id, port.id] = (amphora.id, port.id, 'post_network_plug') def post_vip_plug(self,
amphora, load_balancer, amphorae_network_config, vrrp_port=None, vip_subnet=None): LOG.debug("Amphora %s no-op, post vip plug load balancer %s", self.__class__.__name__, load_balancer.id) self.amphoraconfig[(load_balancer.id, id(amphorae_network_config))] = ( load_balancer.id, amphorae_network_config, 'post_vip_plug') def upload_cert_amp(self, amphora, pem_file): LOG.debug("Amphora %s no-op, upload cert amphora %s, with pem file %s", self.__class__.__name__, amphora.id, pem_file) self.amphoraconfig[amphora.id, pem_file] = (amphora.id, pem_file, 'update_amp_cert_file') def update_amphora_agent_config(self, amphora, agent_config): LOG.debug("Amphora %s no-op, update agent config amphora " "%s, with agent config %s", self.__class__.__name__, amphora.id, agent_config) self.amphoraconfig[amphora.id, agent_config] = ( amphora.id, agent_config, 'update_amphora_agent_config') def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): LOG.debug("Amphora %s no-op, get interface from amphora %s for IP %s", self.__class__.__name__, amphora.id, ip_address) if ip_address == '198.51.100.99': return "noop0" return None class NoopAmphoraLoadBalancerDriver( driver_base.AmphoraLoadBalancerDriver, driver_base.VRRPDriverMixin): def __init__(self): super(NoopAmphoraLoadBalancerDriver, self).__init__() self.driver = NoopManager() def update_amphora_listeners(self, loadbalancer, amphora, timeout_dict): self.driver.update_amphora_listeners(loadbalancer, amphora, timeout_dict) def update(self, loadbalancer): self.driver.update(loadbalancer) def start(self, loadbalancer, amphora=None, timeout_dict=None): self.driver.start(loadbalancer, amphora, timeout_dict) def reload(self, loadbalancer, amphora=None, timeout_dict=None): self.driver.reload(loadbalancer, amphora, timeout_dict) def delete(self, listener): self.driver.delete(listener) def get_info(self, amphora, raise_retry_exception=False): self.driver.get_info(amphora, raise_retry_exception=raise_retry_exception) def get_diagnostics(self, amphora): self.driver.get_diagnostics(amphora) def finalize_amphora(self, amphora): self.driver.finalize_amphora(amphora) def post_network_plug(self, amphora, port): self.driver.post_network_plug(amphora, port) def post_vip_plug(self, amphora, load_balancer, amphorae_network_config, vrrp_port=None, vip_subnet=None): self.driver.post_vip_plug(amphora, load_balancer, amphorae_network_config, vrrp_port=vrrp_port, vip_subnet=vip_subnet) def upload_cert_amp(self, amphora, pem_file): self.driver.upload_cert_amp(amphora, pem_file) def update_amphora_agent_config(self, amphora, agent_config): self.driver.update_amphora_agent_config(amphora, agent_config) def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): return self.driver.get_interface_from_ip(amphora, ip_address, timeout_dict) def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora, timeout_dict=None): pass def stop_vrrp_service(self, loadbalancer): pass def start_vrrp_service(self, amphora, timeout_dict=None): pass def reload_vrrp_service(self, loadbalancer): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3742166 octavia-6.2.2/octavia/api/0000775000175000017500000000000000000000000015371 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/__init__.py0000664000175000017500000000107400000000000017504 0ustar00zuulzuul00000000000000# Licensed under the Apache
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/app.py0000664000175000017500000000672000000000000016530 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import keystonemiddleware.audit as audit_middleware from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import cors from oslo_middleware import http_proxy_to_wsgi from oslo_middleware import request_id from pecan import configuration as pecan_configuration from pecan import make_app as pecan_make_app from octavia.api import config as app_config from octavia.api.drivers import driver_factory from octavia.common import constants from octavia.common import exceptions from octavia.common import keystone from octavia.common import service as octavia_service LOG = logging.getLogger(__name__) CONF = cfg.CONF def get_pecan_config(): """Returns the pecan config.""" filename = app_config.__file__.replace('.pyc', '.py') return pecan_configuration.conf_from_file(filename) def _init_drivers(): """Initialize provider drivers.""" for provider in CONF.api_settings.enabled_provider_drivers: driver_factory.get_driver(provider) def setup_app(pecan_config=None, debug=False, argv=None): """Creates and returns a pecan wsgi app.""" if argv is None: argv = sys.argv octavia_service.prepare_service(argv) cfg.CONF.log_opt_values(LOG, logging.DEBUG) _init_drivers() if not pecan_config: pecan_config = get_pecan_config() pecan_configuration.set_config(dict(pecan_config), overwrite=True) return pecan_make_app( pecan_config.app.root, wrap_app=_wrap_app, debug=debug, hooks=pecan_config.app.hooks, wsme=pecan_config.wsme ) def _wrap_app(app): """Wraps wsgi app with additional middlewares.""" app = request_id.RequestId(app) if CONF.audit.enabled: try: app = audit_middleware.AuditMiddleware( app, audit_map_file=CONF.audit.audit_map_file, ignore_req_list=CONF.audit.ignore_req_list ) except (EnvironmentError, OSError, audit_middleware.PycadfAuditApiConfigError) as e: raise exceptions.InputFileError( file_name=CONF.audit.audit_map_file, reason=e ) if cfg.CONF.api_settings.auth_strategy == constants.KEYSTONE: app = keystone.SkippingAuthProtocol(app, {}) app = http_proxy_to_wsgi.HTTPProxyToWSGI(app) # This should be the last middleware in the list (which results in # it being the first in the middleware chain). 
This is to ensure # that any errors thrown by other middleware, such as an auth # middleware, are annotated with CORS headers, and thus accessible # by the browser. app = cors.CORS(app, cfg.CONF) cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id'], allow_methods=['GET', 'PUT', 'POST', 'DELETE'], expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id'] ) return app ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3782167 octavia-6.2.2/octavia/api/common/0000775000175000017500000000000000000000000016661 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/common/__init__.py0000664000175000017500000000107400000000000020774 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/common/hooks.py0000664000175000017500000000243100000000000020356 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pecan import hooks from octavia.api.common import pagination from octavia.common import constants from octavia.common import context class ContextHook(hooks.PecanHook): """Configures a request context and attaches it to the request.""" def on_route(self, state): context_obj = context.Context.from_environ(state.request.environ) state.request.context['octavia_context'] = context_obj class QueryParametersHook(hooks.PecanHook): def before(self, state): if state.request.method != 'GET': return state.request.context[ constants.PAGINATION_HELPER] = pagination.PaginationHelper( state.request.params.mixed()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/common/pagination.py0000664000175000017500000004114700000000000021373 0ustar00zuulzuul00000000000000# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import copy import itertools from oslo_log import log as logging from pecan import request import sqlalchemy from sqlalchemy.orm import aliased import sqlalchemy.sql as sa_sql from octavia.api.common import types from octavia.common.config import cfg from octavia.common import constants from octavia.common import exceptions from octavia.db import base_models from octavia.db import models CONF = cfg.CONF LOG = logging.getLogger(__name__) class PaginationHelper(object): """Helper class for interacting with the pagination functionality. Pass this class to `db.repositories` to apply it to a query. """ _auxiliary_arguments = ('limit', 'marker', 'sort', 'sort_key', 'sort_dir', 'fields', 'page_reverse', ) def __init__(self, params, sort_dir=constants.DEFAULT_SORT_DIR): """Pagination Helper takes params and a default sort direction :param params: Contains the following: limit: maximum number of items to return marker: the last item of the previous page; we return the next results after this value. sort: array of attr by which results should be sorted :param sort_dir: default direction to sort (asc, desc) """ self.marker = params.get('marker') self.sort_dir = self._validate_sort_dir(sort_dir) self.limit = self._parse_limit(params) self.sort_keys = self._parse_sort_keys(params) self.params = params self.filters = None self.page_reverse = params.get('page_reverse', 'False') @staticmethod def _parse_limit(params): if CONF.api_settings.pagination_max_limit == 'infinite': page_max_limit = None else: page_max_limit = int(CONF.api_settings.pagination_max_limit) limit = params.get('limit', page_max_limit) try: # Deal with limit being a string or int meaning 'Unlimited' if limit == 'infinite' or int(limit) < 1: limit = None # If we don't have a max, just use whatever limit is specified elif page_max_limit is None: limit = int(limit) # Otherwise, we need to compare against the max else: limit = min(int(limit), page_max_limit) except ValueError: raise exceptions.InvalidLimit(key=limit) return limit def _parse_sort_keys(self, params): sort_keys_dirs = [] sort = params.get('sort') sort_keys = params.get('sort_key') if sort: for sort_dir_key in sort.split(","): comps = sort_dir_key.split(":") if len(comps) == 1: # Use default sort order sort_keys_dirs.append((comps[0], self.sort_dir)) elif len(comps) == 2: sort_keys_dirs.append( (comps[0], self._validate_sort_dir(comps[1]))) else: raise exceptions.InvalidSortKey(key=comps) elif sort_keys: sort_keys = sort_keys.split(',') sort_dirs = params.get('sort_dir') if not sort_dirs: sort_dirs = [self.sort_dir] * len(sort_keys) else: sort_dirs = sort_dirs.split(',') if len(sort_dirs) < len(sort_keys): sort_dirs += [self.sort_dir] * (len(sort_keys) - len(sort_dirs)) for sk, sd in zip(sort_keys, sort_dirs): sort_keys_dirs.append((sk, self._validate_sort_dir(sd))) return sort_keys_dirs def _parse_marker(self, session, model): return session.query(model).filter_by(id=self.marker).one_or_none() @staticmethod def _get_default_column_value(column_type): """Return the default value of the columns from DB table In the PostgreSQL case, if no proper default value is set, a psycopg2.DataError will be thrown.
""" type_schema = { 'datetime': None, 'big_integer': 0, 'integer': 0, 'string': '' } if isinstance(column_type, sa_sql.type_api.Variant): return PaginationHelper._get_default_column_value(column_type.impl) return type_schema[column_type.__visit_name__] @staticmethod def _validate_sort_dir(sort_dir): sort_dir = sort_dir.lower() if sort_dir not in constants.ALLOWED_SORT_DIR: raise exceptions.InvalidSortDirection(key=sort_dir) return sort_dir def _make_links(self, model_list): if CONF.api_settings.api_base_uri: path_url = "{api_base_url}{path}".format( api_base_url=CONF.api_settings.api_base_uri.rstrip('/'), path=request.path) else: path_url = request.path_url links = [] if model_list: prev_attr = ["limit={}".format(self.limit)] if self.params.get('sort'): prev_attr.append("sort={}".format(self.params.get('sort'))) if self.params.get('sort_key'): prev_attr.append("sort_key={}".format( self.params.get('sort_key'))) next_attr = copy.copy(prev_attr) if self.marker: prev_attr.append("marker={}".format(model_list[0].get('id'))) prev_attr.append("page_reverse=True") prev_link = { "rel": "previous", "href": "{url}?{params}".format( url=path_url, params="&".join(prev_attr)) } links.append(prev_link) # TODO(rm_work) Do we need to know when there are more vs exact? # We safely know if we have a full page, but it might include the # last element or it might not, it is unclear if len(model_list) >= self.limit: next_attr.append("marker={}".format(model_list[-1].get('id'))) next_link = { "rel": "next", "href": "{url}?{params}".format( url=path_url, params="&".join(next_attr)) } links.append(next_link) links = [types.PageType(**link) for link in links] return links def _apply_tags_filtering(self, params, model, query): if not getattr(model, "_tags", None): return query if 'tags' in params: tags = params.pop('tags') for tag in tags: # This requires a multi-join to the tags table, # so me must use aliases for each one. tag_alias = aliased(base_models.Tags) query = query.join(tag_alias, model._tags) query = query.filter(tag_alias.tag == tag) if 'tags-any' in params: tags = params.pop('tags-any') tag_alias = aliased(base_models.Tags) query = query.join(tag_alias, model._tags) query = query.filter(tag_alias.tag.in_(tags)) if 'not-tags' in params: tags = params.pop('not-tags') subq = query.session.query(model.id) for tag in tags: tag_alias = aliased(base_models.Tags) subq = subq.join(tag_alias, model._tags) subq = subq.filter(tag_alias.tag == tag) query = query.filter(~model.id.in_(subq)) if 'not-tags-any' in params: tags = params.pop('not-tags-any') query = query.filter( ~model._tags.any(base_models.Tags.tag.in_(tags))) return query @staticmethod def _prepare_tags_list(param): """Split comma seperated tags and return a flat list of tags.""" if not isinstance(param, list): param = [param] return list(itertools.chain.from_iterable( tag.split(',') for tag in param)) def apply(self, query, model, enforce_valid_params=True): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key specified by sort_keys. (If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the pagination 'marker'. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. 
With a compound-valued sort_key, (k1, k2, k3) we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions. Typically, the id of the last row is used as the client-facing pagination marker, then the actual marker object must be fetched from the db and passed in to us as marker. :param query: the query object to which we should add paging/sorting/filtering :param model: the ORM model class :param enforce_valid_params: check for invalid entries in self.params :rtype: sqlalchemy.orm.query.Query :returns: The query with sorting/pagination/filtering added. """ # Add filtering if CONF.api_settings.allow_filtering: # Exclude (valid) arguments that are not used for data filtering filter_params = {k: v for k, v in self.params.items() if k not in self._auxiliary_arguments} secondary_query_filter = filter_params.pop( "project_id", None) if (model == models.Amphora) else None # Translate arguments from API standard to data model's field name filter_params = ( model.__v2_wsme__.translate_dict_keys_to_data_model( filter_params) ) if 'loadbalancer_id' in filter_params: filter_params['load_balancer_id'] = filter_params.pop( 'loadbalancer_id') # Pop the 'tags' related parameters off before handling the # other filters. Then apply the 'tags' filters after the # other filters have been applied. tag_params = {} if 'tags' in filter_params: tag_params['tags'] = self._prepare_tags_list( filter_params.pop('tags')) if 'tags-any' in filter_params: tag_params['tags-any'] = self._prepare_tags_list( filter_params.pop('tags-any')) if 'not-tags' in filter_params: tag_params['not-tags'] = self._prepare_tags_list( filter_params.pop('not-tags')) if 'not-tags-any' in filter_params: tag_params['not-tags-any'] = self._prepare_tags_list( filter_params.pop('not-tags-any')) # Drop invalid arguments self.filters = {k: v for (k, v) in filter_params.items() if k in vars(model.__data_model__())} if enforce_valid_params and ( len(self.filters) < len(filter_params) ): raise exceptions.InvalidFilterArgument() query = model.apply_filter(query, model, self.filters) if secondary_query_filter is not None: query = query.filter(model.load_balancer.has( project_id=secondary_query_filter)) # Apply tags filtering for the models which support tags. query = self._apply_tags_filtering(tag_params, model, query) # Add sorting if CONF.api_settings.allow_sorting: # Add default sort keys (if they are OK for the model) keys_only = [k[0] for k in self.sort_keys] for key in constants.DEFAULT_SORT_KEYS: if key not in keys_only and hasattr(model, key): self.sort_keys.append((key, self.sort_dir)) for current_sort_key, current_sort_dir in self.sort_keys: # Translate sort_key from API standard to data model's name current_sort_key = ( model.__v2_wsme__.translate_key_to_data_model( current_sort_key)) sort_dir_func = { constants.ASC: sqlalchemy.asc, constants.DESC: sqlalchemy.desc, }[current_sort_dir] try: # The translated object may be a nested parameter # such as vip.ip_address, so handle that case by # joining with the nested table. if '.'
in current_sort_key: parent, child = current_sort_key.split('.') parent_obj = getattr(model, parent) query = query.join(parent_obj) sort_key_attr = child else: sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise exceptions.InvalidSortKey(key=current_sort_key) query = query.order_by(sort_dir_func(sort_key_attr)) # Add pagination if CONF.api_settings.allow_pagination: default = '' # Default to an empty string if NULL if self.marker is not None: marker_object = self._parse_marker(query.session, model) if not marker_object: raise exceptions.InvalidMarker(key=self.marker) marker_values = [] for sort_key, _ in self.sort_keys: v = getattr(marker_object, sort_key) if v is None: v = default marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(len(self.sort_keys)): crit_attrs = [] for j in range(i): model_attr = getattr(model, self.sort_keys[j][0]) default = PaginationHelper._get_default_column_value( model_attr.property.columns[0].type) attr = sa_sql.expression.case( [(model_attr.isnot(None), model_attr), ], else_=default) crit_attrs.append((attr == marker_values[j])) model_attr = getattr(model, self.sort_keys[i][0]) default = PaginationHelper._get_default_column_value( model_attr.property.columns[0].type) attr = sa_sql.expression.case( [(model_attr.isnot(None), model_attr), ], else_=default) this_sort_dir = self.sort_keys[i][1] if this_sort_dir == constants.DESC: if self.page_reverse == "True": crit_attrs.append((attr > marker_values[i])) else: crit_attrs.append((attr < marker_values[i])) elif this_sort_dir == constants.ASC: if self.page_reverse == "True": crit_attrs.append((attr < marker_values[i])) else: crit_attrs.append((attr > marker_values[i])) else: raise exceptions.InvalidSortDirection( key=this_sort_dir) criteria = sa_sql.and_(*crit_attrs) criteria_list.append(criteria) f = sa_sql.or_(*criteria_list) query = query.filter(f) if self.limit is not None: query = query.limit(self.limit) model_list = query.all() links = None if CONF.api_settings.allow_pagination: links = self._make_links(model_list) return model_list, links ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/common/types.py0000664000175000017500000002070200000000000020400 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
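# ---------------------------------------------------------------------------
# Illustrative sketch of the marker predicate that pagination.apply() above
# builds in SQL (a plain-Python analogue, assuming all-ascending sort keys;
# the function and variable names are made up for this example):

def follows_marker(row, marker):
    # row and marker are equal-length tuples ordered by the sort keys;
    # the first differing key decides, which reproduces the lexicographical
    # ordering (k1 > X1) or (k1 == X1 and k2 > X2) or ...
    for row_val, marker_val in zip(row, marker):
        if row_val != marker_val:
            return row_val > marker_val
    return False  # a row identical to the marker is excluded

# For example, follows_marker((1, 'b'), (1, 'a')) is True, while
# follows_marker((0, 'z'), (1, 'a')) is False.
# ---------------------------------------------------------------------------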
import copy from dateutil import parser import netaddr from wsme import types as wtypes from octavia.common import exceptions from octavia.common import validate class IPAddressType(wtypes.UserType): basetype = str name = 'ipaddress' @staticmethod def validate(value): """Validates whether value is an IPv4 or IPv6 address.""" try: wtypes.IPv4AddressType.validate(value) return value except ValueError: try: wtypes.IPv6AddressType.validate(value) return value except ValueError: error = 'Value should be IPv4 or IPv6 format' raise ValueError(error) class CidrType(wtypes.UserType): basetype = str name = 'cidr' @staticmethod def validate(value): """Validates whether value is an IPv4 or IPv6 CIDR.""" try: return str(netaddr.IPNetwork(value).cidr) except (ValueError, netaddr.core.AddrFormatError): error = 'Value should be IPv4 or IPv6 CIDR format' raise ValueError(error) class URLType(wtypes.UserType): basetype = str name = 'url' def __init__(self, require_scheme=True): super(URLType, self).__init__() self.require_scheme = require_scheme def validate(self, value): try: validate.url(value, require_scheme=self.require_scheme) except exceptions.InvalidURL: error = 'Value must be a valid URL string' raise ValueError(error) return value class URLPathType(wtypes.UserType): basetype = str name = 'url_path' @staticmethod def validate(value): try: validate.url_path(value) except exceptions.InvalidURLPath: error = 'Value must be a valid URL Path string' raise ValueError(error) return value class BaseMeta(wtypes.BaseMeta): def __new__(cls, name, bases, dct): def get_tenant_id(self): tenant_id = getattr(self, '_tenant_id', wtypes.Unset) # If tenant_id was explicitly set to Unset, return that if tenant_id is wtypes.Unset and self._unset_tenant: return tenant_id # Otherwise, assume we can return project_id return self.project_id def set_tenant_id(self, tenant_id): self._tenant_id = tenant_id if tenant_id is wtypes.Unset: # Record that tenant_id was explicitly Unset self._unset_tenant = True else: # Reset 'unset' state, and update project_id as well self._unset_tenant = False self.project_id = tenant_id if 'project_id' in dct and 'tenant_id' not in dct: dct['tenant_id'] = wtypes.wsproperty( wtypes.StringType(max_length=36), get_tenant_id, set_tenant_id) # This will let us know if tenant_id was explicitly set to Unset dct['_unset_tenant'] = False return super(BaseMeta, cls).__new__(cls, name, bases, dct) class BaseType(wtypes.Base, metaclass=BaseMeta): @classmethod def _full_response(cls): return False @classmethod def from_data_model(cls, data_model, children=False): """Converts data_model to Octavia WSME type. :param data_model: data model to convert from :param children: convert child data models """ type_dict = data_model.to_dict() # We need to have json convertible data for storing it in persistence # jobboard backend. 
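# ('_at'/'expiration' fields arrive as timestamp strings when the dict has
# been round-tripped through the JSON jobboard; dateutil parses them back
# into datetime objects below.)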
for k, v in type_dict.items(): if ('_at' in k or 'expiration' in k) and v is not None: type_dict[k] = parser.parse(v) if not hasattr(cls, '_type_to_model_map'): return cls(**type_dict) dm_to_type_map = {value: key for key, value in cls._type_to_model_map.items()} new_dict = copy.deepcopy(type_dict) for key, value in type_dict.items(): if isinstance(value, dict): for child_key, child_value in value.items(): if '.'.join([key, child_key]) in dm_to_type_map: new_dict['_'.join([key, child_key])] = child_value elif key in ['name', 'description'] and value is None: new_dict[key] = '' else: if key in dm_to_type_map: new_dict[dm_to_type_map[key]] = value del new_dict[key] return cls(**new_dict) @classmethod def translate_dict_keys_to_data_model(cls, wsme_dict): """Translate the keys from wsme class type, to data_model.""" if not hasattr(cls, '_type_to_model_map'): return wsme_dict res = {} for (k, v) in wsme_dict.items(): if k in cls._type_to_model_map: k = cls._type_to_model_map[k] if '.' in k: parent, child = k.split('.') if parent not in res: res[parent] = {} res[parent][child] = v continue res[k] = v return res @classmethod def translate_key_to_data_model(cls, key): """Translate the keys from wsme class type, to data_model.""" if not hasattr(cls, '_type_to_model_map') or ( key not in cls._type_to_model_map): return key return cls._type_to_model_map[key] def to_dict(self, render_unsets=False): """Converts Octavia WSME type to dictionary. :param render_unsets: If True, will convert items that are WSME Unset types to None. If False, does not add the item """ # Set project_id equal to tenant_id if project_id is unset and # tenant_id is set if hasattr(self, 'project_id') and hasattr(self, 'tenant_id'): # pylint: disable=access-member-before-definition if (isinstance(self.project_id, wtypes.UnsetType) and not isinstance(self.tenant_id, wtypes.UnsetType)): self.project_id = self.tenant_id if hasattr(self, 'admin_state_up') and getattr( self, 'admin_state_up') is None: # This situation will be hit during a request where # admin_state_up is null. If users set this field to null, # we treat it as False self.admin_state_up = bool(self.admin_state_up) wsme_dict = {} for attr in dir(self): if attr.startswith('_'): continue value = getattr(self, attr, None) # TODO(blogan): Investigate wsme types handling the duality of # tenant_id and project_id in a clean way. One way could be # wsme.rest.json.fromjson and using the @fromjson.when_object # decorator.
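# tenant_id mirrors project_id through the BaseMeta wsproperty above, so
# it is skipped here and only project_id ends up in the resulting dict.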
if attr == 'tenant_id': continue if value and callable(value): continue if value and isinstance(value, BaseType): value = value.to_dict(render_unsets=render_unsets) if value and isinstance(value, list): value = [val.to_dict(render_unsets=render_unsets) if isinstance(val, BaseType) else val for val in value] if isinstance(value, wtypes.UnsetType): if render_unsets: value = None else: continue wsme_dict[attr] = value return self.translate_dict_keys_to_data_model(wsme_dict) class IdOnlyType(BaseType): id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) class NameOnlyType(BaseType): name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) class PageType(BaseType): href = wtypes.StringType() rel = wtypes.StringType() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/config.py0000664000175000017500000000226700000000000017217 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia.api.common import hooks # Pecan Application Configurations # See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa app = { 'root': 'octavia.api.root_controller.RootController', 'modules': ['octavia.api'], 'hooks': [ hooks.ContextHook(), hooks.QueryParametersHook()], 'debug': False } # WSME Configurations # See https://wsme.readthedocs.org/en/latest/integrate.html#configuration wsme = { # Provider driver uses 501 if the driver is not installed. # Don't dump a stack trace for 501s 'debug': False } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3782167 octavia-6.2.2/octavia/api/drivers/0000775000175000017500000000000000000000000017047 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/__init__.py0000664000175000017500000000107400000000000021162 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
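# ---------------------------------------------------------------------------
# Illustrative sketch of the _type_to_model_map translation implemented in
# octavia/api/common/types.py above (FakeLBType and its map are hypothetical,
# made up purely for this example): dotted targets are split into nested
# dicts by translate_dict_keys_to_data_model().

from octavia.api.common import types as api_types

class FakeLBType(api_types.BaseType):
    _type_to_model_map = {'vip_address': 'vip.ip_address'}

assert FakeLBType.translate_dict_keys_to_data_model(
    {'vip_address': '10.0.0.1', 'name': 'lb1'}
) == {'vip': {'ip_address': '10.0.0.1'}, 'name': 'lb1'}
# ---------------------------------------------------------------------------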
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3782167 octavia-6.2.2/octavia/api/drivers/amphora_driver/0000775000175000017500000000000000000000000022051 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/amphora_driver/__init__.py0000664000175000017500000000107400000000000024164 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/amphora_driver/availability_zone_schema.py0000664000175000017500000000435300000000000027455 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace US Inc. All rights reserved. # Copyright 2019 Verizon Media # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia.common import constants as consts # This is a JSON schema validation dictionary # https://json-schema.org/latest/json-schema-validation.html # # Note: This is used to generate the amphora driver "supported availability # zone metadata" dictionary. Each property should include a description # for the user to understand what this availability zone setting does. # # Where possible, the property name should match the configuration file name # for the setting. The configuration file setting is the default when a # setting is not defined in an availability zone profile. SUPPORTED_AVAILABILITY_ZONE_SCHEMA = { "$schema": "http://json-schema.org/draft-07/schema#", "title": "Octavia Amphora Driver Availability Zone Metadata Schema", "description": "This schema is used to validate new availability zone " "profiles submitted for use in an amphora driver " "availability zone.", "type": "object", "additionalProperties": False, "properties": { consts.COMPUTE_ZONE: { "type": "string", "description": "The compute availability zone." }, consts.MANAGEMENT_NETWORK: { "type": "string", "description": "The management network ID for the amphora." }, consts.VALID_VIP_NETWORKS: { "type": "array", "description": "List of network IDs that are allowed for VIP use. " "This overrides/replaces the list of allowed " "networks configured in `octavia.conf`." 
} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/amphora_driver/flavor_schema.py0000664000175000017500000000402000000000000025230 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace US Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia.common import constants as consts # This is a JSON schema validation dictionary # https://json-schema.org/latest/json-schema-validation.html # # Note: This is used to generate the amphora driver "supported flavor # metadata" dictionary. Each property should include a description # for the user to understand what this flavor setting does. # # Where possible, the property name should match the configuration file name # for the setting. The configuration file setting is the default when a # setting is not defined in a flavor profile. SUPPORTED_FLAVOR_SCHEMA = { "$schema": "http://json-schema.org/draft-07/schema#", "title": "Octavia Amphora Driver Flavor Metadata Schema", "description": "This schema is used to validate new flavor profiles " "submitted for use in an amphora driver flavor profile.", "type": "object", "additionalProperties": False, "properties": { consts.LOADBALANCER_TOPOLOGY: { "type": "string", "description": "The load balancer topology. One of: " "SINGLE - One amphora per load balancer. " "ACTIVE_STANDBY - Two amphora per load balancer.", "enum": list(consts.SUPPORTED_LB_TOPOLOGIES) }, consts.COMPUTE_FLAVOR: { "type": "string", "description": "The compute driver flavor ID." } } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3782167 octavia-6.2.2/octavia/api/drivers/amphora_driver/v1/0000775000175000017500000000000000000000000022377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/amphora_driver/v1/__init__.py0000664000175000017500000000107400000000000024512 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/amphora_driver/v1/driver.py0000664000175000017500000005030600000000000024250 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from jsonschema import exceptions as js_exceptions from jsonschema import validate from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from stevedore import driver as stevedore_driver from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions from octavia_lib.api.drivers import provider_base as driver_base from octavia_lib.common import constants as lib_consts from octavia.api.drivers.amphora_driver import availability_zone_schema from octavia.api.drivers.amphora_driver import flavor_schema from octavia.api.drivers import utils as driver_utils from octavia.common import constants as consts from octavia.common import data_models from octavia.common import rpc from octavia.common import utils from octavia.db import api as db_apis from octavia.db import repositories from octavia.network import base as network_base CONF = cfg.CONF CONF.import_group('oslo_messaging', 'octavia.common.config') LOG = logging.getLogger(__name__) AMPHORA_SUPPORTED_LB_ALGORITHMS = [ consts.LB_ALGORITHM_ROUND_ROBIN, consts.LB_ALGORITHM_SOURCE_IP, consts.LB_ALGORITHM_LEAST_CONNECTIONS] class AmphoraProviderDriver(driver_base.ProviderDriver): def __init__(self): super(AmphoraProviderDriver, self).__init__() topic = cfg.CONF.oslo_messaging.topic self.target = messaging.Target( namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT, topic=topic, version="1.0", fanout=False) self.client = rpc.get_client(self.target) self.repositories = repositories.Repositories() def _validate_pool_algorithm(self, pool): if pool.lb_algorithm not in AMPHORA_SUPPORTED_LB_ALGORITHMS: msg = ('Amphora provider does not support %s algorithm.' % pool.lb_algorithm) raise exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) # Load Balancer def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary): vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary) lb_obj = data_models.LoadBalancer(id=loadbalancer_id, project_id=project_id, vip=vip_obj) network_driver = utils.get_network_driver() vip_network = network_driver.get_network( vip_dictionary[lib_consts.VIP_NETWORK_ID]) if not vip_network.port_security_enabled: message = "Port security must be enabled on the VIP network." 
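# Security group rules cannot be applied to a port on a network with
# port security disabled, so reject the VIP create up front.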
raise exceptions.DriverError(user_fault_string=message, operator_fault_string=message) try: vip = network_driver.allocate_vip(lb_obj) except network_base.AllocateVIPException as e: message = str(e) if getattr(e, 'orig_msg', None) is not None: message = e.orig_msg raise exceptions.DriverError(user_fault_string=message, operator_fault_string=message) LOG.info('Amphora provider created VIP port %s for load balancer %s.', vip.port_id, loadbalancer_id) return driver_utils.vip_dict_to_provider_dict(vip.to_dict()) # TODO(johnsom) convert this to octavia_lib constant flavor # once octavia is transitioned to use octavia_lib def loadbalancer_create(self, loadbalancer): if loadbalancer.flavor == driver_dm.Unset: loadbalancer.flavor = None if loadbalancer.availability_zone == driver_dm.Unset: loadbalancer.availability_zone = None payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id, consts.FLAVOR: loadbalancer.flavor, consts.AVAILABILITY_ZONE: loadbalancer.availability_zone} self.client.cast({}, 'create_load_balancer', **payload) def loadbalancer_delete(self, loadbalancer, cascade=False): loadbalancer_id = loadbalancer.loadbalancer_id payload = {consts.LOAD_BALANCER_ID: loadbalancer_id, 'cascade': cascade} self.client.cast({}, 'delete_load_balancer', **payload) def loadbalancer_failover(self, loadbalancer_id): payload = {consts.LOAD_BALANCER_ID: loadbalancer_id} self.client.cast({}, 'failover_load_balancer', **payload) def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): # Adapt the provider data model to the queue schema lb_dict = new_loadbalancer.to_dict() if 'admin_state_up' in lb_dict: lb_dict['enabled'] = lb_dict.pop('admin_state_up') lb_id = lb_dict.pop('loadbalancer_id') # Put the qos_policy_id back under the vip element the controller # expects vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None) if vip_qos_policy_id: vip_dict = {"qos_policy_id": vip_qos_policy_id} lb_dict["vip"] = vip_dict payload = {consts.LOAD_BALANCER_ID: lb_id, consts.LOAD_BALANCER_UPDATES: lb_dict} self.client.cast({}, 'update_load_balancer', **payload) # Listener def listener_create(self, listener): payload = {consts.LISTENER_ID: listener.listener_id} self.client.cast({}, 'create_listener', **payload) def listener_delete(self, listener): listener_id = listener.listener_id payload = {consts.LISTENER_ID: listener_id} self.client.cast({}, 'delete_listener', **payload) def listener_update(self, old_listener, new_listener): listener_dict = new_listener.to_dict() if 'admin_state_up' in listener_dict: listener_dict['enabled'] = listener_dict.pop('admin_state_up') listener_id = listener_dict.pop('listener_id') if 'client_ca_tls_container_ref' in listener_dict: listener_dict['client_ca_tls_container_id'] = listener_dict.pop( 'client_ca_tls_container_ref') listener_dict.pop('client_ca_tls_container_data', None) if 'client_crl_container_ref' in listener_dict: listener_dict['client_crl_container_id'] = listener_dict.pop( 'client_crl_container_ref') listener_dict.pop('client_crl_container_data', None) payload = {consts.LISTENER_ID: listener_id, consts.LISTENER_UPDATES: listener_dict} self.client.cast({}, 'update_listener', **payload) # Pool def pool_create(self, pool): self._validate_pool_algorithm(pool) payload = {consts.POOL_ID: pool.pool_id} self.client.cast({}, 'create_pool', **payload) def pool_delete(self, pool): pool_id = pool.pool_id payload = {consts.POOL_ID: pool_id} self.client.cast({}, 'delete_pool', **payload) def pool_update(self, old_pool, new_pool): if new_pool.lb_algorithm: 
self._validate_pool_algorithm(new_pool) pool_dict = new_pool.to_dict() if 'admin_state_up' in pool_dict: pool_dict['enabled'] = pool_dict.pop('admin_state_up') pool_id = pool_dict.pop('pool_id') if 'tls_container_ref' in pool_dict: pool_dict['tls_certificate_id'] = pool_dict.pop( 'tls_container_ref') pool_dict.pop('tls_container_data', None) if 'ca_tls_container_ref' in pool_dict: pool_dict['ca_tls_certificate_id'] = pool_dict.pop( 'ca_tls_container_ref') pool_dict.pop('ca_tls_container_data', None) if 'crl_container_ref' in pool_dict: pool_dict['crl_container_id'] = pool_dict.pop('crl_container_ref') pool_dict.pop('crl_container_data', None) payload = {consts.POOL_ID: pool_id, consts.POOL_UPDATES: pool_dict} self.client.cast({}, 'update_pool', **payload) # Member def member_create(self, member): pool_id = member.pool_id db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id) self._validate_members(db_pool, [member]) payload = {consts.MEMBER_ID: member.member_id} self.client.cast({}, 'create_member', **payload) def member_delete(self, member): member_id = member.member_id payload = {consts.MEMBER_ID: member_id} self.client.cast({}, 'delete_member', **payload) def member_update(self, old_member, new_member): member_dict = new_member.to_dict() if 'admin_state_up' in member_dict: member_dict['enabled'] = member_dict.pop('admin_state_up') member_id = member_dict.pop('member_id') payload = {consts.MEMBER_ID: member_id, consts.MEMBER_UPDATES: member_dict} self.client.cast({}, 'update_member', **payload) def member_batch_update(self, pool_id, members): # The DB should not have updated yet, so we can still use the pool db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id) self._validate_members(db_pool, members) old_members = db_pool.members old_member_ids = [m.id for m in old_members] # The driver will always pass objects with IDs. 
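# For example, existing member IDs {A, B} with incoming member IDs
# {B, C} classify below as: new {C}, updated {B}, deleted {A}.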
new_member_ids = [m.member_id for m in members] # Find members that are brand new or updated new_members = [] updated_members = [] for m in members: if m.member_id not in old_member_ids: new_members.append(m) else: member_dict = m.to_dict(render_unsets=False) member_dict['id'] = member_dict.pop('member_id') if 'address' in member_dict: member_dict['ip_address'] = member_dict.pop('address') if 'admin_state_up' in member_dict: member_dict['enabled'] = member_dict.pop('admin_state_up') updated_members.append(member_dict) # Find members that are deleted deleted_members = [] for m in old_members: if m.id not in new_member_ids: deleted_members.append(m) payload = {'old_member_ids': [m.id for m in deleted_members], 'new_member_ids': [m.member_id for m in new_members], 'updated_members': updated_members} self.client.cast({}, 'batch_update_members', **payload) def _validate_members(self, db_pool, members): if db_pool.protocol == consts.PROTOCOL_UDP: # For UDP LBs, check that we are not mixing IPv4 and IPv6 for member in members: member_is_ipv6 = utils.is_ipv6(member.address) for listener in db_pool.listeners: lb = listener.load_balancer vip_is_ipv6 = utils.is_ipv6(lb.vip.ip_address) if member_is_ipv6 != vip_is_ipv6: msg = ("This provider doesn't support mixing IPv4 and " "IPv6 addresses for its VIP and members in UDP " "load balancers.") raise exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) # Health Monitor def health_monitor_create(self, healthmonitor): payload = {consts.HEALTH_MONITOR_ID: healthmonitor.healthmonitor_id} self.client.cast({}, 'create_health_monitor', **payload) def health_monitor_delete(self, healthmonitor): healthmonitor_id = healthmonitor.healthmonitor_id payload = {consts.HEALTH_MONITOR_ID: healthmonitor_id} self.client.cast({}, 'delete_health_monitor', **payload) def health_monitor_update(self, old_healthmonitor, new_healthmonitor): healthmon_dict = new_healthmonitor.to_dict() if 'admin_state_up' in healthmon_dict: healthmon_dict['enabled'] = healthmon_dict.pop('admin_state_up') if 'max_retries_down' in healthmon_dict: healthmon_dict['fall_threshold'] = healthmon_dict.pop( 'max_retries_down') if 'max_retries' in healthmon_dict: healthmon_dict['rise_threshold'] = healthmon_dict.pop( 'max_retries') healthmon_id = healthmon_dict.pop('healthmonitor_id') payload = {consts.HEALTH_MONITOR_ID: healthmon_id, consts.HEALTH_MONITOR_UPDATES: healthmon_dict} self.client.cast({}, 'update_health_monitor', **payload) # L7 Policy def l7policy_create(self, l7policy): payload = {consts.L7POLICY_ID: l7policy.l7policy_id} self.client.cast({}, 'create_l7policy', **payload) def l7policy_delete(self, l7policy): l7policy_id = l7policy.l7policy_id payload = {consts.L7POLICY_ID: l7policy_id} self.client.cast({}, 'delete_l7policy', **payload) def l7policy_update(self, old_l7policy, new_l7policy): l7policy_dict = new_l7policy.to_dict() if 'admin_state_up' in l7policy_dict: l7policy_dict['enabled'] = l7policy_dict.pop('admin_state_up') l7policy_id = l7policy_dict.pop('l7policy_id') payload = {consts.L7POLICY_ID: l7policy_id, consts.L7POLICY_UPDATES: l7policy_dict} self.client.cast({}, 'update_l7policy', **payload) # L7 Rule def l7rule_create(self, l7rule): payload = {consts.L7RULE_ID: l7rule.l7rule_id} self.client.cast({}, 'create_l7rule', **payload) def l7rule_delete(self, l7rule): l7rule_id = l7rule.l7rule_id payload = {consts.L7RULE_ID: l7rule_id} self.client.cast({}, 'delete_l7rule', **payload) def l7rule_update(self, old_l7rule, new_l7rule): l7rule_dict = 
new_l7rule.to_dict() if 'admin_state_up' in l7rule_dict: l7rule_dict['enabled'] = l7rule_dict.pop('admin_state_up') l7rule_id = l7rule_dict.pop('l7rule_id') payload = {consts.L7RULE_ID: l7rule_id, consts.L7RULE_UPDATES: l7rule_dict} self.client.cast({}, 'update_l7rule', **payload) # Flavor def get_supported_flavor_metadata(self): """Returns the valid flavor metadata keys and descriptions. This extracts the valid flavor metadata keys and descriptions from the JSON validation schema and returns it as a dictionary. :return: Dictionary of flavor metadata keys and descriptions. :raises DriverError: An unexpected error occurred. """ try: props = flavor_schema.SUPPORTED_FLAVOR_SCHEMA['properties'] return {k: v.get('description', '') for k, v in props.items()} except Exception as e: raise exceptions.DriverError( user_fault_string='Failed to get the supported flavor ' 'metadata due to: {}'.format(str(e)), operator_fault_string='Failed to get the supported flavor ' 'metadata due to: {}'.format(str(e))) def validate_flavor(self, flavor_dict): """Validates flavor profile data. This will validate a flavor profile dataset against the flavor settings the amphora driver supports. :param flavor_dict: The flavor dictionary to validate. :type flavor_dict: dict :return: None :raises DriverError: An unexpected error occurred. :raises UnsupportedOptionError: If the driver does not support one of the flavor settings. """ try: validate(flavor_dict, flavor_schema.SUPPORTED_FLAVOR_SCHEMA) except js_exceptions.ValidationError as e: error_object = '' if e.relative_path: error_object = '{} '.format(e.relative_path[0]) raise exceptions.UnsupportedOptionError( user_fault_string='{0}{1}'.format(error_object, e.message), operator_fault_string=str(e)) except Exception as e: raise exceptions.DriverError( user_fault_string='Failed to validate the flavor metadata ' 'due to: {}'.format(str(e)), operator_fault_string='Failed to validate the flavor metadata ' 'due to: {}'.format(str(e))) compute_flavor = flavor_dict.get(consts.COMPUTE_FLAVOR, None) if compute_flavor: compute_driver = stevedore_driver.DriverManager( namespace='octavia.compute.drivers', name=CONF.controller_worker.compute_driver, invoke_on_load=True ).driver # TODO(johnsom) Fix this to raise a NotFound error # when the octavia-lib supports it. compute_driver.validate_flavor(compute_flavor) # Availability Zone def get_supported_availability_zone_metadata(self): """Returns the valid availability zone metadata keys and descriptions. This extracts the valid availability zone metadata keys and descriptions from the JSON validation schema and returns it as a dictionary. :return: Dictionary of availability zone metadata keys and descriptions :raises DriverError: An unexpected error occurred. """ try: props = ( availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA[ 'properties']) return {k: v.get('description', '') for k, v in props.items()} except Exception as e: raise exceptions.DriverError( user_fault_string='Failed to get the supported availability ' 'zone metadata due to: {}'.format(str(e)), operator_fault_string='Failed to get the supported ' 'availability zone metadata due to: ' '{}'.format(str(e))) def validate_availability_zone(self, availability_zone_dict): """Validates availability zone profile data. This will validate an availability zone profile dataset against the availability zone settings the amphora driver supports. :param availability_zone_dict: The availability zone dict to validate.
:type availability_zone_dict: dict :return: None :raises DriverError: An unexpected error occurred. :raises UnsupportedOptionError: If the driver does not support one of the availability zone settings. """ try: validate( availability_zone_dict, availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA) except js_exceptions.ValidationError as e: error_object = '' if e.relative_path: error_object = '{} '.format(e.relative_path[0]) raise exceptions.UnsupportedOptionError( user_fault_string='{0}{1}'.format(error_object, e.message), operator_fault_string=str(e)) except Exception as e: raise exceptions.DriverError( user_fault_string='Failed to validate the availability zone ' 'metadata due to: {}'.format(str(e)), operator_fault_string='Failed to validate the availability ' 'zone metadata due to: {}'.format(str(e)) ) compute_zone = availability_zone_dict.get(consts.COMPUTE_ZONE, None) if compute_zone: compute_driver = stevedore_driver.DriverManager( namespace='octavia.compute.drivers', name=CONF.controller_worker.compute_driver, invoke_on_load=True ).driver # TODO(johnsom) Fix this to raise a NotFound error # when the octavia-lib supports it. compute_driver.validate_availability_zone(compute_zone) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3782167 octavia-6.2.2/octavia/api/drivers/amphora_driver/v2/0000775000175000017500000000000000000000000022400 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/amphora_driver/v2/__init__.py0000664000175000017500000000107400000000000024513 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/amphora_driver/v2/driver.py0000664000175000017500000005325500000000000024257 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
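# Compared with the v1 driver above, which casts bare object IDs onto the
# queue and relies on the controller re-reading the database, this v2 driver
# serializes full provider objects into each RPC payload (encrypting any TLS
# container data first) and casts them to the v2 controller topic.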
from cryptography import fernet from jsonschema import exceptions as js_exceptions from jsonschema import validate from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from stevedore import driver as stevedore_driver from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions from octavia_lib.api.drivers import provider_base as driver_base from octavia_lib.common import constants as lib_consts from octavia.api.drivers.amphora_driver import availability_zone_schema from octavia.api.drivers.amphora_driver import flavor_schema from octavia.api.drivers import utils as driver_utils from octavia.common import constants as consts from octavia.common import data_models from octavia.common import rpc from octavia.common import utils from octavia.db import api as db_apis from octavia.db import repositories from octavia.network import base as network_base CONF = cfg.CONF CONF.import_group('oslo_messaging', 'octavia.common.config') LOG = logging.getLogger(__name__) AMPHORA_SUPPORTED_LB_ALGORITHMS = [ consts.LB_ALGORITHM_ROUND_ROBIN, consts.LB_ALGORITHM_SOURCE_IP, consts.LB_ALGORITHM_LEAST_CONNECTIONS] class AmphoraProviderDriver(driver_base.ProviderDriver): def __init__(self): super(AmphoraProviderDriver, self).__init__() self.target = messaging.Target( namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT, topic=consts.TOPIC_AMPHORA_V2, version="2.0", fanout=False) self.client = rpc.get_client(self.target) self.repositories = repositories.Repositories() key = utils.get_compatible_server_certs_key_passphrase() self.fernet = fernet.Fernet(key) def _validate_pool_algorithm(self, pool): if pool.lb_algorithm not in AMPHORA_SUPPORTED_LB_ALGORITHMS: msg = ('Amphora provider does not support %s algorithm.' % pool.lb_algorithm) raise exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) # Load Balancer def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary): vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary) lb_obj = data_models.LoadBalancer(id=loadbalancer_id, project_id=project_id, vip=vip_obj) network_driver = utils.get_network_driver() vip_network = network_driver.get_network( vip_dictionary[lib_consts.VIP_NETWORK_ID]) if not vip_network.port_security_enabled: message = "Port security must be enabled on the VIP network." 
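# Same precondition as the v1 driver: security group rules cannot be
# applied to the VIP port when port security is disabled.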
raise exceptions.DriverError(user_fault_string=message, operator_fault_string=message) try: vip = network_driver.allocate_vip(lb_obj) except network_base.AllocateVIPException as e: message = str(e) if getattr(e, 'orig_msg', None) is not None: message = e.orig_msg raise exceptions.DriverError(user_fault_string=message, operator_fault_string=message) LOG.info('Amphora provider created VIP port %s for load balancer %s.', vip.port_id, loadbalancer_id) return driver_utils.vip_dict_to_provider_dict(vip.to_dict()) # TODO(johnsom) convert this to octavia_lib constant flavor # once octavia is transitioned to use octavia_lib def loadbalancer_create(self, loadbalancer): if loadbalancer.flavor == driver_dm.Unset: loadbalancer.flavor = None if loadbalancer.availability_zone == driver_dm.Unset: loadbalancer.availability_zone = None payload = {consts.LOADBALANCER: loadbalancer.to_dict(), consts.FLAVOR: loadbalancer.flavor, consts.AVAILABILITY_ZONE: loadbalancer.availability_zone} self.client.cast({}, 'create_load_balancer', **payload) def loadbalancer_delete(self, loadbalancer, cascade=False): payload = {consts.LOADBALANCER: loadbalancer.to_dict(), 'cascade': cascade} self.client.cast({}, 'delete_load_balancer', **payload) def loadbalancer_failover(self, loadbalancer_id): payload = {consts.LOAD_BALANCER_ID: loadbalancer_id} self.client.cast({}, 'failover_load_balancer', **payload) def loadbalancer_update(self, original_load_balancer, new_loadbalancer): # Adapt the provider data model to the queue schema lb_dict = new_loadbalancer.to_dict() if 'admin_state_up' in lb_dict: lb_dict['enabled'] = lb_dict.pop('admin_state_up') # Put the qos_policy_id back under the vip element the controller # expects vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None) lb_dict.pop(consts.LOADBALANCER_ID) if vip_qos_policy_id: vip_dict = {"qos_policy_id": vip_qos_policy_id} lb_dict["vip"] = vip_dict payload = {consts.ORIGINAL_LOADBALANCER: original_load_balancer.to_dict(), consts.LOAD_BALANCER_UPDATES: lb_dict} self.client.cast({}, 'update_load_balancer', **payload) def _encrypt_tls_container_data(self, tls_container_data): for key, val in tls_container_data.items(): if isinstance(val, bytes): tls_container_data[key] = self.fernet.encrypt(val) elif isinstance(val, list): encrypt_vals = [] for i in val: if isinstance(i, bytes): encrypt_vals.append(self.fernet.encrypt(i)) else: encrypt_vals.append(i) tls_container_data[key] = encrypt_vals def _encrypt_listener_dict(self, listener_dict): # We need to encrypt the user cert/key data for sending it # over messaging. 
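# The Fernet key used below is the one derived in __init__ via
# utils.get_compatible_server_certs_key_passphrase(), so the controller
# side can decrypt the payload with the same shared symmetric key.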
if listener_dict.get(consts.DEFAULT_TLS_CONTAINER_DATA, False): container_data = listener_dict[consts.DEFAULT_TLS_CONTAINER_DATA] self._encrypt_tls_container_data(container_data) if listener_dict.get(consts.SNI_CONTAINER_DATA, False): sni_list = [] for sni_data in listener_dict[consts.SNI_CONTAINER_DATA]: self._encrypt_tls_container_data(sni_data) sni_list.append(sni_data) if sni_list: listener_dict[consts.SNI_CONTAINER_DATA] = sni_list # Listener def listener_create(self, listener): payload = {consts.LISTENER: listener.to_dict()} self._encrypt_listener_dict(payload[consts.LISTENER]) self.client.cast({}, 'create_listener', **payload) def listener_delete(self, listener): payload = {consts.LISTENER: listener.to_dict()} self.client.cast({}, 'delete_listener', **payload) def listener_update(self, old_listener, new_listener): original_listener = old_listener.to_dict() listener_updates = new_listener.to_dict() self._encrypt_listener_dict(original_listener) self._encrypt_listener_dict(listener_updates) payload = {consts.ORIGINAL_LISTENER: original_listener, consts.LISTENER_UPDATES: listener_updates} self.client.cast({}, 'update_listener', **payload) # Pool def _pool_convert_to_dict(self, pool): pool_dict = pool.to_dict(recurse=True) if 'admin_state_up' in pool_dict: pool_dict['enabled'] = pool_dict.pop('admin_state_up') if 'tls_container_ref' in pool_dict: pool_dict['tls_certificate_id'] = pool_dict.pop( 'tls_container_ref') pool_dict.pop('tls_container_data', None) if 'ca_tls_container_ref' in pool_dict: pool_dict['ca_tls_certificate_id'] = pool_dict.pop( 'ca_tls_container_ref') pool_dict.pop('ca_tls_container_data', None) if 'crl_container_ref' in pool_dict: pool_dict['crl_container_id'] = pool_dict.pop('crl_container_ref') pool_dict.pop('crl_container_data', None) return pool_dict def pool_create(self, pool): self._validate_pool_algorithm(pool) payload = {consts.POOL: self._pool_convert_to_dict(pool)} self.client.cast({}, 'create_pool', **payload) def pool_delete(self, pool): payload = {consts.POOL: pool.to_dict(recurse=True)} self.client.cast({}, 'delete_pool', **payload) def pool_update(self, old_pool, new_pool): if new_pool.lb_algorithm: self._validate_pool_algorithm(new_pool) pool_dict = self._pool_convert_to_dict(new_pool) pool_dict.pop('pool_id') payload = {consts.ORIGINAL_POOL: old_pool.to_dict(), consts.POOL_UPDATES: pool_dict} self.client.cast({}, 'update_pool', **payload) # Member def member_create(self, member): pool_id = member.pool_id db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id) self._validate_members(db_pool, [member]) payload = {consts.MEMBER: member.to_dict()} self.client.cast({}, 'create_member', **payload) def member_delete(self, member): payload = {consts.MEMBER: member.to_dict()} self.client.cast({}, 'delete_member', **payload) def member_update(self, old_member, new_member): original_member = old_member.to_dict() member_updates = new_member.to_dict() if 'admin_state_up' in member_updates: member_updates['enabled'] = member_updates.pop('admin_state_up') member_updates.pop(consts.MEMBER_ID) payload = {consts.ORIGINAL_MEMBER: original_member, consts.MEMBER_UPDATES: member_updates} self.client.cast({}, 'update_member', **payload) def member_batch_update(self, pool_id, members): # The DB should not have updated yet, so we can still use the pool db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id) self._validate_members(db_pool, members) old_members = db_pool.members old_member_ids = [m.id for m in old_members] # The driver will 
always pass objects with IDs. new_member_ids = [m.member_id for m in members] # Find members that are brand new or updated new_members = [] updated_members = [] for m in members: if m.member_id not in old_member_ids: new_members.append(m) else: member_dict = m.to_dict(render_unsets=False) member_dict['id'] = member_dict.pop('member_id') if 'address' in member_dict: member_dict['ip_address'] = member_dict.pop('address') if 'admin_state_up' in member_dict: member_dict['enabled'] = member_dict.pop('admin_state_up') updated_members.append(member_dict) # Find members that are deleted deleted_members = [] for m in old_members: if m.id not in new_member_ids: deleted_members.append(m) payload = {'old_members': [m.to_dict() for m in deleted_members], 'new_members': [m.to_dict() for m in new_members], 'updated_members': updated_members} self.client.cast({}, 'batch_update_members', **payload) def _validate_members(self, db_pool, members): if db_pool.protocol == consts.PROTOCOL_UDP: # For UDP LBs, check that we are not mixing IPv4 and IPv6 for member in members: member_is_ipv6 = utils.is_ipv6(member.address) for listener in db_pool.listeners: lb = listener.load_balancer vip_is_ipv6 = utils.is_ipv6(lb.vip.ip_address) if member_is_ipv6 != vip_is_ipv6: msg = ("This provider doesn't support mixing IPv4 and " "IPv6 addresses for its VIP and members in UDP " "load balancers.") raise exceptions.UnsupportedOptionError( user_fault_string=msg, operator_fault_string=msg) # Health Monitor def health_monitor_create(self, healthmonitor): payload = {consts.HEALTH_MONITOR: healthmonitor.to_dict()} self.client.cast({}, 'create_health_monitor', **payload) def health_monitor_delete(self, healthmonitor): payload = {consts.HEALTH_MONITOR: healthmonitor.to_dict()} self.client.cast({}, 'delete_health_monitor', **payload) def health_monitor_update(self, old_healthmonitor, new_healthmonitor): healthmon_dict = new_healthmonitor.to_dict() if 'admin_state_up' in healthmon_dict: healthmon_dict['enabled'] = healthmon_dict.pop('admin_state_up') if 'max_retries_down' in healthmon_dict: healthmon_dict['fall_threshold'] = healthmon_dict.pop( 'max_retries_down') if 'max_retries' in healthmon_dict: healthmon_dict['rise_threshold'] = healthmon_dict.pop( 'max_retries') healthmon_dict.pop('healthmonitor_id') payload = {consts.ORIGINAL_HEALTH_MONITOR: old_healthmonitor.to_dict(), consts.HEALTH_MONITOR_UPDATES: healthmon_dict} self.client.cast({}, 'update_health_monitor', **payload) # L7 Policy def l7policy_create(self, l7policy): payload = {consts.L7POLICY: l7policy.to_dict()} self.client.cast({}, 'create_l7policy', **payload) def l7policy_delete(self, l7policy): payload = {consts.L7POLICY: l7policy.to_dict()} self.client.cast({}, 'delete_l7policy', **payload) def l7policy_update(self, old_l7policy, new_l7policy): l7policy_dict = new_l7policy.to_dict() if 'admin_state_up' in l7policy_dict: l7policy_dict['enabled'] = l7policy_dict.pop(consts.ADMIN_STATE_UP) l7policy_dict.pop(consts.L7POLICY_ID) payload = {consts.ORIGINAL_L7POLICY: old_l7policy.to_dict(), consts.L7POLICY_UPDATES: l7policy_dict} self.client.cast({}, 'update_l7policy', **payload) # L7 Rule def l7rule_create(self, l7rule): payload = {consts.L7RULE: l7rule.to_dict()} self.client.cast({}, 'create_l7rule', **payload) def l7rule_delete(self, l7rule): payload = {consts.L7RULE: l7rule.to_dict()} self.client.cast({}, 'delete_l7rule', **payload) def l7rule_update(self, old_l7rule, new_l7rule): l7rule_dict = new_l7rule.to_dict() if consts.ADMIN_STATE_UP in l7rule_dict: 
l7rule_dict['enabled'] = l7rule_dict.pop(consts.ADMIN_STATE_UP) l7rule_dict.pop(consts.L7RULE_ID) payload = {consts.ORIGINAL_L7RULE: old_l7rule.to_dict(), consts.L7RULE_UPDATES: l7rule_dict} self.client.cast({}, 'update_l7rule', **payload) # Flavor def get_supported_flavor_metadata(self): """Returns the valid flavor metadata keys and descriptions. This extracts the valid flavor metadata keys and descriptions from the JSON validation schema and returns it as a dictionary. :return: Dictionary of flavor metadata keys and descriptions. :raises DriverError: An unexpected error occurred. """ try: props = flavor_schema.SUPPORTED_FLAVOR_SCHEMA['properties'] return {k: v.get('description', '') for k, v in props.items()} except Exception as e: raise exceptions.DriverError( user_fault_string='Failed to get the supported flavor ' 'metadata due to: {}'.format(str(e)), operator_fault_string='Failed to get the supported flavor ' 'metadata due to: {}'.format(str(e))) def validate_flavor(self, flavor_dict): """Validates flavor profile data. This will validate a flavor profile dataset against the flavor settings the amphora driver supports. :param flavor_dict: The flavor dictionary to validate. :type flavor_dict: dict :return: None :raises DriverError: An unexpected error occurred. :raises UnsupportedOptionError: If the driver does not support one of the flavor settings. """ try: validate(flavor_dict, flavor_schema.SUPPORTED_FLAVOR_SCHEMA) except js_exceptions.ValidationError as e: error_object = '' if e.relative_path: error_object = '{} '.format(e.relative_path[0]) raise exceptions.UnsupportedOptionError( user_fault_string='{0}{1}'.format(error_object, e.message), operator_fault_string=str(e)) except Exception as e: raise exceptions.DriverError( user_fault_string='Failed to validate the flavor metadata ' 'due to: {}'.format(str(e)), operator_fault_string='Failed to validate the flavor metadata ' 'due to: {}'.format(str(e))) compute_flavor = flavor_dict.get(consts.COMPUTE_FLAVOR, None) if compute_flavor: compute_driver = stevedore_driver.DriverManager( namespace='octavia.compute.drivers', name=CONF.controller_worker.compute_driver, invoke_on_load=True ).driver # TODO(johnsom) Fix this to raise a NotFound error # when the octavia-lib supports it. compute_driver.validate_flavor(compute_flavor) # Availability Zone def get_supported_availability_zone_metadata(self): """Returns the valid availability zone metadata keys and descriptions. This extracts the valid availability zone metadata keys and descriptions from the JSON validation schema and returns it as a dictionary. :return: Dictionary of availability zone metadata keys and descriptions :raises DriverError: An unexpected error occurred. """ try: props = ( availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA[ 'properties']) return {k: v.get('description', '') for k, v in props.items()} except Exception as e: raise exceptions.DriverError( user_fault_string='Failed to get the supported availability ' 'zone metadata due to: {}'.format(str(e)), operator_fault_string='Failed to get the supported ' 'availability zone metadata due to: ' '{}'.format(str(e))) def validate_availability_zone(self, availability_zone_dict): """Validates availability zone profile data. This will validate an availability zone profile dataset against the availability zone settings the amphora driver supports. :param availability_zone_dict: The availability zone dict to validate. :type availability_zone_dict: dict :return: None :raises DriverError: An unexpected error occurred.
:raises UnsupportedOptionError: If the driver does not support one of the availability zone settings. """ try: validate( availability_zone_dict, availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA) except js_exceptions.ValidationError as e: error_object = '' if e.relative_path: error_object = '{} '.format(e.relative_path[0]) raise exceptions.UnsupportedOptionError( user_fault_string='{0}{1}'.format(error_object, e.message), operator_fault_string=str(e)) except Exception as e: raise exceptions.DriverError( user_fault_string='Failed to validate the availability zone ' 'metadata due to: {}'.format(str(e)), operator_fault_string='Failed to validate the availability ' 'zone metadata due to: {}'.format(str(e)) ) compute_zone = availability_zone_dict.get(consts.COMPUTE_ZONE, None) if compute_zone: compute_driver = stevedore_driver.DriverManager( namespace='octavia.compute.drivers', name=CONF.controller_worker.compute_driver, invoke_on_load=True ).driver # TODO(johnsom) Fix this to raise a NotFound error # when the octavia-lib supports it. compute_driver.validate_availability_zone(compute_zone) check_nets = availability_zone_dict.get( consts.VALID_VIP_NETWORKS, []) management_net = availability_zone_dict.get( consts.MANAGEMENT_NETWORK, None) if management_net: check_nets.append(management_net) for check_net in check_nets: network_driver = utils.get_network_driver() # TODO(johnsom) Fix this to raise a NotFound error # when the octavia-lib supports it. network_driver.get_network(check_net) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/data_models.py0000664000175000017500000000462600000000000021705 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace # Copyright (c) 2016 Blue Box, an IBM Company # Copyright 2018 Rackspace, US Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
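# This module is kept only for backward compatibility: each class below is a
# debtcollector alias that emits a DeprecationWarning on use and forwards to
# the octavia_lib implementation. A hedged usage sketch:
#
#     from octavia.api.drivers import data_models   # deprecated import path
#     lb = data_models.LoadBalancer()  # warns, returns the octavia_lib class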
import warnings from debtcollector import moves from octavia_lib.api.drivers import data_models as lib_data_models warnings.simplefilter('default', DeprecationWarning) BaseDataModel = moves.moved_class(lib_data_models.BaseDataModel, 'BaseDataModel', __name__, version='Stein', removal_version='U') UnsetType = moves.moved_class(lib_data_models.UnsetType, 'UnsetType', __name__, version='Stein', removal_version='U') LoadBalancer = moves.moved_class(lib_data_models.LoadBalancer, 'LoadBalancer', __name__, version='Stein', removal_version='U') Listener = moves.moved_class(lib_data_models.Listener, 'Listener', __name__, version='Stein', removal_version='U') Pool = moves.moved_class(lib_data_models.Pool, 'Pool', __name__, version='Stein', removal_version='U') Member = moves.moved_class(lib_data_models.Member, 'Member', __name__, version='Stein', removal_version='U') HealthMonitor = moves.moved_class(lib_data_models.HealthMonitor, 'HealthMonitor', __name__, version='Stein', removal_version='U') L7Policy = moves.moved_class(lib_data_models.L7Policy, 'L7Policy', __name__, version='Stein', removal_version='U') L7Rule = moves.moved_class(lib_data_models.L7Rule, 'L7Rule', __name__, version='Stein', removal_version='U') VIP = moves.moved_class(lib_data_models.VIP, 'VIP', __name__, version='Stein', removal_version='U') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3782167 octavia-6.2.2/octavia/api/drivers/driver_agent/0000775000175000017500000000000000000000000021520 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/driver_agent/__init__.py0000664000175000017500000000107400000000000023633 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/driver_agent/driver_get.py0000664000175000017500000000755700000000000024242 0ustar00zuulzuul00000000000000# Copyright 2019 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
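# Requests arriving on the driver-agent 'get' socket are small dicts naming
# an object type and an ID. A minimal sketch of the shape process_get()
# below expects -- the literal key and value strings are assumptions based
# on the constants modules imported below, and the UUID is made up:
#
#     get_data = {'object': 'loadbalancers',
#                 'id': 'b36b2b08-1ce0-4a65-9a5c-000000000000'}
#     result = process_get(get_data)  # provider dict, or {} if not found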
from octavia_lib.common import constants as lib_consts from octavia.api.drivers import utils as driver_utils from octavia.common import constants from octavia.db import api as db_api from octavia.db import repositories def process_get(get_data): session = db_api.get_session() if get_data[constants.OBJECT] == lib_consts.LOADBALANCERS: lb_repo = repositories.LoadBalancerRepository() db_lb = lb_repo.get(session, id=get_data[lib_consts.ID], show_deleted=False) if db_lb: provider_lb = ( driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb)) return provider_lb.to_dict(recurse=True, render_unsets=True) elif get_data[constants.OBJECT] == lib_consts.LISTENERS: listener_repo = repositories.ListenerRepository() db_listener = listener_repo.get( session, id=get_data[lib_consts.ID], show_deleted=False) if db_listener: provider_listener = ( driver_utils.db_listener_to_provider_listener(db_listener)) return provider_listener.to_dict(recurse=True, render_unsets=True) elif get_data[constants.OBJECT] == lib_consts.POOLS: pool_repo = repositories.PoolRepository() db_pool = pool_repo.get(session, id=get_data[lib_consts.ID], show_deleted=False) if db_pool: provider_pool = ( driver_utils.db_pool_to_provider_pool(db_pool)) return provider_pool.to_dict(recurse=True, render_unsets=True) elif get_data[constants.OBJECT] == lib_consts.MEMBERS: member_repo = repositories.MemberRepository() db_member = member_repo.get(session, id=get_data[lib_consts.ID], show_deleted=False) if db_member: provider_member = ( driver_utils.db_member_to_provider_member(db_member)) return provider_member.to_dict(recurse=True, render_unsets=True) elif get_data[constants.OBJECT] == lib_consts.HEALTHMONITORS: hm_repo = repositories.HealthMonitorRepository() db_hm = hm_repo.get(session, id=get_data[lib_consts.ID], show_deleted=False) if db_hm: provider_hm = ( driver_utils.db_HM_to_provider_HM(db_hm)) return provider_hm.to_dict(recurse=True, render_unsets=True) elif get_data[constants.OBJECT] == lib_consts.L7POLICIES: l7policy_repo = repositories.L7PolicyRepository() db_l7policy = l7policy_repo.get(session, id=get_data[lib_consts.ID], show_deleted=False) if db_l7policy: provider_l7policy = ( driver_utils.db_l7policy_to_provider_l7policy(db_l7policy)) return provider_l7policy.to_dict(recurse=True, render_unsets=True) elif get_data[constants.OBJECT] == lib_consts.L7RULES: l7rule_repo = repositories.L7RuleRepository() db_l7rule = l7rule_repo.get(session, id=get_data[lib_consts.ID], show_deleted=False) if db_l7rule: provider_l7rule = ( driver_utils.db_l7rule_to_provider_l7rule(db_l7rule)) return provider_l7rule.to_dict(recurse=True, render_unsets=True) return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/driver_agent/driver_listener.py0000664000175000017500000001262200000000000025275 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # Copyright 2019 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
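# A minimal client-side sketch (not part of Octavia) of the wire protocol the
# handlers below implement: an ASCII byte count terminated by '\n', followed
# by that many bytes of JSON; the response is framed the same way. The helper
# name is hypothetical.
def _example_send_request(socket_path, payload):
    # socket_path would be one of the CONF.driver_agent.*_socket_path values.
    import socket
    from oslo_serialization import jsonutils
    data = jsonutils.dump_as_bytes(payload)
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
        sock.connect(socket_path)
        sock.sendall('{}\n'.format(len(data)).encode('utf-8') + data)
        # Read the length prefix, then the JSON body, mirroring _recv().
        size_str = b''
        char = sock.recv(1)
        while char != b'\n':
            size_str += char
            char = sock.recv(1)
        body = b''
        while len(body) < int(size_str):
            body += sock.recv(int(size_str) - len(body))
        return jsonutils.loads(body)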
import errno import os import socketserver import threading from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from octavia.api.drivers.driver_agent import driver_get from octavia.api.drivers.driver_agent import driver_updater CONF = cfg.CONF LOG = logging.getLogger(__name__) def _recv(recv_socket): size_str = b'' char = recv_socket.recv(1) while char != b'\n': size_str += char char = recv_socket.recv(1) payload_size = int(size_str) mv_buffer = memoryview(bytearray(payload_size)) next_offset = 0 while payload_size - next_offset > 0: recv_size = recv_socket.recv_into(mv_buffer[next_offset:], payload_size - next_offset) next_offset += recv_size return jsonutils.loads(mv_buffer.tobytes()) class StatusRequestHandler(socketserver.BaseRequestHandler): def handle(self): # Get the update data status = _recv(self.request) # Process the update updater = driver_updater.DriverUpdater() response = updater.update_loadbalancer_status(status) # Send the response json_data = jsonutils.dump_as_bytes(response) len_str = '{}\n'.format(len(json_data)).encode('utf-8') self.request.send(len_str) self.request.sendall(json_data) class StatsRequestHandler(socketserver.BaseRequestHandler): def handle(self): # Get the update data stats = _recv(self.request) # Process the update updater = driver_updater.DriverUpdater() response = updater.update_listener_statistics(stats) # Send the response json_data = jsonutils.dump_as_bytes(response) len_str = '{}\n'.format(len(json_data)).encode('utf-8') self.request.send(len_str) self.request.sendall(json_data) class GetRequestHandler(socketserver.BaseRequestHandler): def handle(self): # Get the data request get_data = _recv(self.request) # Process the get response = driver_get.process_get(get_data) # Send the response json_data = jsonutils.dump_as_bytes(response) len_str = '{}\n'.format(len(json_data)).encode('utf-8') self.request.send(len_str) self.request.sendall(json_data) class ForkingUDSServer(socketserver.ForkingMixIn, socketserver.UnixStreamServer): pass def _cleanup_socket_file(filename): # Remove the socket file if it already exists try: os.remove(filename) except OSError as e: if e.errno != errno.ENOENT: raise def status_listener(exit_event): _cleanup_socket_file(CONF.driver_agent.status_socket_path) server = ForkingUDSServer(CONF.driver_agent.status_socket_path, StatusRequestHandler) server.timeout = CONF.driver_agent.status_request_timeout server.max_children = CONF.driver_agent.status_max_processes while not exit_event.is_set(): server.handle_request() LOG.info('Waiting for driver status listener to shutdown...') # Can't shut ourselves down as we would deadlock, spawn a thread threading.Thread(target=server.shutdown).start() LOG.info('Driver status listener shutdown finished.') server.server_close() _cleanup_socket_file(CONF.driver_agent.status_socket_path) def stats_listener(exit_event): _cleanup_socket_file(CONF.driver_agent.stats_socket_path) server = ForkingUDSServer(CONF.driver_agent.stats_socket_path, StatsRequestHandler) server.timeout = CONF.driver_agent.stats_request_timeout server.max_children = CONF.driver_agent.stats_max_processes while not exit_event.is_set(): server.handle_request() LOG.info('Waiting for driver statistics listener to shutdown...') # Can't shut ourselves down as we would deadlock, spawn a thread threading.Thread(target=server.shutdown).start() LOG.info('Driver statistics listener shutdown finished.') server.server_close() _cleanup_socket_file(CONF.driver_agent.stats_socket_path) def 
get_listener(exit_event): _cleanup_socket_file(CONF.driver_agent.get_socket_path) server = ForkingUDSServer(CONF.driver_agent.get_socket_path, GetRequestHandler) server.timeout = CONF.driver_agent.get_request_timeout server.max_children = CONF.driver_agent.get_max_processes while not exit_event.is_set(): server.handle_request() LOG.info('Waiting for driver get listener to shutdown...') # Can't shut ourselves down as we would deadlock, spawn a thread threading.Thread(target=server.shutdown).start() LOG.info('Driver get listener shutdown finished.') server.server_close() _cleanup_socket_file(CONF.driver_agent.get_socket_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/driver_agent/driver_updater.py0000664000175000017500000002233300000000000025114 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia_lib.api.drivers import exceptions as driver_exceptions from octavia_lib.common import constants as lib_consts from oslo_log import log as logging from oslo_utils import excutils from octavia.common import constants as consts from octavia.common import utils from octavia.db import api as db_apis from octavia.db import repositories as repo LOG = logging.getLogger(__name__) class DriverUpdater(object): def __init__(self, **kwargs): self.repos = repo.Repositories() self.loadbalancer_repo = repo.LoadBalancerRepository() self.listener_repo = repo.ListenerRepository() self.pool_repo = repo.PoolRepository() self.health_mon_repo = repo.HealthMonitorRepository() self.member_repo = repo.MemberRepository() self.l7policy_repo = repo.L7PolicyRepository() self.l7rule_repo = repo.L7RuleRepository() self.listener_stats_repo = repo.ListenerStatisticsRepository() self.db_session = db_apis.get_session() super(DriverUpdater, self).__init__(**kwargs) def _check_for_lb_vip_deallocate(self, repo, lb_id): lb = repo.get(self.db_session, id=lb_id) if lb.vip.octavia_owned: vip = lb.vip # We need a backreference vip.load_balancer = lb # Only lookup the network driver if we have a VIP to deallocate network_driver = utils.get_network_driver() network_driver.deallocate_vip(vip) def _decrement_quota(self, repo, object_name, record_id): lock_session = db_apis.get_session(autocommit=False) db_object = repo.get(lock_session, id=record_id) try: if db_object.provisioning_status == consts.DELETED: LOG.info('%(name)s with ID of %(id)s is already in the ' 'DELETED state. 
Skipping quota update.', {'name': object_name, 'id': record_id}) lock_session.rollback() return self.repos.decrement_quota(lock_session, repo.model_class.__data_model__, db_object.project_id) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to decrement %(name)s quota for ' 'project: %(proj)s the project may have excess ' 'quota in use.', {'proj': db_object.project_id, 'name': object_name}) lock_session.rollback() def _process_status_update(self, repo, object_name, record, delete_record=False): # Zero it out so that if the ID is missing from a record we do not # report the last LB as the failed record in the exception record_id = None try: record_id = record['id'] record_kwargs = {} prov_status = record.get(consts.PROVISIONING_STATUS, None) if prov_status: if prov_status == consts.DELETED: if object_name == consts.LOADBALANCERS: self._check_for_lb_vip_deallocate(repo, record_id) self._decrement_quota(repo, object_name, record_id) if delete_record and object_name != consts.LOADBALANCERS: repo.delete(self.db_session, id=record_id) return record_kwargs[consts.PROVISIONING_STATUS] = prov_status op_status = record.get(consts.OPERATING_STATUS, None) if op_status: record_kwargs[consts.OPERATING_STATUS] = op_status if prov_status or op_status: repo.update(self.db_session, record_id, **record_kwargs) except Exception as e: # We need to raise a failure here to notify the driver it is # sending bad status data. raise driver_exceptions.UpdateStatusError( fault_string=str(e), status_object_id=record_id, status_object=object_name) def update_loadbalancer_status(self, status): """Update load balancer status. :param status: dictionary defining the provisioning status and operating status for load balancer objects, including pools, members, listeners, L7 policies, and L7 rules. id (string): ID for the object. provisioning_status (string): Provisioning status for the object. operating_status (string): Operating status for the object.
:type status: dict :raises: UpdateStatusError :returns: None """ try: members = status.pop(consts.MEMBERS, []) for member in members: self._process_status_update(self.member_repo, consts.MEMBERS, member, delete_record=True) health_mons = status.pop(consts.HEALTHMONITORS, []) for health_mon in health_mons: self._process_status_update( self.health_mon_repo, consts.HEALTHMONITORS, health_mon, delete_record=True) pools = status.pop(consts.POOLS, []) for pool in pools: self._process_status_update(self.pool_repo, consts.POOLS, pool, delete_record=True) l7rules = status.pop(consts.L7RULES, []) for l7rule in l7rules: self._process_status_update(self.l7rule_repo, consts.L7RULES, l7rule, delete_record=True) l7policies = status.pop(consts.L7POLICIES, []) for l7policy in l7policies: self._process_status_update( self.l7policy_repo, consts.L7POLICIES, l7policy, delete_record=True) listeners = status.pop(lib_consts.LISTENERS, []) for listener in listeners: self._process_status_update( self.listener_repo, lib_consts.LISTENERS, listener, delete_record=True) lbs = status.pop(consts.LOADBALANCERS, []) for lb in lbs: self._process_status_update(self.loadbalancer_repo, consts.LOADBALANCERS, lb) except driver_exceptions.UpdateStatusError as e: return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, lib_consts.FAULT_STRING: e.fault_string, lib_consts.STATUS_OBJECT: e.status_object, lib_consts.STATUS_OBJECT_ID: e.status_object_id} except Exception as e: return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, lib_consts.FAULT_STRING: str(e)} return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_OK} def update_listener_statistics(self, statistics): """Update listener statistics. :param statistics: Statistics for listeners: id (string): ID for listener. active_connections (int): Number of currently active connections. bytes_in (int): Total bytes received. bytes_out (int): Total bytes sent. request_errors (int): Total requests not fulfilled. total_connections (int): The total connections handled. :type statistics: dict :raises: UpdateStatisticsError :returns: None """ listener_stats = statistics.get(lib_consts.LISTENERS, []) for stat in listener_stats: try: listener_id = stat.pop('id') except Exception as e: return { lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, lib_consts.FAULT_STRING: str(e), lib_consts.STATS_OBJECT: lib_consts.LISTENERS} # Provider drivers other than the amphora driver do not have # an amphora ID, use the listener ID again here to meet the # constraint requirement. try: self.listener_stats_repo.replace(self.db_session, listener_id, listener_id, **stat) except Exception as e: return { lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, lib_consts.FAULT_STRING: str(e), lib_consts.STATS_OBJECT: lib_consts.LISTENERS, lib_consts.STATS_OBJECT_ID: listener_id} return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_OK} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/driver_factory.py0000664000175000017500000000354600000000000022453 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from stevedore import driver as stevedore_driver from wsme import types as wtypes from octavia.common import exceptions CONF = cfg.CONF LOG = logging.getLogger(__name__) def get_driver(provider): # If this came in None it must be a load balancer that existed before # provider support was added. These must be of type 'amphora' and not # whatever the current "default" is set to. if isinstance(provider, wtypes.UnsetType): provider = CONF.api_settings.default_provider_driver elif not provider: provider = 'amphora' if provider not in CONF.api_settings.enabled_provider_drivers: LOG.warning("Requested provider driver '%s' was not enabled in the " "configuration file.", provider) raise exceptions.ProviderNotEnabled(prov=provider) try: driver = stevedore_driver.DriverManager( namespace='octavia.api.drivers', name=provider, invoke_on_load=True).driver driver.name = provider except Exception as e: LOG.error('Unable to load provider driver %s due to: %s', provider, str(e)) raise exceptions.ProviderNotFound(prov=provider) return driver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/driver_lib.py0000664000175000017500000000163000000000000021542 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings from debtcollector import moves from octavia_lib.api.drivers import driver_lib as lib_driver_lib warnings.simplefilter('default', DeprecationWarning) DriverLibrary = moves.moved_class( lib_driver_lib.DriverLibrary, 'DriverLibrary', __name__, version='Stein', removal_version='U') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3782167 octavia-6.2.2/octavia/api/drivers/noop_driver/0000775000175000017500000000000000000000000021375 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/noop_driver/__init__.py0000664000175000017500000000107400000000000023510 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/noop_driver/agent.py0000664000175000017500000000161500000000000023050 0ustar00zuulzuul00000000000000# Copyright 2019 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging LOG = logging.getLogger(__name__) def noop_provider_agent(exit_event): LOG.info('No-Op provider agent has started.') while not exit_event.is_set(): time.sleep(1) LOG.info('No-Op provider agent is exiting.') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/noop_driver/driver.py0000664000175000017500000003410500000000000023245 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
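# The no-op provider driver below does no backend work; every call is
# simply recorded in NoopManager.driverconfig so tests can assert on what
# the API layer invoked. A minimal illustrative sketch of how it could be
# exercised (the IDs are hypothetical placeholder values, not part of
# this module):
#
#     mgr = NoopManager()
#     vip = mgr.create_vip_port('lb-1', 'proj-1', {})
#     assert mgr.driverconfig['lb-1'][-1] == 'create_vip_port'
#     assert 'vip_address' in vip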
from oslo_log import log as logging from oslo_utils import uuidutils from octavia_lib.api.drivers import data_models from octavia_lib.api.drivers import provider_base as driver_base LOG = logging.getLogger(__name__) class NoopManager(object): def __init__(self): super(NoopManager, self).__init__() self.driverconfig = {} # Load Balancer def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary): LOG.debug('Provider %s no-op, create_vip_port loadbalancer %s', self.__class__.__name__, loadbalancer_id) self.driverconfig[loadbalancer_id] = (loadbalancer_id, project_id, vip_dictionary, 'create_vip_port') vip_address = vip_dictionary.get('vip_address', '198.0.2.5') vip_network_id = vip_dictionary.get('vip_network_id', uuidutils.generate_uuid()) vip_port_id = vip_dictionary.get('vip_port_id', uuidutils.generate_uuid()) vip_subnet_id = vip_dictionary.get('vip_subnet_id', uuidutils.generate_uuid()) return data_models.VIP(vip_address=vip_address, vip_network_id=vip_network_id, vip_port_id=vip_port_id, vip_subnet_id=vip_subnet_id).to_dict() def loadbalancer_create(self, loadbalancer): LOG.debug('Provider %s no-op, loadbalancer_create loadbalancer %s', self.__class__.__name__, loadbalancer.loadbalancer_id) self.driverconfig[loadbalancer.loadbalancer_id] = ( loadbalancer, 'loadbalancer_create') def loadbalancer_delete(self, loadbalancer, cascade=False): loadbalancer_id = loadbalancer.loadbalancer_id LOG.debug('Provider %s no-op, loadbalancer_delete loadbalancer %s', self.__class__.__name__, loadbalancer_id) self.driverconfig[loadbalancer_id] = (loadbalancer_id, cascade, 'loadbalancer_delete') def loadbalancer_failover(self, loadbalancer_id): LOG.debug('Provider %s no-op, loadbalancer_failover loadbalancer %s', self.__class__.__name__, loadbalancer_id) self.driverconfig[loadbalancer_id] = (loadbalancer_id, 'loadbalancer_failover') def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): LOG.debug('Provider %s no-op, loadbalancer_update loadbalancer %s ' 'old: %s. new: %s', self.__class__.__name__, new_loadbalancer.loadbalancer_id, old_loadbalancer.to_dict(), new_loadbalancer.to_dict()) self.driverconfig[new_loadbalancer.loadbalancer_id] = ( new_loadbalancer, 'loadbalancer_update') # Listener def listener_create(self, listener): LOG.debug('Provider %s no-op, listener_create listener %s', self.__class__.__name__, listener.listener_id) self.driverconfig[listener.listener_id] = (listener, 'listener_create') def listener_delete(self, listener): listener_id = listener.listener_id LOG.debug('Provider %s no-op, listener_delete listener %s', self.__class__.__name__, listener_id) self.driverconfig[listener_id] = (listener_id, 'listener_delete') def listener_update(self, old_listener, new_listener): LOG.debug('Provider %s no-op, listener_update listener %s ' 'old: %s. new: %s', self.__class__.__name__, new_listener.listener_id, old_listener.to_dict(), new_listener.to_dict()) self.driverconfig[new_listener.listener_id] = ( new_listener, 'listener_update') # Pool def pool_create(self, pool): LOG.debug('Provider %s no-op, pool_create pool %s', self.__class__.__name__, pool.pool_id) self.driverconfig[pool.pool_id] = (pool, 'pool_create') def pool_delete(self, pool): pool_id = pool.pool_id LOG.debug('Provider %s no-op, pool_delete pool %s', self.__class__.__name__, pool_id) self.driverconfig[pool_id] = (pool_id, 'pool_delete') def pool_update(self, old_pool, new_pool): LOG.debug('Provider %s no-op, pool_update pool %s ' 'old: %s. 
new: %s', self.__class__.__name__, new_pool.pool_id, old_pool.to_dict(), new_pool.to_dict()) self.driverconfig[new_pool.pool_id] = ( new_pool, 'pool_update') # Member def member_create(self, member): LOG.debug('Provider %s no-op, member_create member %s', self.__class__.__name__, member.member_id) self.driverconfig[member.member_id] = (member, 'member_create') def member_delete(self, member): member_id = member.member_id LOG.debug('Provider %s no-op, member_delete member %s', self.__class__.__name__, member_id) self.driverconfig[member_id] = (member_id, 'member_delete') def member_update(self, old_member, new_member): LOG.debug('Provider %s no-op, member_update member %s ' 'old: %s. new: %s', self.__class__.__name__, new_member.member_id, old_member.to_dict(), new_member.to_dict()) self.driverconfig[new_member.member_id] = ( new_member, 'member_update') def member_batch_update(self, pool_id, members): for member in members: LOG.debug('Provider %s no-op, member_batch_update pool_id %s ' 'member %s', self.__class__.__name__, pool_id, member.member_id) self.driverconfig[member.member_id] = (member, 'member_batch_update') # Health Monitor def health_monitor_create(self, healthmonitor): LOG.debug('Provider %s no-op, health_monitor_create healthmonitor %s', self.__class__.__name__, healthmonitor.healthmonitor_id) self.driverconfig[healthmonitor.healthmonitor_id] = ( healthmonitor, 'health_monitor_create') def health_monitor_delete(self, healthmonitor): healthmonitor_id = healthmonitor.healthmonitor_id LOG.debug('Provider %s no-op, health_monitor_delete healthmonitor %s', self.__class__.__name__, healthmonitor_id) self.driverconfig[healthmonitor_id] = (healthmonitor_id, 'health_monitor_delete') def health_monitor_update(self, old_healthmonitor, new_healthmonitor): LOG.debug('Provider %s no-op, health_monitor_update healthmonitor %s ' 'old: %s. new: %s', self.__class__.__name__, new_healthmonitor.healthmonitor_id, old_healthmonitor.to_dict(), new_healthmonitor.to_dict()) self.driverconfig[new_healthmonitor.healthmonitor_id] = ( new_healthmonitor, 'health_monitor_update') # L7 Policy def l7policy_create(self, l7policy): LOG.debug('Provider %s no-op, l7policy_create l7policy %s', self.__class__.__name__, l7policy.l7policy_id) self.driverconfig[l7policy.l7policy_id] = (l7policy, 'l7policy_create') def l7policy_delete(self, l7policy): l7policy_id = l7policy.l7policy_id LOG.debug('Provider %s no-op, l7policy_delete l7policy %s', self.__class__.__name__, l7policy_id) self.driverconfig[l7policy_id] = (l7policy_id, 'l7policy_delete') def l7policy_update(self, old_l7policy, new_l7policy): LOG.debug('Provider %s no-op, l7policy_update l7policy %s ' 'old: %s. new: %s', self.__class__.__name__, new_l7policy.l7policy_id, old_l7policy.to_dict(), new_l7policy.to_dict()) self.driverconfig[new_l7policy.l7policy_id] = ( new_l7policy, 'l7policy_update') # L7 Rule def l7rule_create(self, l7rule): LOG.debug('Provider %s no-op, l7rule_create l7rule %s', self.__class__.__name__, l7rule.l7rule_id) self.driverconfig[l7rule.l7rule_id] = (l7rule, 'l7rule_create') def l7rule_delete(self, l7rule): l7rule_id = l7rule.l7rule_id LOG.debug('Provider %s no-op, l7rule_delete l7rule %s', self.__class__.__name__, l7rule_id) self.driverconfig[l7rule_id] = (l7rule_id, 'l7rule_delete') def l7rule_update(self, old_l7rule, new_l7rule): LOG.debug('Provider %s no-op, l7rule_update l7rule %s. ' 'old: %s. 
new: %s', self.__class__.__name__, new_l7rule.l7rule_id, old_l7rule.to_dict(), new_l7rule.to_dict()) self.driverconfig[new_l7rule.l7rule_id] = (new_l7rule, 'l7rule_update') # Flavor def get_supported_flavor_metadata(self): LOG.debug('Provider %s no-op, get_supported_flavor_metadata', self.__class__.__name__) return {"amp_image_tag": "The glance image tag to use for this load " "balancer."} def validate_flavor(self, flavor_metadata): LOG.debug('Provider %s no-op, validate_flavor metadata: %s', self.__class__.__name__, flavor_metadata) flavor_hash = hash(frozenset(flavor_metadata)) self.driverconfig[flavor_hash] = (flavor_metadata, 'validate_flavor') # Availability Zone def get_supported_availability_zone_metadata(self): LOG.debug( 'Provider %s no-op, get_supported_availability_zone_metadata', self.__class__.__name__) return {"compute_zone": "The compute availability zone to use for " "this loadbalancer."} def validate_availability_zone(self, availability_zone_metadata): LOG.debug('Provider %s no-op, validate_availability_zone metadata: %s', self.__class__.__name__, availability_zone_metadata) availability_zone_hash = hash(frozenset(availability_zone_metadata)) self.driverconfig[availability_zone_hash] = ( availability_zone_metadata, 'validate_availability_zone') class NoopProviderDriver(driver_base.ProviderDriver): def __init__(self): super(NoopProviderDriver, self).__init__() self.driver = NoopManager() # Load Balancer def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary): return self.driver.create_vip_port(loadbalancer_id, project_id, vip_dictionary) def loadbalancer_create(self, loadbalancer): self.driver.loadbalancer_create(loadbalancer) def loadbalancer_delete(self, loadbalancer, cascade=False): self.driver.loadbalancer_delete(loadbalancer, cascade) def loadbalancer_failover(self, loadbalancer_id): self.driver.loadbalancer_failover(loadbalancer_id) def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): self.driver.loadbalancer_update(old_loadbalancer, new_loadbalancer) # Listener def listener_create(self, listener): self.driver.listener_create(listener) def listener_delete(self, listener): self.driver.listener_delete(listener) def listener_update(self, old_listener, new_listener): self.driver.listener_update(old_listener, new_listener) # Pool def pool_create(self, pool): self.driver.pool_create(pool) def pool_delete(self, pool): self.driver.pool_delete(pool) def pool_update(self, old_pool, new_pool): self.driver.pool_update(old_pool, new_pool) # Member def member_create(self, member): self.driver.member_create(member) def member_delete(self, member): self.driver.member_delete(member) def member_update(self, old_member, new_member): self.driver.member_update(old_member, new_member) def member_batch_update(self, pool_id, members): self.driver.member_batch_update(pool_id, members) # Health Monitor def health_monitor_create(self, healthmonitor): self.driver.health_monitor_create(healthmonitor) def health_monitor_delete(self, healthmonitor): self.driver.health_monitor_delete(healthmonitor) def health_monitor_update(self, old_healthmonitor, new_healthmonitor): self.driver.health_monitor_update(old_healthmonitor, new_healthmonitor) # L7 Policy def l7policy_create(self, l7policy): self.driver.l7policy_create(l7policy) def l7policy_delete(self, l7policy): self.driver.l7policy_delete(l7policy) def l7policy_update(self, old_l7policy, new_l7policy): self.driver.l7policy_update(old_l7policy, new_l7policy) # L7 Rule def l7rule_create(self, l7rule): 
self.driver.l7rule_create(l7rule) def l7rule_delete(self, l7rule): self.driver.l7rule_delete(l7rule) def l7rule_update(self, old_l7rule, new_l7rule): self.driver.l7rule_update(old_l7rule, new_l7rule) # Flavor def get_supported_flavor_metadata(self): return self.driver.get_supported_flavor_metadata() def validate_flavor(self, flavor_metadata): self.driver.validate_flavor(flavor_metadata) # Availability Zone def get_supported_availability_zone_metadata(self): return self.driver.get_supported_availability_zone_metadata() def validate_availability_zone(self, availability_zone_metadata): self.driver.validate_availability_zone(availability_zone_metadata) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/provider_base.py0000664000175000017500000000164400000000000022252 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings from debtcollector import moves from octavia_lib.api.drivers import provider_base as lib_provider_base warnings.simplefilter('default', DeprecationWarning) ProviderDriver = moves.moved_class( lib_provider_base.ProviderDriver, 'ProviderDriver', __name__, version='Stein', removal_version='U') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/drivers/utils.py0000664000175000017500000005766200000000000020601 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions as lib_exceptions from oslo_config import cfg from oslo_context import context as oslo_context from oslo_log import log as logging from oslo_utils import excutils from stevedore import driver as stevedore_driver from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.common.tls_utils import cert_parser from octavia.db import api as db_api from octavia.db import repositories from octavia.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF def call_provider(provider, driver_method, *args, **kwargs): """Wrap calls to the provider driver to handle driver errors. This allows Octavia to return user friendly errors when a provider driver has an issue. :param driver_method: Method in the driver to call. :raises ProviderDriverError: Catch all driver error. 
:raises ProviderNotImplementedError: The driver doesn't support this action. :raises ProviderUnsupportedOptionError: The driver doesn't support a provided option. """ try: return driver_method(*args, **kwargs) except lib_exceptions.DriverError as e: LOG.exception("Provider '%s' raised a driver error: %s", provider, e.operator_fault_string) raise exceptions.ProviderDriverError(prov=provider, user_msg=e.user_fault_string) except (lib_exceptions.NotImplementedError, NotImplementedError) as e: op_fault_string = ( e.operator_fault_string if hasattr(e, "operator_fault_string") else _("This feature is not implemented by this provider.")) usr_fault_string = ( e.user_fault_string if hasattr(e, "user_fault_string") else _("This feature is not implemented by the provider.")) LOG.info("Provider '%s' raised a not implemented error: %s", provider, op_fault_string) raise exceptions.ProviderNotImplementedError( prov=provider, user_msg=usr_fault_string) except lib_exceptions.UnsupportedOptionError as e: LOG.info("Provider '%s' raised an unsupported option error: " "%s", provider, e.operator_fault_string) raise exceptions.ProviderUnsupportedOptionError( prov=provider, user_msg=e.user_fault_string) except Exception as e: LOG.exception("Provider '%s' raised an unknown error: %s", provider, str(e)) raise exceptions.ProviderDriverError(prov=provider, user_msg=e) def _base_to_provider_dict(current_dict, include_project_id=False): new_dict = copy.deepcopy(current_dict) if 'provisioning_status' in new_dict: del new_dict['provisioning_status'] if 'operating_status' in new_dict: del new_dict['operating_status'] if 'provider' in new_dict: del new_dict['provider'] if 'created_at' in new_dict: del new_dict['created_at'] if 'updated_at' in new_dict: del new_dict['updated_at'] if 'enabled' in new_dict: new_dict['admin_state_up'] = new_dict.pop('enabled') if 'project_id' in new_dict and not include_project_id: del new_dict['project_id'] if 'tenant_id' in new_dict: del new_dict['tenant_id'] if 'tags' in new_dict: del new_dict['tags'] if 'flavor_id' in new_dict: del new_dict['flavor_id'] if 'topology' in new_dict: del new_dict['topology'] if 'vrrp_group' in new_dict: del new_dict['vrrp_group'] if 'amphorae' in new_dict: del new_dict['amphorae'] if 'vip' in new_dict: del new_dict['vip'] if 'listeners' in new_dict: del new_dict['listeners'] if 'pools' in new_dict: del new_dict['pools'] if 'server_group_id' in new_dict: del new_dict['server_group_id'] return new_dict # Note: The provider dict returned from this method will have provider # data model objects in it. 
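# As an illustrative sketch of the translation performed below (the ID
# and project values are placeholders, not part of this module):
#
#     lb_dict_to_provider_dict({'id': 'lb-1', 'enabled': True,
#                               'provisioning_status': 'ACTIVE',
#                               'project_id': 'proj-1'})
#
# would return {'loadbalancer_id': 'lb-1', 'admin_state_up': True,
# 'project_id': 'proj-1'}: Octavia-internal fields such as the statuses
# are stripped, 'enabled' becomes the provider-facing 'admin_state_up',
# and 'id' is renamed to 'loadbalancer_id'.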
def lb_dict_to_provider_dict(lb_dict, vip=None, db_pools=None, db_listeners=None, for_delete=False): new_lb_dict = _base_to_provider_dict(lb_dict, include_project_id=True) new_lb_dict['loadbalancer_id'] = new_lb_dict.pop('id') if vip: new_lb_dict['vip_address'] = vip.ip_address new_lb_dict['vip_network_id'] = vip.network_id new_lb_dict['vip_port_id'] = vip.port_id new_lb_dict['vip_subnet_id'] = vip.subnet_id new_lb_dict['vip_qos_policy_id'] = vip.qos_policy_id if 'flavor_id' in lb_dict and lb_dict['flavor_id']: flavor_repo = repositories.FlavorRepository() new_lb_dict['flavor'] = flavor_repo.get_flavor_metadata_dict( db_api.get_session(), lb_dict['flavor_id']) if db_pools: new_lb_dict['pools'] = db_pools_to_provider_pools( db_pools, for_delete=for_delete) if db_listeners: new_lb_dict['listeners'] = db_listeners_to_provider_listeners( db_listeners, for_delete=for_delete) return new_lb_dict def db_loadbalancer_to_provider_loadbalancer(db_loadbalancer, for_delete=False): new_loadbalancer_dict = lb_dict_to_provider_dict( db_loadbalancer.to_dict(recurse=True), vip=db_loadbalancer.vip, db_pools=db_loadbalancer.pools, db_listeners=db_loadbalancer.listeners, for_delete=for_delete) for unsupported_field in ['server_group_id', 'amphorae', 'vrrp_group', 'topology', 'vip']: if unsupported_field in new_loadbalancer_dict: del new_loadbalancer_dict[unsupported_field] provider_loadbalancer = driver_dm.LoadBalancer.from_dict( new_loadbalancer_dict) return provider_loadbalancer def db_listeners_to_provider_listeners(db_listeners, for_delete=False): provider_listeners = [] for listener in db_listeners: provider_listener = db_listener_to_provider_listener( listener, for_delete=for_delete) provider_listeners.append(provider_listener) return provider_listeners def db_listeners_to_provider_dicts_list_of_dicts(db_listeners, for_delete=False): listeners = db_listeners_to_provider_listeners( db_listeners, for_delete=for_delete) return [listener.to_dict() for listener in listeners] def db_listener_to_provider_listener(db_listener, for_delete=False): new_listener_dict = listener_dict_to_provider_dict( db_listener.to_dict(recurse=True), for_delete=for_delete) if ('default_pool' in new_listener_dict and new_listener_dict['default_pool']): provider_pool = db_pool_to_provider_pool(db_listener.default_pool, for_delete=for_delete) new_listener_dict['default_pool_id'] = provider_pool.pool_id new_listener_dict['default_pool'] = provider_pool if new_listener_dict.get('l7policies', None): new_listener_dict['l7policies'] = ( db_l7policies_to_provider_l7policies(db_listener.l7policies)) provider_listener = driver_dm.Listener.from_dict(new_listener_dict) return provider_listener def _get_secret_data(cert_manager, project_id, secret_ref, for_delete=False): """Get the secret from the certificate manager and upload it to the amp. :returns: The secret data. """ context = oslo_context.RequestContext(project_id=project_id) try: secret_data = cert_manager.get_secret(context, secret_ref) except Exception as e: LOG.warning('Unable to retrieve certificate: %s due to %s.', secret_ref, str(e)) if for_delete: secret_data = None else: raise exceptions.CertificateRetrievalException(ref=secret_ref) # We need to have json convertible data for storing it in # persistence jobboard backend. 
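# For example (illustrative only): a PEM payload returned by the
# certificate manager as b'-----BEGIN CERTIFICATE-----...' is decoded
# below to the equivalent str before being handed to the provider.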
if isinstance(secret_data, bytes): return secret_data.decode() return secret_data def listener_dict_to_provider_dict(listener_dict, for_delete=False): new_listener_dict = _base_to_provider_dict(listener_dict, include_project_id=True) new_listener_dict['listener_id'] = new_listener_dict.pop('id') if 'load_balancer_id' in new_listener_dict: new_listener_dict['loadbalancer_id'] = new_listener_dict.pop( 'load_balancer_id') # Pull the certs out of the certificate manager to pass to the provider if 'tls_certificate_id' in new_listener_dict: new_listener_dict['default_tls_container_ref'] = new_listener_dict.pop( 'tls_certificate_id') if 'client_ca_tls_certificate_id' in new_listener_dict: new_listener_dict['client_ca_tls_container_ref'] = ( new_listener_dict.pop('client_ca_tls_certificate_id')) if 'client_crl_container_id' in new_listener_dict: new_listener_dict['client_crl_container_ref'] = ( new_listener_dict.pop('client_crl_container_id')) listener_obj = data_models.Listener(**listener_dict) if (listener_obj.tls_certificate_id or listener_obj.sni_containers or listener_obj.client_ca_tls_certificate_id): SNI_objs = [] for sni in listener_obj.sni_containers: if isinstance(sni, dict): sni_obj = data_models.SNI(**sni) SNI_objs.append(sni_obj) elif isinstance(sni, str): sni_obj = data_models.SNI(tls_container_id=sni) SNI_objs.append(sni_obj) else: raise exceptions.ValidationException( detail=_('Invalid SNI container on listener')) listener_obj.sni_containers = SNI_objs cert_manager = stevedore_driver.DriverManager( namespace='octavia.cert_manager', name=CONF.certificates.cert_manager, invoke_on_load=True, ).driver try: cert_dict = cert_parser.load_certificates_data(cert_manager, listener_obj) except Exception as e: with excutils.save_and_reraise_exception() as ctxt: LOG.warning('Unable to retrieve certificate(s) due to %s.', str(e)) if for_delete: ctxt.reraise = False cert_dict = {} if 'tls_cert' in cert_dict and cert_dict['tls_cert']: new_listener_dict['default_tls_container_data'] = ( cert_dict['tls_cert'].to_dict(recurse=True)) if 'sni_certs' in cert_dict and cert_dict['sni_certs']: sni_data_list = [] for sni in cert_dict['sni_certs']: sni_data_list.append(sni.to_dict(recurse=True)) new_listener_dict['sni_container_data'] = sni_data_list if listener_obj.client_ca_tls_certificate_id: cert = _get_secret_data(cert_manager, listener_obj.project_id, listener_obj.client_ca_tls_certificate_id) new_listener_dict['client_ca_tls_container_data'] = cert if listener_obj.client_crl_container_id: crl_file = _get_secret_data(cert_manager, listener_obj.project_id, listener_obj.client_crl_container_id) new_listener_dict['client_crl_container_data'] = crl_file # Format the allowed_cidrs if ('allowed_cidrs' in new_listener_dict and new_listener_dict['allowed_cidrs'] and 'cidr' in new_listener_dict['allowed_cidrs'][0]): cidrs_dict_list = new_listener_dict.pop('allowed_cidrs') new_listener_dict['allowed_cidrs'] = [cidr_dict['cidr'] for cidr_dict in cidrs_dict_list] # Format the sni_containers -> sni_container_refs sni_containers = new_listener_dict.pop('sni_containers', None) if sni_containers: new_listener_dict['sni_container_refs'] = [] for sni in sni_containers: if isinstance(sni, dict): new_listener_dict['sni_container_refs'].append( sni['tls_container_id']) elif isinstance(sni, str): new_listener_dict['sni_container_refs'].append(sni) else: raise exceptions.ValidationException( detail=_('Invalid SNI container on listener')) # Remove the DB back references if 'load_balancer' in new_listener_dict: del 
new_listener_dict['load_balancer'] if 'peer_port' in new_listener_dict: del new_listener_dict['peer_port'] if 'pools' in new_listener_dict: del new_listener_dict['pools'] if 'stats' in new_listener_dict: del new_listener_dict['stats'] if ('default_pool' in new_listener_dict and new_listener_dict['default_pool']): pool = new_listener_dict.pop('default_pool') new_listener_dict['default_pool'] = pool_dict_to_provider_dict( pool, for_delete=for_delete) provider_l7policies = [] if 'l7policies' in new_listener_dict: l7policies = new_listener_dict.pop('l7policies') or [] for l7policy in l7policies: provider_l7policy = l7policy_dict_to_provider_dict(l7policy) provider_l7policies.append(provider_l7policy) new_listener_dict['l7policies'] = provider_l7policies return new_listener_dict def db_pools_to_provider_pools(db_pools, for_delete=False): provider_pools = [] for pool in db_pools: provider_pools.append(db_pool_to_provider_pool(pool, for_delete=for_delete)) return provider_pools def db_pool_to_provider_pool(db_pool, for_delete=False): new_pool_dict = pool_dict_to_provider_dict(db_pool.to_dict(recurse=True), for_delete=for_delete) # Replace the sub-dicts with objects if 'health_monitor' in new_pool_dict: del new_pool_dict['health_monitor'] if db_pool.health_monitor: provider_healthmonitor = db_HM_to_provider_HM(db_pool.health_monitor) new_pool_dict['healthmonitor'] = provider_healthmonitor # Don't leave a 'members' None here, we want it to pass through to Unset if new_pool_dict.get('members', None): del new_pool_dict['members'] if db_pool.members: provider_members = db_members_to_provider_members(db_pool.members) new_pool_dict['members'] = provider_members db_listeners = db_pool.listeners if db_listeners: new_pool_dict['listener_id'] = db_listeners[0].id return driver_dm.Pool.from_dict(new_pool_dict) def pool_dict_to_provider_dict(pool_dict, for_delete=False): new_pool_dict = _base_to_provider_dict(pool_dict, include_project_id=True) new_pool_dict['pool_id'] = new_pool_dict.pop('id') # Pull the certs out of the certificate manager to pass to the provider if 'tls_certificate_id' in new_pool_dict: new_pool_dict['tls_container_ref'] = new_pool_dict.pop( 'tls_certificate_id') if 'ca_tls_certificate_id' in new_pool_dict: new_pool_dict['ca_tls_container_ref'] = new_pool_dict.pop( 'ca_tls_certificate_id') if 'crl_container_id' in new_pool_dict: new_pool_dict['crl_container_ref'] = new_pool_dict.pop( 'crl_container_id') pool_obj = data_models.Pool(**pool_dict) if (pool_obj.tls_certificate_id or pool_obj.ca_tls_certificate_id or pool_obj.crl_container_id): cert_manager = stevedore_driver.DriverManager( namespace='octavia.cert_manager', name=CONF.certificates.cert_manager, invoke_on_load=True, ).driver try: cert_dict = cert_parser.load_certificates_data(cert_manager, pool_obj) except Exception as e: with excutils.save_and_reraise_exception() as ctxt: LOG.warning('Unable to retrieve certificate(s) due to %s.', str(e)) if for_delete: ctxt.reraise = False cert_dict = {} if 'tls_cert' in cert_dict and cert_dict['tls_cert']: new_pool_dict['tls_container_data'] = ( cert_dict['tls_cert'].to_dict(recurse=True)) if pool_obj.ca_tls_certificate_id: cert = _get_secret_data(cert_manager, pool_obj.project_id, pool_obj.ca_tls_certificate_id) new_pool_dict['ca_tls_container_data'] = cert if pool_obj.crl_container_id: crl_file = _get_secret_data(cert_manager, pool_obj.project_id, pool_obj.crl_container_id) new_pool_dict['crl_container_data'] = crl_file # Remove the DB back references if ('session_persistence' in 
new_pool_dict and new_pool_dict['session_persistence']): if 'pool_id' in new_pool_dict['session_persistence']: del new_pool_dict['session_persistence']['pool_id'] if 'pool' in new_pool_dict['session_persistence']: del new_pool_dict['session_persistence']['pool'] if 'l7policies' in new_pool_dict: del new_pool_dict['l7policies'] if 'listeners' in new_pool_dict: del new_pool_dict['listeners'] if 'load_balancer' in new_pool_dict: del new_pool_dict['load_balancer'] if 'load_balancer_id' in new_pool_dict: new_pool_dict['loadbalancer_id'] = new_pool_dict.pop( 'load_balancer_id') if 'health_monitor' in new_pool_dict: hm = new_pool_dict.pop('health_monitor') if hm: new_pool_dict['healthmonitor'] = hm_dict_to_provider_dict(hm) else: new_pool_dict['healthmonitor'] = None if 'members' in new_pool_dict and new_pool_dict['members']: members = new_pool_dict.pop('members') provider_members = [] for member in members: provider_member = member_dict_to_provider_dict(member) provider_members.append(provider_member) new_pool_dict['members'] = provider_members return new_pool_dict def db_members_to_provider_members(db_members): provider_members = [] for member in db_members: provider_members.append(db_member_to_provider_member(member)) return provider_members def db_member_to_provider_member(db_member): new_member_dict = member_dict_to_provider_dict(db_member.to_dict()) return driver_dm.Member.from_dict(new_member_dict) def member_dict_to_provider_dict(member_dict): new_member_dict = _base_to_provider_dict(member_dict, include_project_id=True) new_member_dict['member_id'] = new_member_dict.pop('id') if 'ip_address' in new_member_dict: new_member_dict['address'] = new_member_dict.pop('ip_address') # Remove the DB back references if 'pool' in new_member_dict: del new_member_dict['pool'] return new_member_dict def db_HM_to_provider_HM(db_hm): new_HM_dict = hm_dict_to_provider_dict(db_hm.to_dict()) return driver_dm.HealthMonitor.from_dict(new_HM_dict) def hm_dict_to_provider_dict(hm_dict): new_hm_dict = _base_to_provider_dict(hm_dict, include_project_id=True) new_hm_dict['healthmonitor_id'] = new_hm_dict.pop('id') if 'fall_threshold' in new_hm_dict: new_hm_dict['max_retries_down'] = new_hm_dict.pop('fall_threshold') if 'rise_threshold' in new_hm_dict: new_hm_dict['max_retries'] = new_hm_dict.pop('rise_threshold') # Remove the DB back references if 'pool' in new_hm_dict: del new_hm_dict['pool'] return new_hm_dict def db_l7policies_to_provider_l7policies(db_l7policies): provider_l7policies = [] for l7policy in db_l7policies: provider_l7policy = db_l7policy_to_provider_l7policy(l7policy) provider_l7policies.append(provider_l7policy) return provider_l7policies def db_l7policy_to_provider_l7policy(db_l7policy): new_l7policy_dict = l7policy_dict_to_provider_dict( db_l7policy.to_dict(recurse=True)) if 'l7rules' in new_l7policy_dict: del new_l7policy_dict['l7rules'] new_l7rules = db_l7rules_to_provider_l7rules(db_l7policy.l7rules) new_l7policy_dict['rules'] = new_l7rules return driver_dm.L7Policy.from_dict(new_l7policy_dict) def l7policy_dict_to_provider_dict(l7policy_dict): new_l7policy_dict = _base_to_provider_dict(l7policy_dict, include_project_id=True) new_l7policy_dict['l7policy_id'] = new_l7policy_dict.pop('id') # Remove the DB back references if 'listener' in new_l7policy_dict: del new_l7policy_dict['listener'] if 'redirect_pool' in new_l7policy_dict: del new_l7policy_dict['redirect_pool'] if 'l7rules' in new_l7policy_dict and new_l7policy_dict['l7rules']: rules = new_l7policy_dict.pop('l7rules') provider_rules = [] 
for rule in rules: provider_rule = l7rule_dict_to_provider_dict(rule) provider_rules.append(provider_rule) new_l7policy_dict['rules'] = provider_rules return new_l7policy_dict def db_l7rules_to_provider_l7rules(db_l7rules): provider_l7rules = [] for l7rule in db_l7rules: provider_l7rule = db_l7rule_to_provider_l7rule(l7rule) provider_l7rules.append(provider_l7rule) return provider_l7rules def db_l7rule_to_provider_l7rule(db_l7rule): new_l7rule_dict = l7rule_dict_to_provider_dict(db_l7rule.to_dict()) return driver_dm.L7Rule.from_dict(new_l7rule_dict) def l7rule_dict_to_provider_dict(l7rule_dict): new_l7rule_dict = _base_to_provider_dict(l7rule_dict, include_project_id=True) new_l7rule_dict['l7rule_id'] = new_l7rule_dict.pop('id') # Remove the DB back references if 'l7policy' in new_l7rule_dict: del new_l7rule_dict['l7policy'] return new_l7rule_dict def vip_dict_to_provider_dict(vip_dict): new_vip_dict = {} if 'ip_address' in vip_dict: new_vip_dict['vip_address'] = vip_dict['ip_address'] if 'network_id' in vip_dict: new_vip_dict['vip_network_id'] = vip_dict['network_id'] if 'port_id' in vip_dict: new_vip_dict['vip_port_id'] = vip_dict['port_id'] if 'subnet_id' in vip_dict: new_vip_dict['vip_subnet_id'] = vip_dict['subnet_id'] if 'qos_policy_id' in vip_dict: new_vip_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id'] if constants.OCTAVIA_OWNED in vip_dict: new_vip_dict[constants.OCTAVIA_OWNED] = vip_dict[ constants.OCTAVIA_OWNED] return new_vip_dict def provider_vip_dict_to_vip_obj(vip_dictionary): vip_obj = data_models.Vip() if 'vip_address' in vip_dictionary: vip_obj.ip_address = vip_dictionary['vip_address'] if 'vip_network_id' in vip_dictionary: vip_obj.network_id = vip_dictionary['vip_network_id'] if 'vip_port_id' in vip_dictionary: vip_obj.port_id = vip_dictionary['vip_port_id'] if 'vip_subnet_id' in vip_dictionary: vip_obj.subnet_id = vip_dictionary['vip_subnet_id'] if 'vip_qos_policy_id' in vip_dictionary: vip_obj.qos_policy_id = vip_dictionary['vip_qos_policy_id'] if constants.OCTAVIA_OWNED in vip_dictionary: vip_obj.octavia_owned = vip_dictionary[constants.OCTAVIA_OWNED] return vip_obj ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3782167 octavia-6.2.2/octavia/api/healthcheck/0000775000175000017500000000000000000000000017634 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/healthcheck/healthcheck_plugins.py0000664000175000017500000000470700000000000024222 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_config import cfg from oslo_middleware.healthcheck import pluginbase from octavia.db import api as db_apis from octavia.db import healthcheck CONF = cfg.CONF class OctaviaDBHealthcheck(pluginbase.HealthcheckBaseExtension): UNAVAILABLE_REASON = 'The Octavia database is unavailable.' 
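# The three class attributes below cache the most recent database check
# so that probes arriving within [api_settings]
# healthcheck_refresh_interval seconds of the last one reuse the cached
# result instead of re-querying the database.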
last_check = None last_result = None last_message = None def __init__(self, *args, **kwargs): super(OctaviaDBHealthcheck, self).__init__(*args, **kwargs) def healthcheck(self, server_port): try: if (self.last_check is not None and ((datetime.datetime.now() - self.last_check).total_seconds()) < CONF.api_settings.healthcheck_refresh_interval): result = self.last_result message = self.last_message else: result, message = healthcheck.check_database_connection( db_apis.get_session()) self.last_check = datetime.datetime.now() self.last_result = result self.last_message = message if result: return OctaviaDBCheckResult(available=True, reason="OK") else: return OctaviaDBCheckResult(available=False, reason=self.UNAVAILABLE_REASON, details=message) except Exception as e: return OctaviaDBCheckResult(available=False, reason=self.UNAVAILABLE_REASON, details=str(e)) class OctaviaDBCheckResult(pluginbase.HealthcheckResult): """Result sub-class to provide a unique name in detail reports.""" def __init__(self, *args, **kwargs): super(OctaviaDBCheckResult, self).__init__(*args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/root_controller.py0000664000175000017500000001176300000000000021201 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
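# The index() handler below implements version discovery for the API
# root. A trimmed, illustrative example of the JSON it returns for GET /
# (the host URL is a placeholder):
#
#     {"versions": [
#         {"id": "v2.0", "status": "SUPPORTED",
#          "updated": "2016-12-11T00:00:00Z",
#          "links": [{"href": "http://lb.example.com/v2",
#                     "rel": "self"}]},
#         ...
#         {"id": "v2.16", "status": "CURRENT",
#          "updated": "2020-03-15T00:00:00Z",
#          "links": [{"href": "http://lb.example.com/v2",
#                     "rel": "self"}]}
#     ]}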
from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import healthcheck from pecan import abort as pecan_abort from pecan import expose as pecan_expose from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.v2 import controllers as v2_controller CONF = cfg.CONF LOG = logging.getLogger(__name__) class RootController(object): """The controller with which the pecan wsgi app should be created.""" def __init__(self): super(RootController, self).__init__() setattr(self, 'v2.0', v2_controller.V2Controller()) setattr(self, 'v2', v2_controller.V2Controller()) if CONF.api_settings.healthcheck_enabled: self.healthcheck_obj = healthcheck.Healthcheck.app_factory(None) # Run the oslo middleware healthcheck for /healthcheck @pecan_expose('json') @pecan_expose(content_type='text/plain') @pecan_expose(content_type='text/html') def healthcheck(self): # pylint: disable=inconsistent-return-statements if CONF.api_settings.healthcheck_enabled: if pecan_request.method not in ['GET', 'HEAD']: pecan_abort(405) return self.healthcheck_obj.process_request(pecan_request) pecan_abort(404) def _add_a_version(self, versions, version, url_version, status, timestamp, base_url): versions.append({ 'id': version, 'status': status, 'updated': timestamp, 'links': [{ 'href': base_url + url_version, 'rel': 'self' }] }) @wsme_pecan.wsexpose(wtypes.text) def index(self): host_url = pecan_request.path_url if not host_url.endswith('/'): host_url = '{}/'.format(host_url) versions = [] self._add_a_version(versions, 'v2.0', 'v2', 'SUPPORTED', '2016-12-11T00:00:00Z', host_url) self._add_a_version(versions, 'v2.1', 'v2', 'SUPPORTED', '2018-04-20T00:00:00Z', host_url) self._add_a_version(versions, 'v2.2', 'v2', 'SUPPORTED', '2018-07-31T00:00:00Z', host_url) self._add_a_version(versions, 'v2.3', 'v2', 'SUPPORTED', '2018-12-18T00:00:00Z', host_url) # amp statistics self._add_a_version(versions, 'v2.4', 'v2', 'SUPPORTED', '2018-12-19T00:00:00Z', host_url) # Tags self._add_a_version(versions, 'v2.5', 'v2', 'SUPPORTED', '2019-01-21T00:00:00Z', host_url) # Flavors self._add_a_version(versions, 'v2.6', 'v2', 'SUPPORTED', '2019-01-25T00:00:00Z', host_url) # Amphora Config update self._add_a_version(versions, 'v2.7', 'v2', 'SUPPORTED', '2018-01-25T12:00:00Z', host_url) # TLS client authentication self._add_a_version(versions, 'v2.8', 'v2', 'SUPPORTED', '2019-02-12T00:00:00Z', host_url) # HTTP Redirect code self._add_a_version(versions, 'v2.9', 'v2', 'SUPPORTED', '2019-03-04T00:00:00Z', host_url) # Healthmonitor host header self._add_a_version(versions, 'v2.10', 'v2', 'SUPPORTED', '2019-03-05T00:00:00Z', host_url) # Additive batch member update self._add_a_version(versions, 'v2.11', 'v2', 'SUPPORTED', '2019-06-24T00:00:00Z', host_url) # VIP ACL self._add_a_version(versions, 'v2.12', 'v2', 'SUPPORTED', '2019-09-11T00:00:00Z', host_url) # SOURCE_IP_PORT algorithm self._add_a_version(versions, 'v2.13', 'v2', 'SUPPORTED', '2019-09-13T00:00:00Z', host_url) # Availability Zones self._add_a_version(versions, 'v2.14', 'v2', 'SUPPORTED', '2019-11-10T00:00:00Z', host_url) # TLS version and cipher options self._add_a_version(versions, 'v2.15', 'v2', 'SUPPORTED', '2020-03-10T00:00:00Z', host_url) # Additional UDP Healthcheck Types (HTTP/TCP) self._add_a_version(versions, 'v2.16', 'v2', 'CURRENT', '2020-03-15T00:00:00Z', host_url) return {'versions': versions} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028
mtime=1637052692.3782167 octavia-6.2.2/octavia/api/v2/0000775000175000017500000000000000000000000015720 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/__init__.py0000664000175000017500000000107400000000000020033 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3822167 octavia-6.2.2/octavia/api/v2/controllers/0000775000175000017500000000000000000000000020266 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/__init__.py0000664000175000017500000000550700000000000022406 0ustar00zuulzuul00000000000000# Copyright 2016 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
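# The controller tree assembled below maps URL segments onto controller
# attributes, so that, for example (illustrative paths):
#
#     GET /v2/lbaas/loadbalancers -> LoadBalancersController
#     GET /v2/lbaas/pools         -> PoolsController
#     GET /v2/octavia/amphorae    -> AmphoraController
#
# V2Controller also inherits the same resources at its own root, so
# /v2/loadbalancers resolves the same way as /v2/lbaas/loadbalancers.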
from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.v2.controllers import amphora from octavia.api.v2.controllers import availability_zone_profiles from octavia.api.v2.controllers import availability_zones from octavia.api.v2.controllers import base from octavia.api.v2.controllers import flavor_profiles from octavia.api.v2.controllers import flavors from octavia.api.v2.controllers import health_monitor from octavia.api.v2.controllers import l7policy from octavia.api.v2.controllers import listener from octavia.api.v2.controllers import load_balancer from octavia.api.v2.controllers import pool from octavia.api.v2.controllers import provider from octavia.api.v2.controllers import quotas class BaseV2Controller(base.BaseController): loadbalancers = None listeners = None pools = None l7policies = None healthmonitors = None quotas = None def __init__(self): super(BaseV2Controller, self).__init__() self.loadbalancers = load_balancer.LoadBalancersController() self.listeners = listener.ListenersController() self.pools = pool.PoolsController() self.l7policies = l7policy.L7PolicyController() self.healthmonitors = health_monitor.HealthMonitorController() self.quotas = quotas.QuotasController() self.providers = provider.ProviderController() self.flavors = flavors.FlavorsController() self.flavorprofiles = flavor_profiles.FlavorProfileController() self.availabilityzones = ( availability_zones.AvailabilityZonesController()) self.availabilityzoneprofiles = ( availability_zone_profiles.AvailabilityZoneProfileController()) @wsme_pecan.wsexpose(wtypes.text) def get(self): return "v2" class OctaviaV2Controller(base.BaseController): amphorae = None def __init__(self): super(OctaviaV2Controller, self).__init__() self.amphorae = amphora.AmphoraController() @wsme_pecan.wsexpose(wtypes.text) def get(self): return "v2" class V2Controller(BaseV2Controller): lbaas = None def __init__(self): super(V2Controller, self).__init__() self.lbaas = BaseV2Controller() self.octavia = OctaviaV2Controller() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/amphora.py0000664000175000017500000002112300000000000022266 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
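# The controllers in this module implement the operator amphora API. An
# illustrative request flow (the amphora ID is a placeholder):
#
#     PUT /v2/octavia/amphorae/<amphora-id>/failover
#
# is routed by AmphoraController._lookup() to FailoverController.put(),
# which marks the owning load balancer PENDING_UPDATE and casts a
# 'failover_amphora' message onto the controller worker queue.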
from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import excutils from pecan import expose as pecan_expose from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.v2.controllers import base from octavia.api.v2.types import amphora as amp_types from octavia.common import constants from octavia.common import exceptions from octavia.common import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class AmphoraController(base.BaseController): RBAC_TYPE = constants.RBAC_AMPHORA def __init__(self): super(AmphoraController, self).__init__() @wsme_pecan.wsexpose(amp_types.AmphoraRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_one(self, id, fields=None): """Gets a single amphora's details.""" context = pecan_request.context.get('octavia_context') db_amp = self._get_db_amp(context.session, id, show_deleted=False) self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ONE) result = self._convert_db_to_type( db_amp, amp_types.AmphoraResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return amp_types.AmphoraRootResponse(amphora=result) @wsme_pecan.wsexpose(amp_types.AmphoraeRootResponse, [wtypes.text], ignore_extra_args=True) def get_all(self, fields=None): """Gets all amphorae.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ALL) db_amp, links = self.repositories.amphora.get_all_API_list( context.session, show_deleted=False, pagination_helper=pcontext.get(constants.PAGINATION_HELPER)) result = self._convert_db_to_type( db_amp, [amp_types.AmphoraResponse]) if fields is not None: result = self._filter_fields(result, fields) return amp_types.AmphoraeRootResponse( amphorae=result, amphorae_links=links) @pecan_expose() def _lookup(self, amphora_id, *remainder): """Overridden pecan _lookup method for custom routing. Currently it routes config, failover, and stats requests to their respective sub-controllers.
""" if amphora_id and remainder: controller = remainder[0] remainder = remainder[1:] if controller == 'config': return AmphoraUpdateController(amp_id=amphora_id), remainder if controller == 'failover': return FailoverController(amp_id=amphora_id), remainder if controller == 'stats': return AmphoraStatsController(amp_id=amphora_id), remainder return None class FailoverController(base.BaseController): RBAC_TYPE = constants.RBAC_AMPHORA def __init__(self, amp_id): super(FailoverController, self).__init__() if CONF.api_settings.default_provider_driver == constants.AMPHORAV2: topic = constants.TOPIC_AMPHORA_V2 version = "2.0" else: topic = cfg.CONF.oslo_messaging.topic version = "1.0" self.target = messaging.Target( namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, topic=topic, version=version, fanout=False) self.client = rpc.get_client(self.target) self.amp_id = amp_id @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) def put(self): """Fails over an amphora""" pcontext = pecan_request.context context = pcontext.get('octavia_context') db_amp = self._get_db_amp(context.session, self.amp_id, show_deleted=False) # Check to see if the amphora is a spare (not associated with an LB) if db_amp.load_balancer: self._auth_validate_action( context, db_amp.load_balancer.project_id, constants.RBAC_PUT_FAILOVER) self.repositories.load_balancer.test_and_set_provisioning_status( context.session, db_amp.load_balancer_id, status=constants.PENDING_UPDATE, raise_exception=True) else: self._auth_validate_action( context, context.project_id, constants.RBAC_PUT_FAILOVER) try: LOG.info("Sending failover request for amphora %s to the queue", self.amp_id) payload = {constants.AMPHORA_ID: db_amp.id} self.client.cast({}, 'failover_amphora', **payload) except Exception: with excutils.save_and_reraise_exception(reraise=False): self.repositories.load_balancer.update( context.session, db_amp.load_balancer.id, provisioning_status=constants.ERROR) class AmphoraUpdateController(base.BaseController): RBAC_TYPE = constants.RBAC_AMPHORA def __init__(self, amp_id): super(AmphoraUpdateController, self).__init__() if CONF.api_settings.default_provider_driver == constants.AMPHORAV2: topic = constants.TOPIC_AMPHORA_V2 version = "2.0" else: topic = cfg.CONF.oslo_messaging.topic version = "1.0" self.transport = messaging.get_rpc_transport(cfg.CONF) self.target = messaging.Target( namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, topic=topic, version=version, fanout=False) self.client = messaging.RPCClient(self.transport, target=self.target) self.amp_id = amp_id @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) def put(self): """Update amphora agent configuration""" pcontext = pecan_request.context context = pcontext.get('octavia_context') db_amp = self._get_db_amp(context.session, self.amp_id, show_deleted=False) # Check to see if the amphora is a spare (not associated with an LB) if db_amp.load_balancer: self._auth_validate_action( context, db_amp.load_balancer.project_id, constants.RBAC_PUT_CONFIG) else: self._auth_validate_action( context, context.project_id, constants.RBAC_PUT_CONFIG) try: LOG.info("Sending amphora agent update request for amphora %s to " "the queue.", self.amp_id) payload = {constants.AMPHORA_ID: db_amp.id} self.client.cast({}, 'update_amphora_agent_config', **payload) except Exception: with excutils.save_and_reraise_exception(reraise=True): LOG.error("Unable to send amphora agent update request for " "amphora %s to the queue.", self.amp_id) class AmphoraStatsController(base.BaseController): 
RBAC_TYPE = constants.RBAC_AMPHORA def __init__(self, amp_id): super(AmphoraStatsController, self).__init__() self.amp_id = amp_id @wsme_pecan.wsexpose(amp_types.StatisticsRootResponse, wtypes.text, status_code=200) def get(self): context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_STATS) stats = self.repositories.get_amphora_stats(context.session, self.amp_id) if stats == []: raise exceptions.NotFound(resource='Amphora stats for', id=self.amp_id) wsme_stats = [] for stat in stats: wsme_stats.append(amp_types.AmphoraStatisticsResponse(**stat)) return amp_types.StatisticsRootResponse(amphora_stats=wsme_stats) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/availability_zone_profiles.py0000664000175000017500000002637000000000000026260 0ustar00zuulzuul00000000000000# Copyright 2019 Verizon Media # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import uuidutils from pecan import request as pecan_request from sqlalchemy.orm import exc as sa_exception from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.types import availability_zone_profile as profile_types from octavia.common import constants from octavia.common import exceptions from octavia.db import api as db_api LOG = logging.getLogger(__name__) class AvailabilityZoneProfileController(base.BaseController): RBAC_TYPE = constants.RBAC_AVAILABILITY_ZONE_PROFILE def __init__(self): super(AvailabilityZoneProfileController, self).__init__() @wsme_pecan.wsexpose(profile_types.AvailabilityZoneProfileRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_one(self, id, fields=None): """Gets an Availability Zone Profile's detail.""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ONE) if id == constants.NIL_UUID: raise exceptions.NotFound(resource='Availability Zone Profile', id=constants.NIL_UUID) db_availability_zone_profile = self._get_db_availability_zone_profile( context.session, id) result = self._convert_db_to_type( db_availability_zone_profile, profile_types.AvailabilityZoneProfileResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return profile_types.AvailabilityZoneProfileRootResponse( availability_zone_profile=result) @wsme_pecan.wsexpose(profile_types.AvailabilityZoneProfilesRootResponse, [wtypes.text], ignore_extra_args=True) def get_all(self, fields=None): """Lists all Availability Zone Profiles.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') 
self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ALL) db_availability_zone_profiles, links = ( self.repositories.availability_zone_profile.get_all( context.session, pagination_helper=pcontext.get(constants.PAGINATION_HELPER))) result = self._convert_db_to_type( db_availability_zone_profiles, [profile_types.AvailabilityZoneProfileResponse]) if fields is not None: result = self._filter_fields(result, fields) return profile_types.AvailabilityZoneProfilesRootResponse( availability_zone_profiles=result, availability_zone_profile_links=links) @wsme_pecan.wsexpose(profile_types.AvailabilityZoneProfileRootResponse, body=profile_types.AvailabilityZoneProfileRootPOST, status_code=201) def post(self, availability_zone_profile_): """Creates an Availability Zone Profile.""" availability_zone_profile = ( availability_zone_profile_.availability_zone_profile) context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_POST) # Do a basic JSON validation on the metadata try: availability_zone_data_dict = jsonutils.loads( availability_zone_profile.availability_zone_data) except Exception: raise exceptions.InvalidOption( value=availability_zone_profile.availability_zone_data, option=constants.AVAILABILITY_ZONE_DATA) # Validate that the provider driver supports the metadata driver = driver_factory.get_driver( availability_zone_profile.provider_name) driver_utils.call_provider( driver.name, driver.validate_availability_zone, availability_zone_data_dict) lock_session = db_api.get_session(autocommit=False) try: availability_zone_profile_dict = availability_zone_profile.to_dict( render_unsets=True) availability_zone_profile_dict['id'] = uuidutils.generate_uuid() db_availability_zone_profile = ( self.repositories.availability_zone_profile.create( lock_session, **availability_zone_profile_dict)) lock_session.commit() except odb_exceptions.DBDuplicateEntry: lock_session.rollback() raise exceptions.IDAlreadyExists() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() result = self._convert_db_to_type( db_availability_zone_profile, profile_types.AvailabilityZoneProfileResponse) return profile_types.AvailabilityZoneProfileRootResponse( availability_zone_profile=result) def _validate_update_azp(self, context, id, availability_zone_profile): if availability_zone_profile.name is None: raise exceptions.InvalidOption(value=None, option=constants.NAME) if availability_zone_profile.provider_name is None: raise exceptions.InvalidOption( value=None, option=constants.PROVIDER_NAME) if availability_zone_profile.availability_zone_data is None: raise exceptions.InvalidOption( value=None, option=constants.AVAILABILITY_ZONE_DATA) # Don't allow changes to the availability_zone_data or provider_name if # it is in use. 
if (not isinstance(availability_zone_profile.availability_zone_data, wtypes.UnsetType) or not isinstance(availability_zone_profile.provider_name, wtypes.UnsetType)): if self.repositories.availability_zone.count( context.session, availability_zone_profile_id=id) > 0: raise exceptions.ObjectInUse( object='Availability Zone Profile', id=id) @wsme_pecan.wsexpose(profile_types.AvailabilityZoneProfileRootResponse, wtypes.text, status_code=200, body=profile_types.AvailabilityZoneProfileRootPUT) def put(self, id, availability_zone_profile_): """Updates an Availability Zone Profile.""" availability_zone_profile = ( availability_zone_profile_.availability_zone_profile) context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_PUT) self._validate_update_azp(context, id, availability_zone_profile) if id == constants.NIL_UUID: raise exceptions.NotFound(resource='Availability Zone Profile', id=constants.NIL_UUID) if not isinstance(availability_zone_profile.availability_zone_data, wtypes.UnsetType): # Do a basic JSON validation on the metadata try: availability_zone_data_dict = jsonutils.loads( availability_zone_profile.availability_zone_data) except Exception: raise exceptions.InvalidOption( value=availability_zone_profile.availability_zone_data, option=constants.FLAVOR_DATA) if isinstance(availability_zone_profile.provider_name, wtypes.UnsetType): db_availability_zone_profile = ( self._get_db_availability_zone_profile( context.session, id)) provider_driver = db_availability_zone_profile.provider_name else: provider_driver = availability_zone_profile.provider_name # Validate that the provider driver supports the metadata driver = driver_factory.get_driver(provider_driver) driver_utils.call_provider( driver.name, driver.validate_availability_zone, availability_zone_data_dict) lock_session = db_api.get_session(autocommit=False) try: availability_zone_profile_dict = availability_zone_profile.to_dict( render_unsets=False) if availability_zone_profile_dict: self.repositories.availability_zone_profile.update( lock_session, id, **availability_zone_profile_dict) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() # Force SQL alchemy to query the DB, otherwise we get inconsistent # results context.session.expire_all() db_availability_zone_profile = self._get_db_availability_zone_profile( context.session, id) result = self._convert_db_to_type( db_availability_zone_profile, profile_types.AvailabilityZoneProfileResponse) return profile_types.AvailabilityZoneProfileRootResponse( availability_zone_profile=result) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, availability_zone_profile_id): """Deletes an Availability Zone Profile""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_DELETE) if availability_zone_profile_id == constants.NIL_UUID: raise exceptions.NotFound(resource='Availability Zone Profile', id=constants.NIL_UUID) # Don't allow it to be deleted if it is in use by an availability zone if self.repositories.availability_zone.count( context.session, availability_zone_profile_id=availability_zone_profile_id) > 0: raise exceptions.ObjectInUse(object='Availability Zone Profile', id=availability_zone_profile_id) try: self.repositories.availability_zone_profile.delete( context.session, id=availability_zone_profile_id) except sa_exception.NoResultFound: raise 
exceptions.NotFound(resource='Availability Zone Profile', id=availability_zone_profile_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/availability_zones.py0000664000175000017500000002015200000000000024530 0ustar00zuulzuul00000000000000# Copyright 2019 Verizon Media # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import api as oslo_db_api from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from pecan import request as pecan_request from sqlalchemy.orm import exc as sa_exception from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.v2.controllers import base from octavia.api.v2.types import availability_zones as availability_zone_types from octavia.common import constants from octavia.common import exceptions from octavia.db import api as db_api LOG = logging.getLogger(__name__) class AvailabilityZonesController(base.BaseController): RBAC_TYPE = constants.RBAC_AVAILABILITY_ZONE def __init__(self): super(AvailabilityZonesController, self).__init__() @wsme_pecan.wsexpose(availability_zone_types.AvailabilityZoneRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_one(self, name, fields=None): """Gets an Availability Zone's detail.""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ONE) if name == constants.NIL_UUID: raise exceptions.NotFound(resource='Availability Zone', id=constants.NIL_UUID) db_availability_zone = self._get_db_availability_zone( context.session, name) result = self._convert_db_to_type( db_availability_zone, availability_zone_types.AvailabilityZoneResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return availability_zone_types.AvailabilityZoneRootResponse( availability_zone=result) @wsme_pecan.wsexpose(availability_zone_types.AvailabilityZonesRootResponse, [wtypes.text], ignore_extra_args=True) def get_all(self, fields=None): """Lists all Availability Zones.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ALL) db_availability_zones, links = ( self.repositories.availability_zone.get_all( context.session, pagination_helper=pcontext.get(constants.PAGINATION_HELPER))) result = self._convert_db_to_type( db_availability_zones, [availability_zone_types.AvailabilityZoneResponse]) if fields is not None: result = self._filter_fields(result, fields) return availability_zone_types.AvailabilityZonesRootResponse( availability_zones=result, availability_zones_links=links) @wsme_pecan.wsexpose(availability_zone_types.AvailabilityZoneRootResponse, body=availability_zone_types.AvailabilityZoneRootPOST, status_code=201) def post(self, availability_zone_): """Creates an Availability Zone.""" availability_zone = availability_zone_.availability_zone context = 
pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_POST) lock_session = db_api.get_session(autocommit=False) try: availability_zone_dict = availability_zone.to_dict( render_unsets=True) db_availability_zone = self.repositories.availability_zone.create( lock_session, **availability_zone_dict) lock_session.commit() except odb_exceptions.DBDuplicateEntry: lock_session.rollback() raise exceptions.RecordAlreadyExists(field='availability zone', name=availability_zone.name) except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() result = self._convert_db_to_type( db_availability_zone, availability_zone_types.AvailabilityZoneResponse) return availability_zone_types.AvailabilityZoneRootResponse( availability_zone=result) @wsme_pecan.wsexpose(availability_zone_types.AvailabilityZoneRootResponse, wtypes.text, status_code=200, body=availability_zone_types.AvailabilityZoneRootPUT) def put(self, name, availability_zone_): availability_zone = availability_zone_.availability_zone context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_PUT) if name == constants.NIL_UUID: raise exceptions.NotFound(resource='Availability Zone', id=constants.NIL_UUID) lock_session = db_api.get_session(autocommit=False) try: availability_zone_dict = availability_zone.to_dict( render_unsets=False) if availability_zone_dict: self.repositories.availability_zone.update( lock_session, name, **availability_zone_dict) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() # Force SQL alchemy to query the DB, otherwise we get inconsistent # results context.session.expire_all() db_availability_zone = self._get_db_availability_zone( context.session, name) result = self._convert_db_to_type( db_availability_zone, availability_zone_types.AvailabilityZoneResponse) return availability_zone_types.AvailabilityZoneRootResponse( availability_zone=result) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, availability_zone_name): """Deletes an Availability Zone""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_DELETE) if availability_zone_name == constants.NIL_UUID: raise exceptions.NotFound(resource='Availability Zone', id=constants.NIL_UUID) serial_session = db_api.get_session(autocommit=False) serial_session.connection( execution_options={'isolation_level': 'SERIALIZABLE'}) try: self.repositories.availability_zone.delete( serial_session, name=availability_zone_name) serial_session.commit() # Handle when load balancers still reference this availability_zone except odb_exceptions.DBReferenceError: serial_session.rollback() raise exceptions.ObjectInUse(object='Availability Zone', id=availability_zone_name) except sa_exception.NoResultFound: serial_session.rollback() raise exceptions.NotFound(resource='Availability Zone', id=availability_zone_name) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error( 'Unknown availability_zone delete exception: %s', str(e)) serial_session.rollback() finally: serial_session.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/base.py0000664000175000017500000003513700000000000021563 
0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography.hazmat.backends import default_backend from cryptography import x509 from oslo_config import cfg from oslo_log import log as logging from pecan import request as pecan_request from pecan import rest as pecan_rest from stevedore import driver as stevedore_driver from wsme import types as wtypes from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.common import policy from octavia.db import repositories from octavia.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseController(pecan_rest.RestController): RBAC_TYPE = None def __init__(self): super(BaseController, self).__init__() self.cert_manager = stevedore_driver.DriverManager( namespace='octavia.cert_manager', name=CONF.certificates.cert_manager, invoke_on_load=True, ).driver self.repositories = repositories.Repositories() @staticmethod def _convert_db_to_type(db_entity, to_type, children=False): """Converts a data model into an Octavia WSME type :param db_entity: data model to convert :param to_type: converts db_entity to this type """ if isinstance(to_type, list): to_type = to_type[0] def _convert(db_obj): return to_type.from_data_model(db_obj, children=children) if isinstance(db_entity, list): converted = [_convert(db_obj) for db_obj in db_entity] else: converted = _convert(db_entity) return converted @staticmethod def _get_db_obj(session, repo, data_model, id, show_deleted=True): """Gets an object from the database and returns it.""" db_obj = repo.get(session, id=id, show_deleted=show_deleted) if not db_obj: LOG.debug('%(name)s %(id)s not found', {'name': data_model._name(), 'id': id}) raise exceptions.NotFound( resource=data_model._name(), id=id) return db_obj def _get_db_lb(self, session, id, show_deleted=True): """Get a load balancer from the database.""" return self._get_db_obj(session, self.repositories.load_balancer, data_models.LoadBalancer, id, show_deleted=show_deleted) def _get_db_listener(self, session, id, show_deleted=True): """Get a listener from the database.""" return self._get_db_obj(session, self.repositories.listener, data_models.Listener, id, show_deleted=show_deleted) def _get_listener_and_loadbalancer_id(self, db_l7policy): """Get listener and loadbalancer ids from the l7policy db_model.""" load_balancer_id = db_l7policy.listener.load_balancer_id listener_id = db_l7policy.listener_id return load_balancer_id, listener_id def _get_db_pool(self, session, id, show_deleted=True): """Get a pool from the database.""" return self._get_db_obj(session, self.repositories.pool, data_models.Pool, id, show_deleted=show_deleted) def _get_db_member(self, session, id, show_deleted=True): """Get a member from the database.""" return self._get_db_obj(session, self.repositories.member, data_models.Member, id, show_deleted=show_deleted) def _get_db_hm(self, session, id, show_deleted=True): """Get a health monitor from the 
database.""" return self._get_db_obj(session, self.repositories.health_monitor, data_models.HealthMonitor, id, show_deleted=show_deleted) def _get_db_flavor(self, session, id): """Get a flavor from the database.""" return self._get_db_obj(session, self.repositories.flavor, data_models.Flavor, id) def _get_db_flavor_profile(self, session, id): """Get a flavor profile from the database.""" return self._get_db_obj(session, self.repositories.flavor_profile, data_models.FlavorProfile, id) def _get_db_availability_zone(self, session, name): """Get an availability zone from the database.""" db_obj = self.repositories.availability_zone.get(session, name=name) if not db_obj: LOG.debug('%(obj_name)s %(name)s not found', {'obj_name': data_models.AvailabilityZone._name(), 'name': name}) raise exceptions.NotFound( resource=data_models.AvailabilityZone._name(), id=name) return db_obj def _get_db_availability_zone_profile(self, session, id): """Get an availability zone profile from the database.""" return self._get_db_obj(session, self.repositories.availability_zone_profile, data_models.AvailabilityZoneProfile, id) def _get_db_l7policy(self, session, id, show_deleted=True): """Get a L7 Policy from the database.""" return self._get_db_obj(session, self.repositories.l7policy, data_models.L7Policy, id, show_deleted=show_deleted) def _get_db_l7rule(self, session, id, show_deleted=True): """Get a L7 Rule from the database.""" return self._get_db_obj(session, self.repositories.l7rule, data_models.L7Rule, id, show_deleted=show_deleted) def _get_db_amp(self, session, id, show_deleted=True): """Gets an Amphora from the database.""" return self._get_db_obj(session, self.repositories.amphora, data_models.Amphora, id, show_deleted=show_deleted) def _get_lb_project_id(self, session, id, show_deleted=True): """Get the project_id of the load balancer from the database.""" lb = self._get_db_obj(session, self.repositories.load_balancer, data_models.LoadBalancer, id, show_deleted=show_deleted) return lb.project_id def _get_lb_project_id_provider(self, session, id, show_deleted=True): """Get the project_id of the load balancer from the database.""" lb = self._get_db_obj(session, self.repositories.load_balancer, data_models.LoadBalancer, id, show_deleted=show_deleted) return lb.project_id, lb.provider def _get_l7policy_project_id(self, session, id, show_deleted=True): """Get the project_id of the load balancer from the database.""" l7policy = self._get_db_obj(session, self.repositories.l7policy, data_models.LoadBalancer, id, show_deleted=show_deleted) return l7policy.project_id def _get_default_quotas(self, project_id): """Gets the project's default quotas.""" quotas = data_models.Quotas( project_id=project_id, load_balancer=CONF.quotas.default_load_balancer_quota, listener=CONF.quotas.default_listener_quota, pool=CONF.quotas.default_pool_quota, health_monitor=CONF.quotas.default_health_monitor_quota, member=CONF.quotas.default_member_quota) return quotas def _get_db_quotas(self, session, project_id): """Gets the project's quotas from the database, or responds with the default quotas. """ # At this point project_id should not ever be None or Unset db_quotas = self.repositories.quotas.get( session, project_id=project_id) if not db_quotas: LOG.debug("No custom quotas for project %s. Returning " "defaults...", project_id) db_quotas = self._get_default_quotas(project_id=project_id) else: # Fill in any that are using the configured defaults if db_quotas.load_balancer is None: db_quotas.load_balancer = (CONF.quotas. 
default_load_balancer_quota) if db_quotas.listener is None: db_quotas.listener = CONF.quotas.default_listener_quota if db_quotas.pool is None: db_quotas.pool = CONF.quotas.default_pool_quota if db_quotas.health_monitor is None: db_quotas.health_monitor = (CONF.quotas. default_health_monitor_quota) if db_quotas.member is None: db_quotas.member = CONF.quotas.default_member_quota return db_quotas def _auth_get_all(self, context, project_id): # Check authorization to list objects under all projects action = '{rbac_obj}{action}'.format( rbac_obj=self.RBAC_TYPE, action=constants.RBAC_GET_ALL_GLOBAL) target = {'project_id': project_id} if not policy.get_enforcer().authorize(action, target, context, do_raise=False): # Not a global observer or admin if project_id is None: project_id = context.project_id # If we still don't know who it is, reject it. if project_id is None: raise exceptions.PolicyForbidden() # Check authorization to list objects under this project self._auth_validate_action(context, project_id, constants.RBAC_GET_ALL) if project_id is None: query_filter = {} else: query_filter = {'project_id': project_id} return query_filter def _auth_validate_action(self, context, project_id, action): # Check that the user is authorized to do an action in this object action = '{rbac_obj}{action}'.format( rbac_obj=self.RBAC_TYPE, action=action) target = {'project_id': project_id} policy.get_enforcer().authorize(action, target, context) def _filter_fields(self, object_list, fields): if CONF.api_settings.allow_field_selection: for index, obj in enumerate(object_list): members = self._get_attrs(obj) for member in members: if member not in fields: setattr(obj, member, wtypes.Unset) return object_list @staticmethod def _get_attrs(obj): attrs = [attr for attr in dir(obj) if not callable( getattr(obj, attr)) and not attr.startswith("_")] return attrs def _validate_tls_refs(self, tls_refs): context = pecan_request.context.get('octavia_context') bad_refs = [] for ref in tls_refs: try: self.cert_manager.set_acls(context, ref) self.cert_manager.get_cert(context, ref, check_only=True) except exceptions.UnreadablePKCS12: raise except Exception: bad_refs.append(ref) if bad_refs: raise exceptions.CertificateRetrievalException(ref=bad_refs) def _validate_client_ca_and_crl_refs(self, client_ca_ref, crl_ref): context = pecan_request.context.get('octavia_context') bad_refs = [] try: self.cert_manager.set_acls(context, client_ca_ref) ca_pem = self.cert_manager.get_secret(context, client_ca_ref) except Exception: bad_refs.append(client_ca_ref) pem_crl = None if crl_ref: try: self.cert_manager.set_acls(context, crl_ref) pem_crl = self.cert_manager.get_secret(context, crl_ref) except Exception: bad_refs.append(crl_ref) if bad_refs: raise exceptions.CertificateRetrievalException(ref=bad_refs) ca_cert = None try: # Test if it needs to be UTF-8 encoded try: ca_pem = ca_pem.encode('utf-8') except AttributeError: pass ca_cert = x509.load_pem_x509_certificate(ca_pem, default_backend()) except Exception as e: raise exceptions.ValidationException(detail=_( "The client authentication CA certificate is invalid. " "It must be a valid x509 PEM format certificate. 
" "Error: %s") % str(e)) # Validate the CRL is for the client CA if pem_crl: ca_pub_key = ca_cert.public_key() crl = None # Test if it needs to be UTF-8 encoded try: pem_crl = pem_crl.encode('utf-8') except AttributeError: pass try: crl = x509.load_pem_x509_crl(pem_crl, default_backend()) except Exception as e: raise exceptions.ValidationException(detail=_( "The client authentication certificate revocation list " "is invalid. It must be a valid x509 PEM format " "certificate revocation list. Error: %s") % str(e)) if not crl.is_signature_valid(ca_pub_key): raise exceptions.ValidationException(detail=_( "The CRL specified is not valid for client certificate " "authority reference supplied.")) @staticmethod def _validate_protocol(listener_protocol, pool_protocol): proto_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP for valid_pool_proto in proto_map[listener_protocol]: if pool_protocol == valid_pool_proto: return detail = _("The pool protocol '%(pool_protocol)s' is invalid while " "the listener protocol is '%(listener_protocol)s'.") % { "pool_protocol": pool_protocol, "listener_protocol": listener_protocol} raise exceptions.ValidationException(detail=detail) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/flavor_profiles.py0000664000175000017500000002352100000000000024037 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import uuidutils from pecan import request as pecan_request from sqlalchemy.orm import exc as sa_exception from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.types import flavor_profile as profile_types from octavia.common import constants from octavia.common import exceptions from octavia.db import api as db_api LOG = logging.getLogger(__name__) class FlavorProfileController(base.BaseController): RBAC_TYPE = constants.RBAC_FLAVOR_PROFILE def __init__(self): super(FlavorProfileController, self).__init__() @wsme_pecan.wsexpose(profile_types.FlavorProfileRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_one(self, id, fields=None): """Gets a flavor profile's detail.""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ONE) if id == constants.NIL_UUID: raise exceptions.NotFound(resource='Flavor profile', id=constants.NIL_UUID) db_flavor_profile = self._get_db_flavor_profile(context.session, id) result = self._convert_db_to_type(db_flavor_profile, profile_types.FlavorProfileResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return profile_types.FlavorProfileRootResponse(flavorprofile=result) @wsme_pecan.wsexpose(profile_types.FlavorProfilesRootResponse, [wtypes.text], ignore_extra_args=True) def get_all(self, fields=None): """Lists all flavor profiles.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ALL) db_flavor_profiles, links = self.repositories.flavor_profile.get_all( context.session, pagination_helper=pcontext.get(constants.PAGINATION_HELPER)) result = self._convert_db_to_type( db_flavor_profiles, [profile_types.FlavorProfileResponse]) if fields is not None: result = self._filter_fields(result, fields) return profile_types.FlavorProfilesRootResponse( flavorprofiles=result, flavorprofile_links=links) @wsme_pecan.wsexpose(profile_types.FlavorProfileRootResponse, body=profile_types.FlavorProfileRootPOST, status_code=201) def post(self, flavor_profile_): """Creates a flavor Profile.""" flavorprofile = flavor_profile_.flavorprofile context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_POST) # Do a basic JSON validation on the metadata try: flavor_data_dict = jsonutils.loads(flavorprofile.flavor_data) except Exception: raise exceptions.InvalidOption( value=flavorprofile.flavor_data, option=constants.FLAVOR_DATA) # Validate that the provider driver supports the metadata driver = driver_factory.get_driver(flavorprofile.provider_name) driver_utils.call_provider(driver.name, driver.validate_flavor, flavor_data_dict) lock_session = db_api.get_session(autocommit=False) try: flavorprofile_dict = flavorprofile.to_dict(render_unsets=True) flavorprofile_dict['id'] = uuidutils.generate_uuid() db_flavor_profile = self.repositories.flavor_profile.create( lock_session, **flavorprofile_dict) lock_session.commit() except odb_exceptions.DBDuplicateEntry: lock_session.rollback() raise exceptions.IDAlreadyExists() except Exception: with 
excutils.save_and_reraise_exception(): lock_session.rollback() result = self._convert_db_to_type( db_flavor_profile, profile_types.FlavorProfileResponse) return profile_types.FlavorProfileRootResponse(flavorprofile=result) def _validate_update_fp(self, context, id, flavorprofile): if flavorprofile.name is None: raise exceptions.InvalidOption(value=None, option=constants.NAME) if flavorprofile.provider_name is None: raise exceptions.InvalidOption(value=None, option=constants.PROVIDER_NAME) if flavorprofile.flavor_data is None: raise exceptions.InvalidOption(value=None, option=constants.FLAVOR_DATA) # Don't allow changes to the flavor_data or provider_name if it # is in use. if (not isinstance(flavorprofile.flavor_data, wtypes.UnsetType) or not isinstance(flavorprofile.provider_name, wtypes.UnsetType)): if self.repositories.flavor.count(context.session, flavor_profile_id=id) > 0: raise exceptions.ObjectInUse(object='Flavor profile', id=id) @wsme_pecan.wsexpose(profile_types.FlavorProfileRootResponse, wtypes.text, status_code=200, body=profile_types.FlavorProfileRootPUT) def put(self, id, flavor_profile_): """Updates a flavor Profile.""" flavorprofile = flavor_profile_.flavorprofile context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_PUT) self._validate_update_fp(context, id, flavorprofile) if id == constants.NIL_UUID: raise exceptions.NotFound(resource='Flavor profile', id=constants.NIL_UUID) if not isinstance(flavorprofile.flavor_data, wtypes.UnsetType): # Do a basic JSON validation on the metadata try: flavor_data_dict = jsonutils.loads(flavorprofile.flavor_data) except Exception: raise exceptions.InvalidOption( value=flavorprofile.flavor_data, option=constants.FLAVOR_DATA) if isinstance(flavorprofile.provider_name, wtypes.UnsetType): db_flavor_profile = self._get_db_flavor_profile( context.session, id) provider_driver = db_flavor_profile.provider_name else: provider_driver = flavorprofile.provider_name # Validate that the provider driver supports the metadata driver = driver_factory.get_driver(provider_driver) driver_utils.call_provider(driver.name, driver.validate_flavor, flavor_data_dict) lock_session = db_api.get_session(autocommit=False) try: flavorprofile_dict = flavorprofile.to_dict(render_unsets=False) if flavorprofile_dict: self.repositories.flavor_profile.update(lock_session, id, **flavorprofile_dict) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() # Force SQL alchemy to query the DB, otherwise we get inconsistent # results context.session.expire_all() db_flavor_profile = self._get_db_flavor_profile(context.session, id) result = self._convert_db_to_type( db_flavor_profile, profile_types.FlavorProfileResponse) return profile_types.FlavorProfileRootResponse(flavorprofile=result) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, flavor_profile_id): """Deletes a Flavor Profile""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_DELETE) if flavor_profile_id == constants.NIL_UUID: raise exceptions.NotFound(resource='Flavor profile', id=constants.NIL_UUID) # Don't allow it to be deleted if it is in use by a flavor if self.repositories.flavor.count( context.session, flavor_profile_id=flavor_profile_id) > 0: raise exceptions.ObjectInUse(object='Flavor profile', id=flavor_profile_id) try: self.repositories.flavor_profile.delete(context.session, 
id=flavor_profile_id) except sa_exception.NoResultFound: raise exceptions.NotFound(resource='Flavor profile', id=flavor_profile_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/flavors.py0000664000175000017500000001614100000000000022317 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import api as oslo_db_api from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from pecan import request as pecan_request from sqlalchemy.orm import exc as sa_exception from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.v2.controllers import base from octavia.api.v2.types import flavors as flavor_types from octavia.common import constants from octavia.common import exceptions from octavia.db import api as db_api LOG = logging.getLogger(__name__) class FlavorsController(base.BaseController): RBAC_TYPE = constants.RBAC_FLAVOR def __init__(self): super(FlavorsController, self).__init__() @wsme_pecan.wsexpose(flavor_types.FlavorRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_one(self, id, fields=None): """Gets a flavor's detail.""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ONE) if id == constants.NIL_UUID: raise exceptions.NotFound(resource='Flavor', id=constants.NIL_UUID) db_flavor = self._get_db_flavor(context.session, id) result = self._convert_db_to_type(db_flavor, flavor_types.FlavorResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return flavor_types.FlavorRootResponse(flavor=result) @wsme_pecan.wsexpose(flavor_types.FlavorsRootResponse, [wtypes.text], ignore_extra_args=True) def get_all(self, fields=None): """Lists all flavors.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ALL) db_flavors, links = self.repositories.flavor.get_all( context.session, pagination_helper=pcontext.get(constants.PAGINATION_HELPER)) result = self._convert_db_to_type( db_flavors, [flavor_types.FlavorResponse]) if fields is not None: result = self._filter_fields(result, fields) return flavor_types.FlavorsRootResponse( flavors=result, flavors_links=links) @wsme_pecan.wsexpose(flavor_types.FlavorRootResponse, body=flavor_types.FlavorRootPOST, status_code=201) def post(self, flavor_): """Creates a flavor.""" flavor = flavor_.flavor context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_POST) # TODO(johnsom) Validate the flavor profile ID lock_session = db_api.get_session(autocommit=False) try: flavor_dict = flavor.to_dict(render_unsets=True) flavor_dict['id'] = uuidutils.generate_uuid() 
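# ---------------------------------------------------------------------------
# Illustrative sketch (not Octavia source): creates in these controllers all
# use the same transaction shape, as at this point in post() -- open a
# session with autocommit disabled, write, commit, and roll back on failure,
# translating a duplicate-key error into a user-facing conflict. Condensed,
# with `make_thing` as a placeholder repository call:
def create_with_lock_session(make_thing, **fields):
    lock_session = db_api.get_session(autocommit=False)
    try:
        obj = make_thing(lock_session, **fields)
        lock_session.commit()          # persist only on full success
        return obj
    except odb_exceptions.DBDuplicateEntry:
        lock_session.rollback()        # undo, then report the conflict
        raise exceptions.RecordAlreadyExists(field='resource',
                                             name=fields.get('name'))
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()    # undo, re-raise the original error
# ---------------------------------------------------------------------------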
db_flavor = self.repositories.flavor.create(lock_session, **flavor_dict) lock_session.commit() except odb_exceptions.DBDuplicateEntry: lock_session.rollback() raise exceptions.RecordAlreadyExists(field='flavor', name=flavor.name) except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() result = self._convert_db_to_type(db_flavor, flavor_types.FlavorResponse) return flavor_types.FlavorRootResponse(flavor=result) @wsme_pecan.wsexpose(flavor_types.FlavorRootResponse, wtypes.text, status_code=200, body=flavor_types.FlavorRootPUT) def put(self, id, flavor_): flavor = flavor_.flavor context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_PUT) if id == constants.NIL_UUID: raise exceptions.NotFound(resource='Flavor', id=constants.NIL_UUID) lock_session = db_api.get_session(autocommit=False) try: flavor_dict = flavor.to_dict(render_unsets=False) if flavor_dict: self.repositories.flavor.update(lock_session, id, **flavor_dict) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() # Force SQL alchemy to query the DB, otherwise we get inconsistent # results context.session.expire_all() db_flavor = self._get_db_flavor(context.session, id) result = self._convert_db_to_type(db_flavor, flavor_types.FlavorResponse) return flavor_types.FlavorRootResponse(flavor=result) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, flavor_id): """Deletes a Flavor""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_DELETE) if flavor_id == constants.NIL_UUID: raise exceptions.NotFound(resource='Flavor', id=constants.NIL_UUID) serial_session = db_api.get_session(autocommit=False) serial_session.connection( execution_options={'isolation_level': 'SERIALIZABLE'}) try: self.repositories.flavor.delete(serial_session, id=flavor_id) serial_session.commit() # Handle when load balancers still reference this flavor except odb_exceptions.DBReferenceError: serial_session.rollback() raise exceptions.ObjectInUse(object='Flavor', id=flavor_id) except sa_exception.NoResultFound: serial_session.rollback() raise exceptions.NotFound(resource='Flavor', id=flavor_id) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error('Unknown flavor delete exception: %s', str(e)) serial_session.rollback() finally: serial_session.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/health_monitor.py0000664000175000017500000004670200000000000023665 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
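# ---------------------------------------------------------------------------
# Illustrative sketch (not Octavia source): the flavor and availability zone
# delete paths above combine oslo.db's deadlock-retry decorator with a
# SERIALIZABLE transaction, so a concurrent create that references the row
# cannot race the delete. Condensed, with `repo` as a placeholder
# repository object offering the same delete(session, id=...) signature:
from oslo_db import api as oslo_db_api


@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def delete_serializable(repo, object_id):
    session = db_api.get_session(autocommit=False)
    # Pin this transaction's connection to SERIALIZABLE isolation
    session.connection(
        execution_options={'isolation_level': 'SERIALIZABLE'})
    try:
        repo.delete(session, id=object_id)
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
# ---------------------------------------------------------------------------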
from oslo_config import cfg from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import data_models as driver_dm from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.types import health_monitor as hm_types from octavia.common import constants as consts from octavia.common import data_models from octavia.common import exceptions from octavia.db import api as db_api from octavia.db import prepare as db_prepare from octavia.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) class HealthMonitorController(base.BaseController): RBAC_TYPE = consts.RBAC_HEALTHMONITOR def __init__(self): super(HealthMonitorController, self).__init__() @wsme_pecan.wsexpose(hm_types.HealthMonitorRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_one(self, id, fields=None): """Gets a single healthmonitor's details.""" context = pecan_request.context.get('octavia_context') db_hm = self._get_db_hm(context.session, id, show_deleted=False) self._auth_validate_action(context, db_hm.project_id, consts.RBAC_GET_ONE) result = self._convert_db_to_type( db_hm, hm_types.HealthMonitorResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return hm_types.HealthMonitorRootResponse(healthmonitor=result) @wsme_pecan.wsexpose(hm_types.HealthMonitorsRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_all(self, project_id=None, fields=None): """Gets all health monitors.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') query_filter = self._auth_get_all(context, project_id) db_hm, links = self.repositories.health_monitor.get_all_API_list( context.session, show_deleted=False, pagination_helper=pcontext.get(consts.PAGINATION_HELPER), **query_filter) result = self._convert_db_to_type( db_hm, [hm_types.HealthMonitorResponse]) if fields is not None: result = self._filter_fields(result, fields) return hm_types.HealthMonitorsRootResponse( healthmonitors=result, healthmonitors_links=links) def _get_affected_listener_ids(self, session, hm): """Gets a list of all listeners this request potentially affects.""" pool = self.repositories.pool.get(session, id=hm.pool_id) listener_ids = [li.id for li in pool.listeners] return listener_ids def _test_lb_and_listener_and_pool_statuses(self, session, hm): """Verify load balancer is in a mutable state.""" # We need to verify that any listeners referencing this pool are also # mutable pool = self.repositories.pool.get(session, id=hm.pool_id) load_balancer_id = pool.load_balancer_id # Check the parent is not locked for some reason (ERROR, etc.) 
if pool.provisioning_status not in consts.MUTABLE_STATUSES: raise exceptions.ImmutableObject(resource='Pool', id=hm.pool_id) if not self.repositories.test_and_set_lb_and_listeners_prov_status( session, load_balancer_id, consts.PENDING_UPDATE, consts.PENDING_UPDATE, listener_ids=self._get_affected_listener_ids(session, hm), pool_id=hm.pool_id): LOG.info("Health Monitor cannot be created or modified because " "the Load Balancer is in an immutable state") raise exceptions.ImmutableObject(resource='Load Balancer', id=load_balancer_id) def _validate_create_hm(self, lock_session, hm_dict): """Validate creating health monitor on pool.""" mandatory_fields = (consts.TYPE, consts.DELAY, consts.TIMEOUT, consts.POOL_ID) for field in mandatory_fields: if hm_dict.get(field, None) is None: raise exceptions.InvalidOption(value='None', option=field) # MAX_RETRIES is renamed fall_threshold so handle is special if hm_dict.get(consts.RISE_THRESHOLD, None) is None: raise exceptions.InvalidOption(value='None', option=consts.MAX_RETRIES) if hm_dict[consts.TYPE] not in (consts.HEALTH_MONITOR_HTTP, consts.HEALTH_MONITOR_HTTPS): if hm_dict.get(consts.HTTP_METHOD, None): raise exceptions.InvalidOption( value=consts.HTTP_METHOD, option='health monitors of ' 'type {}'.format(hm_dict[consts.TYPE])) if hm_dict.get(consts.URL_PATH, None): raise exceptions.InvalidOption( value=consts.URL_PATH, option='health monitors of ' 'type {}'.format(hm_dict[consts.TYPE])) if hm_dict.get(consts.EXPECTED_CODES, None): raise exceptions.InvalidOption( value=consts.EXPECTED_CODES, option='health monitors of ' 'type {}'.format(hm_dict[consts.TYPE])) else: if not hm_dict.get(consts.HTTP_METHOD, None): hm_dict[consts.HTTP_METHOD] = ( consts.HEALTH_MONITOR_HTTP_DEFAULT_METHOD) if not hm_dict.get(consts.URL_PATH, None): hm_dict[consts.URL_PATH] = ( consts.HEALTH_MONITOR_DEFAULT_URL_PATH) if not hm_dict.get(consts.EXPECTED_CODES, None): hm_dict[consts.EXPECTED_CODES] = ( consts.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES) if hm_dict.get('domain_name') and not hm_dict.get('http_version'): raise exceptions.ValidationException( detail=_("'http_version' must be specified when 'domain_name' " "is provided.")) if hm_dict.get('http_version') and hm_dict.get('domain_name'): if hm_dict['http_version'] < 1.1: raise exceptions.InvalidOption( value='http_version %s' % hm_dict['http_version'], option='health monitors HTTP 1.1 domain name health check') try: return self.repositories.health_monitor.create( lock_session, **hm_dict) except odb_exceptions.DBDuplicateEntry: raise exceptions.DuplicateHealthMonitor() except odb_exceptions.DBError: # TODO(blogan): will have to do separate validation protocol # before creation or update since the exception messages # do not give any information as to what constraint failed raise exceptions.InvalidOption(value='', option='') def _validate_healthmonitor_request_for_udp(self, request): if request.type not in ( consts.HEALTH_MONITOR_UDP_CONNECT, consts.HEALTH_MONITOR_TCP, consts.HEALTH_MONITOR_HTTP): raise exceptions.ValidationException(detail=_( "The associated pool protocol is %(pool_protocol)s, so only " "a %(types)s health monitor is supported.") % { 'pool_protocol': consts.PROTOCOL_UDP, 'types': '/'.join((consts.HEALTH_MONITOR_UDP_CONNECT, consts.HEALTH_MONITOR_TCP, consts.HEALTH_MONITOR_HTTP))}) # check the delay value if the HM type is UDP-CONNECT hm_is_type_udp = ( request.type == consts.HEALTH_MONITOR_UDP_CONNECT) conf_min_delay = ( CONF.api_settings.udp_connect_min_interval_health_monitor) if hm_is_type_udp and 
request.delay < conf_min_delay: raise exceptions.ValidationException(detail=_( "The request delay value %(delay)s should be larger than " "%(conf_min_delay)s for %(type)s health monitor type.") % { 'delay': request.delay, 'conf_min_delay': conf_min_delay, 'type': consts.HEALTH_MONITOR_UDP_CONNECT}) @wsme_pecan.wsexpose(hm_types.HealthMonitorRootResponse, body=hm_types.HealthMonitorRootPOST, status_code=201) def post(self, health_monitor_): """Creates a health monitor on a pool.""" context = pecan_request.context.get('octavia_context') health_monitor = health_monitor_.healthmonitor pool = self._get_db_pool(context.session, health_monitor.pool_id) health_monitor.project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) self._auth_validate_action(context, health_monitor.project_id, consts.RBAC_POST) if (not CONF.api_settings.allow_ping_health_monitors and health_monitor.type == consts.HEALTH_MONITOR_PING): raise exceptions.DisabledOption( option='type', value=consts.HEALTH_MONITOR_PING) if pool.protocol == consts.PROTOCOL_UDP: self._validate_healthmonitor_request_for_udp(health_monitor) else: if health_monitor.type == consts.HEALTH_MONITOR_UDP_CONNECT: raise exceptions.ValidationException(detail=_( "The %(type)s type is only supported for pools of type " "%(protocol)s.") % {'type': health_monitor.type, 'protocol': consts.PROTOCOL_UDP}) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) lock_session = db_api.get_session(autocommit=False) try: if self.repositories.check_quota_met( context.session, lock_session, data_models.HealthMonitor, health_monitor.project_id): raise exceptions.QuotaException( resource=data_models.HealthMonitor._name()) hm_dict = db_prepare.create_health_monitor( health_monitor.to_dict(render_unsets=True)) self._test_lb_and_listener_and_pool_statuses( lock_session, health_monitor) db_hm = self._validate_create_hm(lock_session, hm_dict) # Prepare the data for the driver data model provider_healthmon = (driver_utils.db_HM_to_provider_HM(db_hm)) # Dispatch to the driver LOG.info("Sending create Health Monitor %s to provider %s", db_hm.id, driver.name) driver_utils.call_provider( driver.name, driver.health_monitor_create, provider_healthmon) lock_session.commit() except odb_exceptions.DBError: lock_session.rollback() raise exceptions.InvalidOption( value=hm_dict.get('type'), option='type') except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() db_hm = self._get_db_hm(context.session, db_hm.id) result = self._convert_db_to_type( db_hm, hm_types.HealthMonitorResponse) return hm_types.HealthMonitorRootResponse(healthmonitor=result) def _graph_create(self, lock_session, hm_dict): hm_dict = db_prepare.create_health_monitor(hm_dict) db_hm = self._validate_create_hm(lock_session, hm_dict) return db_hm def _validate_update_hm(self, db_hm, health_monitor): if db_hm.type not in (consts.HEALTH_MONITOR_HTTP, consts.HEALTH_MONITOR_HTTPS): if health_monitor.http_method != wtypes.Unset: raise exceptions.InvalidOption( value=consts.HTTP_METHOD, option='health monitors of ' 'type {}'.format(db_hm.type)) if health_monitor.url_path != wtypes.Unset: raise exceptions.InvalidOption( value=consts.URL_PATH, option='health monitors of ' 'type {}'.format(db_hm.type)) if health_monitor.expected_codes != wtypes.Unset: raise exceptions.InvalidOption( value=consts.EXPECTED_CODES, option='health monitors of ' 'type {}'.format(db_hm.type)) if health_monitor.delay is None: raise 
exceptions.InvalidOption(value=None, option=consts.DELAY) if health_monitor.max_retries is None: raise exceptions.InvalidOption(value=None, option=consts.MAX_RETRIES) if health_monitor.timeout is None: raise exceptions.InvalidOption(value=None, option=consts.TIMEOUT) if health_monitor.domain_name and not ( db_hm.http_version or health_monitor.http_version): raise exceptions.ValidationException( detail=_("'http_version' must be specified when 'domain_name' " "is provided.")) if ((db_hm.http_version or health_monitor.http_version) and (db_hm.domain_name or health_monitor.domain_name)): http_version = health_monitor.http_version or db_hm.http_version if http_version < 1.1: raise exceptions.InvalidOption( value='http_version %s' % http_version, option='health monitors HTTP 1.1 domain name health check') def _set_default_on_none(self, health_monitor): """Reset settings to their default values if None/null was passed in A None/null value can be passed in to clear a value. PUT values that were not provided by the user have a type of wtypes.UnsetType. If the user is attempting to clear values, they should either be set to None (for example in the name field) or they should be reset to their default values. This method is intended to handle those values that need to be set back to a default value. """ if health_monitor.http_method is None: health_monitor.http_method = ( consts.HEALTH_MONITOR_HTTP_DEFAULT_METHOD) if health_monitor.url_path is None: health_monitor.url_path = ( consts.HEALTH_MONITOR_DEFAULT_URL_PATH) if health_monitor.expected_codes is None: health_monitor.expected_codes = ( consts.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES) if health_monitor.max_retries_down is None: health_monitor.max_retries_down = consts.DEFAULT_MAX_RETRIES_DOWN @wsme_pecan.wsexpose(hm_types.HealthMonitorRootResponse, wtypes.text, body=hm_types.HealthMonitorRootPUT, status_code=200) def put(self, id, health_monitor_): """Updates a health monitor.""" context = pecan_request.context.get('octavia_context') health_monitor = health_monitor_.healthmonitor db_hm = self._get_db_hm(context.session, id, show_deleted=False) pool = self._get_db_pool(context.session, db_hm.pool_id) project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) self._auth_validate_action(context, project_id, consts.RBAC_PUT) self._validate_update_hm(db_hm, health_monitor) # Validate health monitor update options for UDP-CONNECT type. 
if (pool.protocol == consts.PROTOCOL_UDP and db_hm.type == consts.HEALTH_MONITOR_UDP_CONNECT): health_monitor.type = db_hm.type self._validate_healthmonitor_request_for_udp(health_monitor) self._set_default_on_none(health_monitor) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_and_pool_statuses(lock_session, db_hm) # Prepare the data for the driver data model healthmon_dict = health_monitor.to_dict(render_unsets=False) healthmon_dict['id'] = id provider_healthmon_dict = ( driver_utils.hm_dict_to_provider_dict(healthmon_dict)) # Also prepare the baseline object data old_provider_healthmon = driver_utils.db_HM_to_provider_HM(db_hm) # Dispatch to the driver LOG.info("Sending update Health Monitor %s to provider %s", id, driver.name) driver_utils.call_provider( driver.name, driver.health_monitor_update, old_provider_healthmon, driver_dm.HealthMonitor.from_dict(provider_healthmon_dict)) # Update the database to reflect what the driver just accepted health_monitor.provisioning_status = consts.PENDING_UPDATE db_hm_dict = health_monitor.to_dict(render_unsets=False) self.repositories.health_monitor.update(lock_session, id, **db_hm_dict) # Force SQL alchemy to query the DB, otherwise we get inconsistent # results context.session.expire_all() db_hm = self._get_db_hm(context.session, id) result = self._convert_db_to_type( db_hm, hm_types.HealthMonitorResponse) return hm_types.HealthMonitorRootResponse(healthmonitor=result) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, id): """Deletes a health monitor.""" context = pecan_request.context.get('octavia_context') db_hm = self._get_db_hm(context.session, id, show_deleted=False) pool = self._get_db_pool(context.session, db_hm.pool_id) project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) self._auth_validate_action(context, project_id, consts.RBAC_DELETE) if db_hm.provisioning_status == consts.DELETED: return # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_and_pool_statuses(lock_session, db_hm) self.repositories.health_monitor.update( lock_session, db_hm.id, provisioning_status=consts.PENDING_DELETE) LOG.info("Sending delete Health Monitor %s to provider %s", id, driver.name) provider_healthmon = driver_utils.db_HM_to_provider_HM(db_hm) driver_utils.call_provider( driver.name, driver.health_monitor_delete, provider_healthmon) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/l7policy.py0000664000175000017500000003423400000000000022410 0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
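# ---------------------------------------------------------------------------
# Illustrative sketch (not Octavia source): the health monitor controller
# above (and the L7 policy controller below) never touches the backend
# directly. Each write is translated to the provider data model and
# dispatched through driver_utils.call_provider, which maps driver failures
# to API-level exceptions, and the DB row only moves to a PENDING_* status
# once the driver has accepted the change. Condensed delete path, with
# placeholder arguments and names following the health monitor code above:
def dispatch_delete(repositories, lock_session, driver, db_hm):
    repositories.health_monitor.update(
        lock_session, db_hm.id,
        provisioning_status=consts.PENDING_DELETE)
    # db model -> provider data model
    provider_hm = driver_utils.db_HM_to_provider_HM(db_hm)
    # call_provider() translates driver errors into octavia exceptions
    driver_utils.call_provider(
        driver.name, driver.health_monitor_delete, provider_hm)
# ---------------------------------------------------------------------------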
from oslo_config import cfg from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from pecan import expose as pecan_expose from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import data_models as driver_dm from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.controllers import l7rule from octavia.api.v2.types import l7policy as l7policy_types from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.common import validate from octavia.db import api as db_api from octavia.db import prepare as db_prepare CONF = cfg.CONF LOG = logging.getLogger(__name__) class L7PolicyController(base.BaseController): RBAC_TYPE = constants.RBAC_L7POLICY def __init__(self): super(L7PolicyController, self).__init__() @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get(self, id, fields=None): """Gets a single l7policy's details.""" context = pecan_request.context.get('octavia_context') db_l7policy = self._get_db_l7policy(context.session, id, show_deleted=False) self._auth_validate_action(context, db_l7policy.project_id, constants.RBAC_GET_ONE) result = self._convert_db_to_type( db_l7policy, l7policy_types.L7PolicyResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return l7policy_types.L7PolicyRootResponse(l7policy=result) @wsme_pecan.wsexpose(l7policy_types.L7PoliciesRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_all(self, project_id=None, fields=None): """Lists all l7policies of a listener.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') query_filter = self._auth_get_all(context, project_id) db_l7policies, links = self.repositories.l7policy.get_all_API_list( context.session, show_deleted=False, pagination_helper=pcontext.get(constants.PAGINATION_HELPER), **query_filter) result = self._convert_db_to_type( db_l7policies, [l7policy_types.L7PolicyResponse]) if fields is not None: result = self._filter_fields(result, fields) return l7policy_types.L7PoliciesRootResponse( l7policies=result, l7policies_links=links) def _test_lb_and_listener_statuses(self, session, lb_id, listener_ids): """Verify load balancer is in a mutable state.""" if not self.repositories.test_and_set_lb_and_listeners_prov_status( session, lb_id, constants.PENDING_UPDATE, constants.PENDING_UPDATE, listener_ids=listener_ids): LOG.info("L7Policy cannot be created or modified because the " "Load Balancer is in an immutable state") raise exceptions.ImmutableObject(resource='Load Balancer', id=lb_id) def _validate_create_l7policy(self, lock_session, l7policy_dict): try: # Set the default HTTP redirect code here so it's explicit if ((l7policy_dict.get('redirect_url') or l7policy_dict.get('redirect_prefix')) and not l7policy_dict.get('redirect_http_code')): l7policy_dict['redirect_http_code'] = 302 return self.repositories.l7policy.create(lock_session, **l7policy_dict) except odb_exceptions.DBDuplicateEntry: raise exceptions.IDAlreadyExists() except odb_exceptions.DBError: # TODO(blogan): will have to do separate validation protocol # before creation or update since the exception messages # do not give any information as to what constraint failed raise 
exceptions.InvalidOption(value='', option='') @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse, body=l7policy_types.L7PolicyRootPOST, status_code=201) def post(self, l7policy_): """Creates a l7policy on a listener.""" l7policy = l7policy_.l7policy context = pecan_request.context.get('octavia_context') # Verify the parent listener exists listener_id = l7policy.listener_id listener = self._get_db_listener( context.session, listener_id) load_balancer_id = listener.load_balancer_id l7policy.project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, l7policy.project_id, constants.RBAC_POST) # Make sure any pool specified by redirect_pool_id exists if l7policy.redirect_pool_id: db_pool = self._get_db_pool( context.session, l7policy.redirect_pool_id) self._validate_protocol(listener.protocol, db_pool.protocol) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) lock_session = db_api.get_session(autocommit=False) try: if self.repositories.check_quota_met( context.session, lock_session, data_models.L7Policy, l7policy.project_id): raise exceptions.QuotaException( resource=data_models.L7Policy._name()) l7policy_dict = db_prepare.create_l7policy( l7policy.to_dict(render_unsets=True), load_balancer_id, listener_id) self._test_lb_and_listener_statuses( lock_session, lb_id=load_balancer_id, listener_ids=[listener_id]) db_l7policy = self._validate_create_l7policy( lock_session, l7policy_dict) # Prepare the data for the driver data model provider_l7policy = ( driver_utils.db_l7policy_to_provider_l7policy(db_l7policy)) # Dispatch to the driver LOG.info("Sending create L7 Policy %s to provider %s", db_l7policy.id, driver.name) driver_utils.call_provider( driver.name, driver.l7policy_create, provider_l7policy) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() db_l7policy = self._get_db_l7policy(context.session, db_l7policy.id) result = self._convert_db_to_type(db_l7policy, l7policy_types.L7PolicyResponse) return l7policy_types.L7PolicyRootResponse(l7policy=result) def _graph_create(self, lock_session, policy_dict): load_balancer_id = policy_dict.pop('load_balancer_id', None) listener_id = policy_dict['listener_id'] policy_dict = db_prepare.create_l7policy( policy_dict, load_balancer_id, listener_id) rules = policy_dict.pop('l7rules', []) or [] db_policy = self._validate_create_l7policy(lock_session, policy_dict) new_rules = [] for r in rules: r['project_id'] = db_policy.project_id new_rules.append( l7rule.L7RuleController(db_policy.id)._graph_create( lock_session, r)) db_policy.l7rules = new_rules return db_policy @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse, wtypes.text, body=l7policy_types.L7PolicyRootPUT, status_code=200) def put(self, id, l7policy_): """Updates a l7policy.""" l7policy = l7policy_.l7policy context = pecan_request.context.get('octavia_context') db_l7policy = self._get_db_l7policy(context.session, id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( db_l7policy) project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_PUT) l7policy_dict = validate.sanitize_l7policy_api_args( l7policy.to_dict(render_unsets=False)) # Reset renamed attributes for attr, val in l7policy_types.L7PolicyPUT._type_to_model_map.items(): if val in l7policy_dict: l7policy_dict[attr] = 
l7policy_dict.pop(val) sanitized_l7policy = l7policy_types.L7PolicyPUT(**l7policy_dict) listener = self._get_db_listener( context.session, db_l7policy.listener_id) # Make sure any specified redirect_pool_id exists if l7policy_dict.get('redirect_pool_id'): db_pool = self._get_db_pool( context.session, l7policy_dict['redirect_pool_id']) self._validate_protocol(listener.protocol, db_pool.protocol) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_statuses(lock_session, lb_id=load_balancer_id, listener_ids=[listener_id]) # Prepare the data for the driver data model l7policy_dict = sanitized_l7policy.to_dict(render_unsets=False) l7policy_dict['id'] = id provider_l7policy_dict = ( driver_utils.l7policy_dict_to_provider_dict(l7policy_dict)) # Also prepare the baseline object data old_provider_l7policy = ( driver_utils.db_l7policy_to_provider_l7policy(db_l7policy)) # Dispatch to the driver LOG.info("Sending update L7 Policy %s to provider %s", id, driver.name) driver_utils.call_provider( driver.name, driver.l7policy_update, old_provider_l7policy, driver_dm.L7Policy.from_dict(provider_l7policy_dict)) # Update the database to reflect what the driver just accepted sanitized_l7policy.provisioning_status = constants.PENDING_UPDATE db_l7policy_dict = sanitized_l7policy.to_dict(render_unsets=False) self.repositories.l7policy.update(lock_session, id, **db_l7policy_dict) # Force SQL alchemy to query the DB, otherwise we get inconsistent # results context.session.expire_all() db_l7policy = self._get_db_l7policy(context.session, id) result = self._convert_db_to_type(db_l7policy, l7policy_types.L7PolicyResponse) return l7policy_types.L7PolicyRootResponse(l7policy=result) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, id): """Deletes a l7policy.""" context = pecan_request.context.get('octavia_context') db_l7policy = self._get_db_l7policy(context.session, id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( db_l7policy) project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) if db_l7policy.provisioning_status == constants.DELETED: return # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_statuses(lock_session, lb_id=load_balancer_id, listener_ids=[listener_id]) self.repositories.l7policy.update( lock_session, db_l7policy.id, provisioning_status=constants.PENDING_DELETE) LOG.info("Sending delete L7 Policy %s to provider %s", id, driver.name) provider_l7policy = driver_utils.db_l7policy_to_provider_l7policy( db_l7policy) driver_utils.call_provider(driver.name, driver.l7policy_delete, provider_l7policy) @pecan_expose() def _lookup(self, l7policy_id, *remainder): """Overridden pecan _lookup method for custom routing. Verifies that the l7policy passed in the url exists, and if so decides which controller, if any, should control be passed. 
""" context = pecan_request.context.get('octavia_context') if l7policy_id and remainder and remainder[0] == 'rules': remainder = remainder[1:] db_l7policy = self.repositories.l7policy.get( context.session, id=l7policy_id) if not db_l7policy: LOG.info("L7Policy %s not found.", l7policy_id) raise exceptions.NotFound( resource='L7Policy', id=l7policy_id) return l7rule.L7RuleController( l7policy_id=db_l7policy.id), remainder return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/l7rule.py0000664000175000017500000003053400000000000022057 0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import data_models as driver_dm from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.types import l7rule as l7rule_types from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.common import validate from octavia.db import api as db_api from octavia.db import prepare as db_prepare LOG = logging.getLogger(__name__) class L7RuleController(base.BaseController): RBAC_TYPE = constants.RBAC_L7RULE def __init__(self, l7policy_id): super(L7RuleController, self).__init__() self.l7policy_id = l7policy_id @wsme_pecan.wsexpose(l7rule_types.L7RuleRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get(self, id, fields=None): """Gets a single l7rule's details.""" context = pecan_request.context.get('octavia_context') db_l7rule = self._get_db_l7rule(context.session, id, show_deleted=False) self._auth_validate_action(context, db_l7rule.project_id, constants.RBAC_GET_ONE) result = self._convert_db_to_type( db_l7rule, l7rule_types.L7RuleResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return l7rule_types.L7RuleRootResponse(rule=result) @wsme_pecan.wsexpose(l7rule_types.L7RulesRootResponse, [wtypes.text], ignore_extra_args=True) def get_all(self, fields=None): """Lists all l7rules of a l7policy.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') l7policy = self._get_db_l7policy(context.session, self.l7policy_id, show_deleted=False) self._auth_validate_action(context, l7policy.project_id, constants.RBAC_GET_ALL) db_l7rules, links = self.repositories.l7rule.get_all_API_list( context.session, show_deleted=False, l7policy_id=self.l7policy_id, pagination_helper=pcontext.get(constants.PAGINATION_HELPER)) result = self._convert_db_to_type( db_l7rules, [l7rule_types.L7RuleResponse]) if fields is not None: result = self._filter_fields(result, fields) return 
l7rule_types.L7RulesRootResponse( rules=result, rules_links=links) def _test_lb_listener_policy_statuses(self, session): """Verify load balancer is in a mutable state.""" l7policy = self._get_db_l7policy(session, self.l7policy_id) listener_id = l7policy.listener_id load_balancer_id = l7policy.listener.load_balancer_id # Check the parent is not locked for some reason (ERROR, etc.) if l7policy.provisioning_status not in constants.MUTABLE_STATUSES: raise exceptions.ImmutableObject(resource='L7Policy', id=self.l7policy_id) if not self.repositories.test_and_set_lb_and_listeners_prov_status( session, load_balancer_id, constants.PENDING_UPDATE, constants.PENDING_UPDATE, listener_ids=[listener_id], l7policy_id=self.l7policy_id): LOG.info("L7Rule cannot be created or modified because the " "Load Balancer is in an immutable state") raise exceptions.ImmutableObject(resource='Load Balancer', id=load_balancer_id) def _check_l7policy_max_rules(self, session): """Checks to make sure the L7Policy doesn't have too many rules.""" count = self.repositories.l7rule.count( session, l7policy_id=self.l7policy_id) if count >= constants.MAX_L7RULES_PER_L7POLICY: raise exceptions.TooManyL7RulesOnL7Policy(id=self.l7policy_id) def _validate_create_l7rule(self, lock_session, l7rule_dict): try: return self.repositories.l7rule.create(lock_session, **l7rule_dict) except odb_exceptions.DBDuplicateEntry: raise exceptions.IDAlreadyExists() except odb_exceptions.DBError: # TODO(blogan): will have to do separate validation protocol # before creation or update since the exception messages # do not give any information as to what constraint failed raise exceptions.InvalidOption(value='', option='') @wsme_pecan.wsexpose(l7rule_types.L7RuleRootResponse, body=l7rule_types.L7RuleRootPOST, status_code=201) def post(self, rule_): """Creates a l7rule on an l7policy.""" l7rule = rule_.rule context = pecan_request.context.get('octavia_context') db_l7policy = self._get_db_l7policy(context.session, self.l7policy_id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( db_l7policy) l7rule.project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, l7rule.project_id, constants.RBAC_POST) try: validate.l7rule_data(l7rule) except Exception as e: raise exceptions.L7RuleValidation(error=e) self._check_l7policy_max_rules(context.session) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) lock_session = db_api.get_session(autocommit=False) try: l7rule_dict = db_prepare.create_l7rule( l7rule.to_dict(render_unsets=True), self.l7policy_id) self._test_lb_listener_policy_statuses(context.session) db_l7rule = self._validate_create_l7rule(lock_session, l7rule_dict) # Prepare the data for the driver data model provider_l7rule = ( driver_utils.db_l7rule_to_provider_l7rule(db_l7rule)) # Dispatch to the driver LOG.info("Sending create L7 Rule %s to provider %s", db_l7rule.id, driver.name) driver_utils.call_provider( driver.name, driver.l7rule_create, provider_l7rule) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() db_l7rule = self._get_db_l7rule(context.session, db_l7rule.id) result = self._convert_db_to_type(db_l7rule, l7rule_types.L7RuleResponse) return l7rule_types.L7RuleRootResponse(rule=result) def _graph_create(self, lock_session, rule_dict): try: validate.l7rule_data(l7rule_types.L7RulePOST(**rule_dict)) except Exception as e: raise 
exceptions.L7RuleValidation(error=e) rule_dict = db_prepare.create_l7rule(rule_dict, self.l7policy_id) db_rule = self._validate_create_l7rule(lock_session, rule_dict) return db_rule @wsme_pecan.wsexpose(l7rule_types.L7RuleRootResponse, wtypes.text, body=l7rule_types.L7RuleRootPUT, status_code=200) def put(self, id, l7rule_): """Updates a l7rule.""" l7rule = l7rule_.rule context = pecan_request.context.get('octavia_context') db_l7rule = self._get_db_l7rule(context.session, id, show_deleted=False) db_l7policy = self._get_db_l7policy(context.session, self.l7policy_id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( db_l7policy) project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_PUT) # Handle the invert unset if l7rule.invert is None: l7rule.invert = False new_l7rule = db_l7rule.to_dict() new_l7rule.update(l7rule.to_dict()) new_l7rule = data_models.L7Rule.from_dict(new_l7rule) try: validate.l7rule_data(new_l7rule) except Exception as e: raise exceptions.L7RuleValidation(error=e) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_listener_policy_statuses(lock_session) # Prepare the data for the driver data model l7rule_dict = l7rule.to_dict(render_unsets=False) l7rule_dict['id'] = id provider_l7rule_dict = ( driver_utils.l7rule_dict_to_provider_dict(l7rule_dict)) # Also prepare the baseline object data old_provider_l7rule = driver_utils.db_l7rule_to_provider_l7rule( db_l7rule) # Dispatch to the driver LOG.info("Sending update L7 Rule %s to provider %s", id, driver.name) driver_utils.call_provider( driver.name, driver.l7rule_update, old_provider_l7rule, driver_dm.L7Rule.from_dict(provider_l7rule_dict)) # Update the database to reflect what the driver just accepted l7rule.provisioning_status = constants.PENDING_UPDATE db_l7rule_dict = l7rule.to_dict(render_unsets=False) self.repositories.l7rule.update(lock_session, id, **db_l7rule_dict) # Force SQL alchemy to query the DB, otherwise we get inconsistent # results context.session.expire_all() db_l7rule = self._get_db_l7rule(context.session, id) result = self._convert_db_to_type(db_l7rule, l7rule_types.L7RuleResponse) return l7rule_types.L7RuleRootResponse(rule=result) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, id): """Deletes a l7rule.""" context = pecan_request.context.get('octavia_context') db_l7rule = self._get_db_l7rule(context.session, id, show_deleted=False) db_l7policy = self._get_db_l7policy(context.session, self.l7policy_id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( db_l7policy) project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) if db_l7rule.provisioning_status == constants.DELETED: return # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_listener_policy_statuses(lock_session) self.repositories.l7rule.update( lock_session, db_l7rule.id, provisioning_status=constants.PENDING_DELETE) LOG.info("Sending delete L7 Rule %s to provider %s", id, driver.name) provider_l7rule = ( driver_utils.db_l7rule_to_provider_l7rule(db_l7rule)) driver_utils.call_provider(driver.name, driver.l7rule_delete, 
provider_l7rule)
octavia-6.2.2/octavia/api/v2/controllers/listener.py
# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from pecan import expose as pecan_expose from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import data_models as driver_dm from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.controllers import l7policy from octavia.api.v2.types import listener as listener_types from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.common import stats from octavia.common import utils as common_utils from octavia.db import api as db_api from octavia.db import prepare as db_prepare from octavia.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) class ListenersController(base.BaseController): RBAC_TYPE = constants.RBAC_LISTENER def __init__(self): super(ListenersController, self).__init__() @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_one(self, id, fields=None): """Gets a single listener's details.""" context = pecan_request.context.get('octavia_context') db_listener = self._get_db_listener(context.session, id, show_deleted=False) if not db_listener: raise exceptions.NotFound(resource=data_models.Listener._name(), id=id) self._auth_validate_action(context, db_listener.project_id, constants.RBAC_GET_ONE) result = self._convert_db_to_type(db_listener, listener_types.ListenerResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return listener_types.ListenerRootResponse(listener=result) @wsme_pecan.wsexpose(listener_types.ListenersRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_all(self, project_id=None, fields=None): """Lists all listeners.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') query_filter = self._auth_get_all(context, project_id) db_listeners, links = self.repositories.listener.get_all_API_list( context.session, show_deleted=False, pagination_helper=pcontext.get(constants.PAGINATION_HELPER), **query_filter) result = self._convert_db_to_type( db_listeners, [listener_types.ListenerResponse]) if fields is not None: result = self._filter_fields(result, fields) return listener_types.ListenersRootResponse( listeners=result, listeners_links=links) def _test_lb_and_listener_statuses( self, session, lb_id, id=None, listener_status=constants.PENDING_UPDATE): """Verify load
balancer is in a mutable state.""" lb_repo = self.repositories.load_balancer if id: if not self.repositories.test_and_set_lb_and_listeners_prov_status( session, lb_id, constants.PENDING_UPDATE, listener_status, listener_ids=[id]): LOG.info("Load Balancer %s is immutable.", lb_id) db_lb = lb_repo.get(session, id=lb_id) raise exceptions.ImmutableObject(resource=db_lb._name(), id=lb_id) else: if not lb_repo.test_and_set_provisioning_status( session, lb_id, constants.PENDING_UPDATE): db_lb = lb_repo.get(session, id=lb_id) LOG.info("Load Balancer %s is immutable.", db_lb.id) raise exceptions.ImmutableObject(resource=db_lb._name(), id=lb_id) def _validate_pool(self, session, lb_id, pool_id, listener_protocol): """Validate pool given exists on same load balancer as listener.""" db_pool = self.repositories.pool.get( session, load_balancer_id=lb_id, id=pool_id) if not db_pool: raise exceptions.NotFound( resource=data_models.Pool._name(), id=pool_id) self._validate_protocol(listener_protocol, db_pool.protocol) def _has_tls_container_refs(self, listener_dict): return (listener_dict.get('tls_certificate_id') or listener_dict.get('client_ca_tls_container_id') or listener_dict.get('sni_containers')) def _is_tls_or_insert_header(self, listener_dict): return (self._has_tls_container_refs(listener_dict) or listener_dict.get('insert_headers')) def _validate_insert_headers(self, insert_header_list, listener_protocol): if (listener_protocol not in constants.LISTENER_PROTOCOLS_SUPPORTING_HEADER_INSERTION): raise exceptions.InvalidOption( value='insert-headers', option=('a %s protocol listener.' % listener_protocol)) if list(set(insert_header_list) - ( set(constants.SUPPORTED_HTTP_HEADERS + constants.SUPPORTED_SSL_HEADERS))): raise exceptions.InvalidOption( value=insert_header_list, option='insert_headers') if not listener_protocol == constants.PROTOCOL_TERMINATED_HTTPS: is_matched = len( constants.SUPPORTED_SSL_HEADERS) > len( list(set(constants.SUPPORTED_SSL_HEADERS) - set( insert_header_list))) if is_matched: headers = [] for header_name in insert_header_list: if header_name in constants.SUPPORTED_SSL_HEADERS: headers.append(header_name) raise exceptions.InvalidOption( value=headers, option=('%s protocol listener.' % listener_protocol)) def _validate_cidr_compatible_with_vip(self, vip, allowed_cidrs): for cidr in allowed_cidrs: # Check if CIDR IP version matches VIP IP version if common_utils.is_cidr_ipv6(cidr) != common_utils.is_ipv6(vip): msg = _("CIDR %(cidr)s IP version incompatible with VIP " "%(vip)s IP version.") raise exceptions.ValidationException( detail=msg % {'cidr': cidr, 'vip': vip}) def _validate_create_listener(self, lock_session, listener_dict): """Validate listener for wrong protocol or duplicate listeners Update the load balancer db when provisioning status changes. 
""" listener_protocol = listener_dict.get('protocol') if listener_dict and listener_dict.get('insert_headers'): self._validate_insert_headers( listener_dict['insert_headers'].keys(), listener_protocol) # Check for UDP compatibility if (listener_protocol == constants.PROTOCOL_UDP and self._is_tls_or_insert_header(listener_dict)): raise exceptions.ValidationException( detail=_("%s protocol listener does not " "support TLS.") % constants.PROTOCOL_UDP) # Check for TLS disabled if (not CONF.api_settings.allow_tls_terminated_listeners and listener_protocol == constants.PROTOCOL_TERMINATED_HTTPS): raise exceptions.DisabledOption( value=constants.PROTOCOL_TERMINATED_HTTPS, option='protocol') # Check for certs when not TERMINATED_HTTPS if (listener_protocol != constants.PROTOCOL_TERMINATED_HTTPS and self._has_tls_container_refs(listener_dict)): raise exceptions.ValidationException(detail=_( "Certificate container references are only allowed on " "%s protocol listeners.") % constants.PROTOCOL_TERMINATED_HTTPS) # Make sure a base certificate exists if specifying a client ca if (listener_dict.get('client_ca_tls_certificate_id') and not (listener_dict.get('tls_certificate_id') or listener_dict.get('sni_containers'))): raise exceptions.ValidationException(detail=_( "An SNI or default certificate container reference must " "be provided with a client CA container reference.")) # Make sure a certificate container is specified for TERMINATED_HTTPS if (listener_protocol == constants.PROTOCOL_TERMINATED_HTTPS and not (listener_dict.get('tls_certificate_id') or listener_dict.get('sni_containers'))): raise exceptions.ValidationException(detail=_( "An SNI or default certificate container reference must " "be provided for %s protocol listeners.") % constants.PROTOCOL_TERMINATED_HTTPS) # Make sure we have a client CA cert if they enable client auth if (listener_dict.get('client_authentication') != constants.CLIENT_AUTH_NONE and not listener_dict.get('client_ca_tls_certificate_id')): raise exceptions.ValidationException(detail=_( "Client authentication setting %s requires a client CA " "container reference.") % listener_dict.get('client_authentication')) # Make sure we have a client CA if they specify a CRL if (listener_dict.get('client_crl_container_id') and not listener_dict.get('client_ca_tls_certificate_id')): raise exceptions.ValidationException(detail=_( "A client authentication CA reference is required to " "specify a client authentication revocation list.")) # Validate the TLS containers sni_containers = listener_dict.pop('sni_containers', []) tls_refs = [sni['tls_container_id'] for sni in sni_containers] if listener_dict.get('tls_certificate_id'): tls_refs.append(listener_dict.get('tls_certificate_id')) self._validate_tls_refs(tls_refs) # Validate the client CA cert and optional client CRL if listener_dict.get('client_ca_tls_certificate_id'): self._validate_client_ca_and_crl_refs( listener_dict.get('client_ca_tls_certificate_id'), listener_dict.get('client_crl_container_id', None)) # Validate that the L4 protocol (UDP or TCP) is not already used for # the specified protocol_port in this load balancer pcontext = pecan_request.context query_filter = { 'project_id': listener_dict['project_id'], 'load_balancer_id': listener_dict['load_balancer_id'], 'protocol_port': listener_dict['protocol_port'] } # Get listeners on the same load balancer that use the same # protocol port db_listeners = self.repositories.listener.get_all_API_list( lock_session, show_deleted=False, 
pagination_helper=pcontext.get(constants.PAGINATION_HELPER), **query_filter)[0] if db_listeners: l4_protocol = constants.L4_PROTOCOL_MAP[listener_protocol] # List supported protocols that share the same L4 protocol as our # new listener disallowed_protocols = [ p for p in constants.L4_PROTOCOL_MAP if constants.L4_PROTOCOL_MAP[p] == l4_protocol ] for db_l in db_listeners: # Check if l4 protocol ports conflict if db_l.protocol in disallowed_protocols: raise exceptions.DuplicateListenerEntry( protocol=db_l.protocol, port=listener_dict.get('protocol_port')) # Validate allowed CIDRs allowed_cidrs = listener_dict.get('allowed_cidrs', []) or [] lb_id = listener_dict.get('load_balancer_id') vip_db = self.repositories.vip.get( lock_session, load_balancer_id=lb_id) vip_address = vip_db.ip_address self._validate_cidr_compatible_with_vip(vip_address, allowed_cidrs) try: db_listener = self.repositories.listener.create( lock_session, **listener_dict) if sni_containers: for container in sni_containers: sni_dict = {'listener_id': db_listener.id, 'tls_container_id': container.get( 'tls_container_id')} self.repositories.sni.create(lock_session, **sni_dict) # DB listener needs to be refreshed db_listener = self.repositories.listener.get( lock_session, id=db_listener.id) return db_listener except odb_exceptions.DBDuplicateEntry as de: column_list = ['load_balancer_id', 'protocol', 'protocol_port'] constraint_list = ['uq_listener_load_balancer_id_protocol_port'] if ['id'] == de.columns: raise exceptions.IDAlreadyExists() if (set(column_list) == set(de.columns) or set(constraint_list) == set(de.columns)): raise exceptions.DuplicateListenerEntry( protocol=listener_dict.get('protocol'), port=listener_dict.get('protocol_port')) except odb_exceptions.DBError: raise exceptions.InvalidOption(value=listener_dict.get('protocol'), option='protocol') @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, body=listener_types.ListenerRootPOST, status_code=201) def post(self, listener_): """Creates a listener on a load balancer.""" listener = listener_.listener context = pecan_request.context.get('octavia_context') load_balancer_id = listener.loadbalancer_id listener.project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, listener.project_id, constants.RBAC_POST) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) lock_session = db_api.get_session(autocommit=False) try: if self.repositories.check_quota_met( context.session, lock_session, data_models.Listener, listener.project_id): raise exceptions.QuotaException( resource=data_models.Listener._name()) listener_dict = db_prepare.create_listener( listener.to_dict(render_unsets=True), None) if listener_dict['default_pool_id']: self._validate_pool(context.session, load_balancer_id, listener_dict['default_pool_id'], listener.protocol) self._test_lb_and_listener_statuses( lock_session, lb_id=load_balancer_id) db_listener = self._validate_create_listener( lock_session, listener_dict) # Prepare the data for the driver data model provider_listener = ( driver_utils.db_listener_to_provider_listener(db_listener)) # re-inject the sni container references lost due to SNI # being a separate table in the DB if listener.sni_container_refs != wtypes.Unset: provider_listener.sni_container_refs = ( listener.sni_container_refs) # Dispatch to the driver LOG.info("Sending create Listener %s to provider %s", db_listener.id, driver.name) driver_utils.call_provider( driver.name, 
driver.listener_create, provider_listener) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() db_listener = self._get_db_listener(context.session, db_listener.id) result = self._convert_db_to_type(db_listener, listener_types.ListenerResponse) return listener_types.ListenerRootResponse(listener=result) def _graph_create(self, lock_session, listener_dict, l7policies=None, pool_name_ids=None): load_balancer_id = listener_dict['load_balancer_id'] listener_dict = db_prepare.create_listener( listener_dict, load_balancer_id) l7policies = listener_dict.pop('l7policies', l7policies) if listener_dict.get('default_pool_id'): self._validate_pool(lock_session, load_balancer_id, listener_dict['default_pool_id'], listener_dict['protocol']) db_listener = self._validate_create_listener( lock_session, listener_dict) # Now create l7policies new_l7ps = [] for l7p in l7policies: l7p['project_id'] = db_listener.project_id l7p['load_balancer_id'] = load_balancer_id l7p['listener_id'] = db_listener.id redirect_pool = l7p.pop('redirect_pool', None) if redirect_pool: pool_name = redirect_pool['name'] pool_id = pool_name_ids.get(pool_name) if not pool_id: raise exceptions.SingleCreateDetailsMissing( type='Pool', name=pool_name) l7p['redirect_pool_id'] = pool_id new_l7ps.append(l7policy.L7PolicyController()._graph_create( lock_session, l7p)) db_listener.l7policies = new_l7ps return db_listener def _validate_listener_PUT(self, listener, db_listener): # TODO(rm_work): Do we need something like this? What do we do on an # empty body for a PUT? if not listener: raise exceptions.ValidationException( detail='No listener object supplied.') # Check for UDP compatibility if (db_listener.protocol == constants.PROTOCOL_UDP and self._is_tls_or_insert_header(listener.to_dict())): raise exceptions.ValidationException(detail=_( "%s protocol listener does not support TLS or header " "insertion.") % constants.PROTOCOL_UDP) # Check for certs when not TERMINATED_HTTPS if (db_listener.protocol != constants.PROTOCOL_TERMINATED_HTTPS and self._has_tls_container_refs(listener.to_dict())): raise exceptions.ValidationException(detail=_( "Certificate container references are only allowed on " "%s protocol listeners.") % constants.PROTOCOL_TERMINATED_HTTPS) # Make sure we have a client CA cert if they enable client auth if ((listener.client_authentication != wtypes.Unset and listener.client_authentication != constants.CLIENT_AUTH_NONE) and not (db_listener.client_ca_tls_certificate_id or listener.client_ca_tls_container_ref)): raise exceptions.ValidationException(detail=_( "Client authentication setting %s requires a client CA " "container reference.") % listener.client_authentication) if listener.insert_headers: self._validate_insert_headers( list(listener.insert_headers.keys()), db_listener.protocol) sni_containers = listener.sni_container_refs or [] tls_refs = list(sni_containers) if listener.default_tls_container_ref: tls_refs.append(listener.default_tls_container_ref) self._validate_tls_refs(tls_refs) ca_ref = None if (listener.client_ca_tls_container_ref and listener.client_ca_tls_container_ref != wtypes.Unset): ca_ref = listener.client_ca_tls_container_ref elif db_listener.client_ca_tls_certificate_id: ca_ref = db_listener.client_ca_tls_certificate_id crl_ref = None if (listener.client_crl_container_ref and listener.client_crl_container_ref != wtypes.Unset): crl_ref = listener.client_crl_container_ref elif db_listener.client_crl_container_id: crl_ref = 
db_listener.client_crl_container_id if crl_ref and not ca_ref: raise exceptions.ValidationException(detail=_( "A client authentication CA reference is required to " "specify a client authentication revocation list.")) if ca_ref or crl_ref: self._validate_client_ca_and_crl_refs(ca_ref, crl_ref) # Validate allowed CIDRs if (listener.allowed_cidrs and listener.allowed_cidrs != wtypes.Unset): vip_address = db_listener.load_balancer.vip.ip_address self._validate_cidr_compatible_with_vip( vip_address, listener.allowed_cidrs) def _set_default_on_none(self, listener): """Reset settings to their default values if None/null was passed in A None/null value can be passed in to clear a value. PUT values that were not provided by the user have a type of wtypes.UnsetType. If the user is attempting to clear values, they should either be set to None (for example in the name field) or they should be reset to their default values. This method is intended to handle those values that need to be set back to a default value. """ if listener.connection_limit is None: listener.connection_limit = constants.DEFAULT_CONNECTION_LIMIT if listener.timeout_client_data is None: listener.timeout_client_data = ( CONF.haproxy_amphora.timeout_client_data) if listener.timeout_member_connect is None: listener.timeout_member_connect = ( CONF.haproxy_amphora.timeout_member_connect) if listener.timeout_member_data is None: listener.timeout_member_data = ( CONF.haproxy_amphora.timeout_member_data) if listener.timeout_tcp_inspect is None: listener.timeout_tcp_inspect = ( CONF.haproxy_amphora.timeout_tcp_inspect) if listener.client_authentication is None: listener.client_authentication = constants.CLIENT_AUTH_NONE @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, wtypes.text, body=listener_types.ListenerRootPUT, status_code=200) def put(self, id, listener_): """Updates a listener on a load balancer.""" listener = listener_.listener context = pecan_request.context.get('octavia_context') db_listener = self._get_db_listener(context.session, id, show_deleted=False) load_balancer_id = db_listener.load_balancer_id project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_PUT) self._validate_listener_PUT(listener, db_listener) self._set_default_on_none(listener) if listener.default_pool_id: self._validate_pool(context.session, load_balancer_id, listener.default_pool_id, db_listener.protocol) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_statuses(lock_session, load_balancer_id, id=id) # Prepare the data for the driver data model listener_dict = listener.to_dict(render_unsets=False) listener_dict['id'] = id provider_listener_dict = ( driver_utils.listener_dict_to_provider_dict(listener_dict)) # Also prepare the baseline object data old_provider_listener = ( driver_utils.db_listener_to_provider_listener(db_listener, for_delete=True)) # Dispatch to the driver LOG.info("Sending update Listener %s to provider %s", id, driver.name) driver_utils.call_provider( driver.name, driver.listener_update, old_provider_listener, driver_dm.Listener.from_dict(provider_listener_dict)) # Update the database to reflect what the driver just accepted self.repositories.listener.update( lock_session, id, **listener.to_dict(render_unsets=False)) # Force SQL alchemy to query the DB, otherwise we get inconsistent # results 
context.session.expire_all() db_listener = self._get_db_listener(context.session, id) result = self._convert_db_to_type(db_listener, listener_types.ListenerResponse) return listener_types.ListenerRootResponse(listener=result) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, id): """Deletes a listener from a load balancer.""" context = pecan_request.context.get('octavia_context') db_listener = self._get_db_listener(context.session, id, show_deleted=False) load_balancer_id = db_listener.load_balancer_id project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_statuses( lock_session, load_balancer_id, id=id, listener_status=constants.PENDING_DELETE) LOG.info("Sending delete Listener %s to provider %s", id, driver.name) provider_listener = ( driver_utils.db_listener_to_provider_listener( db_listener, for_delete=True)) driver_utils.call_provider(driver.name, driver.listener_delete, provider_listener) @pecan_expose() def _lookup(self, id, *remainder): """Overridden pecan _lookup method for custom routing. Currently it checks if this was a stats request and routes the request to the StatsController. """ if id and remainder and remainder[0] == 'stats': return StatisticsController(listener_id=id), remainder[1:] return None class StatisticsController(base.BaseController, stats.StatsMixin): RBAC_TYPE = constants.RBAC_LISTENER def __init__(self, listener_id): super(StatisticsController, self).__init__() self.id = listener_id @wsme_pecan.wsexpose(listener_types.StatisticsRootResponse, wtypes.text, status_code=200) def get(self): context = pecan_request.context.get('octavia_context') db_listener = self._get_db_listener(context.session, self.id, show_deleted=False) if not db_listener: LOG.info("Listener %s not found.", id) raise exceptions.NotFound( resource=data_models.Listener._name(), id=id) self._auth_validate_action(context, db_listener.project_id, constants.RBAC_GET_STATS) listener_stats = self.get_listener_stats(context.session, self.id) result = self._convert_db_to_type( listener_stats, listener_types.ListenerStatisticsResponse) return listener_types.StatisticsRootResponse(stats=result)
octavia-6.2.2/octavia/api/v2/controllers/load_balancer.py
# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
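# NOTE: Illustrative sketch only, not part of the original module. This is
# an approximation of the JSON body handled by LoadBalancersController.post()
# below; the subnet ID is a hypothetical placeholder.
# _validate_vip_request_object() requires at least one of vip_port_id,
# vip_network_id or vip_subnet_id, and each of those can be disallowed by
# the deployment's networking.allow_vip_* configuration options.
EXAMPLE_LOADBALANCER_POST_BODY = {
    "loadbalancer": {
        "name": "lb-example",
        "vip_subnet_id": "9c1a2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",  # hypothetical
        "provider": "amphora",  # optional; must match any flavor's provider
    }
}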
import ipaddress from oslo_config import cfg from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from pecan import expose as pecan_expose from pecan import request as pecan_request from sqlalchemy.orm import exc as sa_exception from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import data_models as driver_dm from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.controllers import listener from octavia.api.v2.controllers import pool from octavia.api.v2.types import load_balancer as lb_types from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.common import stats from octavia.common import utils import octavia.common.validate as validate from octavia.db import api as db_api from octavia.db import prepare as db_prepare from octavia.i18n import _ from octavia.network import base as network_base CONF = cfg.CONF LOG = logging.getLogger(__name__) class LoadBalancersController(base.BaseController): RBAC_TYPE = constants.RBAC_LOADBALANCER def __init__(self): super(LoadBalancersController, self).__init__() @wsme_pecan.wsexpose(lb_types.LoadBalancerRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_one(self, id, fields=None): """Gets a single load balancer's details.""" context = pecan_request.context.get('octavia_context') load_balancer = self._get_db_lb(context.session, id, show_deleted=False) if not load_balancer: raise exceptions.NotFound( resource=data_models.LoadBalancer._name(), id=id) self._auth_validate_action(context, load_balancer.project_id, constants.RBAC_GET_ONE) result = self._convert_db_to_type( load_balancer, lb_types.LoadBalancerResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return lb_types.LoadBalancerRootResponse(loadbalancer=result) @wsme_pecan.wsexpose(lb_types.LoadBalancersRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_all(self, project_id=None, fields=None): """Lists all load balancers.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') query_filter = self._auth_get_all(context, project_id) load_balancers, links = ( self.repositories.load_balancer.get_all_API_list( context.session, show_deleted=False, pagination_helper=pcontext.get(constants.PAGINATION_HELPER), **query_filter)) result = self._convert_db_to_type( load_balancers, [lb_types.LoadBalancerResponse]) if fields is not None: result = self._filter_fields(result, fields) return lb_types.LoadBalancersRootResponse( loadbalancers=result, loadbalancers_links=links) def _test_lb_status(self, session, id, lb_status=constants.PENDING_UPDATE): """Verify load balancer is in a mutable state.""" lb_repo = self.repositories.load_balancer if not lb_repo.test_and_set_provisioning_status( session, id, lb_status): prov_status = lb_repo.get(session, id=id).provisioning_status LOG.info("Invalid state %(state)s of loadbalancer resource %(id)s", {"state": prov_status, "id": id}) raise exceptions.LBPendingStateError( state=prov_status, id=id) def _test_and_set_failover_prov_status(self, session, id): lb_repo = self.repositories.load_balancer if not lb_repo.set_status_for_failover(session, id, constants.PENDING_UPDATE): prov_status = lb_repo.get(session, id=id).provisioning_status LOG.info("Invalid state 
%(state)s of loadbalancer resource %(id)s", {"state": prov_status, "id": id}) raise exceptions.LBPendingStateError( state=prov_status, id=id) @staticmethod def _validate_network_and_fill_or_validate_subnet(load_balancer, context=None): network = validate.network_exists_optionally_contains_subnet( network_id=load_balancer.vip_network_id, subnet_id=load_balancer.vip_subnet_id, context=context) if not load_balancer.vip_subnet_id: network_driver = utils.get_network_driver() if load_balancer.vip_address: for subnet_id in network.subnets: subnet = network_driver.get_subnet(subnet_id) if validate.is_ip_member_of_cidr(load_balancer.vip_address, subnet.cidr): load_balancer.vip_subnet_id = subnet_id break if not load_balancer.vip_subnet_id: raise exceptions.ValidationException(detail=_( "Supplied network does not contain a subnet for " "VIP address specified." )) else: # If subnet and IP are not provided, pick the first subnet with # enough available IPs, preferring ipv4 if not network.subnets: raise exceptions.ValidationException(detail=_( "Supplied network does not contain a subnet." )) ip_avail = network_driver.get_network_ip_availability( network) if (CONF.controller_worker.loadbalancer_topology == constants.TOPOLOGY_SINGLE): num_req_ips = 2 if (CONF.controller_worker.loadbalancer_topology == constants.TOPOLOGY_ACTIVE_STANDBY): num_req_ips = 3 subnets = [subnet_id for subnet_id in network.subnets if utils.subnet_ip_availability(ip_avail, subnet_id, num_req_ips)] if not subnets: raise exceptions.ValidationException(detail=_( "Subnet(s) in the supplied network do not contain " "enough available IPs." )) for subnet_id in subnets: # Use the first subnet, in case there are no ipv4 subnets if not load_balancer.vip_subnet_id: load_balancer.vip_subnet_id = subnet_id subnet = network_driver.get_subnet(subnet_id) if subnet.ip_version == 4: load_balancer.vip_subnet_id = subnet_id break @staticmethod def _validate_port_and_fill_or_validate_subnet(load_balancer, context=None): port = validate.port_exists(port_id=load_balancer.vip_port_id, context=context) validate.check_port_in_use(port) load_balancer.vip_network_id = port.network_id # validate the request vip port whether applied the qos_policy and # store the port_qos_policy to loadbalancer obj if possible. The # default behavior is that if 'vip_qos_policy_id' is specified in the # request, it will override the qos_policy applied on vip_port. 
port_qos_policy_id = port.qos_policy_id if (port_qos_policy_id and isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType)): load_balancer.vip_qos_policy_id = port_qos_policy_id if load_balancer.vip_subnet_id: # If we were provided a subnet_id, validate it exists and that # there is a fixed_ip on the port that matches the provided subnet validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id, context=context) for port_fixed_ip in port.fixed_ips: if port_fixed_ip.subnet_id == load_balancer.vip_subnet_id: load_balancer.vip_address = port_fixed_ip.ip_address break # Just pick the first address found in the subnet if not load_balancer.vip_address: raise exceptions.ValidationException(detail=_( "No VIP address found on the specified VIP port within " "the specified subnet.")) elif load_balancer.vip_address: normalized_lb_ip = ipaddress.ip_address( load_balancer.vip_address).compressed for port_fixed_ip in port.fixed_ips: normalized_port_ip = ipaddress.ip_address( port_fixed_ip.ip_address).compressed if normalized_port_ip == normalized_lb_ip: load_balancer.vip_subnet_id = port_fixed_ip.subnet_id break if not load_balancer.vip_subnet_id: raise exceptions.ValidationException(detail=_( "Specified VIP address not found on the " "specified VIP port.")) elif len(port.fixed_ips) == 1: # User provided only a port, get the subnet and address from it load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id load_balancer.vip_address = port.fixed_ips[0].ip_address else: raise exceptions.ValidationException(detail=_( "VIP port's subnet could not be determined. Please " "specify either a VIP subnet or address.")) def _validate_vip_request_object(self, load_balancer, context=None): allowed_network_objects = [] if CONF.networking.allow_vip_port_id: allowed_network_objects.append('vip_port_id') if CONF.networking.allow_vip_network_id: allowed_network_objects.append('vip_network_id') if CONF.networking.allow_vip_subnet_id: allowed_network_objects.append('vip_subnet_id') msg = _("use of %(object)s is disallowed by this deployment's " "configuration.") if (load_balancer.vip_port_id and not CONF.networking.allow_vip_port_id): raise exceptions.ValidationException( detail=msg % {'object': 'vip_port_id'}) if (load_balancer.vip_network_id and not CONF.networking.allow_vip_network_id): raise exceptions.ValidationException( detail=msg % {'object': 'vip_network_id'}) if (load_balancer.vip_subnet_id and not CONF.networking.allow_vip_subnet_id): raise exceptions.ValidationException( detail=msg % {'object': 'vip_subnet_id'}) if not (load_balancer.vip_port_id or load_balancer.vip_network_id or load_balancer.vip_subnet_id): raise exceptions.VIPValidationException( objects=', '.join(allowed_network_objects)) # Validate the port id if load_balancer.vip_port_id: self._validate_port_and_fill_or_validate_subnet(load_balancer, context=context) # If no port id, validate the network id (and subnet if provided) elif load_balancer.vip_network_id: self._validate_network_and_fill_or_validate_subnet(load_balancer, context=context) # Validate just the subnet id elif load_balancer.vip_subnet_id: subnet = validate.subnet_exists( subnet_id=load_balancer.vip_subnet_id, context=context) load_balancer.vip_network_id = subnet.network_id if load_balancer.vip_qos_policy_id: validate.qos_policy_exists( qos_policy_id=load_balancer.vip_qos_policy_id) def _create_vip_port_if_not_exist(self, load_balancer_db): """Create vip port.""" network_driver = utils.get_network_driver() try: return network_driver.allocate_vip(load_balancer_db) except 
network_base.AllocateVIPException as e: # Convert neutron style exception to octavia style # if the error was API ready if getattr(e, 'orig_code', None) is not None: e.code = e.orig_code if getattr(e, 'orig_msg', None) is not None: e.message = e.orig_msg e.msg = e.orig_msg raise e def _get_provider(self, session, load_balancer): """Decide on the provider for this load balancer.""" provider = None if not isinstance(load_balancer.flavor_id, wtypes.UnsetType): try: provider = self.repositories.flavor.get_flavor_provider( session, load_balancer.flavor_id) except sa_exception.NoResultFound: raise exceptions.ValidationException( detail=_("Invalid flavor_id.")) # No provider specified and no flavor specified, use conf default if (isinstance(load_balancer.provider, wtypes.UnsetType) and not provider): provider = CONF.api_settings.default_provider_driver # Both provider and flavor specified, they must match elif (not isinstance(load_balancer.provider, wtypes.UnsetType) and provider): if provider != load_balancer.provider: raise exceptions.ProviderFlavorMismatchError( flav=load_balancer.flavor_id, prov=load_balancer.provider) # No flavor, but provider, use the provider specified elif not provider: provider = load_balancer.provider # Otherwise, use the flavor provider we found above return provider def _apply_flavor_to_lb_dict(self, lock_session, driver, lb_dict): flavor_dict = {} if 'flavor_id' in lb_dict: try: flavor_dict = ( self.repositories.flavor.get_flavor_metadata_dict( lock_session, lb_dict['flavor_id'])) except sa_exception.NoResultFound: raise exceptions.ValidationException( detail=_("Invalid flavor_id.")) # Make sure the driver will still accept the flavor metadata if flavor_dict: driver_utils.call_provider(driver.name, driver.validate_flavor, flavor_dict) # Apply the flavor settings to the load balancer # Use the configuration file settings as defaults lb_dict[constants.TOPOLOGY] = flavor_dict.get( constants.LOADBALANCER_TOPOLOGY, CONF.controller_worker.loadbalancer_topology) return flavor_dict def _validate_flavor(self, session, load_balancer): if not isinstance(load_balancer.flavor_id, wtypes.UnsetType): flavor = self.repositories.flavor.get(session, id=load_balancer.flavor_id) if not flavor: raise exceptions.ValidationException( detail=_("Invalid flavor_id.")) if not flavor.enabled: raise exceptions.DisabledOption(option='flavor', value=load_balancer.flavor_id) def _validate_and_return_az_dict(self, lock_session, driver, lb_dict): az_dict = {} if 'availability_zone' in lb_dict: try: az = self.repositories.availability_zone.get( lock_session, name=lb_dict['availability_zone']) az_dict = ( self.repositories.availability_zone .get_availability_zone_metadata_dict(lock_session, az.name) ) except sa_exception.NoResultFound: raise exceptions.ValidationException( detail=_("Invalid availability_zone.")) # Make sure the driver will still accept the availability zone metadata if az_dict: try: driver_utils.call_provider(driver.name, driver.validate_availability_zone, az_dict) except NotImplementedError: raise exceptions.ProviderNotImplementedError( prov=driver.name, user_msg="This provider does not support" " availability zones.") return az_dict def _validate_availability_zone(self, session, load_balancer): if not isinstance(load_balancer.availability_zone, wtypes.UnsetType): az = self.repositories.availability_zone.get( session, name=load_balancer.availability_zone) if not az: raise exceptions.ValidationException( detail=_("Invalid availability zone.")) if not az.enabled: raise
exceptions.DisabledOption( option='availability_zone', value=load_balancer.availability_zone) @wsme_pecan.wsexpose(lb_types.LoadBalancerFullRootResponse, body=lb_types.LoadBalancerRootPOST, status_code=201) def post(self, load_balancer): """Creates a load balancer.""" load_balancer = load_balancer.loadbalancer context = pecan_request.context.get('octavia_context') if not load_balancer.project_id and context.project_id: load_balancer.project_id = context.project_id if not load_balancer.project_id: raise exceptions.ValidationException(detail=_( "Missing project ID in request where one is required. " "An administrator should check the keystone settings " "in the Octavia configuration.")) self._auth_validate_action(context, load_balancer.project_id, constants.RBAC_POST) self._validate_vip_request_object(load_balancer, context=context) self._validate_flavor(context.session, load_balancer) self._validate_availability_zone(context.session, load_balancer) provider = self._get_provider(context.session, load_balancer) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) lock_session = db_api.get_session(autocommit=False) try: if self.repositories.check_quota_met( context.session, lock_session, data_models.LoadBalancer, load_balancer.project_id): raise exceptions.QuotaException( resource=data_models.LoadBalancer._name()) db_lb, db_pools, db_lists = None, None, None lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict( render_unsets=False )) vip_dict = lb_dict.pop('vip', {}) # Make sure we store the right provider in the DB lb_dict['provider'] = driver.name # NoneType can be weird here, have to force type a second time listeners = lb_dict.pop('listeners', []) or [] pools = lb_dict.pop('pools', []) or [] flavor_dict = self._apply_flavor_to_lb_dict(lock_session, driver, lb_dict) az_dict = self._validate_and_return_az_dict(lock_session, driver, lb_dict) # Validate the network as soon as we have the AZ data validate.network_allowed_by_config( load_balancer.vip_network_id, valid_networks=az_dict.get(constants.VALID_VIP_NETWORKS)) db_lb = self.repositories.create_load_balancer_and_vip( lock_session, lb_dict, vip_dict) # Pass the flavor dictionary through for the provider drivers # This is a "virtual" lb_dict item that includes the expanded # flavor dict instead of just the flavor_id we store in the DB. lb_dict['flavor'] = flavor_dict # Do the same with the availability_zone dict lb_dict['availability_zone'] = az_dict # See if the provider driver wants to manage the VIP port # This will still be called if the user provided a port to # allow drivers to collect any required information about the # VIP port. octavia_owned = False try: provider_vip_dict = driver_utils.vip_dict_to_provider_dict( vip_dict) vip_dict = driver_utils.call_provider( driver.name, driver.create_vip_port, db_lb.id, db_lb.project_id, provider_vip_dict) vip = driver_utils.provider_vip_dict_to_vip_obj(vip_dict) except exceptions.ProviderNotImplementedError: # create vip port if not exist, driver didn't want to create # the VIP port vip = self._create_vip_port_if_not_exist(db_lb) LOG.info('Created VIP port %s for provider %s.', vip.port_id, driver.name) # If a port_id wasn't passed in and we made it this far # we created the VIP if 'port_id' not in vip_dict or not vip_dict['port_id']: octavia_owned = True # Check if the driver claims octavia owns the VIP port. 
if vip.octavia_owned:
                octavia_owned = True

            self.repositories.vip.update(
                lock_session, db_lb.id, ip_address=vip.ip_address,
                port_id=vip.port_id, network_id=vip.network_id,
                subnet_id=vip.subnet_id, octavia_owned=octavia_owned)

            if listeners or pools:
                db_pools, db_lists = self._graph_create(
                    context.session, lock_session, db_lb, listeners, pools)

            # Prepare the data for the driver data model
            driver_lb_dict = driver_utils.lb_dict_to_provider_dict(
                lb_dict, vip, db_pools, db_lists)

            # Dispatch to the driver
            LOG.info("Sending create Load Balancer %s to provider %s",
                     db_lb.id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.loadbalancer_create,
                driver_dm.LoadBalancer.from_dict(driver_lb_dict))

            lock_session.commit()
        except odb_exceptions.DBDuplicateEntry:
            lock_session.rollback()
            raise exceptions.IDAlreadyExists()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_lb = self._get_db_lb(context.session, db_lb.id)

        result = self._convert_db_to_type(
            db_lb, lb_types.LoadBalancerFullResponse)
        return lb_types.LoadBalancerFullRootResponse(loadbalancer=result)

    def _graph_create(self, session, lock_session, db_lb, listeners, pools):
        # Track which pools must have a full specification
        pools_required = set()
        # Look through listeners and find any extra pools, and move them to
        # the top level so they are created first.
        for li in listeners:
            default_pool = li.get('default_pool')
            pool_name = (
                default_pool.get('name') if default_pool else None)
            # All pools need to have a name so they can be referenced
            if default_pool and not pool_name:
                raise exceptions.ValidationException(
                    detail='Pools must be named when creating a fully '
                           'populated loadbalancer.')
            # If a pool has more than a name, assume it's a full
            # specification (but use >3 because it will also have "enabled"
            # and "tls_enabled" as default)
            if default_pool and len(default_pool) > 3:
                pools.append(default_pool)
                li['default_pool'] = {'name': pool_name}
            # Otherwise, it's a reference and we record it and move on
            elif default_pool:
                pools_required.add(pool_name)
            # We also need to check policy redirects
            for policy in li.get('l7policies'):
                redirect_pool = policy.get('redirect_pool')
                pool_name = (
                    redirect_pool.get('name') if redirect_pool else None)
                # All pools need to have a name so they can be referenced
                if redirect_pool and not pool_name:
                    raise exceptions.ValidationException(
                        detail='Pools must be named when creating a fully '
                               'populated loadbalancer.')
                # If a pool has more than a name, assume it's a full spec
                # (but use >3 because it will also have "enabled" and
                # "tls_enabled" as default)
                if redirect_pool and len(redirect_pool) > 3:
                    pool_name = redirect_pool['name']
                    policy['redirect_pool'] = {'name': pool_name}
                    pools.append(redirect_pool)
                # Otherwise, it's a reference and we record it and move on
                elif redirect_pool:
                    pools_required.add(pool_name)

        # Make sure all pool names are unique.
        pool_names = [p.get('name') for p in pools]
        if len(set(pool_names)) != len(pool_names):
            raise exceptions.ValidationException(
                detail="Pool names must be unique when creating a fully "
                       "populated loadbalancer.")
        # Make sure every reference is present in our spec list
        for pool_ref in pools_required:
            if pool_ref not in pool_names:
                raise exceptions.ValidationException(
                    detail="Pool '{name}' was referenced but no full "
                           "definition was found.".format(name=pool_ref))

        # Check quotas for pools.
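        # For reference, this path serves "fully populated" load balancer
        # creates. An illustrative (not exhaustive) request body, with all
        # names and values invented for the example:
        #
        #   POST /v2.0/lbaas/loadbalancers
        #   {"loadbalancer": {"name": "lb1", "vip_subnet_id": "<subnet>",
        #       "listeners": [{"name": "l1", "protocol": "HTTP",
        #                      "protocol_port": 80,
        #                      "default_pool": {"name": "p1"}}],
        #       "pools": [{"name": "p1", "protocol": "HTTP",
        #                  "lb_algorithm": "ROUND_ROBIN"}]}}
        #
        # The checks below count the whole nested batch against the quota at
        # once rather than per object.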
if pools and self.repositories.check_quota_met( session, lock_session, data_models.Pool, db_lb.project_id, count=len(pools)): raise exceptions.QuotaException(resource=data_models.Pool._name()) # Now create all of the pools ahead of the listeners. new_pools = [] pool_name_ids = {} for p in pools: # Check that pools have mandatory attributes, since we have to # bypass the normal validation layer to allow for name-only for attr in ('protocol', 'lb_algorithm'): if attr not in p: raise exceptions.ValidationException( detail="Pool definition for '{name}' missing required " "attribute: {attr}".format(name=p['name'], attr=attr)) p['load_balancer_id'] = db_lb.id p['project_id'] = db_lb.project_id new_pool = (pool.PoolsController()._graph_create( session, lock_session, p)) new_pools.append(new_pool) pool_name_ids[new_pool.name] = new_pool.id # Now check quotas for listeners if listeners and self.repositories.check_quota_met( session, lock_session, data_models.Listener, db_lb.project_id, count=len(listeners)): raise exceptions.QuotaException( resource=data_models.Listener._name()) # Now create all of the listeners new_lists = [] for li in listeners: default_pool = li.pop('default_pool', None) # If there's a default pool, replace it with the ID if default_pool: pool_name = default_pool['name'] pool_id = pool_name_ids.get(pool_name) if not pool_id: raise exceptions.SingleCreateDetailsMissing( type='Pool', name=pool_name) li['default_pool_id'] = pool_id li['load_balancer_id'] = db_lb.id li['project_id'] = db_lb.project_id new_lists.append(listener.ListenersController()._graph_create( lock_session, li, pool_name_ids=pool_name_ids)) return new_pools, new_lists @wsme_pecan.wsexpose(lb_types.LoadBalancerRootResponse, wtypes.text, status_code=200, body=lb_types.LoadBalancerRootPUT) def put(self, id, load_balancer): """Updates a load balancer.""" load_balancer = load_balancer.loadbalancer context = pecan_request.context.get('octavia_context') db_lb = self._get_db_lb(context.session, id, show_deleted=False) self._auth_validate_action(context, db_lb.project_id, constants.RBAC_PUT) if not isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType): network_driver = utils.get_network_driver() validate.qos_extension_enabled(network_driver) if load_balancer.vip_qos_policy_id is not None: if db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id: validate.qos_policy_exists(load_balancer.vip_qos_policy_id) # Load the driver early as it also provides validation driver = driver_factory.get_driver(db_lb.provider) with db_api.get_lock_session() as lock_session: self._test_lb_status(lock_session, id) # Prepare the data for the driver data model lb_dict = load_balancer.to_dict(render_unsets=False) lb_dict['id'] = id vip_dict = lb_dict.pop('vip', {}) lb_dict = driver_utils.lb_dict_to_provider_dict(lb_dict) if 'qos_policy_id' in vip_dict: lb_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id'] # Also prepare the baseline object data old_provider_lb = ( driver_utils.db_loadbalancer_to_provider_loadbalancer( db_lb, for_delete=True)) # Dispatch to the driver LOG.info("Sending update Load Balancer %s to provider " "%s", id, driver.name) driver_utils.call_provider( driver.name, driver.loadbalancer_update, old_provider_lb, driver_dm.LoadBalancer.from_dict(lb_dict)) db_lb_dict = load_balancer.to_dict(render_unsets=False) if 'vip' in db_lb_dict: db_vip_dict = db_lb_dict.pop('vip') self.repositories.vip.update(lock_session, id, **db_vip_dict) if db_lb_dict: self.repositories.load_balancer.update(lock_session, id, **db_lb_dict) 
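        # Illustrative request for this handler, with example-only values:
        #
        #   PUT /v2.0/lbaas/loadbalancers/{lb_id}
        #   {"loadbalancer": {"name": "new-name", "admin_state_up": false}}
        #
        # Only attributes present in the body are rendered into the update
        # dicts above (render_unsets=False), so unset fields stay untouched.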
# Force SQL alchemy to query the DB, otherwise we get inconsistent
        # results
        context.session.expire_all()
        db_lb = self._get_db_lb(context.session, id)
        result = self._convert_db_to_type(db_lb,
                                          lb_types.LoadBalancerResponse)
        return lb_types.LoadBalancerRootResponse(loadbalancer=result)

    @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204)
    def delete(self, id, cascade=False):
        """Deletes a load balancer."""
        context = pecan_request.context.get('octavia_context')
        cascade = strutils.bool_from_string(cascade)
        db_lb = self._get_db_lb(context.session, id, show_deleted=False)

        self._auth_validate_action(context, db_lb.project_id,
                                   constants.RBAC_DELETE)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(db_lb.provider)

        with db_api.get_lock_session() as lock_session:
            if (db_lb.listeners or db_lb.pools) and not cascade:
                msg = _("Cannot delete Load Balancer %s - "
                        "it has children") % id
                LOG.warning(msg)
                raise exceptions.ValidationException(detail=msg)
            self._test_lb_status(lock_session, id,
                                 lb_status=constants.PENDING_DELETE)

            LOG.info("Sending delete Load Balancer %s to provider %s",
                     id, driver.name)
            provider_loadbalancer = (
                driver_utils.db_loadbalancer_to_provider_loadbalancer(
                    db_lb, for_delete=True))
            driver_utils.call_provider(driver.name,
                                       driver.loadbalancer_delete,
                                       provider_loadbalancer, cascade)

    @pecan_expose()
    def _lookup(self, id, *remainder):
        """Overridden pecan _lookup method for custom routing.

        Currently it checks if this was a status request and routes the
        request to the StatusController. Stats and failover requests are
        routed to their controllers the same way.

        'statuses' is aliased here for backward compatibility with
        neutron-lbaas LBaaS v2 API.
        """
        is_children = (
            id and remainder and (
                remainder[0] == 'status' or
                remainder[0] == 'statuses' or (
                    remainder[0] == 'stats' or
                    remainder[0] == 'failover'
                )
            )
        )
        if is_children:
            controller = remainder[0]
            remainder = remainder[1:]
            if controller in ('status', 'statuses'):
                return StatusController(lb_id=id), remainder
            if controller == 'stats':
                return StatisticsController(lb_id=id), remainder
            if controller == 'failover':
                return FailoverController(lb_id=id), remainder
        return None


class StatusController(base.BaseController):
    RBAC_TYPE = constants.RBAC_LOADBALANCER

    def __init__(self, lb_id):
        super(StatusController, self).__init__()
        self.id = lb_id

    @wsme_pecan.wsexpose(lb_types.StatusRootResponse, wtypes.text,
                         status_code=200)
    def get(self):
        context = pecan_request.context.get('octavia_context')
        load_balancer = self._get_db_lb(context.session, self.id,
                                        show_deleted=False)
        if not load_balancer:
            LOG.info("Load balancer %s not found.", self.id)
            raise exceptions.NotFound(
                resource=data_models.LoadBalancer._name(), id=self.id)

        self._auth_validate_action(context, load_balancer.project_id,
                                   constants.RBAC_GET_STATUS)

        result = self._convert_db_to_type(
            load_balancer, lb_types.LoadBalancerStatusResponse)
        result = lb_types.StatusResponse(loadbalancer=result)
        return lb_types.StatusRootResponse(statuses=result)


class StatisticsController(base.BaseController, stats.StatsMixin):
    RBAC_TYPE = constants.RBAC_LOADBALANCER

    def __init__(self, lb_id):
        super(StatisticsController, self).__init__()
        self.id = lb_id

    @wsme_pecan.wsexpose(lb_types.StatisticsRootResponse, wtypes.text,
                         status_code=200)
    def get(self):
        context = pecan_request.context.get('octavia_context')
        load_balancer = self._get_db_lb(context.session, self.id,
                                        show_deleted=False)
        if not load_balancer:
            LOG.info("Load balancer %s not found.", self.id)
            raise exceptions.NotFound(
                resource=data_models.LoadBalancer._name(), id=self.id)
self._auth_validate_action(context, load_balancer.project_id, constants.RBAC_GET_STATS) lb_stats = self.get_loadbalancer_stats(context.session, self.id) result = self._convert_db_to_type( lb_stats, lb_types.LoadBalancerStatisticsResponse) return lb_types.StatisticsRootResponse(stats=result) class FailoverController(LoadBalancersController): def __init__(self, lb_id): super(FailoverController, self).__init__() self.lb_id = lb_id @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) def put(self, **kwargs): """Fails over a loadbalancer""" context = pecan_request.context.get('octavia_context') db_lb = self._get_db_lb(context.session, self.lb_id, show_deleted=False) self._auth_validate_action(context, db_lb.project_id, constants.RBAC_PUT_FAILOVER) # Load the driver early as it also provides validation driver = driver_factory.get_driver(db_lb.provider) with db_api.get_lock_session() as lock_session: self._test_and_set_failover_prov_status(lock_session, self.lb_id) LOG.info("Sending failover request for load balancer %s to the " "provider %s", self.lb_id, driver.name) driver_utils.call_provider( driver.name, driver.loadbalancer_failover, self.lb_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/member.py0000664000175000017500000004603700000000000022121 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
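
# This controller implements the v2 pool member endpoints. A typical create
# request handled here looks like (illustrative values only):
#
#   POST /v2.0/lbaas/pools/{pool_id}/members
#   {"member": {"address": "192.0.2.10", "protocol_port": 80}}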
from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import data_models as driver_dm from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.types import member as member_types from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions import octavia.common.validate as validate from octavia.db import api as db_api from octavia.db import prepare as db_prepare LOG = logging.getLogger(__name__) class MemberController(base.BaseController): RBAC_TYPE = constants.RBAC_MEMBER def __init__(self, pool_id): super(MemberController, self).__init__() self.pool_id = pool_id @wsme_pecan.wsexpose(member_types.MemberRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get(self, id, fields=None): """Gets a single pool member's details.""" context = pecan_request.context.get('octavia_context') db_member = self._get_db_member(context.session, id, show_deleted=False) self._auth_validate_action(context, db_member.project_id, constants.RBAC_GET_ONE) self._validate_pool_id(id, db_member.pool_id) result = self._convert_db_to_type( db_member, member_types.MemberResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return member_types.MemberRootResponse(member=result) @wsme_pecan.wsexpose(member_types.MembersRootResponse, [wtypes.text], ignore_extra_args=True) def get_all(self, fields=None): """Lists all pool members of a pool.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') pool = self._get_db_pool(context.session, self.pool_id, show_deleted=False) self._auth_validate_action(context, pool.project_id, constants.RBAC_GET_ALL) db_members, links = self.repositories.member.get_all_API_list( context.session, show_deleted=False, pool_id=self.pool_id, pagination_helper=pcontext.get(constants.PAGINATION_HELPER)) result = self._convert_db_to_type( db_members, [member_types.MemberResponse]) if fields is not None: result = self._filter_fields(result, fields) return member_types.MembersRootResponse( members=result, members_links=links) def _get_affected_listener_ids(self, session, member=None): """Gets a list of all listeners this request potentially affects.""" if member: listener_ids = [li.id for li in member.pool.listeners] else: pool = self._get_db_pool(session, self.pool_id) listener_ids = [li.id for li in pool.listeners] return listener_ids def _test_lb_and_listener_and_pool_statuses(self, session, member=None): """Verify load balancer is in a mutable state.""" # We need to verify that any listeners referencing this member's # pool are also mutable pool = self._get_db_pool(session, self.pool_id) # Check the parent is not locked for some reason (ERROR, etc.) 
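        # (MUTABLE_STATUSES is the set of provisioning states, normally just
        # ACTIVE, in which an object may accept changes.)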
if pool.provisioning_status not in constants.MUTABLE_STATUSES: raise exceptions.ImmutableObject(resource='Pool', id=self.pool_id) load_balancer_id = pool.load_balancer_id if not self.repositories.test_and_set_lb_and_listeners_prov_status( session, load_balancer_id, constants.PENDING_UPDATE, constants.PENDING_UPDATE, listener_ids=self._get_affected_listener_ids(session, member), pool_id=self.pool_id): LOG.info("Member cannot be created or modified because the " "Load Balancer is in an immutable state") raise exceptions.ImmutableObject(resource='Load Balancer', id=load_balancer_id) def _validate_create_member(self, lock_session, member_dict): """Validate creating member on pool.""" try: return self.repositories.member.create(lock_session, **member_dict) except odb_exceptions.DBDuplicateEntry as de: column_list = ['pool_id', 'ip_address', 'protocol_port'] constraint_list = ['uq_member_pool_id_address_protocol_port'] if ['id'] == de.columns: raise exceptions.IDAlreadyExists() if (set(column_list) == set(de.columns) or set(constraint_list) == set(de.columns)): raise exceptions.DuplicateMemberEntry( ip_address=member_dict.get('ip_address'), port=member_dict.get('protocol_port')) except odb_exceptions.DBError: # TODO(blogan): will have to do separate validation protocol # before creation or update since the exception messages # do not give any information as to what constraint failed raise exceptions.InvalidOption(value='', option='') def _validate_pool_id(self, member_id, db_member_pool_id): if db_member_pool_id != self.pool_id: raise exceptions.NotFound(resource='Member', id=member_id) @wsme_pecan.wsexpose(member_types.MemberRootResponse, body=member_types.MemberRootPOST, status_code=201) def post(self, member_): """Creates a pool member on a pool.""" member = member_.member context = pecan_request.context.get('octavia_context') pool = self.repositories.pool.get(context.session, id=self.pool_id) member.project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) self._auth_validate_action(context, member.project_id, constants.RBAC_POST) validate.ip_not_reserved(member.address) # Validate member subnet if (member.subnet_id and not validate.subnet_exists(member.subnet_id, context=context)): raise exceptions.NotFound(resource='Subnet', id=member.subnet_id) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) lock_session = db_api.get_session(autocommit=False) try: if self.repositories.check_quota_met( context.session, lock_session, data_models.Member, member.project_id): raise exceptions.QuotaException( resource=data_models.Member._name()) member_dict = db_prepare.create_member(member.to_dict( render_unsets=True), self.pool_id, bool(pool.health_monitor)) self._test_lb_and_listener_and_pool_statuses(lock_session) db_member = self._validate_create_member(lock_session, member_dict) # Prepare the data for the driver data model provider_member = ( driver_utils.db_member_to_provider_member(db_member)) # Dispatch to the driver LOG.info("Sending create Member %s to provider %s", db_member.id, driver.name) driver_utils.call_provider( driver.name, driver.member_create, provider_member) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() db_member = self._get_db_member(context.session, db_member.id) result = self._convert_db_to_type(db_member, member_types.MemberResponse) return member_types.MemberRootResponse(member=result) def _graph_create(self, lock_session, 
member_dict): pool = self.repositories.pool.get(lock_session, id=self.pool_id) member_dict = db_prepare.create_member( member_dict, self.pool_id, bool(pool.health_monitor)) db_member = self._validate_create_member(lock_session, member_dict) return db_member def _set_default_on_none(self, member): """Reset settings to their default values if None/null was passed in A None/null value can be passed in to clear a value. PUT values that were not provided by the user have a type of wtypes.UnsetType. If the user is attempting to clear values, they should either be set to None (for example in the name field) or they should be reset to their default values. This method is intended to handle those values that need to be set back to a default value. """ if member.backup is None: member.backup = False if member.weight is None: member.weight = constants.DEFAULT_WEIGHT @wsme_pecan.wsexpose(member_types.MemberRootResponse, wtypes.text, body=member_types.MemberRootPUT, status_code=200) def put(self, id, member_): """Updates a pool member.""" member = member_.member context = pecan_request.context.get('octavia_context') db_member = self._get_db_member(context.session, id, show_deleted=False) pool = self.repositories.pool.get(context.session, id=db_member.pool_id) project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_PUT) self._validate_pool_id(id, db_member.pool_id) self._set_default_on_none(member) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_and_pool_statuses(lock_session, member=db_member) # Prepare the data for the driver data model member_dict = member.to_dict(render_unsets=False) member_dict['id'] = id provider_member_dict = ( driver_utils.member_dict_to_provider_dict(member_dict)) # Also prepare the baseline object data old_provider_member = driver_utils.db_member_to_provider_member( db_member) # Dispatch to the driver LOG.info("Sending update Member %s to provider %s", id, driver.name) driver_utils.call_provider( driver.name, driver.member_update, old_provider_member, driver_dm.Member.from_dict(provider_member_dict)) # Update the database to reflect what the driver just accepted member.provisioning_status = constants.PENDING_UPDATE db_member_dict = member.to_dict(render_unsets=False) self.repositories.member.update(lock_session, id, **db_member_dict) # Force SQL alchemy to query the DB, otherwise we get inconsistent # results context.session.expire_all() db_member = self._get_db_member(context.session, id) result = self._convert_db_to_type(db_member, member_types.MemberResponse) return member_types.MemberRootResponse(member=result) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, id): """Deletes a pool member.""" context = pecan_request.context.get('octavia_context') db_member = self._get_db_member(context.session, id, show_deleted=False) pool = self.repositories.pool.get(context.session, id=db_member.pool_id) project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) self._validate_pool_id(id, db_member.pool_id) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_and_pool_statuses(lock_session, member=db_member) 
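            # Mark the member PENDING_DELETE before handing the delete to
            # the provider driver; the driver reports the final provisioning
            # status asynchronously.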
self.repositories.member.update( lock_session, db_member.id, provisioning_status=constants.PENDING_DELETE) LOG.info("Sending delete Member %s to provider %s", id, driver.name) provider_member = ( driver_utils.db_member_to_provider_member(db_member)) driver_utils.call_provider(driver.name, driver.member_delete, provider_member) class MembersController(MemberController): def __init__(self, pool_id): super(MembersController, self).__init__(pool_id) @wsme_pecan.wsexpose(None, wtypes.text, body=member_types.MembersRootPUT, status_code=202) def put(self, additive_only=False, members_=None): """Updates all members.""" members = members_.members additive_only = strutils.bool_from_string(additive_only) context = pecan_request.context.get('octavia_context') db_pool = self._get_db_pool(context.session, self.pool_id) old_members = db_pool.members project_id, provider = self._get_lb_project_id_provider( context.session, db_pool.load_balancer_id) # Check POST+PUT+DELETE since this operation is all of 'CUD' self._auth_validate_action(context, project_id, constants.RBAC_POST) self._auth_validate_action(context, project_id, constants.RBAC_PUT) if not additive_only: self._auth_validate_action(context, project_id, constants.RBAC_DELETE) # Validate member subnets for member in members: if member.subnet_id and not validate.subnet_exists( member.subnet_id, context=context): raise exceptions.NotFound(resource='Subnet', id=member.subnet_id) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) with db_api.get_lock_session() as lock_session: self._test_lb_and_listener_and_pool_statuses(lock_session) old_member_uniques = { (m.ip_address, m.protocol_port): m.id for m in old_members} new_member_uniques = [ (m.address, m.protocol_port) for m in members] # Find members that are brand new or updated new_members = [] updated_members = [] for m in members: if (m.address, m.protocol_port) not in old_member_uniques: validate.ip_not_reserved(m.address) new_members.append(m) else: m.id = old_member_uniques[(m.address, m.protocol_port)] updated_members.append(m) # Find members that are deleted deleted_members = [] for m in old_members: if (m.ip_address, m.protocol_port) not in new_member_uniques: deleted_members.append(m) if not (deleted_members or new_members or updated_members): LOG.info("Member batch update is a noop, rolling back and " "returning early.") lock_session.rollback() return if additive_only: member_count_diff = len(new_members) else: member_count_diff = len(new_members) - len(deleted_members) if member_count_diff > 0 and self.repositories.check_quota_met( context.session, lock_session, data_models.Member, db_pool.project_id, count=member_count_diff): raise exceptions.QuotaException( resource=data_models.Member._name()) provider_members = [] # Create new members for m in new_members: m = m.to_dict(render_unsets=False) m['project_id'] = db_pool.project_id created_member = self._graph_create(lock_session, m) provider_member = driver_utils.db_member_to_provider_member( created_member) provider_members.append(provider_member) # Update old members for m in updated_members: m.provisioning_status = constants.PENDING_UPDATE m.project_id = db_pool.project_id db_member_dict = m.to_dict(render_unsets=False) db_member_dict.pop('id') self.repositories.member.update( lock_session, m.id, **db_member_dict) m.pool_id = self.pool_id provider_members.append( driver_utils.db_member_to_provider_member(m)) # Delete old members for m in deleted_members: if additive_only: # Members are 
appended to the dict and their status remains # unchanged, because they are logically "untouched". db_member_dict = m.to_dict(render_unsets=False) db_member_dict.pop('id') m.pool_id = self.pool_id provider_members.append( driver_utils.db_member_to_provider_member(m)) else: # Members are changed to PENDING_DELETE and not passed. self.repositories.member.update( lock_session, m.id, provisioning_status=constants.PENDING_DELETE) # Dispatch to the driver LOG.info("Sending Pool %s batch member update to provider %s", db_pool.id, driver.name) driver_utils.call_provider( driver.name, driver.member_batch_update, db_pool.id, provider_members) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/pool.py0000664000175000017500000005300000000000000021607 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from pecan import expose as pecan_expose from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import data_models as driver_dm from octavia.api.drivers import driver_factory from octavia.api.drivers import utils as driver_utils from octavia.api.v2.controllers import base from octavia.api.v2.controllers import health_monitor from octavia.api.v2.controllers import member from octavia.api.v2.types import pool as pool_types from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.common import validate from octavia.db import api as db_api from octavia.db import prepare as db_prepare from octavia.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) class PoolsController(base.BaseController): RBAC_TYPE = constants.RBAC_POOL def __init__(self): super(PoolsController, self).__init__() @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get(self, id, fields=None): """Gets a pool's details.""" context = pecan_request.context.get('octavia_context') db_pool = self._get_db_pool(context.session, id, show_deleted=False) self._auth_validate_action(context, db_pool.project_id, constants.RBAC_GET_ONE) result = self._convert_db_to_type(db_pool, pool_types.PoolResponse) if fields is not None: result = self._filter_fields([result], fields)[0] return pool_types.PoolRootResponse(pool=result) @wsme_pecan.wsexpose(pool_types.PoolsRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) def get_all(self, project_id=None, fields=None): """Lists all pools.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') query_filter = self._auth_get_all(context, project_id) db_pools, links = self.repositories.pool.get_all_API_list( context.session, show_deleted=False, 
pagination_helper=pcontext.get(constants.PAGINATION_HELPER), **query_filter) result = self._convert_db_to_type(db_pools, [pool_types.PoolResponse]) if fields is not None: result = self._filter_fields(result, fields) return pool_types.PoolsRootResponse(pools=result, pools_links=links) def _get_affected_listener_ids(self, pool): """Gets a list of all listeners this request potentially affects.""" listener_ids = [li.id for li in pool.listeners] return listener_ids def _test_lb_and_listener_statuses(self, session, lb_id, listener_ids): """Verify load balancer is in a mutable state.""" # We need to verify that any listeners referencing this pool are also # mutable if not self.repositories.test_and_set_lb_and_listeners_prov_status( session, lb_id, constants.PENDING_UPDATE, constants.PENDING_UPDATE, listener_ids=listener_ids): LOG.info("Pool cannot be created or modified because the Load " "Balancer is in an immutable state") raise exceptions.ImmutableObject(resource=_('Load Balancer'), id=lb_id) def _validate_create_pool(self, lock_session, pool_dict, listener_id=None): """Validate creating pool on load balancer. Update database for load balancer and (optional) listener based on provisioning status. """ # Make sure we have a client CA if they specify a CRL if (pool_dict.get('crl_container_id') and not pool_dict.get('ca_tls_certificate_id')): raise exceptions.ValidationException(detail=_( "A CA certificate reference is required to " "specify a revocation list.")) tls_certificate_id = pool_dict.get('tls_certificate_id', None) tls_refs = [tls_certificate_id] if tls_certificate_id else [] self._validate_tls_refs(tls_refs) # Validate the client CA cert and optional client CRL if pool_dict.get('ca_tls_certificate_id'): self._validate_client_ca_and_crl_refs( pool_dict.get('ca_tls_certificate_id'), pool_dict.get('crl_container_id', None)) try: return self.repositories.create_pool_on_load_balancer( lock_session, pool_dict, listener_id=listener_id) except odb_exceptions.DBDuplicateEntry: raise exceptions.IDAlreadyExists() except odb_exceptions.DBError: # TODO(blogan): will have to do separate validation protocol # before creation or update since the exception messages # do not give any information as to what constraint failed raise exceptions.InvalidOption(value='', option='') def _is_only_specified_in_request(self, request, **kwargs): request_attrs = [] check_attrs = kwargs['check_exist_attrs'] escaped_attrs = ['from_data_model', 'translate_key_to_data_model', 'translate_dict_keys_to_data_model', 'to_dict'] for attr in dir(request): if attr.startswith('_') or attr in escaped_attrs: continue request_attrs.append(attr) for req_attr in request_attrs: if (getattr(request, req_attr) and req_attr not in check_attrs): return False return True def _validate_pool_request_for_udp(self, request): if request.session_persistence: if (request.session_persistence.type == constants.SESSION_PERSISTENCE_SOURCE_IP and not self._is_only_specified_in_request( request.session_persistence, check_exist_attrs=['type', 'persistence_timeout', 'persistence_granularity'])): raise exceptions.ValidationException(detail=_( "session_persistence %s type for UDP protocol " "only accepts: type, persistence_timeout, " "persistence_granularity.") % ( constants.SESSION_PERSISTENCE_SOURCE_IP)) if request.session_persistence.cookie_name: raise exceptions.ValidationException(detail=_( "Cookie names are not supported for %s pools.") % constants.PROTOCOL_UDP) if request.session_persistence.type in [ constants.SESSION_PERSISTENCE_HTTP_COOKIE, 
                constants.SESSION_PERSISTENCE_APP_COOKIE]:
            raise exceptions.ValidationException(detail=_(
                "Session persistence of type %(type)s is not supported "
                "for %(protocol)s protocol pools.") % {
                    'type': request.session_persistence.type,
                    'protocol': constants.PROTOCOL_UDP})

    @wsme_pecan.wsexpose(pool_types.PoolRootResponse,
                         body=pool_types.PoolRootPOST, status_code=201)
    def post(self, pool_):
        """Creates a pool on a load balancer or listener.

        Note that this can optionally take a listener_id with which the
        pool should be associated as the listener's default_pool. If
        specified, the pool creation will fail if the listener specified
        already has a default_pool.
        """
        # For some API requests the listener_id will be passed in the
        # pool_dict:
        pool = pool_.pool
        context = pecan_request.context.get('octavia_context')

        listener = None
        if pool.loadbalancer_id:
            pool.project_id, provider = self._get_lb_project_id_provider(
                context.session, pool.loadbalancer_id)
        elif pool.listener_id:
            listener = self.repositories.listener.get(
                context.session, id=pool.listener_id)
            pool.loadbalancer_id = listener.load_balancer_id
            pool.project_id, provider = self._get_lb_project_id_provider(
                context.session, pool.loadbalancer_id)
        else:
            msg = _("Must provide at least one of: "
                    "loadbalancer_id, listener_id")
            raise exceptions.ValidationException(detail=msg)

        self._auth_validate_action(context, pool.project_id,
                                   constants.RBAC_POST)

        if pool.listener_id and listener:
            self._validate_protocol(listener.protocol, pool.protocol)

        if pool.protocol == constants.PROTOCOL_UDP:
            self._validate_pool_request_for_udp(pool)
        else:
            if (pool.session_persistence and (
                    pool.session_persistence.persistence_timeout or
                    pool.session_persistence.persistence_granularity)):
                raise exceptions.ValidationException(detail=_(
                    "persistence_timeout and persistence_granularity "
                    "are only for UDP protocol pools."))

        if pool.session_persistence:
            sp_dict = pool.session_persistence.to_dict(render_unsets=False)
            validate.check_session_persistence(sp_dict)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        lock_session = db_api.get_session(autocommit=False)
        try:
            if self.repositories.check_quota_met(
                    context.session,
                    lock_session,
                    data_models.Pool,
                    pool.project_id):
                raise exceptions.QuotaException(
                    resource=data_models.Pool._name())

            listener_repo = self.repositories.listener
            pool_dict = db_prepare.create_pool(
                pool.to_dict(render_unsets=True))

            listener_id = pool_dict.pop('listener_id', None)
            if listener_id:
                if listener_repo.has_default_pool(lock_session,
                                                  listener_id):
                    raise exceptions.DuplicatePoolEntry()

            self._test_lb_and_listener_statuses(
                lock_session, lb_id=pool_dict['load_balancer_id'],
                listener_ids=[listener_id] if listener_id else [])

            db_pool = self._validate_create_pool(
                lock_session, pool_dict, listener_id)

            # Prepare the data for the driver data model
            provider_pool = (
                driver_utils.db_pool_to_provider_pool(db_pool))

            # Dispatch to the driver
            LOG.info("Sending create Pool %s to provider %s",
                     db_pool.id, driver.name)
            driver_utils.call_provider(
                driver.name, driver.pool_create, provider_pool)

            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()

        db_pool = self._get_db_pool(context.session, db_pool.id)
        result = self._convert_db_to_type(db_pool, pool_types.PoolResponse)
        return pool_types.PoolRootResponse(pool=result)

    def _graph_create(self, session, lock_session, pool_dict):
        load_balancer_id = pool_dict['load_balancer_id']
        pool_dict = db_prepare.create_pool(
            pool_dict, load_balancer_id)
        members = pool_dict.pop('members', []) or []
        hm = pool_dict.pop('health_monitor', None)
        db_pool = self._validate_create_pool(
            lock_session, pool_dict)

        # Check quotas for healthmonitors
        if hm and self.repositories.check_quota_met(
                session, lock_session, data_models.HealthMonitor,
                db_pool.project_id):
            raise exceptions.QuotaException(
                resource=data_models.HealthMonitor._name())

        # Now possibly create a healthmonitor
        if hm:
            hm[constants.POOL_ID] = db_pool.id
            hm[constants.PROJECT_ID] = db_pool.project_id
            new_hm = health_monitor.HealthMonitorController()._graph_create(
                lock_session, hm)
            if db_pool.protocol == constants.PROTOCOL_UDP:
                health_monitor.HealthMonitorController(
                )._validate_healthmonitor_request_for_udp(new_hm)
            else:
                if new_hm.type == constants.HEALTH_MONITOR_UDP_CONNECT:
                    raise exceptions.ValidationException(detail=_(
                        "The %(type)s type is only supported for pools of "
                        "type %(protocol)s.") % {
                            'type': new_hm.type,
                            'protocol': constants.PROTOCOL_UDP})
            db_pool.health_monitor = new_hm

        # Now check quotas for members
        if members and self.repositories.check_quota_met(
                session, lock_session, data_models.Member,
                db_pool.project_id, count=len(members)):
            raise exceptions.QuotaException(
                resource=data_models.Member._name())

        # Now create members
        new_members = []
        for m in members:
            validate.ip_not_reserved(m["ip_address"])

            m['project_id'] = db_pool.project_id
            new_members.append(
                member.MembersController(db_pool.id)._graph_create(
                    lock_session, m))
        db_pool.members = new_members
        return db_pool

    def _validate_pool_PUT(self, pool, db_pool):

        if db_pool.protocol == constants.PROTOCOL_UDP:
            self._validate_pool_request_for_udp(pool)
        else:
            if (pool.session_persistence and (
                    pool.session_persistence.persistence_timeout or
                    pool.session_persistence.persistence_granularity)):
                raise exceptions.ValidationException(detail=_(
                    "persistence_timeout and persistence_granularity "
                    "are only for UDP protocol pools."))

        if pool.session_persistence:
            sp_dict = pool.session_persistence.to_dict(render_unsets=False)
            validate.check_session_persistence(sp_dict)

        crl_ref = None
        # If we got a crl_ref and it's not unset, use it
        if (pool.crl_container_ref and
                pool.crl_container_ref != wtypes.Unset):
            crl_ref = pool.crl_container_ref
        # If we got Unset and a CRL exists in the DB, use the DB crl_ref
        elif (db_pool.crl_container_id and
                pool.crl_container_ref == wtypes.Unset):
            crl_ref = db_pool.crl_container_id

        ca_ref = None
        db_ca_ref = db_pool.ca_tls_certificate_id
        if pool.ca_tls_container_ref != wtypes.Unset:
            if not pool.ca_tls_container_ref and db_ca_ref and crl_ref:
                raise exceptions.ValidationException(detail=_(
                    "A CA reference cannot be removed when a "
                    "certificate revocation list is present."))

            if not pool.ca_tls_container_ref and not db_ca_ref and crl_ref:
                raise exceptions.ValidationException(detail=_(
                    "A CA reference is required to "
                    "specify a certificate revocation list."))

            if pool.ca_tls_container_ref:
                ca_ref = pool.ca_tls_container_ref
        elif db_ca_ref and pool.ca_tls_container_ref == wtypes.Unset:
            ca_ref = db_ca_ref
        elif crl_ref and not db_ca_ref:
            raise exceptions.ValidationException(detail=_(
                "A CA reference is required to "
                "specify a certificate revocation list."))

        if pool.tls_container_ref:
            self._validate_tls_refs([pool.tls_container_ref])

        # Validate the client CA cert and optional client CRL
        if ca_ref:
            self._validate_client_ca_and_crl_refs(ca_ref, crl_ref)

    @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text,
                         body=pool_types.PoolRootPut, status_code=200)
    def put(self, id, pool_):
        """Updates a pool on a load balancer."""
        pool = pool_.pool
        context = pecan_request.context.get('octavia_context')
        db_pool = self._get_db_pool(context.session, id, show_deleted=False)

        project_id, provider = self._get_lb_project_id_provider(
            context.session, db_pool.load_balancer_id)

        self._auth_validate_action(context, project_id, constants.RBAC_PUT)

        if (pool.session_persistence and
                not pool.session_persistence.type and
                db_pool.session_persistence and
                db_pool.session_persistence.type):
            pool.session_persistence.type = db_pool.session_persistence.type

        self._validate_pool_PUT(pool, db_pool)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        with db_api.get_lock_session() as lock_session:
            self._test_lb_and_listener_statuses(
                context.session, lb_id=db_pool.load_balancer_id,
                listener_ids=self._get_affected_listener_ids(db_pool))

            # Prepare the data for the driver data model
            pool_dict = pool.to_dict(render_unsets=False)
            pool_dict['id'] = id
            provider_pool_dict = (
                driver_utils.pool_dict_to_provider_dict(pool_dict))

            # Also prepare the baseline object data
            old_provider_pool = driver_utils.db_pool_to_provider_pool(
                db_pool, for_delete=True)

            # Dispatch to the driver
            LOG.info("Sending update Pool %s to provider %s", id,
                     driver.name)
            driver_utils.call_provider(
                driver.name, driver.pool_update,
                old_provider_pool,
                driver_dm.Pool.from_dict(provider_pool_dict))

            # Update the database to reflect what the driver just accepted
            pool.provisioning_status = constants.PENDING_UPDATE
            db_pool_dict = pool.to_dict(render_unsets=False)
            self.repositories.update_pool_and_sp(lock_session, id,
                                                 db_pool_dict)

        # Force SQL alchemy to query the DB, otherwise we get inconsistent
        # results
        context.session.expire_all()

        db_pool = self._get_db_pool(context.session, id)
        result = self._convert_db_to_type(db_pool, pool_types.PoolResponse)
        return pool_types.PoolRootResponse(pool=result)

    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
    def delete(self, id):
        """Deletes a pool from a load balancer."""
        context = pecan_request.context.get('octavia_context')
        db_pool = self._get_db_pool(context.session, id, show_deleted=False)

        project_id, provider = self._get_lb_project_id_provider(
            context.session, db_pool.load_balancer_id)

        self._auth_validate_action(context, project_id,
                                   constants.RBAC_DELETE)

        if db_pool.l7policies:
            raise exceptions.PoolInUseByL7Policy(
                id=db_pool.id, l7policy_id=db_pool.l7policies[0].id)

        # Load the driver early as it also provides validation
        driver = driver_factory.get_driver(provider)

        with db_api.get_lock_session() as lock_session:
            self._test_lb_and_listener_statuses(
                lock_session, lb_id=db_pool.load_balancer_id,
                listener_ids=self._get_affected_listener_ids(db_pool))
            self.repositories.pool.update(
                lock_session, db_pool.id,
                provisioning_status=constants.PENDING_DELETE)

            LOG.info("Sending delete Pool %s to provider %s", id,
                     driver.name)
            provider_pool = (
                driver_utils.db_pool_to_provider_pool(db_pool,
                                                      for_delete=True))
            driver_utils.call_provider(driver.name, driver.pool_delete,
                                       provider_pool)

    @pecan_expose()
    def _lookup(self, pool_id, *remainder):
        """Overridden pecan _lookup method for custom routing.

        Verifies that the pool passed in the url exists, and if so decides
        which controller, if any, control should be passed to.
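        For example, a URL ending in .../pools/<pool_id>/members hands
        control to MembersController, or to MemberController when a member
        id follows in the path.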
""" context = pecan_request.context.get('octavia_context') if pool_id and remainder and remainder[0] == 'members': remainder = remainder[1:] db_pool = self.repositories.pool.get(context.session, id=pool_id) if not db_pool: LOG.info("Pool %s not found.", pool_id) raise exceptions.NotFound(resource=data_models.Pool._name(), id=pool_id) if remainder: return member.MemberController(pool_id=db_pool.id), remainder return member.MembersController(pool_id=db_pool.id), remainder return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/provider.py0000664000175000017500000001646000000000000022501 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia_lib.api.drivers import exceptions as lib_exceptions from oslo_config import cfg from oslo_log import log as logging from pecan import expose as pecan_expose from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.drivers import driver_factory from octavia.api.v2.controllers import base from octavia.api.v2.types import provider as provider_types from octavia.common import constants from octavia.common import exceptions CONF = cfg.CONF LOG = logging.getLogger(__name__) class ProviderController(base.BaseController): RBAC_TYPE = constants.RBAC_PROVIDER def __init__(self): super(ProviderController, self).__init__() @wsme_pecan.wsexpose(provider_types.ProvidersRootResponse, [wtypes.text], ignore_extra_args=True) def get_all(self, fields=None): """List enabled provider drivers and their descriptions.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ALL) enabled_providers = CONF.api_settings.enabled_provider_drivers response_list = [ provider_types.ProviderResponse(name=key, description=value) for key, value in enabled_providers.items()] if fields is not None: response_list = self._filter_fields(response_list, fields) return provider_types.ProvidersRootResponse(providers=response_list) @pecan_expose() def _lookup(self, provider, *remainder): """Overridden pecan _lookup method for custom routing. Currently it checks if this was a flavor capabilities request and routes the request to the FlavorCapabilitiesController. 
""" if provider and remainder: if remainder[0] == 'flavor_capabilities': return (FlavorCapabilitiesController(provider=provider), remainder[1:]) if remainder[0] == 'availability_zone_capabilities': return ( AvailabilityZoneCapabilitiesController(provider=provider), remainder[1:]) return None class FlavorCapabilitiesController(base.BaseController): RBAC_TYPE = constants.RBAC_PROVIDER_FLAVOR def __init__(self, provider): super(FlavorCapabilitiesController, self).__init__() self.provider = provider @wsme_pecan.wsexpose(provider_types.FlavorCapabilitiesResponse, [wtypes.text], ignore_extra_args=True, status_code=200) def get_all(self, fields=None): context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ALL) self.driver = driver_factory.get_driver(self.provider) try: metadata_dict = self.driver.get_supported_flavor_metadata() except lib_exceptions.NotImplementedError as e: LOG.warning('Provider %s get_supported_flavor_metadata() ' 'reported: %s', self.provider, e.operator_fault_string) raise exceptions.ProviderNotImplementedError( prov=self.provider, user_msg=e.user_fault_string) # Apply any valid filters provided as URL parameters name_filter = None description_filter = None pagination_helper = pecan_request.context.get( constants.PAGINATION_HELPER) if pagination_helper: name_filter = pagination_helper.params.get(constants.NAME) description_filter = pagination_helper.params.get( constants.DESCRIPTION) if name_filter: metadata_dict = { key: value for key, value in metadata_dict.items() if key == name_filter} if description_filter: metadata_dict = { key: value for key, value in metadata_dict.items() if value == description_filter} response_list = [ provider_types.ProviderResponse(name=key, description=value) for key, value in metadata_dict.items()] if fields is not None: response_list = self._filter_fields(response_list, fields) return provider_types.FlavorCapabilitiesResponse( flavor_capabilities=response_list) class AvailabilityZoneCapabilitiesController(base.BaseController): RBAC_TYPE = constants.RBAC_PROVIDER_AVAILABILITY_ZONE def __init__(self, provider): super(AvailabilityZoneCapabilitiesController, self).__init__() self.provider = provider @wsme_pecan.wsexpose(provider_types.AvailabilityZoneCapabilitiesResponse, [wtypes.text], ignore_extra_args=True, status_code=200) def get_all(self, fields=None): context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, context.project_id, constants.RBAC_GET_ALL) self.driver = driver_factory.get_driver(self.provider) try: metadata_dict = ( self.driver.get_supported_availability_zone_metadata()) except lib_exceptions.NotImplementedError as e: LOG.warning( 'Provider %s get_supported_availability_zone_metadata() ' 'reported: %s', self.provider, e.operator_fault_string) raise exceptions.ProviderNotImplementedError( prov=self.provider, user_msg=e.user_fault_string) # Apply any valid filters provided as URL parameters name_filter = None description_filter = None pagination_helper = pecan_request.context.get( constants.PAGINATION_HELPER) if pagination_helper: name_filter = pagination_helper.params.get(constants.NAME) description_filter = pagination_helper.params.get( constants.DESCRIPTION) if name_filter: metadata_dict = { key: value for key, value in metadata_dict.items() if key == name_filter} if description_filter: metadata_dict = { key: value for key, value in metadata_dict.items() if value == description_filter} response_list = [ 
provider_types.ProviderResponse(name=key, description=value) for key, value in metadata_dict.items()] if fields is not None: response_list = self._filter_fields(response_list, fields) return provider_types.AvailabilityZoneCapabilitiesResponse( availability_zone_capabilities=response_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/controllers/quotas.py0000664000175000017500000001115000000000000022152 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from pecan import expose as pecan_expose from pecan import request as pecan_request from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.v2.controllers import base from octavia.api.v2.types import quotas as quota_types from octavia.common import constants from octavia.common import exceptions CONF = cfg.CONF CONF.import_group('quotas', 'octavia.common.config') class QuotasController(base.BaseController): RBAC_TYPE = constants.RBAC_QUOTA def __init__(self): super(QuotasController, self).__init__() @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text) def get(self, project_id): """Get a single project's quota details.""" context = pecan_request.context.get('octavia_context') self._auth_validate_action(context, project_id, constants.RBAC_GET_ONE) db_quotas = self._get_db_quotas(context.session, project_id) return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse) @wsme_pecan.wsexpose(quota_types.QuotaAllResponse, ignore_extra_args=True) def get_all(self, project_id=None): """List all non-default quotas.""" pcontext = pecan_request.context context = pcontext.get('octavia_context') query_filter = self._auth_get_all(context, project_id) db_quotas, links = self.repositories.quotas.get_all( context.session, pagination_helper=pcontext.get(constants.PAGINATION_HELPER), **query_filter) quotas = quota_types.QuotaAllResponse.from_data_model(db_quotas) quotas.quotas_links = links return quotas @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text, body=quota_types.QuotaPUT, status_code=202) def put(self, project_id, quotas): """Update any or all quotas for a project.""" context = pecan_request.context.get('octavia_context') if not project_id: raise exceptions.MissingAPIProjectID() self._auth_validate_action(context, project_id, constants.RBAC_PUT) quotas_dict = quotas.to_dict() self.repositories.quotas.update(context.session, project_id, **quotas_dict) db_quotas = self._get_db_quotas(context.session, project_id) return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse) @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) def delete(self, project_id): """Reset a project's quotas to the default values.""" context = pecan_request.context.get('octavia_context') if not project_id: raise exceptions.MissingAPIProjectID() self._auth_validate_action(context, project_id, constants.RBAC_DELETE) 
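        # Deleting the quota row resets this project to the configured
        # default quotas; the values returned below are re-read after the
        # delete.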
self.repositories.quotas.delete(context.session, project_id) db_quotas = self._get_db_quotas(context.session, project_id) return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse) @pecan_expose() def _lookup(self, project_id, *remainder): """Overridden pecan _lookup method for routing default endpoint.""" if project_id and remainder and remainder[0] == 'default': return QuotasDefaultController(project_id), '' return None class QuotasDefaultController(base.BaseController): RBAC_TYPE = constants.RBAC_QUOTA def __init__(self, project_id): super(QuotasDefaultController, self).__init__() self.project_id = project_id @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text) def get(self): """Get a project's default quota details.""" context = pecan_request.context.get('octavia_context') if not self.project_id: raise exceptions.MissingAPIProjectID() self._auth_validate_action(context, self.project_id, constants.RBAC_GET_DEFAULTS) quotas = self._get_default_quotas(self.project_id) return self._convert_db_to_type(quotas, quota_types.QuotaResponse) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3822167 octavia-6.2.2/octavia/api/v2/types/0000775000175000017500000000000000000000000017064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/types/__init__.py0000664000175000017500000000107400000000000021177 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/types/amphora.py0000664000175000017500000000574600000000000021101 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
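
# The response types below follow the wsme pattern used throughout this
# tree: attributes are declared with wtypes.wsattr() and a root type wraps
# the object under its JSON key, e.g. (illustrative shape only):
#
#   {"amphora": {"id": "<uuid>", "status": "...", "role": "...", ...}}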
from wsme import types as wtypes from octavia.api.common import types class BaseAmphoraType(types.BaseType): _type_to_model_map = {'loadbalancer_id': 'load_balancer_id'} _child_map = {} class AmphoraResponse(BaseAmphoraType): """Defines which attributes are to be shown on any response.""" id = wtypes.wsattr(wtypes.UuidType()) loadbalancer_id = wtypes.wsattr(wtypes.UuidType()) compute_id = wtypes.wsattr(wtypes.UuidType()) lb_network_ip = wtypes.wsattr(types.IPAddressType()) vrrp_ip = wtypes.wsattr(types.IPAddressType()) ha_ip = wtypes.wsattr(types.IPAddressType()) vrrp_port_id = wtypes.wsattr(wtypes.UuidType()) ha_port_id = wtypes.wsattr(wtypes.UuidType()) cert_expiration = wtypes.wsattr(wtypes.datetime.datetime) cert_busy = wtypes.wsattr(bool) role = wtypes.wsattr(wtypes.StringType()) status = wtypes.wsattr(wtypes.StringType()) vrrp_interface = wtypes.wsattr(wtypes.StringType()) vrrp_id = wtypes.wsattr(wtypes.IntegerType()) vrrp_priority = wtypes.wsattr(wtypes.IntegerType()) cached_zone = wtypes.wsattr(wtypes.StringType()) created_at = wtypes.wsattr(wtypes.datetime.datetime) updated_at = wtypes.wsattr(wtypes.datetime.datetime) image_id = wtypes.wsattr(wtypes.UuidType()) compute_flavor = wtypes.wsattr(wtypes.StringType()) @classmethod def from_data_model(cls, data_model, children=False): amphorae = super(AmphoraResponse, cls).from_data_model( data_model, children=children) return amphorae class AmphoraRootResponse(types.BaseType): amphora = wtypes.wsattr(AmphoraResponse) class AmphoraeRootResponse(types.BaseType): amphorae = wtypes.wsattr([AmphoraResponse]) amphorae_links = wtypes.wsattr([types.PageType]) class AmphoraStatisticsResponse(BaseAmphoraType): """Defines which attributes are to show on stats response.""" active_connections = wtypes.wsattr(wtypes.IntegerType()) bytes_in = wtypes.wsattr(wtypes.IntegerType()) bytes_out = wtypes.wsattr(wtypes.IntegerType()) id = wtypes.wsattr(wtypes.UuidType()) listener_id = wtypes.wsattr(wtypes.UuidType()) loadbalancer_id = wtypes.wsattr(wtypes.UuidType()) request_errors = wtypes.wsattr(wtypes.IntegerType()) total_connections = wtypes.wsattr(wtypes.IntegerType()) class StatisticsRootResponse(types.BaseType): amphora_stats = wtypes.wsattr([AmphoraStatisticsResponse]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/types/availability_zone_profile.py0000664000175000017500000000541700000000000024672 0ustar00zuulzuul00000000000000# Copyright 2019 Verizon Media # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
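
# Availability zone profiles pair a provider name with an opaque metadata
# blob. availability_zone_data is stored as a JSON string; for the amphora
# provider it commonly carries keys such as (example only):
#
#   {"compute_zone": "az1"}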
# ==== octavia-6.2.2/octavia/api/v2/types/availability_zone_profile.py ====
# Copyright 2019 Verizon Media
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types


class BaseAvailabilityZoneProfileType(types.BaseType):
    _type_to_model_map = {}
    _child_map = {}


class AvailabilityZoneProfileResponse(BaseAvailabilityZoneProfileType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    provider_name = wtypes.wsattr(wtypes.StringType())
    availability_zone_data = wtypes.wsattr(wtypes.StringType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        availability_zone_profile = super(
            AvailabilityZoneProfileResponse, cls).from_data_model(
            data_model, children=children)
        return availability_zone_profile


class AvailabilityZoneProfileRootResponse(types.BaseType):
    availability_zone_profile = wtypes.wsattr(AvailabilityZoneProfileResponse)


class AvailabilityZoneProfilesRootResponse(types.BaseType):
    availability_zone_profiles = wtypes.wsattr(
        [AvailabilityZoneProfileResponse])
    availability_zone_profile_links = wtypes.wsattr([types.PageType])


class AvailabilityZoneProfilePOST(BaseAvailabilityZoneProfileType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True)
    provider_name = wtypes.wsattr(wtypes.StringType(max_length=255),
                                  mandatory=True)
    availability_zone_data = wtypes.wsattr(wtypes.StringType(max_length=4096),
                                           mandatory=True)


class AvailabilityZoneProfileRootPOST(types.BaseType):
    availability_zone_profile = wtypes.wsattr(AvailabilityZoneProfilePOST)


class AvailabilityZoneProfilePUT(BaseAvailabilityZoneProfileType):
    """Defines the attributes of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    provider_name = wtypes.wsattr(wtypes.StringType(max_length=255))
    availability_zone_data = wtypes.wsattr(wtypes.StringType(max_length=4096))


class AvailabilityZoneProfileRootPUT(types.BaseType):
    availability_zone_profile = wtypes.wsattr(AvailabilityZoneProfilePUT)
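# --- Illustrative sketch: availability_zone_data above is a plain string
# capped at 4096 characters, so structured data is typically serialized
# with json.dumps before the POST. The capability key shown is a made-up
# example, not a documented schema.
import json

availability_zone_profile_request = {
    'availability_zone_profile': {
        'name': 'azp-1',                 # mandatory, <= 255 chars
        'provider_name': 'amphora',      # mandatory, <= 255 chars
        'availability_zone_data': json.dumps({'example_zone': 'nova-az1'}),
    }
}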
# ==== octavia-6.2.2/octavia/api/v2/types/availability_zones.py ====
# Copyright 2019 Verizon Media
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types


class BaseAvailabilityZoneType(types.BaseType):
    _type_to_model_map = {}
    _child_map = {}


class AvailabilityZoneResponse(BaseAvailabilityZoneType):
    """Defines which attributes are to be shown on any response."""
    name = wtypes.wsattr(wtypes.StringType())
    description = wtypes.wsattr(wtypes.StringType())
    enabled = wtypes.wsattr(bool)
    availability_zone_profile_id = wtypes.wsattr(wtypes.StringType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        availability_zone = super(
            AvailabilityZoneResponse, cls).from_data_model(
            data_model, children=children)
        return availability_zone


class AvailabilityZoneRootResponse(types.BaseType):
    availability_zone = wtypes.wsattr(AvailabilityZoneResponse)


class AvailabilityZonesRootResponse(types.BaseType):
    availability_zones = wtypes.wsattr([AvailabilityZoneResponse])
    availability_zones_links = wtypes.wsattr([types.PageType])


class AvailabilityZonePOST(BaseAvailabilityZoneType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True)
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    enabled = wtypes.wsattr(bool, default=True)
    availability_zone_profile_id = wtypes.wsattr(wtypes.UuidType(),
                                                 mandatory=True)


class AvailabilityZoneRootPOST(types.BaseType):
    availability_zone = wtypes.wsattr(AvailabilityZonePOST)


class AvailabilityZonePUT(BaseAvailabilityZoneType):
    """Defines the attributes of a PUT request."""
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    enabled = wtypes.wsattr(bool)


class AvailabilityZoneRootPUT(types.BaseType):
    availability_zone = wtypes.wsattr(AvailabilityZonePUT)
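# --- Illustrative sketch: the JSON body accepted by AvailabilityZonePOST
# above, written as a plain dict. Field names and constraints come from the
# type definition; the UUID value is a made-up example. Note the PUT type
# deliberately omits 'name', since the zone name is its identifier.
availability_zone_request = {
    'availability_zone': {
        'name': 'az1',                   # mandatory, <= 255 chars
        'description': 'Primary AZ',     # optional, <= 255 chars
        'enabled': True,                 # optional, defaults to True
        # mandatory UUID of an existing availability zone profile
        'availability_zone_profile_id':
            '5ad58d34-2e93-4bd7-a5b2-0ae54b99aa54',
    }
}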
# ==== octavia-6.2.2/octavia/api/v2/types/flavor_profile.py ====
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types


class BaseFlavorProfileType(types.BaseType):
    _type_to_model_map = {}
    _child_map = {}


class FlavorProfileResponse(BaseFlavorProfileType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    provider_name = wtypes.wsattr(wtypes.StringType())
    flavor_data = wtypes.wsattr(wtypes.StringType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        flavorprofile = super(FlavorProfileResponse, cls).from_data_model(
            data_model, children=children)
        return flavorprofile


class FlavorProfileRootResponse(types.BaseType):
    flavorprofile = wtypes.wsattr(FlavorProfileResponse)


class FlavorProfilesRootResponse(types.BaseType):
    flavorprofiles = wtypes.wsattr([FlavorProfileResponse])
    flavorprofile_links = wtypes.wsattr([types.PageType])


class FlavorProfilePOST(BaseFlavorProfileType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True)
    provider_name = wtypes.wsattr(wtypes.StringType(max_length=255),
                                  mandatory=True)
    flavor_data = wtypes.wsattr(wtypes.StringType(max_length=4096),
                                mandatory=True)


class FlavorProfileRootPOST(types.BaseType):
    flavorprofile = wtypes.wsattr(FlavorProfilePOST)


class FlavorProfilePUT(BaseFlavorProfileType):
    """Defines the attributes of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    provider_name = wtypes.wsattr(wtypes.StringType(max_length=255))
    flavor_data = wtypes.wsattr(wtypes.StringType(max_length=4096))


class FlavorProfileRootPUT(types.BaseType):
    flavorprofile = wtypes.wsattr(FlavorProfilePUT)
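# --- Illustrative sketch: like availability_zone_data, flavor_data above
# is a string capped at 4096 characters, so structured capability data is
# typically serialized with json.dumps before being sent. The capability
# key below is a made-up example, not a documented schema.
import json

flavor_profile_request = {
    'flavorprofile': {
        'name': 'single-amp',            # mandatory, <= 255 chars
        'provider_name': 'amphora',      # mandatory, <= 255 chars
        'flavor_data': json.dumps({'example_topology': 'SINGLE'}),
    }
}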
# ==== octavia-6.2.2/octavia/api/v2/types/flavors.py ====
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types


class BaseFlavorType(types.BaseType):
    _type_to_model_map = {}
    _child_map = {}


class FlavorResponse(BaseFlavorType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    description = wtypes.wsattr(wtypes.StringType())
    enabled = wtypes.wsattr(bool)
    flavor_profile_id = wtypes.wsattr(wtypes.StringType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        flavor = super(FlavorResponse, cls).from_data_model(
            data_model, children=children)
        return flavor


class FlavorRootResponse(types.BaseType):
    flavor = wtypes.wsattr(FlavorResponse)


class FlavorsRootResponse(types.BaseType):
    flavors = wtypes.wsattr([FlavorResponse])
    flavors_links = wtypes.wsattr([types.PageType])


class FlavorPOST(BaseFlavorType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True)
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    enabled = wtypes.wsattr(bool, default=True)
    flavor_profile_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True)


class FlavorRootPOST(types.BaseType):
    flavor = wtypes.wsattr(FlavorPOST)


class FlavorPUT(BaseFlavorType):
    """Defines the attributes of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    enabled = wtypes.wsattr(bool)


class FlavorRootPUT(types.BaseType):
    flavor = wtypes.wsattr(FlavorPUT)
# ==== octavia-6.2.2/octavia/api/v2/types/health_monitor.py ====
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types
from octavia.common import constants


class BaseHealthMonitorType(types.BaseType):
    _type_to_model_map = {'admin_state_up': 'enabled',
                          'max_retries': 'rise_threshold',
                          'max_retries_down': 'fall_threshold'}
    _child_map = {}


class HealthMonitorResponse(BaseHealthMonitorType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    type = wtypes.wsattr(wtypes.text)
    delay = wtypes.wsattr(wtypes.IntegerType())
    timeout = wtypes.wsattr(wtypes.IntegerType())
    max_retries = wtypes.wsattr(wtypes.IntegerType())
    max_retries_down = wtypes.wsattr(wtypes.IntegerType())
    http_method = wtypes.wsattr(wtypes.text)
    url_path = wtypes.wsattr(wtypes.text)
    expected_codes = wtypes.wsattr(wtypes.text)
    admin_state_up = wtypes.wsattr(bool)
    project_id = wtypes.wsattr(wtypes.StringType())
    pools = wtypes.wsattr([types.IdOnlyType])
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    created_at = wtypes.wsattr(wtypes.datetime.datetime)
    updated_at = wtypes.wsattr(wtypes.datetime.datetime)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
    http_version = wtypes.wsattr(float)
    domain_name = wtypes.wsattr(wtypes.StringType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        healthmonitor = super(HealthMonitorResponse, cls).from_data_model(
            data_model, children=children)
        if cls._full_response():
            del healthmonitor.pools
        else:
            healthmonitor.pools = [
                types.IdOnlyType.from_data_model(data_model.pool)]
        return healthmonitor


class HealthMonitorFullResponse(HealthMonitorResponse):
    @classmethod
    def _full_response(cls):
        return True


class HealthMonitorRootResponse(types.BaseType):
    healthmonitor = wtypes.wsattr(HealthMonitorResponse)


class HealthMonitorsRootResponse(types.BaseType):
    healthmonitors = wtypes.wsattr([HealthMonitorResponse])
    healthmonitors_links = wtypes.wsattr([types.PageType])


class HealthMonitorPOST(BaseHealthMonitorType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    type = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_TYPES),
        mandatory=True)
    delay = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True)
    timeout = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True)
    max_retries_down = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES,
                           maximum=constants.MAX_HM_RETRIES),
        default=constants.DEFAULT_MAX_RETRIES_DOWN)
    max_retries = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES,
                           maximum=constants.MAX_HM_RETRIES),
        mandatory=True)
    http_method = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS))
    url_path = wtypes.wsattr(
        types.URLPathType())
    expected_codes = wtypes.wsattr(
        wtypes.StringType(pattern=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'))
    admin_state_up = wtypes.wsattr(bool, default=True)
    # TODO(johnsom) Remove after deprecation (R series)
    project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
    pool_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    http_version = wtypes.wsattr(
        wtypes.Enum(float, *constants.SUPPORTED_HTTP_VERSIONS))
    domain_name = wtypes.wsattr(
        wtypes.StringType(min_length=1, max_length=255,
                          pattern=constants.DOMAIN_NAME_REGEX))


class HealthMonitorRootPOST(types.BaseType):
    healthmonitor = wtypes.wsattr(HealthMonitorPOST)


class HealthMonitorPUT(BaseHealthMonitorType):
    """Defines attributes that are acceptable of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    delay = wtypes.wsattr(wtypes.IntegerType(minimum=0))
    timeout = wtypes.wsattr(wtypes.IntegerType(minimum=0))
    max_retries_down = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES,
                           maximum=constants.MAX_HM_RETRIES))
    max_retries = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES,
                           maximum=constants.MAX_HM_RETRIES))
    http_method = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS))
    url_path = wtypes.wsattr(types.URLPathType())
    expected_codes = wtypes.wsattr(
        wtypes.StringType(pattern=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'))
    admin_state_up = wtypes.wsattr(bool)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    http_version = wtypes.wsattr(
        wtypes.Enum(float, *constants.SUPPORTED_HTTP_VERSIONS))
    domain_name = wtypes.wsattr(
        wtypes.StringType(min_length=1, max_length=255,
                          pattern=constants.DOMAIN_NAME_REGEX))


class HealthMonitorRootPUT(types.BaseType):
    healthmonitor = wtypes.wsattr(HealthMonitorPUT)


class HealthMonitorSingleCreate(BaseHealthMonitorType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    type = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_TYPES),
        mandatory=True)
    delay = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True)
    timeout = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True)
    max_retries_down = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES,
                           maximum=constants.MAX_HM_RETRIES),
        default=constants.DEFAULT_MAX_RETRIES_DOWN)
    max_retries = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES,
                           maximum=constants.MAX_HM_RETRIES),
        mandatory=True)
    http_method = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS))
    url_path = wtypes.wsattr(types.URLPathType())
    expected_codes = wtypes.wsattr(
        wtypes.StringType(pattern=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'))
    admin_state_up = wtypes.wsattr(bool, default=True)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    http_version = wtypes.wsattr(
        wtypes.Enum(float, *constants.SUPPORTED_HTTP_VERSIONS))
    domain_name = wtypes.wsattr(
        wtypes.StringType(min_length=1, max_length=255,
                          pattern=constants.DOMAIN_NAME_REGEX))


class HealthMonitorStatusResponse(BaseHealthMonitorType):
    """Defines which attributes are to be shown on status response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    type = wtypes.wsattr(wtypes.text)
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
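# --- Illustrative sketch: the expected_codes pattern above accepts a
# single status code, a comma-separated list, or a single range (mixing a
# list with a range is rejected). A quick check with the stdlib re module:
import re

EXPECTED_CODES = re.compile(r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$')

for value in ('200', '200, 202, 204', '200-204'):
    assert EXPECTED_CODES.match(value)
for value in ('2000', '200-204,301'):
    assert not EXPECTED_CODES.match(value)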
# ==== octavia-6.2.2/octavia/api/v2/types/l7policy.py ====
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types
from octavia.api.v2.types import l7rule
from octavia.api.v2.types import pool
from octavia.common import constants


class BaseL7PolicyType(types.BaseType):
    _type_to_model_map = {'admin_state_up': 'enabled'}
    _child_map = {}


class L7PolicyResponse(BaseL7PolicyType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    description = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    admin_state_up = wtypes.wsattr(bool)
    project_id = wtypes.wsattr(wtypes.StringType())
    action = wtypes.wsattr(wtypes.StringType())
    listener_id = wtypes.wsattr(wtypes.UuidType())
    redirect_pool_id = wtypes.wsattr(wtypes.UuidType())
    redirect_url = wtypes.wsattr(wtypes.StringType())
    redirect_prefix = wtypes.wsattr(wtypes.StringType())
    position = wtypes.wsattr(wtypes.IntegerType())
    rules = wtypes.wsattr([types.IdOnlyType])
    created_at = wtypes.wsattr(wtypes.datetime.datetime)
    updated_at = wtypes.wsattr(wtypes.datetime.datetime)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
    redirect_http_code = wtypes.wsattr(wtypes.IntegerType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        policy = super(L7PolicyResponse, cls).from_data_model(
            data_model, children=children)
        if cls._full_response():
            rule_model = l7rule.L7RuleFullResponse
        else:
            rule_model = types.IdOnlyType
        policy.rules = [
            rule_model.from_data_model(i) for i in data_model.l7rules]
        return policy


class L7PolicyFullResponse(L7PolicyResponse):
    @classmethod
    def _full_response(cls):
        return True

    rules = wtypes.wsattr([l7rule.L7RuleFullResponse])


class L7PolicyRootResponse(types.BaseType):
    l7policy = wtypes.wsattr(L7PolicyResponse)


class L7PoliciesRootResponse(types.BaseType):
    l7policies = wtypes.wsattr([L7PolicyResponse])
    l7policies_links = wtypes.wsattr([types.PageType])


class L7PolicyPOST(BaseL7PolicyType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    # TODO(johnsom) Remove after deprecation (R series)
    project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
    action = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7POLICY_ACTIONS),
        mandatory=True)
    redirect_pool_id = wtypes.wsattr(wtypes.UuidType())
    redirect_url = wtypes.wsattr(types.URLType())
    redirect_prefix = wtypes.wsattr(types.URLType())
    position = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_POLICY_POSITION,
        maximum=constants.MAX_POLICY_POSITION),
        default=constants.MAX_POLICY_POSITION)
    listener_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True)
    rules = wtypes.wsattr([l7rule.L7RuleSingleCreate])
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    redirect_http_code = wtypes.wsattr(
        wtypes.Enum(int, *constants.SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES))


class L7PolicyRootPOST(types.BaseType):
    l7policy = wtypes.wsattr(L7PolicyPOST)


class L7PolicyPUT(BaseL7PolicyType):
    """Defines attributes that are acceptable of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool)
    action = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7POLICY_ACTIONS))
    redirect_pool_id = wtypes.wsattr(wtypes.UuidType())
    redirect_url = wtypes.wsattr(types.URLType())
    redirect_prefix = wtypes.wsattr(types.URLType())
    position = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_POLICY_POSITION,
        maximum=constants.MAX_POLICY_POSITION))
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    redirect_http_code = wtypes.wsattr(
        wtypes.Enum(int, *constants.SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES))


class L7PolicyRootPUT(types.BaseType):
    l7policy = wtypes.wsattr(L7PolicyPUT)


class L7PolicySingleCreate(BaseL7PolicyType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    action = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7POLICY_ACTIONS),
        mandatory=True)
    redirect_pool = wtypes.wsattr(pool.PoolSingleCreate)
    redirect_url = wtypes.wsattr(types.URLType())
    redirect_prefix = wtypes.wsattr(types.URLType())
    position = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_POLICY_POSITION,
        maximum=constants.MAX_POLICY_POSITION),
        default=constants.MAX_POLICY_POSITION)
    rules = wtypes.wsattr([l7rule.L7RuleSingleCreate])
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    redirect_http_code = wtypes.wsattr(
        wtypes.Enum(int, *constants.SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES))
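# --- Illustrative sketch: a redirect policy body matching the
# L7PolicyPOST fields above. Which redirect_* field must accompany which
# action is enforced by separate API validation, not by this type; the
# pairing shown (a URL-redirect action with redirect_url) is an assumption
# of the conventional usage. UUIDs are made up.
l7policy_request = {
    'l7policy': {
        'listener_id': '023f2e34-7806-443b-bfae-16c324569a3d',  # mandatory
        'action': 'REDIRECT_TO_URL',
        'redirect_url': 'https://www.example.com/',
        'position': 1,  # defaults to MAX_POLICY_POSITION when omitted
        'rules': [{'type': 'PATH',
                   'compare_type': 'STARTS_WITH',
                   'value': '/legacy'}],
    }
}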
# ==== octavia-6.2.2/octavia/api/v2/types/l7rule.py ====
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types
from octavia.common import constants


class BaseL7Type(types.BaseType):
    _type_to_model_map = {'admin_state_up': 'enabled'}
    _child_map = {}


class L7RuleResponse(BaseL7Type):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    type = wtypes.wsattr(wtypes.StringType())
    compare_type = wtypes.wsattr(wtypes.StringType())
    key = wtypes.wsattr(wtypes.StringType())
    value = wtypes.wsattr(wtypes.StringType())
    invert = wtypes.wsattr(bool)
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    created_at = wtypes.wsattr(wtypes.datetime.datetime)
    updated_at = wtypes.wsattr(wtypes.datetime.datetime)
    project_id = wtypes.wsattr(wtypes.StringType())
    admin_state_up = wtypes.wsattr(bool)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))

    @classmethod
    def from_data_model(cls, data_model, children=False):
        rule = super(L7RuleResponse, cls).from_data_model(
            data_model, children=children)
        return rule


class L7RuleFullResponse(L7RuleResponse):
    @classmethod
    def _full_response(cls):
        return True


class L7RuleRootResponse(types.BaseType):
    rule = wtypes.wsattr(L7RuleResponse)


class L7RulesRootResponse(types.BaseType):
    rules = wtypes.wsattr([L7RuleResponse])
    rules_links = wtypes.wsattr([types.PageType])


class L7RulePOST(BaseL7Type):
    """Defines mandatory and optional attributes of a POST request."""
    type = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7RULE_TYPES),
        mandatory=True)
    compare_type = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7RULE_COMPARE_TYPES),
        mandatory=True)
    key = wtypes.wsattr(wtypes.StringType(max_length=255))
    value = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True)
    invert = wtypes.wsattr(bool, default=False)
    admin_state_up = wtypes.wsattr(bool, default=True)
    # TODO(johnsom) Remove after deprecation (R series)
    project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))


class L7RuleRootPOST(types.BaseType):
    rule = wtypes.wsattr(L7RulePOST)


class L7RulePUT(BaseL7Type):
    """Defines attributes that are acceptable of a PUT request."""
    type = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7RULE_TYPES))
    compare_type = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7RULE_COMPARE_TYPES))
    key = wtypes.wsattr(wtypes.StringType(max_length=255))
    value = wtypes.wsattr(wtypes.StringType(max_length=255))
    invert = wtypes.wsattr(bool)
    admin_state_up = wtypes.wsattr(bool)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))


class L7RuleRootPUT(types.BaseType):
    rule = wtypes.wsattr(L7RulePUT)


class L7RuleSingleCreate(BaseL7Type):
    """Defines mandatory and optional attributes of a POST request."""
    type = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7RULE_TYPES),
        mandatory=True)
    compare_type = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_L7RULE_COMPARE_TYPES),
        mandatory=True)
    key = wtypes.wsattr(wtypes.StringType(max_length=255))
    value = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True)
    invert = wtypes.wsattr(bool, default=False)
    admin_state_up = wtypes.wsattr(bool, default=True)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
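# --- Illustrative sketch: an L7 rule body matching L7RulePOST above.
# invert flips the match result, so this rule matches requests whose Host
# header is anything other than api.example.com. The type and compare_type
# values are assumed members of the SUPPORTED_L7RULE_* constant lists.
l7rule_request = {
    'rule': {
        'type': 'HOST_NAME',
        'compare_type': 'EQUAL_TO',
        'value': 'api.example.com',
        'invert': True,          # optional, defaults to False
    }
}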
# ==== octavia-6.2.2/octavia/api/v2/types/listener.py ====
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types
from octavia.api.v2.types import l7policy
from octavia.api.v2.types import pool
from octavia.common import constants


class BaseListenerType(types.BaseType):
    _type_to_model_map = {
        'admin_state_up': 'enabled',
        'default_tls_container_ref': 'tls_certificate_id',
        'sni_container_refs': 'sni_containers',
        'client_ca_tls_container_ref': 'client_ca_tls_certificate_id',
        'client_crl_container_ref': 'client_crl_container_id'}
    _child_map = {}


class ListenerResponse(BaseListenerType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    description = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    admin_state_up = wtypes.wsattr(bool)
    protocol = wtypes.wsattr(wtypes.text)
    protocol_port = wtypes.wsattr(wtypes.IntegerType())
    connection_limit = wtypes.wsattr(wtypes.IntegerType())
    default_tls_container_ref = wtypes.wsattr(wtypes.StringType())
    sni_container_refs = [wtypes.StringType()]
    project_id = wtypes.wsattr(wtypes.StringType())
    default_pool_id = wtypes.wsattr(wtypes.UuidType())
    l7policies = wtypes.wsattr([types.IdOnlyType])
    insert_headers = wtypes.wsattr(wtypes.DictType(str, str))
    created_at = wtypes.wsattr(wtypes.datetime.datetime)
    updated_at = wtypes.wsattr(wtypes.datetime.datetime)
    loadbalancers = wtypes.wsattr([types.IdOnlyType])
    timeout_client_data = wtypes.wsattr(wtypes.IntegerType())
    timeout_member_connect = wtypes.wsattr(wtypes.IntegerType())
    timeout_member_data = wtypes.wsattr(wtypes.IntegerType())
    timeout_tcp_inspect = wtypes.wsattr(wtypes.IntegerType())
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
    client_ca_tls_container_ref = wtypes.StringType()
    client_authentication = wtypes.wsattr(wtypes.StringType())
    client_crl_container_ref = wtypes.wsattr(wtypes.StringType())
    allowed_cidrs = wtypes.wsattr([types.CidrType()])
    tls_ciphers = wtypes.StringType()

    @classmethod
    def from_data_model(cls, data_model, children=False):
        listener = super(ListenerResponse, cls).from_data_model(
            data_model, children=children)
        listener.sni_container_refs = [
            sni_c.tls_container_id for sni_c in data_model.sni_containers]
        listener.allowed_cidrs = [
            c.cidr for c in data_model.allowed_cidrs] or None
        if cls._full_response():
            del listener.loadbalancers
            l7policy_type = l7policy.L7PolicyFullResponse
        else:
            listener.loadbalancers = [
                types.IdOnlyType.from_data_model(data_model.load_balancer)]
            l7policy_type = types.IdOnlyType
        listener.l7policies = [
            l7policy_type.from_data_model(i) for i in data_model.l7policies]
        return listener


class ListenerFullResponse(ListenerResponse):
    @classmethod
    def _full_response(cls):
        return True

    l7policies = wtypes.wsattr([l7policy.L7PolicyFullResponse])


class ListenerRootResponse(types.BaseType):
    listener = wtypes.wsattr(ListenerResponse)


class ListenersRootResponse(types.BaseType):
    listeners = wtypes.wsattr([ListenerResponse])
    listeners_links = wtypes.wsattr([types.PageType])


class ListenerPOST(BaseListenerType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS),
                             mandatory=True)
    protocol_port = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_PORT_NUMBER,
                           maximum=constants.MAX_PORT_NUMBER), mandatory=True)
    connection_limit = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_CONNECTION_LIMIT),
        default=constants.DEFAULT_CONNECTION_LIMIT)
    default_tls_container_ref = wtypes.wsattr(
        wtypes.StringType(max_length=255))
    sni_container_refs = [wtypes.StringType(max_length=255)]
    # TODO(johnsom) Remove after deprecation (R series)
    project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
    default_pool_id = wtypes.wsattr(wtypes.UuidType())
    default_pool = wtypes.wsattr(pool.PoolSingleCreate)
    l7policies = wtypes.wsattr([l7policy.L7PolicySingleCreate], default=[])
    insert_headers = wtypes.wsattr(
        wtypes.DictType(str, wtypes.StringType(max_length=255)))
    loadbalancer_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True)
    timeout_client_data = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_member_connect = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_member_data = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_tcp_inspect = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    client_ca_tls_container_ref = wtypes.StringType(max_length=255)
    client_authentication = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES),
        default=constants.CLIENT_AUTH_NONE)
    client_crl_container_ref = wtypes.StringType(max_length=255)
    allowed_cidrs = wtypes.wsattr([types.CidrType()])
    tls_ciphers = wtypes.StringType(max_length=2048)


class ListenerRootPOST(types.BaseType):
    listener = wtypes.wsattr(ListenerPOST)


class ListenerPUT(BaseListenerType):
    """Defines attributes that are acceptable of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool)
    connection_limit = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_CONNECTION_LIMIT))
    default_tls_container_ref = wtypes.wsattr(
        wtypes.StringType(max_length=255))
    sni_container_refs = [wtypes.StringType(max_length=255)]
    default_pool_id = wtypes.wsattr(wtypes.UuidType())
    insert_headers = wtypes.wsattr(
        wtypes.DictType(str, wtypes.StringType(max_length=255)))
    timeout_client_data = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_member_connect = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_member_data = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_tcp_inspect = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    client_ca_tls_container_ref = wtypes.StringType(max_length=255)
    client_authentication = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES))
    client_crl_container_ref = wtypes.StringType(max_length=255)
    allowed_cidrs = wtypes.wsattr([types.CidrType()])
    tls_ciphers = wtypes.StringType(max_length=2048)


class ListenerRootPUT(types.BaseType):
    listener = wtypes.wsattr(ListenerPUT)


class ListenerSingleCreate(BaseListenerType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS),
                             mandatory=True)
    protocol_port = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_PORT_NUMBER,
                           maximum=constants.MAX_PORT_NUMBER), mandatory=True)
    connection_limit = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_CONNECTION_LIMIT),
        default=constants.DEFAULT_CONNECTION_LIMIT)
    default_tls_container_ref = wtypes.wsattr(
        wtypes.StringType(max_length=255))
    sni_container_refs = [wtypes.StringType(max_length=255)]
    default_pool_id = wtypes.wsattr(wtypes.UuidType())
    default_pool = wtypes.wsattr(pool.PoolSingleCreate)
    l7policies = wtypes.wsattr([l7policy.L7PolicySingleCreate], default=[])
    insert_headers = wtypes.wsattr(
        wtypes.DictType(str, wtypes.StringType(max_length=255)))
    timeout_client_data = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_member_connect = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_member_data = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    timeout_tcp_inspect = wtypes.wsattr(
        wtypes.IntegerType(minimum=constants.MIN_TIMEOUT,
                           maximum=constants.MAX_TIMEOUT))
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    client_ca_tls_container_ref = wtypes.StringType(max_length=255)
    client_authentication = wtypes.wsattr(
        wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES),
        default=constants.CLIENT_AUTH_NONE)
    client_crl_container_ref = wtypes.StringType(max_length=255)
    allowed_cidrs = wtypes.wsattr([types.CidrType()])
    tls_ciphers = wtypes.StringType(max_length=2048)


class ListenerStatusResponse(BaseListenerType):
    """Defines which attributes are to be shown on status response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    pools = wtypes.wsattr([pool.PoolStatusResponse])

    @classmethod
    def from_data_model(cls, data_model, children=False):
        listener = super(ListenerStatusResponse, cls).from_data_model(
            data_model, children=children)
        pool_model = pool.PoolStatusResponse
        listener.pools = [
            pool_model.from_data_model(i) for i in data_model.pools]
        if not listener.name:
            listener.name = ""
        return listener


class ListenerStatisticsResponse(BaseListenerType):
    """Defines which attributes are to show on stats response."""
    bytes_in = wtypes.wsattr(wtypes.IntegerType())
    bytes_out = wtypes.wsattr(wtypes.IntegerType())
    active_connections = wtypes.wsattr(wtypes.IntegerType())
    total_connections = wtypes.wsattr(wtypes.IntegerType())
    request_errors = wtypes.wsattr(wtypes.IntegerType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        result = super(ListenerStatisticsResponse, cls).from_data_model(
            data_model, children=children)
        return result


class StatisticsRootResponse(types.BaseType):
    stats = wtypes.wsattr(ListenerStatisticsResponse)
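# --- Illustrative sketch: a stdlib-only restatement of two lines in
# ListenerResponse.from_data_model above — SNI container objects are
# flattened to bare refs, and an empty allowed_cidrs list collapses to
# None. The _Sni stand-in class is hypothetical.
class _Sni:
    def __init__(self, tls_container_id):
        self.tls_container_id = tls_container_id


sni_containers = [_Sni('ref-a'), _Sni('ref-b')]
sni_container_refs = [sni.tls_container_id for sni in sni_containers]
assert sni_container_refs == ['ref-a', 'ref-b']

allowed_cidrs = [c.cidr for c in []] or None  # empty list becomes None
assert allowed_cidrs is None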
# ==== octavia-6.2.2/octavia/api/v2/types/load_balancer.py ====
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types
from octavia.api.v2.types import listener
from octavia.api.v2.types import pool


class BaseLoadBalancerType(types.BaseType):
    _type_to_model_map = {'vip_address': 'vip.ip_address',
                          'vip_subnet_id': 'vip.subnet_id',
                          'vip_port_id': 'vip.port_id',
                          'vip_network_id': 'vip.network_id',
                          'vip_qos_policy_id': 'vip.qos_policy_id',
                          'admin_state_up': 'enabled'}
    _child_map = {'vip': {
        'ip_address': 'vip_address',
        'subnet_id': 'vip_subnet_id',
        'port_id': 'vip_port_id',
        'network_id': 'vip_network_id',
        'qos_policy_id': 'vip_qos_policy_id'}}


class LoadBalancerResponse(BaseLoadBalancerType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    description = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    admin_state_up = wtypes.wsattr(bool)
    project_id = wtypes.wsattr(wtypes.StringType())
    created_at = wtypes.wsattr(wtypes.datetime.datetime)
    updated_at = wtypes.wsattr(wtypes.datetime.datetime)
    vip_address = wtypes.wsattr(types.IPAddressType())
    vip_port_id = wtypes.wsattr(wtypes.UuidType())
    vip_subnet_id = wtypes.wsattr(wtypes.UuidType())
    vip_network_id = wtypes.wsattr(wtypes.UuidType())
    listeners = wtypes.wsattr([types.IdOnlyType])
    pools = wtypes.wsattr([types.IdOnlyType])
    provider = wtypes.wsattr(wtypes.StringType())
    flavor_id = wtypes.wsattr(wtypes.UuidType())
    vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
    availability_zone = wtypes.wsattr(wtypes.StringType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        result = super(LoadBalancerResponse, cls).from_data_model(
            data_model, children=children)
        if data_model.vip:
            result.vip_subnet_id = data_model.vip.subnet_id
            result.vip_port_id = data_model.vip.port_id
            result.vip_address = data_model.vip.ip_address
            result.vip_network_id = data_model.vip.network_id
            result.vip_qos_policy_id = data_model.vip.qos_policy_id
        if cls._full_response():
            listener_model = listener.ListenerFullResponse
            pool_model = pool.PoolFullResponse
        else:
            listener_model = types.IdOnlyType
            pool_model = types.IdOnlyType
        result.listeners = [
            listener_model.from_data_model(i) for i in data_model.listeners]
        result.pools = [
            pool_model.from_data_model(i) for i in data_model.pools]
        if not result.provider:
            result.provider = "octavia"
        return result


class LoadBalancerFullResponse(LoadBalancerResponse):
    @classmethod
    def _full_response(cls):
        return True

    listeners = wtypes.wsattr([listener.ListenerFullResponse])
    pools = wtypes.wsattr([pool.PoolFullResponse])


class LoadBalancerRootResponse(types.BaseType):
    loadbalancer = wtypes.wsattr(LoadBalancerResponse)


class LoadBalancerFullRootResponse(LoadBalancerRootResponse):
    loadbalancer = wtypes.wsattr(LoadBalancerFullResponse)


class LoadBalancersRootResponse(types.BaseType):
    loadbalancers = wtypes.wsattr([LoadBalancerResponse])
    loadbalancers_links = wtypes.wsattr([types.PageType])


class LoadBalancerPOST(BaseLoadBalancerType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    vip_address = wtypes.wsattr(types.IPAddressType())
    vip_port_id = wtypes.wsattr(wtypes.UuidType())
    vip_subnet_id = wtypes.wsattr(wtypes.UuidType())
    vip_network_id = wtypes.wsattr(wtypes.UuidType())
    vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
    project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
    listeners = wtypes.wsattr([listener.ListenerSingleCreate], default=[])
    pools = wtypes.wsattr([pool.PoolSingleCreate], default=[])
    provider = wtypes.wsattr(wtypes.StringType(max_length=64))
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
    flavor_id = wtypes.wsattr(wtypes.UuidType())
    availability_zone = wtypes.wsattr(wtypes.StringType(max_length=255))


class LoadBalancerRootPOST(types.BaseType):
    loadbalancer = wtypes.wsattr(LoadBalancerPOST)


class LoadBalancerPUT(BaseLoadBalancerType):
    """Defines attributes that are acceptable of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    description = wtypes.wsattr(wtypes.StringType(max_length=255))
    vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
    admin_state_up = wtypes.wsattr(bool)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))


class LoadBalancerRootPUT(types.BaseType):
    loadbalancer = wtypes.wsattr(LoadBalancerPUT)


class LoadBalancerStatusResponse(BaseLoadBalancerType):
    """Defines which attributes are to be shown on status response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    listeners = wtypes.wsattr([listener.ListenerStatusResponse])

    @classmethod
    def from_data_model(cls, data_model, children=False):
        result = super(LoadBalancerStatusResponse, cls).from_data_model(
            data_model, children=children)
        listener_model = listener.ListenerStatusResponse
        result.listeners = [
            listener_model.from_data_model(i) for i in data_model.listeners]
        if not result.name:
            result.name = ""
        return result


class StatusResponse(wtypes.Base):
    loadbalancer = wtypes.wsattr(LoadBalancerStatusResponse)


class StatusRootResponse(types.BaseType):
    statuses = wtypes.wsattr(StatusResponse)


class LoadBalancerStatisticsResponse(BaseLoadBalancerType):
    """Defines which attributes are to show on stats response."""
    bytes_in = wtypes.wsattr(wtypes.IntegerType())
    bytes_out = wtypes.wsattr(wtypes.IntegerType())
    active_connections = wtypes.wsattr(wtypes.IntegerType())
    total_connections = wtypes.wsattr(wtypes.IntegerType())
    request_errors = wtypes.wsattr(wtypes.IntegerType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        result = super(LoadBalancerStatisticsResponse, cls).from_data_model(
            data_model, children=children)
        return result


class StatisticsRootResponse(types.BaseType):
    stats = wtypes.wsattr(LoadBalancerStatisticsResponse)

# ==== octavia-6.2.2/octavia/api/v2/types/member.py ====
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import types as wtypes

from octavia.api.common import types
from octavia.common import constants


class BaseMemberType(types.BaseType):
    _type_to_model_map = {'admin_state_up': 'enabled',
                          'address': 'ip_address'}
    _child_map = {}


class MemberResponse(BaseMemberType):
    """Defines which attributes are to be shown on any response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    admin_state_up = wtypes.wsattr(bool)
    address = wtypes.wsattr(types.IPAddressType())
    protocol_port = wtypes.wsattr(wtypes.IntegerType())
    weight = wtypes.wsattr(wtypes.IntegerType())
    backup = wtypes.wsattr(bool)
    subnet_id = wtypes.wsattr(wtypes.UuidType())
    project_id = wtypes.wsattr(wtypes.StringType())
    created_at = wtypes.wsattr(wtypes.datetime.datetime)
    updated_at = wtypes.wsattr(wtypes.datetime.datetime)
    monitor_address = wtypes.wsattr(types.IPAddressType())
    monitor_port = wtypes.wsattr(wtypes.IntegerType())
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))

    @classmethod
    def from_data_model(cls, data_model, children=False):
        member = super(MemberResponse, cls).from_data_model(
            data_model, children=children)
        return member


class MemberFullResponse(MemberResponse):
    @classmethod
    def _full_response(cls):
        return True


class MemberRootResponse(types.BaseType):
    member = wtypes.wsattr(MemberResponse)


class MembersRootResponse(types.BaseType):
    members = wtypes.wsattr([MemberResponse])
    members_links = wtypes.wsattr([types.PageType])


class MemberPOST(BaseMemberType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    address = wtypes.wsattr(types.IPAddressType(), mandatory=True)
    protocol_port = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_PORT_NUMBER,
        maximum=constants.MAX_PORT_NUMBER), mandatory=True)
    weight = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_WEIGHT, maximum=constants.MAX_WEIGHT),
        default=constants.DEFAULT_WEIGHT)
    backup = wtypes.wsattr(bool, default=False)
    subnet_id = wtypes.wsattr(wtypes.UuidType())
    # TODO(johnsom) Remove after deprecation (R series)
    project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
    monitor_port = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_PORT_NUMBER,
        maximum=constants.MAX_PORT_NUMBER), default=None)
    monitor_address = wtypes.wsattr(types.IPAddressType(), default=None)
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))


class MemberRootPOST(types.BaseType):
    member = wtypes.wsattr(MemberPOST)


class MemberPUT(BaseMemberType):
    """Defines attributes that are acceptable of a PUT request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool)
    weight = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_WEIGHT, maximum=constants.MAX_WEIGHT))
    backup = wtypes.wsattr(bool)
    monitor_port = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_PORT_NUMBER,
        maximum=constants.MAX_PORT_NUMBER))
    monitor_address = wtypes.wsattr(types.IPAddressType())
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))


class MemberRootPUT(types.BaseType):
    member = wtypes.wsattr(MemberPUT)


class MembersRootPUT(types.BaseType):
    members = wtypes.wsattr([MemberPOST])


class MemberSingleCreate(BaseMemberType):
    """Defines mandatory and optional attributes of a POST request."""
    name = wtypes.wsattr(wtypes.StringType(max_length=255))
    admin_state_up = wtypes.wsattr(bool, default=True)
    address = wtypes.wsattr(types.IPAddressType(), mandatory=True)
    protocol_port = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_PORT_NUMBER,
        maximum=constants.MAX_PORT_NUMBER), mandatory=True)
    weight = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_WEIGHT, maximum=constants.MAX_WEIGHT),
        default=constants.DEFAULT_WEIGHT)
    backup = wtypes.wsattr(bool, default=False)
    subnet_id = wtypes.wsattr(wtypes.UuidType())
    monitor_port = wtypes.wsattr(wtypes.IntegerType(
        minimum=constants.MIN_PORT_NUMBER,
        maximum=constants.MAX_PORT_NUMBER))
    monitor_address = wtypes.wsattr(types.IPAddressType())
    tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))


class MemberStatusResponse(BaseMemberType):
    """Defines which attributes are to be shown on status response."""
    id = wtypes.wsattr(wtypes.UuidType())
    name = wtypes.wsattr(wtypes.StringType())
    operating_status = wtypes.wsattr(wtypes.StringType())
    provisioning_status = wtypes.wsattr(wtypes.StringType())
    address = wtypes.wsattr(types.IPAddressType())
    protocol_port = wtypes.wsattr(wtypes.IntegerType())

    @classmethod
    def from_data_model(cls, data_model, children=False):
        member = super(MemberStatusResponse, cls).from_data_model(
            data_model, children=children)
        if not member.name:
            member.name = ""
        return member
from wsme import types as wtypes from octavia.api.common import types from octavia.api.v2.types import health_monitor from octavia.api.v2.types import member from octavia.common import constants class SessionPersistenceResponse(types.BaseType): """Defines which attributes are to be shown on any response.""" type = wtypes.wsattr(wtypes.text) cookie_name = wtypes.wsattr(wtypes.text) persistence_timeout = wtypes.wsattr(wtypes.IntegerType()) persistence_granularity = wtypes.wsattr(types.IPAddressType()) class SessionPersistencePOST(types.BaseType): """Defines mandatory and optional attributes of a POST request.""" type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES), mandatory=True) cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255), default=None) persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None) persistence_granularity = wtypes.wsattr(types.IPAddressType(), default=None) class SessionPersistencePUT(types.BaseType): """Defines attributes that are acceptable of a PUT request.""" type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES)) cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255), default=None) persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None) persistence_granularity = wtypes.wsattr(types.IPAddressType(), default=None) class BasePoolType(types.BaseType): _type_to_model_map = {'admin_state_up': 'enabled', 'healthmonitor': 'health_monitor', 'healthmonitor_id': 'health_monitor.id', 'tls_container_ref': 'tls_certificate_id', 'ca_tls_container_ref': 'ca_tls_certificate_id', 'crl_container_ref': 'crl_container_id'} _child_map = {'health_monitor': {'id': 'healthmonitor_id'}} class PoolResponse(BasePoolType): """Defines which attributes are to be shown on any response.""" id = wtypes.wsattr(wtypes.UuidType()) name = wtypes.wsattr(wtypes.StringType()) description = wtypes.wsattr(wtypes.StringType()) provisioning_status = wtypes.wsattr(wtypes.StringType()) operating_status = wtypes.wsattr(wtypes.StringType()) admin_state_up = wtypes.wsattr(bool) protocol = wtypes.wsattr(wtypes.text) lb_algorithm = wtypes.wsattr(wtypes.text) session_persistence = wtypes.wsattr(SessionPersistenceResponse) project_id = wtypes.wsattr(wtypes.StringType()) loadbalancers = wtypes.wsattr([types.IdOnlyType]) listeners = wtypes.wsattr([types.IdOnlyType]) created_at = wtypes.wsattr(wtypes.datetime.datetime) updated_at = wtypes.wsattr(wtypes.datetime.datetime) healthmonitor_id = wtypes.wsattr(wtypes.UuidType()) members = wtypes.wsattr([types.IdOnlyType]) tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) tls_container_ref = wtypes.wsattr(wtypes.StringType()) ca_tls_container_ref = wtypes.wsattr(wtypes.StringType()) crl_container_ref = wtypes.wsattr(wtypes.StringType()) tls_enabled = wtypes.wsattr(bool) tls_ciphers = wtypes.StringType() @classmethod def from_data_model(cls, data_model, children=False): pool = super(PoolResponse, cls).from_data_model( data_model, children=children) if data_model.session_persistence: pool.session_persistence = ( SessionPersistenceResponse.from_data_model( data_model.session_persistence)) if cls._full_response(): del pool.loadbalancers member_model = member.MemberFullResponse if pool.healthmonitor: pool.healthmonitor = ( health_monitor.HealthMonitorFullResponse .from_data_model(data_model.health_monitor)) else: if data_model.load_balancer: pool.loadbalancers = [ types.IdOnlyType.from_data_model(data_model.load_balancer)] else: pool.loadbalancers = [] member_model = types.IdOnlyType if 
data_model.health_monitor: pool.healthmonitor_id = data_model.health_monitor.id pool.listeners = [ types.IdOnlyType.from_data_model(i) for i in data_model.listeners] pool.members = [ member_model.from_data_model(i) for i in data_model.members] return pool class PoolFullResponse(PoolResponse): @classmethod def _full_response(cls): return True members = wtypes.wsattr([member.MemberFullResponse]) healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorFullResponse) class PoolRootResponse(types.BaseType): pool = wtypes.wsattr(PoolResponse) class PoolsRootResponse(types.BaseType): pools = wtypes.wsattr([PoolResponse]) pools_links = wtypes.wsattr([types.PageType]) class PoolPOST(BasePoolType): """Defines mandatory and optional attributes of a POST request.""" name = wtypes.wsattr(wtypes.StringType(max_length=255)) description = wtypes.wsattr(wtypes.StringType(max_length=255)) admin_state_up = wtypes.wsattr(bool, default=True) listener_id = wtypes.wsattr(wtypes.UuidType()) loadbalancer_id = wtypes.wsattr(wtypes.UuidType()) protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS), mandatory=True) lb_algorithm = wtypes.wsattr( wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS), mandatory=True) session_persistence = wtypes.wsattr(SessionPersistencePOST) # TODO(johnsom) Remove after deprecation (R series) project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate) members = wtypes.wsattr([member.MemberSingleCreate]) tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) tls_container_ref = wtypes.wsattr( wtypes.StringType(max_length=255)) ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) tls_enabled = wtypes.wsattr(bool, default=False) tls_ciphers = wtypes.StringType(max_length=2048) class PoolRootPOST(types.BaseType): pool = wtypes.wsattr(PoolPOST) class PoolPUT(BasePoolType): """Defines attributes that are acceptable of a PUT request.""" name = wtypes.wsattr(wtypes.StringType(max_length=255)) description = wtypes.wsattr(wtypes.StringType(max_length=255)) admin_state_up = wtypes.wsattr(bool) lb_algorithm = wtypes.wsattr( wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS)) session_persistence = wtypes.wsattr(SessionPersistencePUT) tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) tls_enabled = wtypes.wsattr(bool) tls_ciphers = wtypes.StringType(max_length=2048) class PoolRootPut(types.BaseType): pool = wtypes.wsattr(PoolPUT) class PoolSingleCreate(BasePoolType): """Defines mandatory and optional attributes of a POST request.""" name = wtypes.wsattr(wtypes.StringType(max_length=255)) description = wtypes.wsattr(wtypes.StringType(max_length=255)) admin_state_up = wtypes.wsattr(bool, default=True) protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS)) lb_algorithm = wtypes.wsattr( wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS)) session_persistence = wtypes.wsattr(SessionPersistencePOST) healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate) members = wtypes.wsattr([member.MemberSingleCreate]) tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) tls_container_ref = 
wtypes.wsattr(wtypes.StringType(max_length=255)) ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) tls_enabled = wtypes.wsattr(bool, default=False) tls_ciphers = wtypes.StringType(max_length=2048) class PoolStatusResponse(BasePoolType): """Defines which attributes are to be shown on status response.""" id = wtypes.wsattr(wtypes.UuidType()) name = wtypes.wsattr(wtypes.StringType()) provisioning_status = wtypes.wsattr(wtypes.StringType()) operating_status = wtypes.wsattr(wtypes.StringType()) health_monitor = wtypes.wsattr( health_monitor.HealthMonitorStatusResponse) members = wtypes.wsattr([member.MemberStatusResponse]) @classmethod def from_data_model(cls, data_model, children=False): pool = super(PoolStatusResponse, cls).from_data_model( data_model, children=children) member_model = member.MemberStatusResponse if data_model.health_monitor: pool.health_monitor = ( health_monitor.HealthMonitorStatusResponse.from_data_model( data_model.health_monitor)) pool.members = [ member_model.from_data_model(i) for i in data_model.members] return pool ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/types/provider.py0000664000175000017500000000220700000000000021271 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from wsme import types as wtypes from octavia.api.common import types class ProviderResponse(types.BaseType): name = wtypes.wsattr(wtypes.StringType()) description = wtypes.wsattr(wtypes.StringType()) class ProvidersRootResponse(types.BaseType): providers = wtypes.wsattr([ProviderResponse]) class FlavorCapabilitiesResponse(types.BaseType): flavor_capabilities = wtypes.wsattr([ProviderResponse]) class AvailabilityZoneCapabilitiesResponse(types.BaseType): availability_zone_capabilities = wtypes.wsattr([ProviderResponse]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/api/v2/types/quotas.py0000664000175000017500000001002400000000000020747 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
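# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original pool.py above: the wsme types
# in octavia/api/v2/types/pool.py map JSON request bodies onto attributes.
# A pool-create payload accepted by PoolRootPOST/PoolPOST would look like the
# dict below; the UUID is a placeholder, while the mandatory "protocol" and
# "lb_algorithm" values must come from constants.SUPPORTED_PROTOCOLS and
# constants.SUPPORTED_LB_ALGORITHMS respectively.
example_pool_create_body = {
    "pool": {
        "name": "example-pool",
        "loadbalancer_id": "00000000-0000-0000-0000-000000000000",
        "protocol": "HTTP",                 # mandatory
        "lb_algorithm": "ROUND_ROBIN",      # mandatory
        "session_persistence": {            # optional, SessionPersistencePOST
            "type": "HTTP_COOKIE",
            "cookie_name": "sticky",
        },
        "admin_state_up": True,             # defaults to True when omitted
        "tls_enabled": False,               # defaults to False when omitted
    }
}
# ---------------------------------------------------------------------------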
from wsme import types as wtypes from octavia.api.common import types as base from octavia.common import constants as consts class QuotaBase(base.BaseType): """Individual quota definitions.""" loadbalancer = wtypes.wsattr(wtypes.IntegerType( minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) # Misspelled version, deprecated in Rocky load_balancer = wtypes.wsattr(wtypes.IntegerType( minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) listener = wtypes.wsattr(wtypes.IntegerType( minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) member = wtypes.wsattr(wtypes.IntegerType( minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) pool = wtypes.wsattr(wtypes.IntegerType( minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) healthmonitor = wtypes.wsattr(wtypes.IntegerType( minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) # Misspelled version, deprecated in Rocky health_monitor = wtypes.wsattr(wtypes.IntegerType( minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) def to_dict(self, render_unsets=False): quota_dict = super(QuotaBase, self).to_dict(render_unsets) if 'loadbalancer' in quota_dict: quota_dict['load_balancer'] = quota_dict.pop('loadbalancer') if 'healthmonitor' in quota_dict: quota_dict['health_monitor'] = quota_dict.pop('healthmonitor') return quota_dict class QuotaResponse(base.BaseType): """Wrapper object for quotas responses.""" quota = wtypes.wsattr(QuotaBase) @classmethod def from_data_model(cls, data_model, children=False): quotas = super(QuotaResponse, cls).from_data_model( data_model, children=children) quotas.quota = QuotaBase.from_data_model(data_model) return quotas class QuotaAllBase(base.BaseType): """Wrapper object for get all quotas responses.""" project_id = wtypes.wsattr(wtypes.StringType()) loadbalancer = wtypes.wsattr(wtypes.IntegerType()) # Misspelled version, deprecated in Rocky, remove in T load_balancer = wtypes.wsattr(wtypes.IntegerType()) listener = wtypes.wsattr(wtypes.IntegerType()) member = wtypes.wsattr(wtypes.IntegerType()) pool = wtypes.wsattr(wtypes.IntegerType()) healthmonitor = wtypes.wsattr(wtypes.IntegerType()) # Misspelled version, deprecated in Rocky, remove in T health_monitor = wtypes.wsattr(wtypes.IntegerType()) _type_to_model_map = {'loadbalancer': 'load_balancer', 'healthmonitor': 'health_monitor'} _child_map = {} @classmethod def from_data_model(cls, data_model, children=False): quotas = super(QuotaAllBase, cls).from_data_model( data_model, children=children) # For backwards compatibility, remove in T quotas.load_balancer = quotas.loadbalancer # For backwards compatibility, remove in T quotas.health_monitor = quotas.healthmonitor return quotas class QuotaAllResponse(base.BaseType): quotas = wtypes.wsattr([QuotaAllBase]) quotas_links = wtypes.wsattr([base.PageType]) @classmethod def from_data_model(cls, data_model, children=False): quotalist = QuotaAllResponse() quotalist.quotas = [ QuotaAllBase.from_data_model(obj) for obj in data_model] return quotalist class QuotaPUT(base.BaseType): """Overall object for quota PUT request.""" quota = wtypes.wsattr(QuotaBase) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3822167 octavia-6.2.2/octavia/certificates/0000775000175000017500000000000000000000000017265 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/__init__.py0000664000175000017500000000107400000000000021400 0ustar00zuulzuul00000000000000# Licensed under the 
Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3822167 octavia-6.2.2/octavia/certificates/common/0000775000175000017500000000000000000000000020555 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/common/__init__.py0000664000175000017500000000107400000000000022670 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3822167 octavia-6.2.2/octavia/certificates/common/auth/0000775000175000017500000000000000000000000021516 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/common/auth/__init__.py0000664000175000017500000000107400000000000023631 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/common/auth/barbican_acl.py0000664000175000017500000000673400000000000024462 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
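# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original source: QuotaBase.to_dict()
# in octavia/api/v2/types/quotas.py above re-keys the API spellings to the
# data-model spellings before persistence. Assuming octavia is importable:
from octavia.api.v2.types import quotas as _quotas

_q = _quotas.QuotaBase(loadbalancer=10, healthmonitor=5)
print(_q.to_dict())
# Expected (unset attributes are omitted by default):
#   {'load_balancer': 10, 'health_monitor': 5}
# The misspelled attributes remain accepted on input for backwards
# compatibility (deprecated in Rocky, per the comments above).
# ---------------------------------------------------------------------------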
""" Barbican ACL auth class for Barbican certificate handling """ from barbicanclient import client as barbican_client from keystoneauth1.identity.generic import token from keystoneauth1 import session from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from octavia.certificates.common import barbican as barbican_common from octavia.common import keystone LOG = logging.getLogger(__name__) CONF = cfg.CONF class BarbicanACLAuth(barbican_common.BarbicanAuth): _barbican_client = None @classmethod def get_barbican_client(cls, project_id=None): if not cls._barbican_client: try: ksession = keystone.KeystoneSession() cls._barbican_client = barbican_client.Client( session=ksession.get_session(), region_name=CONF.certificates.region_name, interface=CONF.certificates.endpoint_type ) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error creating Barbican client") return cls._barbican_client @classmethod def ensure_secret_access(cls, context, ref): # get a normal session ksession = keystone.KeystoneSession() user_id = ksession.get_service_user_id() # use barbican client to set the ACLs bc = cls.get_barbican_client_user_auth(context) acl = bc.acls.get(ref) read_oper = acl.get('read') if user_id not in read_oper.users: read_oper.users.append(user_id) acl.submit() @classmethod def revoke_secret_access(cls, context, ref): # get a normal session ksession = keystone.KeystoneSession() user_id = ksession.get_service_user_id() # use barbican client to set the ACLs bc = cls.get_barbican_client_user_auth(context) acl = bc.acls.get(ref) read_oper = acl.get('read') if user_id in read_oper.users: read_oper.users.remove(user_id) acl.submit() @classmethod def get_barbican_client_user_auth(cls, context): # get a normal session ksession = keystone.KeystoneSession() service_auth = ksession.get_auth() # make our own auth and swap it in user_auth = token.Token(auth_url=service_auth.auth_url, token=context.auth_token, project_id=context.project_id) user_session = session.Session( auth=user_auth, verify=CONF.certificates.ca_certificates_file) # create a special barbican client with our user's session return barbican_client.Client( session=user_session, region_name=CONF.certificates.region_name, interface=CONF.certificates.endpoint_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/common/barbican.py0000664000175000017500000000601600000000000022673 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Common classes for Barbican certificate handling """ import abc from barbicanclient.v1 import containers from oslo_utils import encodeutils from octavia.certificates.common import cert from octavia.common.tls_utils import cert_parser from octavia.i18n import _ class BarbicanCert(cert.Cert): """Representation of a Cert based on the Barbican CertificateContainer.""" def __init__(self, cert_container): if not isinstance(cert_container, containers.CertificateContainer): raise TypeError(_("Retrieved Barbican Container is not of the " "correct type (certificate).")) self._cert_container = cert_container def get_certificate(self): if self._cert_container.certificate: return encodeutils.to_utf8( self._cert_container.certificate.payload) return None def get_intermediates(self): if self._cert_container.intermediates: intermediates = encodeutils.to_utf8( self._cert_container.intermediates.payload) return list(cert_parser.get_intermediates_pems(intermediates)) return None def get_private_key(self): if self._cert_container.private_key: return encodeutils.to_utf8( self._cert_container.private_key.payload) return None def get_private_key_passphrase(self): if self._cert_container.private_key_passphrase: return encodeutils.to_utf8( self._cert_container.private_key_passphrase.payload) return None class BarbicanAuth(object, metaclass=abc.ABCMeta): @abc.abstractmethod def get_barbican_client(self, project_id): """Creates a Barbican client object. :param project_id: Project ID that the request will be used for :return: a Barbican Client object :raises Exception: if the client cannot be created """ @abc.abstractmethod def ensure_secret_access(self, context, ref): """Do whatever steps are necessary to ensure future access to a secret. :param context: pecan context object :param ref: Reference to a Barbican object """ @abc.abstractmethod def revoke_secret_access(self, context, ref): """Revoke access of Octavia keystone user to a secret. :param context: pecan context object :param ref: Reference to a Barbican object """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/common/cert.py0000664000175000017500000000232700000000000022070 0ustar00zuulzuul00000000000000# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc class Cert(object, metaclass=abc.ABCMeta): """Base class to represent all certificates.""" @abc.abstractmethod def get_certificate(self): """Returns the certificate.""" @abc.abstractmethod def get_intermediates(self): """Returns the intermediate certificates as a list.""" @abc.abstractmethod def get_private_key(self): """Returns the private key for the certificate.""" @abc.abstractmethod def get_private_key_passphrase(self): """Returns the passphrase for the private key.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/common/local.py0000664000175000017500000000730300000000000022224 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common classes for local filesystem certificate handling """ import os from oslo_config import cfg from octavia.certificates.common import cert TLS_CERT_DEFAULT = os.environ.get( 'OS_OCTAVIA_TLS_CA_CERT', '/etc/ssl/certs/ssl-cert-snakeoil.pem' ) TLS_KEY_DEFAULT = os.environ.get( 'OS_OCTAVIA_TLS_CA_KEY', '/etc/ssl/private/ssl-cert-snakeoil.key' ) TLS_PKP_DEFAULT = os.environ.get('OS_OCTAVIA_CA_KEY_PASS') TLS_PASS_AMPS_DEFAULT = os.environ.get('TLS_PASS_AMPS_DEFAULT', 'insecure-key-do-not-use-this-key') TLS_DIGEST_DEFAULT = os.environ.get('OS_OCTAVIA_CA_SIGNING_DIGEST', 'sha256') TLS_STORAGE_DEFAULT = os.environ.get( 'OS_OCTAVIA_TLS_STORAGE', '/var/lib/octavia/certificates/' ) certgen_opts = [ cfg.StrOpt('ca_certificate', default=TLS_CERT_DEFAULT, help='Absolute path to the CA Certificate for signing. Defaults' ' to env[OS_OCTAVIA_TLS_CA_CERT].'), cfg.StrOpt('ca_private_key', default=TLS_KEY_DEFAULT, help='Absolute path to the Private Key for signing. Defaults' ' to env[OS_OCTAVIA_TLS_CA_KEY].'), cfg.StrOpt('ca_private_key_passphrase', default=TLS_PKP_DEFAULT, help='Passphrase for the Private Key. Defaults' ' to env[OS_OCTAVIA_CA_KEY_PASS] or None.'), cfg.StrOpt('server_certs_key_passphrase', default=TLS_PASS_AMPS_DEFAULT, help='Passphrase for encrypting Amphora Certificates and ' 'Private Keys. Must be 32, base64(url) compatible, ' 'characters long. Defaults to env[TLS_PASS_AMPS_DEFAULT] ' 'or insecure-key-do-not-use-this-key', regex=r'^[A-Za-z0-9\-_=]{32}$', required=True), cfg.StrOpt('signing_digest', default=TLS_DIGEST_DEFAULT, help='Certificate signing digest. Defaults' ' to env[OS_OCTAVIA_CA_SIGNING_DIGEST] or "sha256".'), cfg.IntOpt('cert_validity_time', default=30 * 24 * 60 * 60, help="The validity time for the Amphora Certificates " "(in seconds)."), ] certmgr_opts = [ cfg.StrOpt('storage_path', default=TLS_STORAGE_DEFAULT, help='Absolute path to the certificate storage directory. 
' 'Defaults to env[OS_OCTAVIA_TLS_STORAGE].') ] class LocalCert(cert.Cert): """Representation of a Cert for local storage.""" def __init__(self, certificate, private_key, intermediates=None, private_key_passphrase=None): self.certificate = certificate self.intermediates = intermediates self.private_key = private_key self.private_key_passphrase = private_key_passphrase def get_certificate(self): return self.certificate def get_intermediates(self): return self.intermediates def get_private_key(self): return self.private_key def get_private_key_passphrase(self): return self.private_key_passphrase ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/common/pkcs12.py0000664000175000017500000000406100000000000022233 0ustar00zuulzuul00000000000000# Copyright (c) 2017 GoDaddy # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common classes for pkcs12 based certificate handling """ from cryptography.hazmat.primitives import serialization from OpenSSL import crypto from octavia.certificates.common import cert from octavia.common import exceptions class PKCS12Cert(cert.Cert): """Representation of a Cert for local storage.""" def __init__(self, certbag): try: p12 = crypto.load_pkcs12(certbag) except crypto.Error as e: raise exceptions.UnreadablePKCS12(error=str(e)) self.certificate = p12.get_certificate() self.intermediates = p12.get_ca_certificates() self.private_key = p12.get_privatekey() def get_certificate(self): return self.certificate.to_cryptography().public_bytes( encoding=serialization.Encoding.PEM).strip() def get_intermediates(self): if self.intermediates: int_data = [ ic.to_cryptography().public_bytes( encoding=serialization.Encoding.PEM).strip() for ic in self.intermediates ] return int_data return None def get_private_key(self): return self.private_key.to_cryptography_key().private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption()).strip() def get_private_key_passphrase(self): return None ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3822167 octavia-6.2.2/octavia/certificates/generator/0000775000175000017500000000000000000000000021253 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/generator/__init__.py0000664000175000017500000000107400000000000023366 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/generator/cert_gen.py0000664000175000017500000000362300000000000023417 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Certificate Generator API """ import abc class CertGenerator(object, metaclass=abc.ABCMeta): """Base Cert Generator Interface A Certificate Generator is responsible for generating private keys, generating CSRs, and signing TLS certificates. """ @abc.abstractmethod def sign_cert(self, csr, validity): """Generates a signed certificate from the provided CSR This call is designed to block until a signed certificate can be returned. :param csr: A Certificate Signing Request :param validity: Valid for seconds from the current time :return: PEM Encoded Signed certificate :raises Exception: If certificate signing fails """ @abc.abstractmethod def generate_cert_key_pair(self, cn, validity, bit_length, passphrase): """Generates a private key and certificate pair :param cn: Common name to use for the Certificate :param validity: Validity period for the Certificate :param bit_length: Private key bit length :param passphrase: Passphrase to use for encrypting the private key :return: octavia.certificates.common.Cert representation of the certificate data :raises Exception: If generation fails """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/generator/local.py0000664000175000017500000002264000000000000022723 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
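# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original source: a concrete generator
# must implement both abstract methods of CertGenerator above. A do-nothing
# stub, purely to show the required interface (not a real signing backend):
from octavia.certificates.generator import cert_gen as _cert_gen


class _StubCertGenerator(_cert_gen.CertGenerator):
    """Hypothetical stub; illustration of the abstract surface only."""

    def sign_cert(self, csr, validity):
        raise NotImplementedError("no signing backend configured")

    def generate_cert_key_pair(self, cn, validity, bit_length, passphrase):
        raise NotImplementedError("no signing backend configured")
# ---------------------------------------------------------------------------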
import datetime import uuid from cryptography import exceptions as crypto_exceptions from cryptography.hazmat import backends from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import serialization from cryptography import x509 from oslo_config import cfg from oslo_log import log as logging from octavia.certificates.common import local as local_common from octavia.certificates.generator import cert_gen from octavia.common import exceptions LOG = logging.getLogger(__name__) CONF = cfg.CONF class LocalCertGenerator(cert_gen.CertGenerator): """Cert Generator Interface that signs certs locally.""" @classmethod def _new_serial(cls): return int(uuid.uuid4()) @classmethod def _validate_cert(cls, ca_cert, ca_key, ca_key_pass): if not ca_cert: LOG.info("Using CA Certificate from config.") try: ca_cert = open(CONF.certificates.ca_certificate, 'rb').read() except IOError: raise exceptions.CertificateGenerationException( msg="Failed to load CA Certificate {0}." .format(CONF.certificates.ca_certificate) ) if not ca_key: LOG.info("Using CA Private Key from config.") try: ca_key = open(CONF.certificates.ca_private_key, 'rb').read() except IOError: raise exceptions.CertificateGenerationException( msg="Failed to load CA Private Key {0}." .format(CONF.certificates.ca_private_key) ) if not ca_key_pass: ca_key_pass = CONF.certificates.ca_private_key_passphrase if ca_key_pass: LOG.info("Using CA Private Key Passphrase from config.") else: LOG.info("No Passphrase found for CA Private Key, not using " "one.") @classmethod def sign_cert(cls, csr, validity, ca_cert=None, ca_key=None, ca_key_pass=None, ca_digest=None): """Signs a certificate using our private CA based on the specified CSR The signed certificate will be valid from now until seconds from now. 
:param csr: A Certificate Signing Request :param validity: Valid for seconds from the current time :param ca_cert: Signing Certificate (default: config) :param ca_key: Signing Certificate Key (default: config) :param ca_key_pass: Signing Certificate Key Pass (default: config) :param ca_digest: Digest method to use for signing (default: config) :return: Signed certificate :raises Exception: if certificate signing fails """ LOG.info("Signing a certificate request using OpenSSL locally.") cls._validate_cert(ca_cert, ca_key, ca_key_pass) if not ca_digest: ca_digest = CONF.certificates.signing_digest try: algorithm = getattr(hashes, ca_digest.upper())() except AttributeError: raise crypto_exceptions.UnsupportedAlgorithm( "Supplied digest method not found: %s" % ca_digest ) if not ca_cert: with open(CONF.certificates.ca_certificate, 'rb') as f: ca_cert = f.read() if not ca_key: with open(CONF.certificates.ca_private_key, 'rb') as f: ca_key = f.read() if not ca_key_pass: ca_key_pass = CONF.certificates.ca_private_key_passphrase if ca_key_pass is not None: ca_key_pass = ca_key_pass.encode('utf-8') try: lo_cert = x509.load_pem_x509_certificate( data=ca_cert, backend=backends.default_backend()) lo_key = serialization.load_pem_private_key( data=ca_key, password=ca_key_pass, backend=backends.default_backend()) lo_req = x509.load_pem_x509_csr(data=csr, backend=backends.default_backend()) new_cert = x509.CertificateBuilder() new_cert = new_cert.serial_number(cls._new_serial()) valid_from_datetime = datetime.datetime.utcnow() valid_to_datetime = (datetime.datetime.utcnow() + datetime.timedelta(seconds=validity)) new_cert = new_cert.not_valid_before(valid_from_datetime) new_cert = new_cert.not_valid_after(valid_to_datetime) new_cert = new_cert.issuer_name(lo_cert.subject) new_cert = new_cert.subject_name(lo_req.subject) new_cert = new_cert.public_key(lo_req.public_key()) new_cert = new_cert.add_extension( x509.BasicConstraints(ca=False, path_length=None), critical=True ) cn_str = lo_req.subject.get_attributes_for_oid( x509.oid.NameOID.COMMON_NAME)[0].value new_cert = new_cert.add_extension( x509.SubjectAlternativeName([x509.DNSName(cn_str)]), critical=False ) new_cert = new_cert.add_extension( x509.KeyUsage( digital_signature=True, key_encipherment=True, data_encipherment=True, key_agreement=True, content_commitment=False, key_cert_sign=False, crl_sign=False, encipher_only=False, decipher_only=False ), critical=True ) new_cert = new_cert.add_extension( x509.ExtendedKeyUsage([ x509.oid.ExtendedKeyUsageOID.SERVER_AUTH, x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH ]), critical=True ) signed_cert = new_cert.sign(private_key=lo_key, algorithm=algorithm, backend=backends.default_backend()) return signed_cert.public_bytes( encoding=serialization.Encoding.PEM) except Exception as e: LOG.error("Unable to sign certificate.") raise exceptions.CertificateGenerationException(msg=e) @classmethod def _generate_private_key(cls, bit_length=2048, passphrase=None): pk = rsa.generate_private_key( public_exponent=65537, key_size=bit_length, backend=backends.default_backend() ) if passphrase: encryption = serialization.BestAvailableEncryption(passphrase) else: encryption = serialization.NoEncryption() return pk.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=encryption, ) @classmethod def _generate_csr(cls, cn, private_key, passphrase=None): pk = serialization.load_pem_private_key( data=private_key, password=passphrase, 
backend=backends.default_backend()) csr = x509.CertificateSigningRequestBuilder().subject_name( x509.Name([ x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, cn), ]) ) csr = csr.add_extension( x509.BasicConstraints( ca=False, path_length=None ), critical=True ) csr = csr.add_extension( x509.KeyUsage( digital_signature=True, key_encipherment=True, data_encipherment=True, key_agreement=True, content_commitment=False, key_cert_sign=False, crl_sign=False, encipher_only=False, decipher_only=False ), critical=True ) csr = csr.add_extension( x509.SubjectAlternativeName([x509.DNSName(cn)]), critical=False ) signed_csr = csr.sign( pk, getattr(hashes, CONF.certificates.signing_digest.upper())(), backends.default_backend()) return signed_csr.public_bytes(serialization.Encoding.PEM) @classmethod def generate_cert_key_pair(cls, cn, validity, bit_length=2048, passphrase=None, **kwargs): pk = cls._generate_private_key(bit_length, passphrase) csr = cls._generate_csr(cn, pk, passphrase) cert = cls.sign_cert(csr, validity, **kwargs) cert_object = local_common.LocalCert( certificate=cert, private_key=pk, private_key_passphrase=passphrase ) return cert_object ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3822167 octavia-6.2.2/octavia/certificates/manager/0000775000175000017500000000000000000000000020677 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/manager/__init__.py0000664000175000017500000000107400000000000023012 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/manager/barbican.py0000664000175000017500000001742400000000000023022 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace US, Inc # Copyright (c) 2017 GoDaddy # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
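# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original source: LocalCertGenerator
# above chains key generation -> CSR -> local signing in one call. In a
# deployment where the [certificates] options are registered and point at a
# CA certificate/key on disk (all assumptions here):
#
#     from octavia.certificates.generator import local as local_gen
#
#     amp_cert = local_gen.LocalCertGenerator.generate_cert_key_pair(
#         cn='amphora-00000000-0000-0000-0000-000000000000',  # placeholder
#         validity=30 * 24 * 60 * 60,   # the cert_validity_time default
#         bit_length=2048,
#         passphrase=None)
#     # amp_cert is a common/local.py LocalCert holding the PEM certificate
#     # and the (unencrypted, since passphrase=None) PEM private key.
# ---------------------------------------------------------------------------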
""" Cert manager implementation for Barbican using a single PKCS12 secret """ from OpenSSL import crypto from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from stevedore import driver as stevedore_driver from octavia.certificates.common import pkcs12 from octavia.certificates.manager import barbican_legacy from octavia.certificates.manager import cert_mgr from octavia.common import exceptions from octavia.common.tls_utils import cert_parser LOG = logging.getLogger(__name__) class BarbicanCertManager(cert_mgr.CertManager): """Certificate Manager that wraps the Barbican client API.""" def __init__(self): super(BarbicanCertManager, self).__init__() self.auth = stevedore_driver.DriverManager( namespace='octavia.barbican_auth', name=cfg.CONF.certificates.barbican_auth, invoke_on_load=True, ).driver def store_cert(self, context, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name="PKCS12 Certificate Bundle"): """Stores a certificate in the certificate manager. :param context: Oslo context of the request :param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :param expiration: the expiration time of the cert in ISO 8601 format :param name: a friendly name for the cert :returns: the container_ref of the stored cert :raises Exception: if certificate storage fails """ connection = self.auth.get_barbican_client(context.project_id) LOG.info("Storing certificate secret '%s' in Barbican.", name) p12 = crypto.PKCS12() p12.set_friendlyname(encodeutils.to_utf8(name)) x509_cert = crypto.load_certificate(crypto.FILETYPE_PEM, certificate) p12.set_certificate(x509_cert) x509_pk = crypto.load_privatekey(crypto.FILETYPE_PEM, private_key) p12.set_privatekey(x509_pk) if intermediates: cert_ints = list(cert_parser.get_intermediates_pems(intermediates)) x509_ints = [ crypto.load_certificate(crypto.FILETYPE_PEM, ci) for ci in cert_ints] p12.set_ca_certificates(x509_ints) if private_key_passphrase: raise exceptions.CertificateStorageException( "Passphrase protected PKCS12 certificates are not supported.") try: certificate_secret = connection.secrets.create( payload=p12.export(), expiration=expiration, name=name ) certificate_secret.store() return certificate_secret.secret_ref except Exception as e: with excutils.save_and_reraise_exception(): LOG.error('Error storing certificate data: %s', str(e)) def get_cert(self, context, cert_ref, resource_ref=None, check_only=False, service_name=None): """Retrieves the specified cert and registers as a consumer. :param context: Oslo context of the request :param cert_ref: the UUID of the cert to retrieve :param resource_ref: Full HATEOAS reference to the consuming resource :param check_only: Read Certificate data without registering :param service_name: Friendly name for the consuming service :return: octavia.certificates.common.Cert representation of the certificate data :raises Exception: if certificate retrieval fails """ connection = self.auth.get_barbican_client(context.project_id) LOG.info('Loading certificate secret %s from Barbican.', cert_ref) try: cert_secret = connection.secrets.get(secret_ref=cert_ref) return pkcs12.PKCS12Cert(cert_secret.payload) except exceptions.UnreadablePKCS12: raise except Exception: # If our get fails, try with the legacy driver. 
# TODO(rm_work): Remove this code when the deprecation cycle for # the legacy driver is complete. legacy_mgr = barbican_legacy.BarbicanCertManager() legacy_cert = legacy_mgr.get_cert( context, cert_ref, resource_ref=resource_ref, check_only=check_only, service_name=service_name ) return legacy_cert def delete_cert(self, context, cert_ref, resource_ref, service_name=None): """Deregister as a consumer for the specified cert. :param context: Oslo context of the request :param cert_ref: the UUID of the cert to retrieve :param resource_ref: Full HATEOAS reference to the consuming resource :param service_name: Friendly name for the consuming service :raises Exception: if deregistration fails """ # TODO(rm_work): We won't take any action on a delete in this driver, # but for now try the legacy driver's delete and ignore failure. try: legacy_mgr = barbican_legacy.BarbicanCertManager(auth=self.auth) legacy_mgr.delete_cert( context, cert_ref, resource_ref, service_name=service_name) except Exception: # If the delete failed, it was probably because it isn't legacy # (this will be fixed once Secrets have Consumer registration). pass def set_acls(self, context, cert_ref): LOG.debug('Setting project ACL for certificate secret...') self.auth.ensure_secret_access(context, cert_ref) # TODO(velizarx): Remove this code when the deprecation cycle for # the legacy driver is complete. legacy_mgr = barbican_legacy.BarbicanCertManager(auth=self.auth) legacy_mgr.set_acls(context, cert_ref) def unset_acls(self, context, cert_ref): LOG.debug('Unsetting project ACL for certificate secret...') self.auth.revoke_secret_access(context, cert_ref) # TODO(velizarx): Remove this code when the deprecation cycle for # the legacy driver is complete. legacy_mgr = barbican_legacy.BarbicanCertManager(auth=self.auth) legacy_mgr.unset_acls(context, cert_ref) def get_secret(self, context, secret_ref): """Retrieves a secret payload by reference. :param context: Oslo context of the request :param secret_ref: The secret reference ID :return: The secret payload :raises CertificateStorageException: if retrieval fails """ connection = self.auth.get_barbican_client(context.project_id) LOG.info('Loading secret %s from Barbican.', secret_ref) try: secret = connection.secrets.get(secret_ref=secret_ref) return secret.payload except Exception as e: LOG.error("Failed to access secret for %s due to: %s.", secret_ref, str(e)) raise exceptions.CertificateRetrievalException(ref=secret_ref) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/manager/barbican_legacy.py0000664000175000017500000002277500000000000024353 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
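# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original source: the PKCS12-based
# BarbicanCertManager above stores one Barbican secret per certificate
# bundle and falls back to the legacy container driver on read. With a
# reachable Barbican and an oslo request context at hand (the names below
# are placeholders, not real values):
#
#     from octavia.certificates.manager import barbican as barbican_mgr
#
#     mgr = barbican_mgr.BarbicanCertManager()
#     ref = mgr.store_cert(context, cert_pem, key_pem,
#                          intermediates=chain_pem,
#                          name='www.example.com bundle')
#     cert = mgr.get_cert(context, ref)   # PKCS12Cert, or legacy fallback
# ---------------------------------------------------------------------------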
""" Legacy cert manager implementation for Barbican (container+secrets) """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from stevedore import driver as stevedore_driver from octavia.certificates.common import barbican as barbican_common from octavia.certificates.manager import cert_mgr from octavia.common.tls_utils import cert_parser LOG = logging.getLogger(__name__) class BarbicanCertManager(cert_mgr.CertManager): """Certificate Manager that wraps the Barbican client API.""" def __init__(self, auth=None): super(BarbicanCertManager, self).__init__() if auth: self.auth = auth else: self.auth = stevedore_driver.DriverManager( namespace='octavia.barbican_auth', name=cfg.CONF.certificates.barbican_auth, invoke_on_load=True, ).driver def store_cert(self, context, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name=None): """Stores a certificate in the certificate manager. :param context: Oslo context of the request :param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :param expiration: the expiration time of the cert in ISO 8601 format :param name: a friendly name for the cert :returns: the container_ref of the stored cert :raises Exception: if certificate storage fails """ connection = self.auth.get_barbican_client(context.project_id) LOG.info("Storing certificate container '%s' in Barbican.", name) certificate_secret = None private_key_secret = None intermediates_secret = None pkp_secret = None try: certificate_secret = connection.secrets.create( payload=certificate, expiration=expiration, name="Certificate" ) private_key_secret = connection.secrets.create( payload=private_key, expiration=expiration, name="Private Key" ) certificate_container = connection.containers.create_certificate( name=name, certificate=certificate_secret, private_key=private_key_secret ) if intermediates: intermediates_secret = connection.secrets.create( payload=intermediates, expiration=expiration, name="Intermediates" ) certificate_container.intermediates = intermediates_secret if private_key_passphrase: pkp_secret = connection.secrets.create( payload=private_key_passphrase, expiration=expiration, name="Private Key Passphrase" ) certificate_container.private_key_passphrase = pkp_secret certificate_container.store() return certificate_container.container_ref except Exception as e: for i in [certificate_secret, private_key_secret, intermediates_secret, pkp_secret]: if i and i.secret_ref: old_ref = i.secret_ref try: i.delete() LOG.info('Deleted secret %s (%s) during rollback.', i.name, old_ref) except Exception: LOG.warning('Failed to delete %s (%s) during ' 'rollback. This might not be a problem.', i.name, old_ref) with excutils.save_and_reraise_exception(): LOG.error('Error storing certificate data: %s', str(e)) def get_cert(self, context, cert_ref, resource_ref=None, check_only=False, service_name=None): """Retrieves the specified cert and registers as a consumer. 
:param context: Oslo context of the request :param cert_ref: the UUID of the cert to retrieve :param resource_ref: Full HATEOAS reference to the consuming resource :param check_only: Read Certificate data without registering :param service_name: Friendly name for the consuming service :return: octavia.certificates.common.Cert representation of the certificate data :raises Exception: if certificate retrieval fails """ connection = self.auth.get_barbican_client(context.project_id) LOG.info('Loading certificate container %s from Barbican.', cert_ref) try: if check_only: cert_container = connection.containers.get( container_ref=cert_ref ) else: cert_container = connection.containers.register_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) barbican_cert = barbican_common.BarbicanCert(cert_container) LOG.debug('Validating certificate data for %s.', cert_ref) cert_parser.validate_cert( barbican_cert.get_certificate(), private_key=barbican_cert.get_private_key(), private_key_passphrase=( barbican_cert.get_private_key_passphrase()), intermediates=barbican_cert.get_intermediates()) LOG.debug('Certificate data validated for %s.', cert_ref) return barbican_cert except Exception as e: with excutils.save_and_reraise_exception(): LOG.error('Error getting cert %s: %s', cert_ref, str(e)) def delete_cert(self, context, cert_ref, resource_ref, service_name=None): """Deregister as a consumer for the specified cert. :param context: Oslo context of the request :param cert_ref: the UUID of the cert to retrieve :param resource_ref: Full HATEOAS reference to the consuming resource :param service_name: Friendly name for the consuming service :raises Exception: if deregistration fails """ connection = self.auth.get_barbican_client(context.project_id) LOG.info('Deregistering as a consumer of %s in Barbican.', cert_ref) try: connection.containers.remove_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error('Error deregistering as a consumer of %s: %s', cert_ref, str(e)) def set_acls(self, context, cert_ref): connection = self.auth.get_barbican_client(context.project_id) try: cert_container = connection.containers.get( container_ref=cert_ref ) except Exception: # If the containers.get failed, it was probably because it isn't # legacy so we will skip this step return self.auth.ensure_secret_access( context, cert_container.certificate.secret_ref) self.auth.ensure_secret_access( context, cert_container.private_key.secret_ref) if cert_container.private_key_passphrase: self.auth.ensure_secret_access( context, cert_container.private_key_passphrase.secret_ref) if cert_container.intermediates: self.auth.ensure_secret_access( context, cert_container.intermediates.secret_ref) def unset_acls(self, context, cert_ref): connection = self.auth.get_barbican_client(context.project_id) try: cert_container = connection.containers.get( container_ref=cert_ref ) except Exception: # If the containers.get failed, it was probably because it isn't # legacy so we will skip this step return self.auth.revoke_secret_access( context, cert_container.certificate.secret_ref) self.auth.revoke_secret_access( context, cert_container.private_key.secret_ref) if cert_container.private_key_passphrase: self.auth.revoke_secret_access( context, cert_container.private_key_passphrase.secret_ref) if cert_container.intermediates: self.auth.revoke_secret_access( context, cert_container.intermediates.secret_ref) def get_secret(self, context, 
secret_ref): # The legacy driver doesn't need get_secret return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/manager/castellan_mgr.py0000664000175000017500000000627200000000000024073 0ustar00zuulzuul00000000000000# Copyright (c) 2017 GoDaddy # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cert manager implementation for Castellan """ from castellan.common.objects import opaque_data from castellan import key_manager from OpenSSL import crypto from oslo_log import log as logging from octavia.certificates.common import pkcs12 from octavia.certificates.manager import cert_mgr from octavia.common import exceptions LOG = logging.getLogger(__name__) class CastellanCertManager(cert_mgr.CertManager): """Certificate Manager for the Castellan library.""" def __init__(self): super(CastellanCertManager, self).__init__() self.manager = key_manager.API() def store_cert(self, context, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name="PKCS12 Certificate Bundle"): p12 = crypto.PKCS12() p12.set_certificate(certificate) p12.set_privatekey(private_key) if intermediates: p12.set_ca_certificates(intermediates) if private_key_passphrase: raise exceptions.CertificateStorageException( "Passphrases protected PKCS12 certificates are not supported.") p12_data = opaque_data.OpaqueData(p12.export(), name=name) self.manager.store(context, p12_data) def get_cert(self, context, cert_ref, resource_ref=None, check_only=False, service_name=None): certbag = self.manager.get(context, cert_ref) certbag_data = certbag.get_encoded() cert = pkcs12.PKCS12Cert(certbag_data) return cert def delete_cert(self, context, cert_ref, resource_ref, service_name=None): # Delete is not a great name for this -- we don't delete anything # in reality, we just do cleanup here. For castellan, none is required pass def set_acls(self, context, cert_ref): # We don't manage ACL based access for things retrieved via Castellan # because we assume we have elevated access to the secret store. pass def unset_acls(self, context, cert_ref): # We don't manage ACL based access for things retrieved via Castellan # because we assume we have elevated access to the secret store. pass def get_secret(self, context, secret_ref): try: certbag = self.manager.get(context, secret_ref) certbag_data = certbag.get_encoded() except Exception as e: LOG.error("Failed to access secret for %s due to: %s.", secret_ref, str(e)) raise exceptions.CertificateRetrievalException(ref=secret_ref) return certbag_data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/manager/cert_mgr.py0000664000175000017500000000540100000000000023053 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Certificate manager API """ import abc class CertManager(object, metaclass=abc.ABCMeta): """Base Cert Manager Interface A Cert Manager is responsible for managing certificates for TLS. """ @abc.abstractmethod def store_cert(self, context, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name=None): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert and returns its UUID that identifies it within the cert manager. If storage of the certificate data fails, a CertificateStorageException should be raised. """ @abc.abstractmethod def get_cert(self, context, cert_ref, resource_ref=None, check_only=False, service_name=None): """Retrieves the specified cert. If check_only is True, don't perform any sort of registration. If the specified cert does not exist, a CertificateStorageException should be raised. """ @abc.abstractmethod def delete_cert(self, context, cert_ref, resource_ref, service_name=None): """Deletes the specified cert. If the specified cert does not exist, a CertificateStorageException should be raised. """ @abc.abstractmethod def set_acls(self, context, cert_ref): """Adds ACLs so Octavia can access the cert objects. If the specified cert does not exist or the addition of ACLs fails for any reason, a CertificateStorageException should be raised. """ @abc.abstractmethod def unset_acls(self, context, cert_ref): """Remove ACLs so Octavia can access the cert objects. If the specified cert does not exist or the removal of ACLs fails for any reason, a CertificateStorageException should be raised. """ @abc.abstractmethod def get_secret(self, context, secret_ref): """Retrieves a secret payload by reference. If the specified secret does not exist, a CertificateStorageException should be raised. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/certificates/manager/local.py0000664000175000017500000002025000000000000022342 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
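# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original source: a concrete manager
# must implement every abstract method of CertManager above. A minimal
# in-memory stub, purely to show the required surface (not a real backend):
import uuid as _uuid

from octavia.certificates.manager import cert_mgr as _cert_mgr


class _InMemoryCertManager(_cert_mgr.CertManager):
    """Hypothetical stub keeping certs in a dict; illustration only."""

    def __init__(self):
        self._store = {}

    def store_cert(self, context, certificate, private_key,
                   intermediates=None, private_key_passphrase=None,
                   expiration=None, name=None):
        ref = str(_uuid.uuid4())
        self._store[ref] = (certificate, private_key)
        return ref

    def get_cert(self, context, cert_ref, resource_ref=None,
                 check_only=False, service_name=None):
        return self._store[cert_ref]

    def delete_cert(self, context, cert_ref, resource_ref,
                    service_name=None):
        self._store.pop(cert_ref, None)

    def set_acls(self, context, cert_ref):
        pass

    def unset_acls(self, context, cert_ref):
        pass

    def get_secret(self, context, secret_ref):
        return self._store.get(secret_ref)
# ---------------------------------------------------------------------------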
import os import stat import uuid from oslo_config import cfg from oslo_log import log as logging from octavia.certificates.common import local as local_common from octavia.certificates.manager import cert_mgr from octavia.common import exceptions from octavia.common.tls_utils import cert_parser CONF = cfg.CONF LOG = logging.getLogger(__name__) class LocalCertManager(cert_mgr.CertManager): """Cert Manager Interface that stores data locally.""" @staticmethod def store_cert(context, certificate, private_key, intermediates=None, private_key_passphrase=None, **kwargs): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert to the filesystem and returns a UUID that can be used to retrieve it. :param context: Ignored in this implementation :param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :returns: the UUID of the stored cert :raises CertificateStorageException: if certificate storage fails """ cert_ref = str(uuid.uuid4()) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) if type(certificate) == bytes: certificate = certificate.decode('utf-8') if type(private_key) == bytes: private_key = private_key.decode('utf-8') LOG.info("Storing certificate data on the local filesystem.") try: filename_certificate = "{0}.crt".format(filename_base) flags = os.O_WRONLY | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR # mode 0600 with os.fdopen(os.open( filename_certificate, flags, mode), 'w') as cert_file: cert_file.write(certificate) filename_private_key = "{0}.key".format(filename_base) with os.fdopen(os.open( filename_private_key, flags, mode), 'w') as key_file: key_file.write(private_key) if intermediates: filename_intermediates = "{0}.int".format(filename_base) if type(intermediates) == bytes: intermediates = intermediates.decode('utf-8') with os.fdopen(os.open( filename_intermediates, flags, mode), 'w') as int_file: int_file.write(intermediates) if private_key_passphrase: filename_pkp = "{0}.pass".format(filename_base) if type(private_key_passphrase) == bytes: private_key_passphrase = private_key_passphrase.decode( 'utf-8') with os.fdopen(os.open( filename_pkp, flags, mode), 'w') as pass_file: pass_file.write(private_key_passphrase) except IOError as ioe: LOG.error("Failed to store certificate.") raise exceptions.CertificateStorageException(message=ioe.message) return cert_ref @staticmethod def get_cert(context, cert_ref, **kwargs): """Retrieves the specified cert. 
:param context: Ignored in this implementation :param cert_ref: the UUID of the cert to retrieve :return: octavia.certificates.common.Cert representation of the certificate data :raises CertificateStorageException: if certificate retrieval fails """ LOG.info("Loading certificate %s from the local filesystem.", cert_ref) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) cert_data = dict() flags = os.O_RDONLY try: with os.fdopen(os.open(filename_certificate, flags)) as cert_file: cert_data['certificate'] = cert_file.read() except IOError: LOG.error("Failed to read certificate for %s.", cert_ref) raise exceptions.CertificateStorageException( msg="Certificate could not be read.") try: with os.fdopen(os.open(filename_private_key, flags)) as key_file: cert_data['private_key'] = key_file.read() except IOError: LOG.error("Failed to read private key for %s", cert_ref) raise exceptions.CertificateStorageException( msg="Private Key could not be read.") try: with os.fdopen(os.open(filename_intermediates, flags)) as int_file: cert_data['intermediates'] = int_file.read() cert_data['intermediates'] = list( cert_parser.get_intermediates_pems(cert_data['intermediates'])) except IOError: pass try: with os.fdopen(os.open(filename_pkp, flags)) as pass_file: cert_data['private_key_passphrase'] = pass_file.read() except IOError: pass return local_common.LocalCert(**cert_data) @staticmethod def delete_cert(context, cert_ref, **kwargs): """Deletes the specified cert. :param context: Ignored in this implementation :param cert_ref: the UUID of the cert to delete :raises CertificateStorageException: if certificate deletion fails """ LOG.info("Deleting certificate %s from the local filesystem.", cert_ref) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) try: os.remove(filename_certificate) os.remove(filename_private_key) os.remove(filename_intermediates) os.remove(filename_pkp) except IOError as ioe: LOG.error("Failed to delete certificate %s", cert_ref) # IOError has no .message attribute on Python 3; stringify instead. raise exceptions.CertificateStorageException(message=str(ioe)) def set_acls(self, context, cert_ref): # There is no security on this store, because it's really dumb pass def unset_acls(self, context, cert_ref): # There is no security on this store, because it's really dumb pass @staticmethod def get_secret(context, secret_ref): """Retrieves a secret payload by reference. 
:param context: Ignored in this implementation :param secret_ref: The secret reference ID :return: The secret payload :raises CertificateStorageException: if secret retrieval fails """ LOG.info("Loading secret %s from the local filesystem.", secret_ref) filename_base = os.path.join(CONF.certificates.storage_path, secret_ref) filename_secret = "{0}.crt".format(filename_base) secret_data = None flags = os.O_RDONLY try: with os.fdopen(os.open(filename_secret, flags)) as secret_file: secret_data = secret_file.read() except IOError: LOG.error("Failed to read secret for %s.", secret_ref) raise exceptions.CertificateRetrievalException(ref=secret_ref) return secret_data ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3822167 octavia-6.2.2/octavia/cmd/0000775000175000017500000000000000000000000015363 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/__init__.py0000664000175000017500000000107400000000000017476 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/agent.py0000664000175000017500000000611000000000000017031 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
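# Illustrative, standalone sketch of the owner-only (mode 0600) file
# creation pattern LocalCertManager.store_cert above relies on: os.open()
# applies the restrictive mode at creation time, avoiding the window that
# open() followed by os.chmod() would leave. The path below is hypothetical.

import os
import stat
import tempfile

demo_path = os.path.join(tempfile.mkdtemp(), 'demo.key')
flags = os.O_WRONLY | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR  # mode 0600
with os.fdopen(os.open(demo_path, flags, mode), 'w') as key_file:
    key_file.write('-----BEGIN PRIVATE KEY-----\n...\n')
print(oct(os.stat(demo_path).st_mode & 0o777))  # prints 0o600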
# make sure PYTHONPATH includes the home directory if you didn't install import multiprocessing as multiproc import ssl import sys import gunicorn.app.base from oslo_config import cfg from oslo_reports import guru_meditation_report as gmr from octavia.amphorae.backends.agent.api_server import server from octavia.amphorae.backends.health_daemon import health_daemon from octavia.common import service from octavia.common import utils from octavia import version CONF = cfg.CONF HM_SENDER_CMD_QUEUE = multiproc.Queue() class AmphoraAgent(gunicorn.app.base.BaseApplication): def __init__(self, app, options=None): self.options = options or {} self.application = app super(AmphoraAgent, self).__init__() def load_config(self): config = {key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None} for key, value in config.items(): self.cfg.set(key.lower(), value) def load(self): return self.application # start api server def main(): # comment out to improve logging service.prepare_service(sys.argv) gmr.TextGuruMeditation.setup_autorun(version) health_sender_proc = multiproc.Process(name='HM_sender', target=health_daemon.run_sender, args=(HM_SENDER_CMD_QUEUE,)) health_sender_proc.daemon = True health_sender_proc.start() # Initiate server class server_instance = server.Server() bind_ip_port = utils.ip_port_str(CONF.haproxy_amphora.bind_host, CONF.haproxy_amphora.bind_port) proto = CONF.amphora_agent.agent_tls_protocol.replace('.', '_') options = { 'bind': bind_ip_port, 'workers': 1, 'timeout': CONF.amphora_agent.agent_request_read_timeout, 'certfile': CONF.amphora_agent.agent_server_cert, 'ca_certs': CONF.amphora_agent.agent_server_ca, 'cert_reqs': ssl.CERT_REQUIRED, 'ssl_version': getattr(ssl, "PROTOCOL_%s" % proto), 'preload_app': True, 'accesslog': '/var/log/amphora-agent.log', 'errorlog': '/var/log/amphora-agent.log', 'loglevel': 'debug', 'syslog': True, 'syslog_facility': 'local{}'.format( CONF.amphora_agent.administrative_log_facility), 'syslog_addr': 'unix://run/rsyslog/octavia/log#dgram', } AmphoraAgent(server_instance.app, options).run() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/api.py0000664000175000017500000000311700000000000016510 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
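# Minimal, self-contained sketch (assuming gunicorn is installed) of the
# custom-application pattern AmphoraAgent above follows: subclass
# gunicorn.app.base.BaseApplication, push recognized options into self.cfg
# in load_config(), and return the WSGI callable from load(). The bind
# address and the trivial WSGI app here are hypothetical.

import gunicorn.app.base


def demo_wsgi_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok\n']


class MiniApp(gunicorn.app.base.BaseApplication):
    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()

    def load_config(self):
        # Only keys gunicorn recognizes may be set on self.cfg.
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)

    def load(self):
        return self.application


if __name__ == '__main__':
    MiniApp(demo_wsgi_app, {'bind': '127.0.0.1:8080', 'workers': 1}).run()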
import sys from wsgiref import simple_server from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from octavia.api import app as api_app from octavia.common import constants from octavia import version LOG = logging.getLogger(__name__) def main(): gmr.TextGuruMeditation.setup_autorun(version) app = api_app.setup_app(argv=sys.argv) host = cfg.CONF.api_settings.bind_host port = cfg.CONF.api_settings.bind_port LOG.info("Starting API server on %(host)s:%(port)s", {"host": host, "port": port}) if cfg.CONF.api_settings.auth_strategy != constants.KEYSTONE: LOG.warning('Octavia configuration [api_settings] auth_strategy is ' 'not set to "keystone". This is not a normal ' 'configuration and you may get "Missing project ID" ' 'errors from API calls.') srv = simple_server.make_server(host, port, app) srv.serve_forever() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/driver_agent.py0000664000175000017500000001374700000000000020412 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from functools import partial import multiprocessing import os import signal import sys import time from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr import setproctitle from stevedore import enabled as stevedore_enabled from octavia.api.drivers.driver_agent import driver_listener from octavia.common import service from octavia import version CONF = cfg.CONF LOG = logging.getLogger(__name__) PROVIDER_AGENT_PROCESSES = [] def _mutate_config(*args, **kwargs): CONF.mutate_config_files() def _handle_mutate_config(status_proc_pid, stats_proc_pid, *args, **kwargs): LOG.info("Driver agent received HUP signal, mutating config.") _mutate_config() os.kill(status_proc_pid, signal.SIGHUP) os.kill(stats_proc_pid, signal.SIGHUP) def _check_if_provider_agent_enabled(extension): if extension.name in CONF.driver_agent.enabled_provider_agents: return True return False def _process_wrapper(exit_event, proc_name, function, agent_name=None): signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGHUP, _mutate_config) if agent_name: process_title = 'octavia-driver-agent - {} -- {}'.format( proc_name, agent_name) else: process_title = 'octavia-driver-agent - {}'.format(proc_name) setproctitle.setproctitle(process_title) while not exit_event.is_set(): try: function(exit_event) except Exception as e: if agent_name: LOG.exception('Provider agent "%s" raised exception: %s. ' 'Restarting the "%s" provider agent.', agent_name, str(e), agent_name) else: LOG.exception('%s raised exception: %s. 
' 'Restarting %s.', proc_name, str(e), proc_name) time.sleep(1) continue break def _start_provider_agents(exit_event): extensions = stevedore_enabled.EnabledExtensionManager( namespace='octavia.driver_agent.provider_agents', check_func=_check_if_provider_agent_enabled) for ext in extensions: ext_process = multiprocessing.Process( name=ext.name, target=_process_wrapper, args=(exit_event, 'provider_agent', ext.plugin), kwargs={'agent_name': ext.name}) PROVIDER_AGENT_PROCESSES.append(ext_process) ext_process.start() LOG.info('Started enabled provider agent: "%s" with PID: %d.', ext.name, ext_process.pid) def main(): service.prepare_service(sys.argv) gmr.TextGuruMeditation.setup_autorun(version) processes = [] exit_event = multiprocessing.Event() status_listener_proc = multiprocessing.Process( name='status_listener', target=_process_wrapper, args=(exit_event, 'status_listener', driver_listener.status_listener)) processes.append(status_listener_proc) LOG.info("Driver agent status listener process starts:") status_listener_proc.start() stats_listener_proc = multiprocessing.Process( name='stats_listener', target=_process_wrapper, args=(exit_event, 'stats_listener', driver_listener.stats_listener)) processes.append(stats_listener_proc) LOG.info("Driver agent statistics listener process starts:") stats_listener_proc.start() get_listener_proc = multiprocessing.Process( name='get_listener', target=_process_wrapper, args=(exit_event, 'get_listener', driver_listener.get_listener)) processes.append(get_listener_proc) LOG.info("Driver agent get listener process starts:") get_listener_proc.start() _start_provider_agents(exit_event) def process_cleanup(*args, **kwargs): LOG.info("Driver agent exiting due to signal.") exit_event.set() status_listener_proc.join() stats_listener_proc.join() get_listener_proc.join() for proc in PROVIDER_AGENT_PROCESSES: LOG.info('Waiting up to %s seconds for provider agent "%s" to ' 'shut down.', CONF.driver_agent.provider_agent_shutdown_timeout, proc.name) try: proc.join(CONF.driver_agent.provider_agent_shutdown_timeout) if proc.exitcode is None: # TODO(johnsom) Change to proc.kill() once # python 3.7 or newer only os.kill(proc.pid, signal.SIGKILL) LOG.warning( 'Forcefully killed "%s" provider agent because it ' 'failed to shut down in %s seconds.', proc.name, CONF.driver_agent.provider_agent_shutdown_timeout) except Exception as e: LOG.warning('Unknown error "%s" while shutting down "%s", ' 'ignoring and continuing shutdown process.', str(e), proc.name) else: LOG.info('Provider agent "%s" has successfully shut down.', proc.name) signal.signal(signal.SIGTERM, process_cleanup) signal.signal(signal.SIGHUP, partial( _handle_mutate_config, status_listener_proc.pid, stats_listener_proc.pid, get_listener_proc.pid)) try: for process in processes: process.join() except KeyboardInterrupt: process_cleanup() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/haproxy_vrrp_check.py0000664000175000017500000000351300000000000021637 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket import sys SOCKET_TIMEOUT = 5 def get_status(sock_address): """Query the haproxy stats socket. VRRP should fail over only if the stats socket is not responding. :param sock_address: unix socket file :return: 0 if haproxy responded """ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.settimeout(SOCKET_TIMEOUT) s.connect(sock_address) s.send(b'show stat -1 -1 -1\n') data = b'' while True: x = s.recv(1024) if not x: break data += x s.close() # an empty response means haproxy did not answer if not data: return 1 return 0 def health_check(sock_addresses): """Invoke queries for all defined listeners :param sock_addresses: list of listener stats socket files :return: 0 if all listeners responded """ status = 0 for address in sock_addresses: status += get_status(address) return status def main(): # usage: python haproxy_vrrp_check.py <stats_socket> [<stats_socket> ...] # Note: for performance, this script loads a minimal number of modules. # Loading octavia modules or any other complex constructs MUST be avoided. listeners_sockets = sys.argv[1:] try: status = health_check(listeners_sockets) except Exception: sys.exit(1) sys.exit(status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/health_manager.py0000664000175000017500000001005500000000000020675 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
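# Self-contained sketch of the stats-socket exchange get_status() above
# performs. A throwaway AF_UNIX server stands in for haproxy here (real
# haproxy answers 'show stat -1 -1 -1' with CSV statistics); the socket
# path and the canned reply are hypothetical.

import os
import socket
import tempfile
import threading

sock_path = os.path.join(tempfile.mkdtemp(), 'stats.sock')
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(sock_path)
server.listen(1)


def fake_haproxy():
    conn, _ = server.accept()
    if conn.recv(1024).startswith(b'show stat'):
        conn.sendall(b'# pxname,svname,status\nlistener1,FRONTEND,OPEN\n')
    conn.close()


threading.Thread(target=fake_haproxy, daemon=True).start()

client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.settimeout(5)
client.connect(sock_path)
client.send(b'show stat -1 -1 -1\n')
data = b''
while True:
    chunk = client.recv(1024)
    if not chunk:
        break
    data += chunk
client.close()
print(0 if data else 1)  # mirrors get_status(): 0 means haproxy responded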
# from functools import partial import multiprocessing import os import signal import sys from futurist import periodics from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from octavia.amphorae.drivers.health import heartbeat_udp from octavia.common import service from octavia.controller.healthmanager import health_manager from octavia import version CONF = cfg.CONF LOG = logging.getLogger(__name__) def _mutate_config(*args, **kwargs): CONF.mutate_config_files() def hm_listener(exit_event): signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGHUP, _mutate_config) udp_getter = heartbeat_udp.UDPStatusGetter() while not exit_event.is_set(): try: udp_getter.check() except Exception as e: LOG.error('Health Manager listener experienced unknown error: %s', str(e)) LOG.info('Waiting for executor to shutdown...') udp_getter.health_executor.shutdown() udp_getter.stats_executor.shutdown() LOG.info('Executor shutdown finished.') def hm_health_check(exit_event): hm = health_manager.HealthManager(exit_event) signal.signal(signal.SIGHUP, _mutate_config) @periodics.periodic(CONF.health_manager.health_check_interval, run_immediately=True) def periodic_health_check(): hm.health_check() health_check = periodics.PeriodicWorker( [(periodic_health_check, None, None)], schedule_strategy='aligned_last_finished') def hm_exit(*args, **kwargs): health_check.stop() hm.executor.shutdown() signal.signal(signal.SIGINT, hm_exit) LOG.debug("Pausing before starting health check") exit_event.wait(CONF.health_manager.heartbeat_timeout) health_check.start() def _handle_mutate_config(listener_proc_pid, check_proc_pid, *args, **kwargs): LOG.info("Health Manager received HUP signal, mutating config.") _mutate_config() os.kill(listener_proc_pid, signal.SIGHUP) os.kill(check_proc_pid, signal.SIGHUP) def main(): service.prepare_service(sys.argv) gmr.TextGuruMeditation.setup_autorun(version) processes = [] exit_event = multiprocessing.Event() hm_listener_proc = multiprocessing.Process(name='HM_listener', target=hm_listener, args=(exit_event,)) processes.append(hm_listener_proc) hm_health_check_proc = multiprocessing.Process(name='HM_health_check', target=hm_health_check, args=(exit_event,)) processes.append(hm_health_check_proc) LOG.info("Health Manager listener process starts:") hm_listener_proc.start() LOG.info("Health manager check process starts:") hm_health_check_proc.start() def process_cleanup(*args, **kwargs): LOG.info("Health Manager exiting due to signal") exit_event.set() os.kill(hm_health_check_proc.pid, signal.SIGINT) hm_health_check_proc.join() hm_listener_proc.join() signal.signal(signal.SIGTERM, process_cleanup) signal.signal(signal.SIGHUP, partial( _handle_mutate_config, hm_listener_proc.pid, hm_health_check_proc.pid)) try: for process in processes: process.join() except KeyboardInterrupt: process_cleanup() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/house_keeping.py0000664000175000017500000001135100000000000020563 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
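# Minimal sketch of the futurist periodics pattern hm_health_check above is
# built on: decorate a callable with its spacing, hand it to a
# PeriodicWorker, and run the worker (assumes the futurist library is
# installed; the 3-second spacing and timer-based stop are illustrative).

import threading

from futurist import periodics


@periodics.periodic(3.0, run_immediately=True)
def tick():
    print('periodic health check tick')


worker = periodics.PeriodicWorker([(tick, None, None)],
                                  schedule_strategy='aligned_last_finished')
threading.Timer(10.0, worker.stop).start()  # start() blocks until stop()
worker.start()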
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import datetime import signal import sys import threading from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from octavia.common import service from octavia.controller.housekeeping import house_keeping from octavia import version LOG = logging.getLogger(__name__) CONF = cfg.CONF spare_amp_thread_event = threading.Event() db_cleanup_thread_event = threading.Event() cert_rotate_thread_event = threading.Event() def spare_amphora_check(): """Initiates spare amp check with respect to configured interval.""" # Read the interval from CONF interval = CONF.house_keeping.spare_check_interval LOG.info("Spare check interval is set to %d sec", interval) spare_amp = house_keeping.SpareAmphora() while not spare_amp_thread_event.is_set(): LOG.debug("Initiating spare amphora check...") try: spare_amp.spare_check() except Exception as e: LOG.debug('spare_amphora caught the following exception and ' 'is restarting: %s', str(e)) spare_amp_thread_event.wait(interval) def db_cleanup(): """Perform db cleanup for old resources.""" # Read the interval from CONF interval = CONF.house_keeping.cleanup_interval LOG.info("DB cleanup interval is set to %d sec", interval) LOG.info('Amphora expiry age is %s seconds', CONF.house_keeping.amphora_expiry_age) LOG.info('Load balancer expiry age is %s seconds', CONF.house_keeping.load_balancer_expiry_age) db_cleanup = house_keeping.DatabaseCleanup() while not db_cleanup_thread_event.is_set(): LOG.debug("Initiating the cleanup of old resources...") try: db_cleanup.delete_old_amphorae() db_cleanup.cleanup_load_balancers() except Exception as e: LOG.debug('db_cleanup caught the following exception and ' 'is restarting: %s', str(e)) db_cleanup_thread_event.wait(interval) def cert_rotation(): """Perform certificate rotation.""" interval = CONF.house_keeping.cert_interval LOG.info( "Expiring certificate check interval is set to %d sec", interval) cert_rotate = house_keeping.CertRotation() while not cert_rotate_thread_event.is_set(): LOG.debug("Initiating certificate rotation ...") try: cert_rotate.rotate() except Exception as e: LOG.debug('cert_rotation caught the following exception and ' 'is restarting: %s', str(e)) cert_rotate_thread_event.wait(interval) def _mutate_config(*args, **kwargs): LOG.info("Housekeeping received HUP signal, mutating config.") CONF.mutate_config_files() def main(): service.prepare_service(sys.argv) gmr.TextGuruMeditation.setup_autorun(version) timestamp = str(datetime.datetime.utcnow()) LOG.info("Starting house keeping at %s", timestamp) threads = [] # Thread to perform spare amphora check spare_amp_thread = threading.Thread(target=spare_amphora_check) spare_amp_thread.daemon = True spare_amp_thread.start() threads.append(spare_amp_thread) # Thread to perform db cleanup db_cleanup_thread = threading.Thread(target=db_cleanup) db_cleanup_thread.daemon = True db_cleanup_thread.start() threads.append(db_cleanup_thread) # Thread to perform certificate rotation cert_rotate_thread = threading.Thread(target=cert_rotation) cert_rotate_thread.daemon = True cert_rotate_thread.start() 
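# Each housekeeping loop above runs in a daemon thread paired with its own
# threading.Event; process_cleanup() below sets those events so the while
# loops fall through, then joins the threads for a graceful SIGTERM
# shutdown.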
threads.append(cert_rotate_thread) def process_cleanup(*args, **kwargs): LOG.info("Attempting to gracefully terminate House-Keeping") spare_amp_thread_event.set() db_cleanup_thread_event.set() cert_rotate_thread_event.set() spare_amp_thread.join() db_cleanup_thread.join() cert_rotate_thread.join() LOG.info("House-Keeping process terminated") signal.signal(signal.SIGTERM, process_cleanup) signal.signal(signal.SIGHUP, _mutate_config) try: for thread in threads: thread.join() except KeyboardInterrupt: process_cleanup() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/octavia_worker.py0000664000175000017500000000265500000000000020764 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import cotyledon from cotyledon import oslo_config_glue from oslo_config import cfg from oslo_reports import guru_meditation_report as gmr from octavia.common import service as octavia_service from octavia.controller.queue.v1 import consumer as consumer_v1 from octavia.controller.queue.v2 import consumer as consumer_v2 from octavia import version CONF = cfg.CONF def main(): octavia_service.prepare_service(sys.argv) gmr.TextGuruMeditation.setup_autorun(version) sm = cotyledon.ServiceManager() sm.add(consumer_v1.ConsumerService, workers=CONF.controller_worker.workers, args=(CONF,)) sm.add(consumer_v2.ConsumerService, workers=CONF.controller_worker.workers, args=(CONF,)) oslo_config_glue.setup(sm, CONF, reload_method="mutate") sm.run() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/cmd/status.py0000664000175000017500000000300000000000000017261 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_upgradecheck import upgradecheck from octavia.i18n import _ CONF = cfg.CONF class Checks(upgradecheck.UpgradeCommands): """Contains upgrade checks Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ def _sample_check(self): """This is a sample check added to test the upgrade check framework It needs to be removed after adding any real upgrade check """ return upgradecheck.Result(upgradecheck.Code.SUCCESS, 'Sample detail') _upgrade_checks = ( # Sample check added for now. 
# In the future, real checks must be added to this tuple. (_('Sample Check'), _sample_check), ) def main(): return upgradecheck.main( CONF, project='octavia', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/common/0000775000175000017500000000000000000000000016110 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/__init__.py0000664000175000017500000000107400000000000020223 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/base_taskflow.py0000664000175000017500000001471200000000000021313 0ustar00zuulzuul00000000000000# Copyright 2014-2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import concurrent.futures import datetime import functools from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils from taskflow.conductors.backends import impl_blocking from taskflow import engines from taskflow import exceptions as taskflow_exc from taskflow.listeners import base from taskflow.listeners import logging from taskflow.persistence import models from taskflow import states from octavia.amphorae.driver_exceptions import exceptions LOG = log.getLogger(__name__) CONF = cfg.CONF # We do not need to log retry exception information. The warning "Could not connect # to instance" will still be logged as usual. 
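# The filter below inspects each log record's exc_info and drops records
# whose exception is AmpConnectionRetry, so the expected connection retries
# while an amphora boots do not flood the controller logs with tracebacks.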
def retryMaskFilter(record): if record.exc_info is not None and isinstance( record.exc_info[1], exceptions.AmpConnectionRetry): return False return True LOG.logger.addFilter(retryMaskFilter) class BaseTaskFlowEngine(object): """This is the task flow engine Use this engine to start/load flows in the code """ def __init__(self): # work around for https://bugs.python.org/issue7980 datetime.datetime.strptime('2014-06-19 22:47:16', '%Y-%m-%d %H:%M:%S') self.executor = concurrent.futures.ThreadPoolExecutor( max_workers=CONF.task_flow.max_workers) def _taskflow_load(self, flow, **kwargs): eng = engines.load( flow, engine=CONF.task_flow.engine, executor=self.executor, never_resolve=CONF.task_flow.disable_revert, **kwargs) eng.compile() eng.prepare() return eng class ExtendExpiryListener(base.Listener): def __init__(self, engine, job): super(ExtendExpiryListener, self).__init__(engine) self.job = job def _task_receiver(self, state, details): self.job.extend_expiry(cfg.CONF.task_flow.jobboard_expiration_time) def _flow_receiver(self, state, details): self.job.extend_expiry(cfg.CONF.task_flow.jobboard_expiration_time) def _retry_receiver(self, state, details): self.job.extend_expiry(cfg.CONF.task_flow.jobboard_expiration_time) class DynamicLoggingConductor(impl_blocking.BlockingConductor): def _listeners_from_job(self, job, engine): listeners = super(DynamicLoggingConductor, self)._listeners_from_job( job, engine) listeners.append(logging.DynamicLoggingListener(engine, log=LOG)) return listeners def _on_job_done(self, job, fut): super(DynamicLoggingConductor, self)._on_job_done(job, fut) # Double check that job is complete. if (not CONF.task_flow.jobboard_save_logbook and job.state == states.COMPLETE): LOG.debug("Job %s is complete. Cleaning up job logbook.", job.name) try: self._persistence.get_connection().destroy_logbook( job.book.uuid) except taskflow_exc.NotFound: LOG.debug("Logbook for job %s has already been cleaned up", job.name) class RedisDynamicLoggingConductor(DynamicLoggingConductor): def _listeners_from_job(self, job, engine): listeners = super(RedisDynamicLoggingConductor, self)._listeners_from_job(job, engine) listeners.append(ExtendExpiryListener(engine, job)) return listeners class TaskFlowServiceController(object): def __init__(self, driver): self.driver = driver def run_poster(self, flow_factory, *args, wait=False, **kwargs): with self.driver.persistence_driver.get_persistence() as persistence: with self.driver.job_board(persistence) as job_board: job_id = uuidutils.generate_uuid() job_name = '-'.join([flow_factory.__name__, job_id]) job_logbook = models.LogBook(job_name) flow_detail = models.FlowDetail( job_name, job_id) job_details = { 'store': kwargs.pop('store') } job_logbook.add(flow_detail) persistence.get_connection().save_logbook(job_logbook) engines.save_factory_details(flow_detail, flow_factory, args, kwargs, backend=persistence) job_board.post(job_name, book=job_logbook, details=job_details) if wait: self._wait_for_job(job_board) return job_id def _wait_for_job(self, job_board): # Wait for the job to reach its complete state for job in job_board.iterjobs(): LOG.debug("Waiting for job %s to finish", job.name) job.wait() def run_conductor(self, name): with self.driver.persistence_driver.get_persistence() as persistence: with self.driver.job_board(persistence) as board: # Redis does not expire jobs by default, so jobs won't be resumed # when the controller restarts. Add an expiry for the board and use # the special listener. 
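# The ExtendExpiryListener defined above complements this: every task, flow
# and retry state transition calls job.extend_expiry(), so a live engine
# keeps renewing its claim while long-running flows execute.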
if (CONF.task_flow.jobboard_backend_driver == 'redis_taskflow_driver'): conductor = RedisDynamicLoggingConductor( name, board, persistence=persistence, engine=CONF.task_flow.engine, engine_options={ 'max_workers': CONF.task_flow.max_workers }) board.claim = functools.partial( board.claim, expiry=CONF.task_flow.jobboard_expiration_time) else: conductor = DynamicLoggingConductor( name, board, persistence=persistence, engine=CONF.task_flow.engine) conductor.run() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/clients.py0000664000175000017500000002036400000000000020130 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinderclient import client as cinder_client from glanceclient import client as glance_client from neutronclient.neutron import client as neutron_client from novaclient import api_versions from novaclient import client as nova_client from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from octavia.common import keystone LOG = logging.getLogger(__name__) CONF = cfg.CONF GLANCE_VERSION = '2' NEUTRON_VERSION = '2.0' NOVA_VERSION = '2.15' CINDER_VERSION = '3' class NovaAuth(object): nova_client = None @classmethod def get_nova_client(cls, region, service_name=None, endpoint=None, endpoint_type='publicURL', insecure=False, cacert=None): """Create nova client object. :param region: The region of the service :param service_name: The name of the nova service in the catalog :param endpoint: The endpoint of the service :param endpoint_type: The type of the endpoint :param insecure: Turn off certificate validation :param cacert: CA Cert file path :return: a Nova Client object. :raises Exception: if the client cannot be created """ ksession = keystone.KeystoneSession() if not cls.nova_client: kwargs = {'region_name': region, 'session': ksession.get_session(), 'endpoint_type': endpoint_type, 'insecure': insecure} if service_name: kwargs['service_name'] = service_name if endpoint: kwargs['endpoint_override'] = endpoint if cacert: kwargs['cacert'] = cacert try: cls.nova_client = nova_client.Client( version=api_versions.APIVersion(NOVA_VERSION), **kwargs) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error creating Nova client.") return cls.nova_client class NeutronAuth(object): neutron_client = None @classmethod def get_neutron_client(cls, region, service_name=None, endpoint=None, endpoint_type='publicURL', insecure=False, ca_cert=None): """Create neutron client object. :param region: The region of the service :param service_name: The name of the neutron service in the catalog :param endpoint: The endpoint of the service :param endpoint_type: The endpoint_type of the service :param insecure: Turn off certificate validation :param ca_cert: CA Cert file path :return: a Neutron Client object. 
:raises Exception: if the client cannot be created """ ksession = keystone.KeystoneSession() if not cls.neutron_client: kwargs = {'region_name': region, 'session': ksession.get_session(), 'endpoint_type': endpoint_type, 'insecure': insecure} if service_name: kwargs['service_name'] = service_name if endpoint: kwargs['endpoint_override'] = endpoint if ca_cert: kwargs['ca_cert'] = ca_cert try: cls.neutron_client = neutron_client.Client( NEUTRON_VERSION, **kwargs) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error creating Neutron client.") return cls.neutron_client @classmethod def get_user_neutron_client(cls, context): """Get neutron client for request user. It's possible that the token in the context is trust-scoped, and such a token can't be used to initialize a keystone session. We directly use the token and endpoint_url to initialize the neutron client. """ neutron_endpoint = CONF.neutron.endpoint if not neutron_endpoint: session = keystone.KeystoneSession().get_session() endpoint_data = session.get_endpoint_data( service_type='network', interface=CONF.neutron.endpoint_type, region_name=CONF.neutron.region_name) neutron_endpoint = endpoint_data.catalog_url kwargs = { 'token': context.auth_token, 'endpoint_url': neutron_endpoint, 'insecure': CONF.neutron.insecure, 'ca_cert': CONF.neutron.ca_certificates_file } return neutron_client.Client(NEUTRON_VERSION, **kwargs) class GlanceAuth(object): glance_client = None @classmethod def get_glance_client(cls, region, service_name=None, endpoint=None, endpoint_type='publicURL', insecure=False, cacert=None): """Create glance client object. :param region: The region of the service :param service_name: The name of the glance service in the catalog :param endpoint: The endpoint of the service :param endpoint_type: The endpoint_type of the service :param insecure: Turn off certificate validation :param cacert: CA Cert file path :return: a Glance Client object. :raises Exception: if the client cannot be created """ ksession = keystone.KeystoneSession() if not cls.glance_client: kwargs = {'region_name': region, 'session': ksession.get_session(), 'interface': endpoint_type} if service_name: kwargs['service_name'] = service_name if endpoint: kwargs['endpoint'] = endpoint if endpoint.startswith("https"): kwargs['insecure'] = insecure kwargs['cacert'] = cacert try: cls.glance_client = glance_client.Client( GLANCE_VERSION, **kwargs) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error creating Glance client.") return cls.glance_client class CinderAuth(object): cinder_client = None @classmethod def get_cinder_client(cls, region, service_name=None, endpoint=None, endpoint_type='publicURL', insecure=False, cacert=None): """Create cinder client object. 
:param region: The region of the service :param service_name: The name of the cinder service in the catalog :param endpoint: The endpoint of the service :param endpoint_type: The endpoint type of the service :param insecure: Turn off certificate validation :param cacert: CA Cert file path :return: a Cinder Client object :raise Exception: if the client cannot be created """ ksession = keystone.KeystoneSession() if not cls.cinder_client: kwargs = {'region_name': region, 'session': ksession.get_session(), 'interface': endpoint_type} if service_name: kwargs['service_name'] = service_name if endpoint: kwargs['endpoint'] = endpoint if endpoint.startswith("https"): kwargs['insecure'] = insecure kwargs['cacert'] = cacert try: cls.cinder_client = cinder_client.Client( CINDER_VERSION, **kwargs ) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error creating Cinder client.") return cls.cinder_client ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/config.py0000664000175000017500000013205100000000000017731 0ustar00zuulzuul00000000000000# Copyright 2011 VMware, Inc., 2014 A10 Networks # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Routines for configuring Octavia """ import os import ssl import sys from keystoneauth1 import loading as ks_loading from octavia_lib.common import constants as lib_consts from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log as logging import oslo_messaging as messaging from octavia.certificates.common import local from octavia.common import constants from octavia.common import utils from octavia.i18n import _ from octavia import version LOG = logging.getLogger(__name__) EXTRA_LOG_LEVEL_DEFAULTS = [ 'neutronclient.v2_0.client=INFO', ] TLS_PROTOCOL_CHOICES = [ p[9:].replace('_', '.') for p in ssl._PROTOCOL_NAMES.values()] core_opts = [ cfg.HostnameOpt('host', default=utils.get_hostname(), sample_default='', help=_("The hostname Octavia is running on")), cfg.StrOpt('octavia_plugins', default='hot_plug_plugin', help=_("Name of the controller plugin to use")), ] api_opts = [ cfg.IPOpt('bind_host', default='127.0.0.1', help=_("The host IP to bind to")), cfg.PortOpt('bind_port', default=9876, help=_("The port to bind to")), cfg.StrOpt('auth_strategy', default=constants.KEYSTONE, choices=[constants.NOAUTH, constants.KEYSTONE, constants.TESTING], help=_("The auth strategy for API requests.")), cfg.BoolOpt('allow_pagination', default=True, help=_("Allow the usage of pagination")), cfg.BoolOpt('allow_sorting', default=True, help=_("Allow the usage of sorting")), cfg.BoolOpt('allow_filtering', default=True, help=_("Allow the usage of filtering")), cfg.BoolOpt('allow_field_selection', default=True, help=_("Allow the usage of field selection")), cfg.StrOpt('pagination_max_limit', default=str(constants.DEFAULT_PAGE_SIZE), help=_("The maximum number of items returned in a single " "response. 
The string 'infinite' or a negative " "integer value means 'no limit'")), cfg.StrOpt('api_base_uri', help=_("Base URI for the API for use in pagination links. " "This will be autodetected from the request if not " "overridden here.")), cfg.BoolOpt('allow_tls_terminated_listeners', default=True, help=_("Allow users to create TLS Terminated listeners?")), cfg.BoolOpt('allow_ping_health_monitors', default=True, help=_("Allow users to create PING type Health Monitors?")), cfg.DictOpt('enabled_provider_drivers', help=_('A comma separated list of dictionaries of the ' 'enabled provider driver names and descriptions. ' 'Must match the driver name in the ' 'octavia.api.drivers entrypoint. Example: ' 'amphora:The Octavia Amphora driver.,' 'octavia:Deprecated alias of the Octavia ' 'Amphora driver.'), default={'amphora': 'The Octavia Amphora driver.', 'octavia': 'Deprecated alias of the Octavia Amphora ' 'driver.', }), cfg.StrOpt('default_provider_driver', default='amphora', help=_('Default provider driver.')), cfg.IntOpt('udp_connect_min_interval_health_monitor', default=3, help=_("The minimum health monitor delay interval for the " "UDP-CONNECT Health Monitor type. A negative integer " "value means 'no limit'.")), cfg.BoolOpt('healthcheck_enabled', default=False, help=_("When True, the oslo middleware healthcheck endpoint " "is enabled in the Octavia API.")), cfg.IntOpt('healthcheck_refresh_interval', default=5, help=_("The interval healthcheck plugins should cache results, " "in seconds.")), cfg.StrOpt('default_listener_ciphers', default=constants.CIPHERS_OWASP_SUITE_B, help=_("Default OpenSSL cipher string (colon-separated) for " "new TLS-enabled listeners.")), cfg.StrOpt('default_pool_ciphers', default=constants.CIPHERS_OWASP_SUITE_B, help=_("Default OpenSSL cipher string (colon-separated) for " "new TLS-enabled pools.")), ] # Options only used by the amphora agent amphora_agent_opts = [ cfg.StrOpt('agent_server_ca', default='/etc/octavia/certs/client_ca.pem', help=_("The ca which signed the client certificates")), cfg.StrOpt('agent_server_cert', default='/etc/octavia/certs/server.pem', help=_("The server certificate for the agent server " "to use")), cfg.StrOpt('agent_server_network_dir', help=_("The directory where new network interfaces " "are located")), cfg.StrOpt('agent_server_network_file', help=_("The file where the network interfaces are located. " "Specifying this will override any value set for " "agent_server_network_dir.")), cfg.IntOpt('agent_request_read_timeout', default=180, help=_("The time in seconds to allow a request from the " "controller to run before terminating the socket.")), cfg.StrOpt('agent_tls_protocol', default='TLSv1.2', help=_("Minimum TLS protocol for communication with the " "amphora agent."), choices=TLS_PROTOCOL_CHOICES), # Logging setup cfg.ListOpt('admin_log_targets', help=_('List of log server ip and port pairs for ' 'Administrative logs. Additional hosts are backup to ' 'the primary server. If none is ' 'specified remote logging is disabled. Example ' '127.0.0.1:10514, 192.168.0.1:10514')), cfg.ListOpt('tenant_log_targets', help=_('List of log server ip and port pairs for ' 'tenant traffic logs. Additional hosts are backup to ' 'the primary server. If none is ' 'specified remote logging is disabled. 
Example ' '127.0.0.1:10514, 192.168.0.1:10514')), cfg.IntOpt('user_log_facility', default=0, min=0, max=7, help=_('LOG_LOCAL facility number to use for user traffic ' 'logs.')), cfg.IntOpt('administrative_log_facility', default=1, min=0, max=7, help=_('LOG_LOCAL facility number to use for amphora processes ' 'logs.')), cfg.StrOpt('log_protocol', default=lib_consts.PROTOCOL_UDP, choices=[lib_consts.PROTOCOL_TCP, lib_consts.PROTOCOL_UDP], help=_("The log forwarding transport protocol. One of UDP or " "TCP.")), cfg.IntOpt('log_retry_count', default=5, help=_('The maximum attempts to retry connecting to the ' 'logging host.')), cfg.IntOpt('log_retry_interval', default=2, help=_('The time, in seconds, to wait between retries ' 'connecting to the logging host.')), cfg.IntOpt('log_queue_size', default=10000, help=_('The queue size (messages) to buffer log messages.')), cfg.StrOpt('logging_template_override', help=_('Custom logging configuration template.')), cfg.BoolOpt('forward_all_logs', default=False, help=_('When True, the amphora will forward all of the ' 'system logs (except tenant traffic logs) to the ' 'admin log target(s). When False, ' 'only amphora specific admin logs will be forwarded.')), cfg.BoolOpt('disable_local_log_storage', default=False, help=_('When True, no logs will be written to the amphora ' 'filesystem. When False, log files will be written to ' 'the local filesystem.')), # Do not specify in octavia.conf, loaded at runtime cfg.StrOpt('amphora_id', help=_("The amphora ID.")), cfg.StrOpt('amphora_udp_driver', default='keepalived_lvs', help='The UDP API backend for amphora agent.'), ] compute_opts = [ cfg.IntOpt('max_retries', default=15, help=_('The maximum attempts to retry an action with the ' 'compute service.')), cfg.IntOpt('retry_interval', default=1, help=_('Seconds to wait before retrying an action with the ' 'compute service.')), cfg.IntOpt('retry_backoff', default=1, help=_('The seconds to backoff retry attempts.')), cfg.IntOpt('retry_max', default=10, help=_('The maximum interval in seconds between retry ' 'attempts.')), ] networking_opts = [ cfg.IntOpt('max_retries', default=15, help=_('The maximum attempts to retry an action with the ' 'networking service.')), cfg.IntOpt('retry_interval', default=1, help=_('Seconds to wait before retrying an action with the ' 'networking service.')), cfg.IntOpt('retry_backoff', default=1, help=_('The seconds to backoff retry attempts.')), cfg.IntOpt('retry_max', default=10, help=_('The maximum interval in seconds between retry ' 'attempts.')), cfg.IntOpt('port_detach_timeout', default=300, help=_('Seconds to wait for a port to detach from an ' 'amphora.')), cfg.BoolOpt('allow_vip_network_id', default=True, help=_('Can users supply a network_id for their VIP?')), cfg.BoolOpt('allow_vip_subnet_id', default=True, help=_('Can users supply a subnet_id for their VIP?')), cfg.BoolOpt('allow_vip_port_id', default=True, help=_('Can users supply a port_id for their VIP?')), cfg.ListOpt('valid_vip_networks', help=_('List of network_ids that are valid for VIP ' 'creation. If this field is empty, no validation ' 'is performed.')), cfg.ListOpt('reserved_ips', default=['169.254.169.254'], item_type=cfg.types.IPAddress(), help=_('List of IP addresses reserved from being used for ' 'member addresses. IPv6 addresses should be in ' 'expanded, uppercase form.')), cfg.BoolOpt('allow_invisible_resource_usage', default=False, help=_("When True, users can use network resources they " "cannot normally see as VIP or member subnets. 
Making " "this True may allow users to access resources on " "subnets they do not normally have access to via " "neutron RBAC policies.")), ] healthmanager_opts = [ cfg.IPOpt('bind_ip', default='127.0.0.1', help=_('IP address the controller will listen on for ' 'heart beats')), cfg.PortOpt('bind_port', default=5555, help=_('Port number the controller will listen on ' 'for heart beats')), cfg.IntOpt('failover_threads', default=10, help=_('Number of threads performing amphora failovers.')), # TODO(tatsuma) Remove in or after "T" release cfg.IntOpt('status_update_threads', default=None, help=_('Number of processes for amphora status update.'), deprecated_for_removal=True, deprecated_reason=_('This option is replaced as ' 'health_update_threads and ' 'stats_update_threads')), cfg.IntOpt('health_update_threads', default=None, help=_('Number of processes for amphora health update.')), cfg.IntOpt('stats_update_threads', default=None, help=_('Number of processes for amphora stats update.')), cfg.StrOpt('heartbeat_key', mutable=True, help=_('key used to validate amphora sending ' 'the message'), secret=True), cfg.IntOpt('heartbeat_timeout', default=60, help=_('Interval, in seconds, to wait before failing over an ' 'amphora.')), cfg.IntOpt('health_check_interval', default=3, help=_('Sleep time between health checks in seconds.')), cfg.IntOpt('sock_rlimit', default=0, help=_(' sets the value of the heartbeat recv buffer')), # Used by the health manager on the amphora cfg.ListOpt('controller_ip_port_list', help=_('List of controller ip and port pairs for the ' 'heartbeat receivers. Example 127.0.0.1:5555, ' '192.168.0.1:5555'), mutable=True, default=[]), cfg.IntOpt('heartbeat_interval', default=10, mutable=True, help=_('Sleep time between sending heartbeats.')), # Used for updating health and stats cfg.StrOpt('health_update_driver', default='health_db', help=_('Driver for updating amphora health system.')), cfg.StrOpt('stats_update_driver', default='stats_db', help=_('Driver for updating amphora statistics.')), ] oslo_messaging_opts = [ cfg.StrOpt('topic'), ] haproxy_amphora_opts = [ cfg.StrOpt('base_path', default='/var/lib/octavia', help=_('Base directory for amphora files.')), cfg.StrOpt('base_cert_dir', default='/var/lib/octavia/certs', help=_('Base directory for cert storage.')), cfg.StrOpt('haproxy_template', help=_('Custom haproxy template.')), cfg.BoolOpt('connection_logging', default=True, help=_('Set this to False to disable connection logging.')), cfg.IntOpt('connection_max_retries', default=120, help=_('Retry threshold for connecting to amphorae.')), cfg.IntOpt('connection_retry_interval', default=5, help=_('Retry timeout between connection attempts in ' 'seconds.')), cfg.IntOpt('active_connection_max_retries', default=15, help=_('Retry threshold for connecting to active amphorae.')), cfg.IntOpt('active_connection_rety_interval', default=2, help=_('Retry timeout between connection attempts in ' 'seconds for active amphora.')), cfg.IntOpt('failover_connection_max_retries', default=2, help=_('Retry threshold for connecting to an amphora in ' 'failover.')), cfg.IntOpt('failover_connection_retry_interval', default=5, help=_('Retry timeout between connection attempts in ' 'seconds for amphora in failover.')), cfg.IntOpt('build_rate_limit', default=-1, help=_('Number of amphorae that could be built per controller ' 'worker, simultaneously.')), cfg.IntOpt('build_active_retries', default=120, help=_('Retry threshold for waiting for a build slot for ' 'an amphorae.')), 
cfg.IntOpt('build_retry_interval', default=5, help=_('Retry timeout between build attempts in ' 'seconds.')), cfg.StrOpt('haproxy_stick_size', default='10k', help=_('Size of the HAProxy stick table. Accepts k, m, g ' 'suffixes. Example: 10k')), cfg.StrOpt('user_log_format', default='{{ project_id }} {{ lb_id }} %f %ci %cp %t %{+Q}r %ST ' '%B %U %[ssl_c_verify] %{+Q}[ssl_c_s_dn] %b %s %Tt ' '%tsc', help=_('Log format string for user flow logging.')), # REST server cfg.IPOpt('bind_host', default='::', # nosec help=_("The host IP to bind to")), cfg.PortOpt('bind_port', default=9443, help=_("The port to bind to")), cfg.StrOpt('lb_network_interface', default='o-hm0', help=_('Network interface through which to reach amphora, only ' 'required if using IPv6 link local addresses.')), cfg.StrOpt('haproxy_cmd', default='/usr/sbin/haproxy', help=_("The full path to haproxy")), cfg.IntOpt('respawn_count', default=2, help=_("The respawn count for haproxy's upstart script")), cfg.IntOpt('respawn_interval', default=2, help=_("The respawn interval for haproxy's upstart script")), cfg.FloatOpt('rest_request_conn_timeout', default=10, help=_("The time in seconds to wait for a REST API " "to connect.")), cfg.FloatOpt('rest_request_read_timeout', default=60, help=_("The time in seconds to wait for a REST API " "response.")), cfg.IntOpt('timeout_client_data', default=constants.DEFAULT_TIMEOUT_CLIENT_DATA, help=_('Frontend client inactivity timeout.')), cfg.IntOpt('timeout_member_connect', default=constants.DEFAULT_TIMEOUT_MEMBER_CONNECT, help=_('Backend member connection timeout.')), cfg.IntOpt('timeout_member_data', default=constants.DEFAULT_TIMEOUT_MEMBER_DATA, help=_('Backend member inactivity timeout.')), cfg.IntOpt('timeout_tcp_inspect', default=constants.DEFAULT_TIMEOUT_TCP_INSPECT, help=_('Time to wait for TCP packets for content inspection.')), # REST client cfg.StrOpt('client_cert', default='/etc/octavia/certs/client.pem', help=_("The client certificate to talk to the agent")), cfg.StrOpt('server_ca', default='/etc/octavia/certs/server_ca.pem', help=_("The ca which signed the server certificates")), cfg.BoolOpt('use_upstart', default=True, deprecated_for_removal=True, deprecated_reason='This is now automatically discovered ' ' and configured.', help=_("If False, use sysvinit.")), cfg.IntOpt('api_db_commit_retry_attempts', default=15, help=_('The number of times the database action will be ' 'attempted.')), cfg.IntOpt('api_db_commit_retry_initial_delay', default=1, help=_('The initial delay before a retry attempt.')), cfg.IntOpt('api_db_commit_retry_backoff', default=1, help=_('The time to backoff retry attempts.')), cfg.IntOpt('api_db_commit_retry_max', default=5, help=_('The maximum amount of time to wait between retry ' 'attempts.')), cfg.IntOpt('default_connection_limit', default=constants.HAPROXY_DEFAULT_MAXCONN, help=_('Default connection_limit for listeners, used when ' 'setting "-1" or when unsetting connection_limit with ' 'the listener API.')), ] controller_worker_opts = [ cfg.IntOpt('workers', default=1, min=1, help='Number of workers for the controller-worker service.'), cfg.IntOpt('amp_active_retries', default=30, help=_('Retry attempts to wait for Amphora to become active')), cfg.IntOpt('amp_active_wait_sec', default=10, help=_('Seconds to wait between checks on whether an Amphora ' 'has become active')), cfg.StrOpt('amp_flavor_id', default='', help=_('Nova instance flavor id for the Amphora')), cfg.StrOpt('amp_image_tag', default='', help=_('Glance image tag for the Amphora image to boot. 
' 'Use this option to be able to update the image ' 'without reconfiguring Octavia. ' 'Ignored if amp_image_id is defined.')), cfg.StrOpt('amp_image_id', default='', deprecated_for_removal=True, deprecated_reason='Superseded by amp_image_tag option.', help=_('Glance image id for the Amphora image to boot')), cfg.StrOpt('amp_image_owner_id', default='', help=_('Restrict glance image selection to a specific ' 'owner ID. This is a recommended security setting.')), cfg.StrOpt('amp_ssh_key_name', default='', help=_('Optional SSH keypair name, in nova, that will be used ' 'for the authorized_keys inside the amphora.')), cfg.BoolOpt('amp_ssh_access_allowed', default=True, deprecated_for_removal=True, deprecated_reason='This option and amp_ssh_key_name overlap ' 'in functionality, and only one is needed. ' 'SSH access can be enabled/disabled simply ' 'by setting amp_ssh_key_name, or not.', help=_('Determines whether or not to allow access ' 'to the Amphorae')), cfg.ListOpt('amp_boot_network_list', default='', help=_('List of networks to attach to the Amphorae. ' 'All networks defined in the list will ' 'be attached to each amphora.')), cfg.ListOpt('amp_secgroup_list', default='', help=_('List of security groups to attach to the Amphora.')), cfg.StrOpt('client_ca', default='/etc/octavia/certs/ca_01.pem', help=_('Client CA for the amphora agent to use')), cfg.StrOpt('amphora_driver', default='amphora_noop_driver', help=_('Name of the amphora driver to use')), cfg.StrOpt('compute_driver', default='compute_noop_driver', help=_('Name of the compute driver to use')), cfg.StrOpt('network_driver', default='network_noop_driver', help=_('Name of the network driver to use')), cfg.StrOpt('volume_driver', default=constants.VOLUME_NOOP_DRIVER, choices=constants.SUPPORTED_VOLUME_DRIVERS, help=_('Name of the volume driver to use')), cfg.StrOpt('distributor_driver', default='distributor_noop_driver', help=_('Name of the distributor driver to use')), cfg.StrOpt('loadbalancer_topology', default=constants.TOPOLOGY_SINGLE, choices=constants.SUPPORTED_LB_TOPOLOGIES, mutable=True, help=_('Load balancer topology configuration. ' 'SINGLE - One amphora per load balancer. ' 'ACTIVE_STANDBY - Two amphora per load balancer.')), cfg.BoolOpt('user_data_config_drive', default=False, help=_('If True, build cloud-init user-data that is passed ' 'to the config drive on Amphora boot instead of ' 'personality files. If False, utilize personality ' 'files.')), cfg.IntOpt('amphora_delete_retries', default=5, help=_('Number of times an amphora delete should be retried.')), cfg.IntOpt('amphora_delete_retry_interval', default=5, help=_('Time, in seconds, between amphora delete retries.')), ] task_flow_opts = [ cfg.StrOpt('engine', default='parallel', choices=constants.SUPPORTED_TASKFLOW_ENGINE_TYPES, help=_('TaskFlow engine to use. ' 'serial - Runs all tasks on a single thread. ' 'parallel - Schedules tasks onto different threads to ' 'allow for running non-dependent tasks simultaneously')), cfg.IntOpt('max_workers', default=5, help=_('The maximum number of workers')), cfg.BoolOpt('disable_revert', default=False, help=_('If True, disables the controller worker taskflow ' 'flows from reverting. This will leave resources in ' 'an inconsistent state and should only be used for ' 'debugging purposes.')), cfg.StrOpt('persistence_connection', default='sqlite://', help='Persistence database, which will be used to store tasks ' 'states. 
Database connection URL with the database name.'), cfg.StrOpt('jobboard_backend_driver', default='redis_taskflow_driver', choices=['redis_taskflow_driver', 'zookeeper_taskflow_driver'], help='Jobboard backend driver that will monitor job state.'), cfg.ListOpt('jobboard_backend_hosts', default=['127.0.0.1'], help='Jobboard backend server host(s).'), cfg.PortOpt('jobboard_backend_port', default=6379, help='Jobboard backend server port.'), cfg.StrOpt('jobboard_backend_password', default='', secret=True, help='Jobboard backend server password.'), cfg.StrOpt('jobboard_backend_namespace', default='octavia_jobboard', help='Jobboard name that should be used to store taskflow ' 'job ids and the claims for them.'), cfg.DictOpt('jobboard_redis_backend_ssl_options', help='Redis jobboard backend SSL configuration options.', default={'ssl': False, 'ssl_keyfile': None, 'ssl_certfile': None, 'ssl_ca_certs': None, 'ssl_cert_reqs': 'required'}), cfg.DictOpt('jobboard_zookeeper_ssl_options', help='Zookeeper jobboard backend SSL configuration options.', default={'use_ssl': False, 'keyfile': None, 'keyfile_password': None, 'certfile': None, 'verify_certs': True}), cfg.IntOpt('jobboard_expiration_time', default=30, help='For backends like Redis that require an expiry to be ' 'set when claiming a job, the number of seconds a ' 'claim should be retained for.'), cfg.BoolOpt('jobboard_save_logbook', default=False, help='Set this parameter to True if logbook info needs to be ' 'saved for analysis. By default the logbook is removed ' 'from the persistence backend when a job completes.'), ] core_cli_opts = [] certificate_opts = [ cfg.StrOpt('cert_manager', default='barbican_cert_manager', help='Name of the cert manager to use'), cfg.StrOpt('cert_generator', default='local_cert_generator', help='Name of the cert generator to use'), cfg.StrOpt('barbican_auth', default='barbican_acl_auth', help='Name of the Barbican authentication method to use'), cfg.StrOpt('service_name', help=_('The name of the certificate service in the keystone ' 'catalog')), cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint ' 'in the keystone catalog.')), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the barbican service.'), cfg.StrOpt('endpoint_type', default='publicURL', help='The endpoint_type to be used for barbican service.'), cfg.StrOpt('ca_certificates_file', help=_('CA certificates file path for the key manager service ' '(such as Barbican).')), cfg.BoolOpt('insecure', default=False, help=_('Disable certificate validation on SSL connections.')), ] house_keeping_opts = [ cfg.IntOpt('spare_check_interval', default=30, help=_('Spare check interval in seconds')), cfg.IntOpt('spare_amphora_pool_size', default=0, help=_('Number of spare amphorae')), cfg.IntOpt('cleanup_interval', default=30, help=_('DB cleanup interval in seconds')), cfg.IntOpt('amphora_expiry_age', default=604800, help=_('Amphora expiry age in seconds')), cfg.IntOpt('load_balancer_expiry_age', default=604800, help=_('Load balancer expiry age in seconds')), cfg.IntOpt('cert_interval', default=3600, help=_('Certificate check interval in seconds')), # 14 days for cert expiry buffer cfg.IntOpt('cert_expiry_buffer', default=1209600, help=_('Seconds until certificate expiration')), cfg.IntOpt('cert_rotate_threads', default=10, help=_('Number of threads performing amphora certificate' ' rotation')) ] keepalived_vrrp_opts = [ cfg.IntOpt('vrrp_advert_int', default=1, help=_('Amphora role and priority advertisement interval ' 'in seconds.')),
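    # Illustrative note (an interpretation added for clarity, not part of
    # the original source): these options are rendered into the keepalived
    # configuration on the amphorae. vrrp_advert_int above maps to the VRRP
    # advert_int, while the check interval and fail/success counts below
    # tune the health-check script keepalived runs to decide MASTER/BACKUP
    # transitions.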
cfg.IntOpt('vrrp_check_interval', default=5, help=_('VRRP health check script run interval in seconds.')), cfg.IntOpt('vrrp_fail_count', default=2, help=_('Number of successive failures before transition to a ' 'fail state.')), cfg.IntOpt('vrrp_success_count', default=2, help=_('Number of consecutive successes before transition to a ' 'success state.')), cfg.IntOpt('vrrp_garp_refresh_interval', default=5, help=_('Time in seconds between gratuitous ARP announcements ' 'from the MASTER.')), cfg.IntOpt('vrrp_garp_refresh_count', default=2, help=_('Number of gratuitous ARP announcements to make on ' 'each refresh interval.')) ] nova_opts = [ cfg.StrOpt('service_name', help=_('The name of the nova service in the keystone catalog')), cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint ' 'in the keystone catalog.')), cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack services.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Endpoint interface in identity service to use')), cfg.StrOpt('ca_certificates_file', help=_('CA certificates file path')), cfg.BoolOpt('insecure', default=False, help=_('Disable certificate validation on SSL connections')), cfg.BoolOpt('enable_anti_affinity', default=False, help=_('Flag to indicate if nova anti-affinity feature is ' 'turned on. This option is only used when creating ' 'amphorae in ACTIVE_STANDBY topology.')), cfg.StrOpt('anti_affinity_policy', default=constants.ANTI_AFFINITY, choices=[constants.ANTI_AFFINITY, constants.SOFT_ANTI_AFFINITY], help=_('Sets the anti-affinity policy for nova')), cfg.IntOpt('random_amphora_name_length', default=0, help=_('If non-zero, generate a random name of the length ' 'provided for each amphora, in the format "a[A-Z0-9]*". 
' 'Otherwise, the default name format will be used: ' '"amphora-{UUID}".')), cfg.StrOpt('availability_zone', default=None, help=_('Availability zone to use for creating Amphorae')), ] cinder_opts = [ cfg.StrOpt('service_name', help=_('The name of the cinder service in the keystone ' 'catalog')), cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint ' 'in the keystone catalog.')), cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack services.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Endpoint interface in identity service to use')), cfg.StrOpt('ca_certificates_file', help=_('CA certificates file path')), cfg.StrOpt('availability_zone', default=None, help=_('Availability zone to use for creating Volume')), cfg.BoolOpt('insecure', default=False, help=_('Disable certificate validation on SSL connections')), cfg.IntOpt('volume_size', default=16, help=_('Size of volume, in GB, for Amphora instance')), cfg.StrOpt('volume_type', default=None, help=_('Type of volume for Amphorae volume root disk')), cfg.IntOpt('volume_create_retry_interval', default=5, help=_('Interval, in seconds, between retries while waiting ' 'for the volume to be created and become available')), cfg.IntOpt('volume_create_timeout', default=300, help=_('Timeout, in seconds, to wait for successful volume ' 'creation')), cfg.IntOpt('volume_create_max_retries', default=5, help=_('Maximum number of retries to create volume')) ] neutron_opts = [ cfg.StrOpt('service_name', help=_('The name of the neutron service in the ' 'keystone catalog')), cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint ' 'in the keystone catalog.')), cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack services.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Endpoint interface in identity service to use')), cfg.StrOpt('ca_certificates_file', help=_('CA certificates file path')), cfg.BoolOpt('insecure', default=False, help=_('Disable certificate validation on SSL connections')), ] glance_opts = [ cfg.StrOpt('service_name', help=_('The name of the glance service in the ' 'keystone catalog')), cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint ' 'in the keystone catalog.')), cfg.StrOpt('region_name', help=_('Region in Identity service catalog to use for ' 'communication with the OpenStack services.')), cfg.StrOpt('endpoint_type', default='publicURL', help=_('Endpoint interface in identity service to use')), cfg.StrOpt('ca_certificates_file', help=_('CA certificates file path')), cfg.BoolOpt('insecure', default=False, help=_('Disable certificate validation on SSL connections')), ] quota_opts = [ cfg.IntOpt('default_load_balancer_quota', default=constants.QUOTA_UNLIMITED, help=_('Default per project load balancer quota.')), cfg.IntOpt('default_listener_quota', default=constants.QUOTA_UNLIMITED, help=_('Default per project listener quota.')), cfg.IntOpt('default_member_quota', default=constants.QUOTA_UNLIMITED, help=_('Default per project member quota.')), cfg.IntOpt('default_pool_quota', default=constants.QUOTA_UNLIMITED, help=_('Default per project pool quota.')), cfg.IntOpt('default_health_monitor_quota', default=constants.QUOTA_UNLIMITED, help=_('Default per project health monitor quota.')), ] audit_opts = [ cfg.BoolOpt('enabled', default=False, help=_('Enable auditing of API requests')), cfg.StrOpt('audit_map_file', default='/etc/octavia/octavia_api_audit_map.conf', help=_('Path to audit map file for
octavia-api service. ' 'Used only when API audit is enabled.')), cfg.StrOpt('ignore_req_list', default='', help=_('Comma separated list of REST API HTTP methods to be ' 'ignored during audit. For example: auditing will not ' 'be done on any GET or POST requests if this is set to ' '"GET,POST". It is used only when API audit is ' 'enabled.')), ] driver_agent_opts = [ cfg.StrOpt('status_socket_path', default='/var/run/octavia/status.sock', help=_('Path to the driver status unix domain socket file.')), cfg.StrOpt('stats_socket_path', default='/var/run/octavia/stats.sock', help=_('Path to the driver statistics unix domain socket ' 'file.')), cfg.StrOpt('get_socket_path', default='/var/run/octavia/get.sock', help=_('Path to the driver get unix domain socket file.')), cfg.IntOpt('status_request_timeout', default=5, help=_('Time, in seconds, to wait for a status update ' 'request.')), cfg.IntOpt('status_max_processes', default=50, help=_('Maximum number of concurrent processes to use ' 'servicing status updates.')), cfg.IntOpt('stats_request_timeout', default=5, help=_('Time, in seconds, to wait for a statistics update ' 'request.')), cfg.IntOpt('stats_max_processes', default=50, help=_('Maximum number of concurrent processes to use ' 'servicing statistics updates.')), cfg.IntOpt('get_request_timeout', default=5, help=_('Time, in seconds, to wait for a get request.')), cfg.IntOpt('get_max_processes', default=50, help=_('Maximum number of concurrent processes to use ' 'servicing get requests.')), cfg.FloatOpt('max_process_warning_percent', default=0.75, min=0.01, max=0.99, help=_('Percentage of max_processes (both status and stats) ' 'in use to start logging warning messages about an ' 'overloaded driver-agent.')), cfg.IntOpt('provider_agent_shutdown_timeout', default=60, help=_('The time, in seconds, to wait for provider agents ' 'to shutdown after the exit event has been set.')), cfg.ListOpt('enabled_provider_agents', default='', help=_('List of enabled provider agents. 
The driver-agent ' 'will launch these agents at startup.')) ] # Register the configuration options cfg.CONF.register_opts(core_opts) cfg.CONF.register_opts(api_opts, group='api_settings') cfg.CONF.register_opts(amphora_agent_opts, group='amphora_agent') cfg.CONF.register_opts(compute_opts, group='compute') cfg.CONF.register_opts(networking_opts, group='networking') cfg.CONF.register_opts(oslo_messaging_opts, group='oslo_messaging') cfg.CONF.register_opts(haproxy_amphora_opts, group='haproxy_amphora') cfg.CONF.register_opts(controller_worker_opts, group='controller_worker') cfg.CONF.register_opts(keepalived_vrrp_opts, group='keepalived_vrrp') cfg.CONF.register_opts(task_flow_opts, group='task_flow') cfg.CONF.register_opts(house_keeping_opts, group='house_keeping') cfg.CONF.register_opts(certificate_opts, group='certificates') cfg.CONF.register_opts(healthmanager_opts, group='health_manager') cfg.CONF.register_opts(nova_opts, group='nova') cfg.CONF.register_opts(cinder_opts, group='cinder') cfg.CONF.register_opts(glance_opts, group='glance') cfg.CONF.register_opts(neutron_opts, group='neutron') cfg.CONF.register_opts(quota_opts, group='quotas') cfg.CONF.register_opts(audit_opts, group='audit') cfg.CONF.register_opts(driver_agent_opts, group='driver_agent') cfg.CONF.register_opts(local.certgen_opts, group='certificates') cfg.CONF.register_opts(local.certmgr_opts, group='certificates') # Ensure that the control exchange is set correctly messaging.set_transport_defaults(control_exchange='octavia') _SQL_CONNECTION_DEFAULT = 'sqlite://' # Update the default QueuePool parameters. These can be tweaked by the # configuration variables - max_pool_size, max_overflow and pool_timeout db_options.set_defaults(cfg.CONF, connection=_SQL_CONNECTION_DEFAULT, max_pool_size=10, max_overflow=20, pool_timeout=10) ks_loading.register_auth_conf_options(cfg.CONF, constants.SERVICE_AUTH) ks_loading.register_session_conf_options(cfg.CONF, constants.SERVICE_AUTH) def register_cli_opts(): cfg.CONF.register_cli_opts(core_cli_opts) logging.register_options(cfg.CONF) def init(args, **kwargs): register_cli_opts() cfg.CONF(args=args, project='octavia', version='%%prog %s' % version.version_info.release_string(), **kwargs) handle_deprecation_compatibility() setup_remote_debugger() def setup_logging(conf): """Sets up the logging options for a log with supplied name. :param conf: a cfg.ConfOpts object """ logging.set_defaults(default_log_levels=logging.get_default_log_levels() + EXTRA_LOG_LEVEL_DEFAULTS) product_name = "octavia" logging.setup(conf, product_name) LOG.info("Logging enabled!") LOG.info("%(prog)s version %(version)s", {'prog': sys.argv[0], 'version': version.version_info.release_string()}) LOG.debug("command line: %s", " ".join(sys.argv)) # Use cfg.CONF.set_default to override the new configuration setting # default value. This allows a value set, at the new location, to override # a value set in the previous location while allowing settings that have # not yet been moved to be utilized. 
def handle_deprecation_compatibility(): # TODO(tatsuma) Remove in or after "T" release if cfg.CONF.health_manager.status_update_threads is not None: cfg.CONF.set_default('health_update_threads', cfg.CONF.health_manager.status_update_threads, group='health_manager') cfg.CONF.set_default('stats_update_threads', cfg.CONF.health_manager.status_update_threads, group='health_manager') def _enable_pydev(debugger_host, debugger_port): try: from pydev import pydevd # pylint: disable=import-outside-toplevel except ImportError: import pydevd # pylint: disable=import-outside-toplevel pydevd.settrace(debugger_host, port=int(debugger_port), stdoutToServer=True, stderrToServer=True) def _enable_ptvsd(debugger_host, debugger_port): import ptvsd # pylint: disable=import-outside-toplevel # Allow other computers to attach to ptvsd at this IP address and port. ptvsd.enable_attach(address=(debugger_host, debugger_port), redirect_output=True) # Pause the program until a remote debugger is attached ptvsd.wait_for_attach() def setup_remote_debugger(): """Required setup for remote debugging.""" debugger_type = os.environ.get('DEBUGGER_TYPE', 'pydev') debugger_host = os.environ.get('DEBUGGER_HOST') debugger_port = os.environ.get('DEBUGGER_PORT') if not debugger_type or not debugger_host or not debugger_port: return try: LOG.warning("Connecting to remote debugger. Once connected, resume " "the program on the debugger to continue with the " "initialization of the service.") if debugger_type == 'pydev': _enable_pydev(debugger_host, debugger_port) elif debugger_type == 'ptvsd': _enable_ptvsd(debugger_host, debugger_port) else: LOG.error('Debugger %(debugger)s is not supported', {'debugger': debugger_type}) except Exception: LOG.exception('Unable to join debugger, please make sure that the ' 'debugger process is listening on debug-host ' '\'%(debug-host)s\' debug-port \'%(debug-port)s\'.', {'debug-host': debugger_host, 'debug-port': debugger_port}) raise
octavia-6.2.2/octavia/common/constants.py
# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
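# Illustrative sketch (added for clarity; the snippet below is an
# assumption, not part of the original module): thanks to the aliasing
# that follows, callers can reference the same value through either
# package while the deprecated names remain, e.g.:
#
#     from octavia.common import constants
#     from octavia_lib.common import constants as lib_consts
#     assert constants.PROTOCOL_HTTP == lib_consts.PROTOCOL_HTTP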
from octavia_lib.common import constants as lib_consts ############################################################################## # Constants common to the provider drivers moved to # octavia_lib.common.constants # These are deprecated, to be removed in the 'U' release ############################################################################## # 'loadbalancers' LOADBALANCERS = lib_consts.LOADBALANCERS # 'listeners' LISTENERS = lib_consts.LISTENERS # 'pools' POOLS = lib_consts.POOLS # HEALTHMONITORS = 'healthmonitors' HEALTHMONITORS = lib_consts.HEALTHMONITORS # 'members' MEMBERS = lib_consts.MEMBERS # 'l7policies' L7POLICIES = lib_consts.L7POLICIES # 'l7rules' L7RULES = lib_consts.L7RULES # 'PING' HEALTH_MONITOR_PING = lib_consts.HEALTH_MONITOR_PING # 'TCP' HEALTH_MONITOR_TCP = lib_consts.HEALTH_MONITOR_TCP # 'HTTP' HEALTH_MONITOR_HTTP = lib_consts.HEALTH_MONITOR_HTTP # 'HTTPS' HEALTH_MONITOR_HTTPS = lib_consts.HEALTH_MONITOR_HTTPS # 'TLS-HELLO' HEALTH_MONITOR_TLS_HELLO = lib_consts.HEALTH_MONITOR_TLS_HELLO # 'UDP-CONNECT' HEALTH_MONITOR_UDP_CONNECT = lib_consts.HEALTH_MONITOR_UDP_CONNECT SUPPORTED_HEALTH_MONITOR_TYPES = lib_consts.SUPPORTED_HEALTH_MONITOR_TYPES # 'GET' HEALTH_MONITOR_HTTP_METHOD_GET = lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET # 'HEAD' HEALTH_MONITOR_HTTP_METHOD_HEAD = lib_consts.HEALTH_MONITOR_HTTP_METHOD_HEAD # 'POST' HEALTH_MONITOR_HTTP_METHOD_POST = lib_consts.HEALTH_MONITOR_HTTP_METHOD_POST # 'PUT' HEALTH_MONITOR_HTTP_METHOD_PUT = lib_consts.HEALTH_MONITOR_HTTP_METHOD_PUT # 'DELETE' HEALTH_MONITOR_HTTP_METHOD_DELETE = ( lib_consts.HEALTH_MONITOR_HTTP_METHOD_DELETE) # 'TRACE' HEALTH_MONITOR_HTTP_METHOD_TRACE = lib_consts.HEALTH_MONITOR_HTTP_METHOD_TRACE # 'OPTIONS' HEALTH_MONITOR_HTTP_METHOD_OPTIONS = ( lib_consts.HEALTH_MONITOR_HTTP_METHOD_OPTIONS) # 'CONNECT' HEALTH_MONITOR_HTTP_METHOD_CONNECT = ( lib_consts.HEALTH_MONITOR_HTTP_METHOD_CONNECT) # 'PATCH' HEALTH_MONITOR_HTTP_METHOD_PATCH = lib_consts.HEALTH_MONITOR_HTTP_METHOD_PATCH SUPPORTED_HEALTH_MONITOR_HTTP_METHODS = ( lib_consts.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS) # 'REJECT' L7POLICY_ACTION_REJECT = lib_consts.L7POLICY_ACTION_REJECT # 'REDIRECT_TO_URL' L7POLICY_ACTION_REDIRECT_TO_URL = lib_consts.L7POLICY_ACTION_REDIRECT_TO_URL # 'REDIRECT_TO_POOL' L7POLICY_ACTION_REDIRECT_TO_POOL = lib_consts.L7POLICY_ACTION_REDIRECT_TO_POOL # 'REDIRECT_PREFIX' L7POLICY_ACTION_REDIRECT_PREFIX = lib_consts.L7POLICY_ACTION_REDIRECT_PREFIX SUPPORTED_L7POLICY_ACTIONS = lib_consts.SUPPORTED_L7POLICY_ACTIONS # 'REGEX' L7RULE_COMPARE_TYPE_REGEX = lib_consts.L7RULE_COMPARE_TYPE_REGEX # 'STARTS_WITH' L7RULE_COMPARE_TYPE_STARTS_WITH = lib_consts.L7RULE_COMPARE_TYPE_STARTS_WITH # 'ENDS_WITH' L7RULE_COMPARE_TYPE_ENDS_WITH = lib_consts.L7RULE_COMPARE_TYPE_ENDS_WITH # 'CONTAINS' L7RULE_COMPARE_TYPE_CONTAINS = lib_consts.L7RULE_COMPARE_TYPE_CONTAINS # 'EQUAL_TO' L7RULE_COMPARE_TYPE_EQUAL_TO = lib_consts.L7RULE_COMPARE_TYPE_EQUAL_TO SUPPORTED_L7RULE_COMPARE_TYPES = lib_consts.SUPPORTED_L7RULE_COMPARE_TYPES # 'HOST_NAME' L7RULE_TYPE_HOST_NAME = lib_consts.L7RULE_TYPE_HOST_NAME # 'PATH' L7RULE_TYPE_PATH = lib_consts.L7RULE_TYPE_PATH # 'FILE_TYPE' L7RULE_TYPE_FILE_TYPE = lib_consts.L7RULE_TYPE_FILE_TYPE # 'HEADER' L7RULE_TYPE_HEADER = lib_consts.L7RULE_TYPE_HEADER # 'COOKIE' L7RULE_TYPE_COOKIE = lib_consts.L7RULE_TYPE_COOKIE # 'SSL_CONN_HAS_CERT' L7RULE_TYPE_SSL_CONN_HAS_CERT = lib_consts.L7RULE_TYPE_SSL_CONN_HAS_CERT # 'SSL_VERIFY_RESULT' L7RULE_TYPE_SSL_VERIFY_RESULT = lib_consts.L7RULE_TYPE_SSL_VERIFY_RESULT # 'SSL_DN_FIELD' 
L7RULE_TYPE_SSL_DN_FIELD = lib_consts.L7RULE_TYPE_SSL_DN_FIELD SUPPORTED_L7RULE_TYPES = lib_consts.SUPPORTED_L7RULE_TYPES # 'ROUND_ROBIN' LB_ALGORITHM_ROUND_ROBIN = lib_consts.LB_ALGORITHM_ROUND_ROBIN # 'LEAST_CONNECTIONS' LB_ALGORITHM_LEAST_CONNECTIONS = lib_consts.LB_ALGORITHM_LEAST_CONNECTIONS # 'SOURCE_IP' LB_ALGORITHM_SOURCE_IP = lib_consts.LB_ALGORITHM_SOURCE_IP SUPPORTED_LB_ALGORITHMS = lib_consts.SUPPORTED_LB_ALGORITHMS # 'operating_status' OPERATING_STATUS = lib_consts.OPERATING_STATUS # 'ONLINE' ONLINE = lib_consts.ONLINE # 'OFFLINE' OFFLINE = lib_consts.OFFLINE # 'DEGRADED' DEGRADED = lib_consts.DEGRADED # 'ERROR' ERROR = lib_consts.ERROR # 'DRAINING' DRAINING = lib_consts.DRAINING # 'NO_MONITOR' NO_MONITOR = lib_consts.NO_MONITOR # 'operating_status' SUPPORTED_OPERATING_STATUSES = lib_consts.SUPPORTED_OPERATING_STATUSES # 'TCP' PROTOCOL_TCP = lib_consts.PROTOCOL_TCP # 'UDP' PROTOCOL_UDP = lib_consts.PROTOCOL_UDP # 'HTTP' PROTOCOL_HTTP = lib_consts.PROTOCOL_HTTP # 'HTTPS' PROTOCOL_HTTPS = lib_consts.PROTOCOL_HTTPS # 'TERMINATED_HTTPS' PROTOCOL_TERMINATED_HTTPS = lib_consts.PROTOCOL_TERMINATED_HTTPS # 'PROXY' PROTOCOL_PROXY = lib_consts.PROTOCOL_PROXY SUPPORTED_PROTOCOLS = lib_consts.SUPPORTED_PROTOCOLS # 'provisioning_status' PROVISIONING_STATUS = lib_consts.PROVISIONING_STATUS # Amphora has been allocated to a load balancer 'ALLOCATED' AMPHORA_ALLOCATED = lib_consts.AMPHORA_ALLOCATED # Amphora is being built 'BOOTING' AMPHORA_BOOTING = lib_consts.AMPHORA_BOOTING # Amphora is ready to be allocated to a load balancer 'READY' AMPHORA_READY = lib_consts.AMPHORA_READY # 'ACTIVE' ACTIVE = lib_consts.ACTIVE # 'PENDING_DELETE' PENDING_DELETE = lib_consts.PENDING_DELETE # 'PENDING_UPDATE' PENDING_UPDATE = lib_consts.PENDING_UPDATE # 'PENDING_CREATE' PENDING_CREATE = lib_consts.PENDING_CREATE # 'DELETED' DELETED = lib_consts.DELETED SUPPORTED_PROVISIONING_STATUSES = lib_consts.SUPPORTED_PROVISIONING_STATUSES # 'SOURCE_IP' SESSION_PERSISTENCE_SOURCE_IP = lib_consts.SESSION_PERSISTENCE_SOURCE_IP # 'HTTP_COOKIE' SESSION_PERSISTENCE_HTTP_COOKIE = lib_consts.SESSION_PERSISTENCE_HTTP_COOKIE # 'APP_COOKIE' SESSION_PERSISTENCE_APP_COOKIE = lib_consts.SESSION_PERSISTENCE_APP_COOKIE SUPPORTED_SP_TYPES = lib_consts.SUPPORTED_SP_TYPES # List of HTTP headers which are supported for insertion SUPPORTED_HTTP_HEADERS = lib_consts.SUPPORTED_HTTP_HEADERS # List of SSL headers for client certificate SUPPORTED_SSL_HEADERS = lib_consts.SUPPORTED_SSL_HEADERS ############################################################################### HEALTH_MONITOR_DEFAULT_EXPECTED_CODES = '200' HEALTH_MONITOR_HTTP_DEFAULT_METHOD = lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET HEALTH_MONITOR_DEFAULT_URL_PATH = '/' TYPE = 'type' URL_PATH = 'url_path' HTTP_METHOD = 'http_method' HTTP_VERSION = 'http_version' EXPECTED_CODES = 'expected_codes' DELAY = 'delay' TIMEOUT = 'timeout' MAX_RETRIES = 'max_retries' MAX_RETRIES_DOWN = 'max_retries_down' RISE_THRESHOLD = 'rise_threshold' DOMAIN_NAME = 'domain_name' UPDATE_STATS = 'UPDATE_STATS' UPDATE_HEALTH = 'UPDATE_HEALTH' VALID_LISTENER_POOL_PROTOCOL_MAP = { PROTOCOL_TCP: [PROTOCOL_HTTP, PROTOCOL_HTTPS, PROTOCOL_PROXY, PROTOCOL_TCP], PROTOCOL_HTTP: [PROTOCOL_HTTP, PROTOCOL_PROXY], PROTOCOL_HTTPS: [PROTOCOL_HTTPS, PROTOCOL_PROXY, PROTOCOL_TCP], PROTOCOL_TERMINATED_HTTPS: [PROTOCOL_HTTP, PROTOCOL_PROXY], PROTOCOL_UDP: [PROTOCOL_UDP]} # API Integer Ranges MIN_PORT_NUMBER = 1 MAX_PORT_NUMBER = 65535 DEFAULT_CONNECTION_LIMIT = -1 MIN_CONNECTION_LIMIT = -1 DEFAULT_WEIGHT = 1 MIN_WEIGHT = 0 
MAX_WEIGHT = 256 DEFAULT_MAX_RETRIES_DOWN = 3 MIN_HM_RETRIES = 1 MAX_HM_RETRIES = 10 # 24 days: days d h m ms MAX_TIMEOUT = 24 * 24 * 60 * 60 * 1000 MIN_TIMEOUT = 0 DEFAULT_TIMEOUT_CLIENT_DATA = 50000 DEFAULT_TIMEOUT_MEMBER_CONNECT = 5000 DEFAULT_TIMEOUT_MEMBER_DATA = 50000 DEFAULT_TIMEOUT_TCP_INSPECT = 0 MUTABLE_STATUSES = (lib_consts.ACTIVE,) DELETABLE_STATUSES = (lib_consts.ACTIVE, lib_consts.ERROR) FAILOVERABLE_STATUSES = (lib_consts.ACTIVE, lib_consts.ERROR) # Note: The database Amphora table has a foreign key constraint against # the provisioning_status table SUPPORTED_AMPHORA_STATUSES = ( lib_consts.AMPHORA_ALLOCATED, lib_consts.AMPHORA_BOOTING, lib_consts.ERROR, lib_consts.AMPHORA_READY, lib_consts.DELETED, lib_consts.PENDING_CREATE, lib_consts.PENDING_DELETE) AMPHORA_VM = 'VM' SUPPORTED_AMPHORA_TYPES = (AMPHORA_VM,) DISTINGUISHED_NAME_FIELD_REGEX = lib_consts.DISTINGUISHED_NAME_FIELD_REGEX # For redirect, only codes 301, 302, 303, 307 and 308 are # supported. SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES = [301, 302, 303, 307, 308] SUPPORTED_HTTP_VERSIONS = [1.0, 1.1] MIN_POLICY_POSITION = 1 # Largest a 32-bit integer can be, which is a limitation # here if you're using MySQL, as most probably are. This just needs # to be larger than any existing rule position numbers which will # definitely be the case with 2147483647 MAX_POLICY_POSITION = 2147483647 # Testing showed haproxy config failed to parse after more than # 53 rules per policy MAX_L7RULES_PER_L7POLICY = 50 # See RFCs 2616, 2965, 6265, 7230: Should match characters valid in an # HTTP header or cookie name. HTTP_HEADER_NAME_REGEX = r'\A[a-zA-Z0-9!#$%&\'*+-.^_`|~]+\Z' # See RFCs 2616, 2965, 6265: Should match characters valid in a cookie value. HTTP_COOKIE_VALUE_REGEX = r'\A[a-zA-Z0-9!#$%&\'()*+-./:<=>?@[\]^_`{|}~]+\Z' # See RFC 7230: Should match characters valid in a header value. HTTP_HEADER_VALUE_REGEX = (r'\A[a-zA-Z0-9' r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]+\Z') # Also in RFC 7230: Should match characters valid in a header value # when quoted with double quotes. HTTP_QUOTED_HEADER_VALUE_REGEX = (r'\A"[a-zA-Z0-9 \t' r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]*"\Z') DOMAIN_NAME_REGEX = ( r'^(?=.{1,253}\.?$)(?:(?!-|[^.]+_)[A-Za-z0-9-_]{1,63}(?<!-)(?:\.|$))+$') if listener.connection_limit and listener.connection_limit > -1: connection_limit_sum += listener.connection_limit else: connection_limit_sum += ( CONF.haproxy_amphora.default_connection_limit) # If there's a limit between 0 and MAX, set it, otherwise just set MAX if 0 < connection_limit_sum < constants.HAPROXY_MAX_MAXCONN: ret_value['global_connection_limit'] = connection_limit_sum else: ret_value['global_connection_limit'] = ( constants.HAPROXY_MAX_MAXCONN) return ret_value def _transform_amphora(self, amphora, feature_compatibility): """Transform an amphora into an object that will be processed by the templating system.
""" return { 'id': amphora.id, 'lb_network_ip': amphora.lb_network_ip, 'vrrp_ip': amphora.vrrp_ip, 'ha_ip': amphora.ha_ip, 'vrrp_port_id': amphora.vrrp_port_id, 'ha_port_id': amphora.ha_port_id, 'role': amphora.role, 'status': amphora.status, 'vrrp_interface': amphora.vrrp_interface, 'vrrp_priority': amphora.vrrp_priority } def _transform_listener(self, listener, tls_certs, feature_compatibility, loadbalancer): """Transforms a listener into an object that will be processed by the templating system """ ret_value = { 'id': listener.id, 'protocol_port': listener.protocol_port, 'protocol_mode': PROTOCOL_MAP[listener.protocol], 'protocol': listener.protocol, 'insert_headers': listener.insert_headers, 'enabled': listener.enabled, 'timeout_client_data': ( listener.timeout_client_data or CONF.haproxy_amphora.timeout_client_data), 'timeout_member_connect': ( listener.timeout_member_connect or CONF.haproxy_amphora.timeout_member_connect), 'timeout_member_data': ( listener.timeout_member_data or CONF.haproxy_amphora.timeout_member_data), 'timeout_tcp_inspect': (listener.timeout_tcp_inspect or CONF.haproxy_amphora.timeout_tcp_inspect), } if self.connection_logging: ret_value['user_log_format'] = ( self._format_log_string(loadbalancer, listener.protocol)) if listener.connection_limit and listener.connection_limit > -1: ret_value['connection_limit'] = listener.connection_limit else: ret_value['connection_limit'] = ( CONF.haproxy_amphora.default_connection_limit) if listener.tls_certificate_id: ret_value['crt_list_filename'] = os.path.join( CONF.haproxy_amphora.base_cert_dir, loadbalancer.id, '{}.pem'.format(listener.id)) if tls_certs is not None: if listener.client_ca_tls_certificate_id: ret_value['client_ca_tls_path'] = '%s' % ( os.path.join( self.base_crt_dir, loadbalancer.id, tls_certs[listener.client_ca_tls_certificate_id])) ret_value['client_auth'] = CLIENT_AUTH_MAP.get( listener.client_authentication) if listener.client_crl_container_id: ret_value['client_crl_path'] = '%s' % ( os.path.join(self.base_crt_dir, loadbalancer.id, tls_certs[listener.client_crl_container_id])) if (listener.protocol == constants.PROTOCOL_TERMINATED_HTTPS and listener.tls_ciphers is not None): ret_value['tls_ciphers'] = listener.tls_ciphers pools = [] pool_gen = (pool for pool in listener.pools if pool.provisioning_status != constants.PENDING_DELETE) for pool in pool_gen: kwargs = {} if tls_certs is not None and tls_certs.get(pool.id): kwargs = {'pool_tls_certs': tls_certs.get(pool.id)} pools.append(self._transform_pool( pool, feature_compatibility, **kwargs)) ret_value['pools'] = pools policy_gen = (policy for policy in listener.l7policies if policy.provisioning_status != constants.PENDING_DELETE) if listener.default_pool: for pool in pools: if pool['id'] == listener.default_pool.id: ret_value['default_pool'] = pool break l7policies = [self._transform_l7policy( x, feature_compatibility, tls_certs) for x in policy_gen] ret_value['l7policies'] = l7policies return ret_value def _transform_pool(self, pool, feature_compatibility, pool_tls_certs=None): """Transforms a pool into an object that will be processed by the templating system """ ret_value = { 'id': pool.id, 'protocol': PROTOCOL_MAP[pool.protocol], 'proxy_protocol': pool.protocol == constants.PROTOCOL_PROXY, 'lb_algorithm': BALANCE_MAP.get(pool.lb_algorithm, 'roundrobin'), 'members': [], 'health_monitor': '', 'session_persistence': '', 'enabled': pool.enabled, 'operating_status': pool.operating_status, 'stick_size': CONF.haproxy_amphora.haproxy_stick_size, 
constants.HTTP_REUSE: feature_compatibility.get( constants.HTTP_REUSE, False), 'ca_tls_path': '', 'crl_path': '', 'tls_enabled': pool.tls_enabled } members_gen = (mem for mem in pool.members if mem.provisioning_status != constants.PENDING_DELETE) members = [self._transform_member(x, feature_compatibility) for x in members_gen] ret_value['members'] = members health_mon = pool.health_monitor if (health_mon and health_mon.provisioning_status != constants.PENDING_DELETE): ret_value['health_monitor'] = self._transform_health_monitor( health_mon, feature_compatibility) if pool.session_persistence: ret_value[ 'session_persistence'] = self._transform_session_persistence( pool.session_persistence, feature_compatibility) if (pool.tls_certificate_id and pool_tls_certs and pool_tls_certs.get('client_cert')): ret_value['client_cert'] = pool_tls_certs.get('client_cert') if pool.tls_enabled is True and pool.tls_ciphers is not None: ret_value['tls_ciphers'] = pool.tls_ciphers if (pool.ca_tls_certificate_id and pool_tls_certs and pool_tls_certs.get('ca_cert')): ret_value['ca_cert'] = pool_tls_certs.get('ca_cert') if (pool.crl_container_id and pool_tls_certs and pool_tls_certs.get('crl')): ret_value['crl'] = pool_tls_certs.get('crl') return ret_value @staticmethod def _transform_session_persistence(persistence, feature_compatibility): """Transforms session persistence into an object that will be processed by the templating system """ return { 'type': persistence.type, 'cookie_name': persistence.cookie_name } @staticmethod def _transform_member(member, feature_compatibility): """Transforms a member into an object that will be processed by the templating system """ return { 'id': member.id, 'address': member.ip_address, 'protocol_port': member.protocol_port, 'weight': member.weight, 'enabled': member.enabled, 'subnet_id': member.subnet_id, 'operating_status': member.operating_status, 'monitor_address': member.monitor_address, 'monitor_port': member.monitor_port, 'backup': member.backup } def _transform_health_monitor(self, monitor, feature_compatibility): """Transforms a health monitor into an object that will be processed by the templating system """ codes = None if monitor.expected_codes: codes = '|'.join(octavia_utils.expand_expected_codes( monitor.expected_codes)) return { 'id': monitor.id, 'type': monitor.type, 'delay': monitor.delay, 'timeout': monitor.timeout, 'fall_threshold': monitor.fall_threshold, 'rise_threshold': monitor.rise_threshold, 'http_method': monitor.http_method, 'url_path': monitor.url_path, 'expected_codes': codes, 'enabled': monitor.enabled, 'http_version': monitor.http_version, 'domain_name': monitor.domain_name, } def _transform_l7policy(self, l7policy, feature_compatibility, tls_certs=None): """Transforms an L7 policy into an object that will be processed by the templating system """ ret_value = { 'id': l7policy.id, 'action': l7policy.action, 'redirect_url': l7policy.redirect_url, 'redirect_prefix': l7policy.redirect_prefix, 'enabled': l7policy.enabled } if (l7policy.redirect_pool and l7policy.redirect_pool.provisioning_status != constants.PENDING_DELETE): kwargs = {} if tls_certs is not None and tls_certs.get( l7policy.redirect_pool.id): kwargs = {'pool_tls_certs': tls_certs.get(l7policy.redirect_pool.id)} ret_value['redirect_pool'] = self._transform_pool( l7policy.redirect_pool, feature_compatibility, **kwargs) else: ret_value['redirect_pool'] = None if (l7policy.action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL, constants.L7POLICY_ACTION_REDIRECT_PREFIX] and 
l7policy.redirect_http_code): ret_value['redirect_http_code'] = l7policy.redirect_http_code else: ret_value['redirect_http_code'] = None rule_gen = (rule for rule in l7policy.l7rules if rule.enabled and rule.provisioning_status != constants.PENDING_DELETE) l7rules = [self._transform_l7rule(x, feature_compatibility) for x in rule_gen] ret_value['l7rules'] = l7rules return ret_value def _transform_l7rule(self, l7rule, feature_compatibility): """Transforms an L7 rule into an object that will be processed by the templating system """ return { 'id': l7rule.id, 'type': l7rule.type, 'compare_type': l7rule.compare_type, 'key': l7rule.key, 'value': self._escape_haproxy_config_string(l7rule.value), 'invert': l7rule.invert, 'enabled': l7rule.enabled } @staticmethod def _escape_haproxy_config_string(value): """Escapes certain characters in a given string such that haproxy will parse the string as a single value """ # Escape backslashes first value = re.sub(r'\\', r'\\\\', value) # Spaces next value = re.sub(' ', '\\ ', value) return value
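# Illustrative sketch (hypothetical values, added for clarity): the
# escaping above doubles backslashes first and then escapes spaces, so
# haproxy parses the result as a single token, e.g.:
#
#     _escape_haproxy_config_string(r'a b\c')  # returns r'a\ b\\c'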
octavia-6.2.2/octavia/common/jinja/haproxy/combined_listeners/templates/base.j2
{# Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # #} # Configuration for loadbalancer {{ loadbalancer_id }} global daemon user nobody log {{ log_http | default('/run/rsyslog/octavia/log', true)}} local{{ user_log_facility }} log {{ log_server | default('/run/rsyslog/octavia/log', true)}} local{{ administrative_log_facility }} notice stats socket {{ sock_path }} mode 0666 level user {% if loadbalancer.global_connection_limit is defined %} maxconn {{ loadbalancer.global_connection_limit }} {% endif %} {% set found_ns = namespace(found=false) %} {% for listener in loadbalancer.listeners if listener.enabled %} {% for pool in listener.pools if pool.enabled %} {% if pool.health_monitor and pool.health_monitor.enabled and pool.health_monitor.type == constants.HEALTH_MONITOR_PING and found_ns.found == false %} {% set found_ns.found = true %} external-check {% endif %} {% endfor %} {% endfor %} defaults {% if connection_logging %} log global {% else %} no log {% endif %} retries 3 option redispatch option splice-request option splice-response option http-keep-alive {% block peers %}{% endblock peers %} {% block proxies %}{% endblock proxies %}
octavia-6.2.2/octavia/common/jinja/haproxy/combined_listeners/templates/haproxy.cfg.j2
{# Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # #} {% extends 'base.j2' %} {% from 'macros.j2' import frontend_macro, backend_macro %} {% from 'macros.j2' import peers_macro %} {% set loadbalancer_id = loadbalancer.id %} {% set sock_path = stats_sock %} {% block peers %} {{ peers_macro(constants, loadbalancer) }} {% endblock peers %} {% block proxies %} {% if loadbalancer.enabled %} {% for listener in loadbalancer.listeners if listener.enabled %} {{- frontend_macro(constants, listener, loadbalancer.vip_address) }} {% for pool in listener.pools if pool.enabled %} {{- backend_macro(constants, listener, pool, loadbalancer) }} {% endfor %} {% endfor %} {% endif %} {% endblock proxies %}
octavia-6.2.2/octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2
{# Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
# #} {% macro peers_macro(constants, loadbalancer) %} {% if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} peers {{ "%s_peers"|format(loadbalancer.id.replace("-", ""))|trim() }} {% for amp in loadbalancer.amphorae if ( amp.status == constants.AMPHORA_ALLOCATED) %} {# HAProxy has peer name limitations, thus the hash filter #} peer {{ amp.id|hash_amp_id|replace('=', '') }} {{ amp.vrrp_ip }}:{{ constants.HAPROXY_BASE_PEER_PORT }} {% endfor %} {% endif %} {% endmacro %} {% macro bind_macro(constants, listener, lb_vip_address) %} {% if listener.crt_list_filename is defined %} {% set def_crt_opt = ("ssl crt-list %s"|format( listener.crt_list_filename)|trim()) %} {% else %} {% set def_crt_opt = "" %} {% endif %} {% if listener.client_ca_tls_path and listener.client_auth %} {% set client_ca_opt = "ca-file %s verify %s"|format(listener.client_ca_tls_path, listener.client_auth)|trim() %} {% else %} {% set client_ca_opt = "" %} {% endif %} {% if listener.client_crl_path and listener.client_ca_tls_path %} {% set ca_crl_opt = "crl-file %s"|format(listener.client_crl_path)|trim() %} {% else %} {% set ca_crl_opt = "" %} {% endif %} {% if listener.tls_ciphers is defined %} {% set ciphers_opt = "ciphers %s"|format(listener.tls_ciphers)|trim() %} {% else %} {% set ciphers_opt = "" %} {% endif %} bind {{ lb_vip_address }}:{{ listener.protocol_port }} {{ "%s %s %s %s"|format(def_crt_opt, client_ca_opt, ca_crl_opt, ciphers_opt)|trim() }} {% endmacro %} {% macro l7rule_compare_type_macro(constants, ctype) %} {% if ctype == constants.L7RULE_COMPARE_TYPE_REGEX %} {{- "-m reg" -}} {% elif ctype == constants.L7RULE_COMPARE_TYPE_STARTS_WITH %} {{- "-m beg" -}} {% elif ctype == constants.L7RULE_COMPARE_TYPE_ENDS_WITH %} {{- "-m end" -}} {% elif ctype == constants.L7RULE_COMPARE_TYPE_CONTAINS %} {{- "-m sub" -}} {% elif ctype == constants.L7RULE_COMPARE_TYPE_EQUAL_TO %} {{- "-m str" -}} {% endif %} {% endmacro %} {% macro l7rule_macro(constants, l7rule) %} {% if l7rule.type == constants.L7RULE_TYPE_HOST_NAME %} acl {{ l7rule.id }} req.hdr(host) -i {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_PATH %} acl {{ l7rule.id }} path {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_FILE_TYPE %} acl {{ l7rule.id }} path_end {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_HEADER %} acl {{ l7rule.id }} req.hdr({{ l7rule.key }}) {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_COOKIE %} acl {{ l7rule.id }} req.cook({{ l7rule.key }}) {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_SSL_CONN_HAS_CERT %} acl {{ l7rule.id }} ssl_c_used {% elif l7rule.type == constants.L7RULE_TYPE_SSL_VERIFY_RESULT %} acl {{ l7rule.id }} ssl_c_verify eq {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_SSL_DN_FIELD %} acl {{ l7rule.id }} ssl_c_s_dn({{ l7rule.key }}) {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% endif %} {% endmacro %} {% macro l7rule_invert_macro(invert) %} {% if invert %} {{- "!" 
-}} {% endif %} {% endmacro %} {% macro l7rule_list_macro(l7policy) %} {% for l7rule in l7policy.l7rules %} {{- " " -}}{{- l7rule_invert_macro(l7rule.invert) -}}{{- l7rule.id -}} {% endfor %} {% endmacro %} {% macro l7policy_macro(constants, l7policy, listener) %} {% for l7rule in l7policy.l7rules %} {{- l7rule_macro(constants, l7rule) -}} {% endfor %} {% if l7policy.redirect_http_code %} {% set redirect_http_code_opt = " code %s"|format( l7policy.redirect_http_code) %} {% else %} {% set redirect_http_code_opt = "" %} {% endif %} {% if l7policy.action == constants.L7POLICY_ACTION_REJECT %} http-request deny if{{ l7rule_list_macro(l7policy) }} {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_TO_URL %} redirect {{- redirect_http_code_opt }} location {{ l7policy.redirect_url }} if{{ l7rule_list_macro(l7policy) }} {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_TO_POOL and l7policy.redirect_pool.enabled %} use_backend {{ l7policy.redirect_pool.id }}:{{ listener.id }} if{{ l7rule_list_macro(l7policy) }} {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_PREFIX %} redirect {{- redirect_http_code_opt }} prefix {{ l7policy.redirect_prefix }} if{{ l7rule_list_macro(l7policy) }} {% endif %} {% endmacro %} {% macro frontend_macro(constants, listener, lb_vip_address) %} frontend {{ listener.id }} {% if listener.connection_limit is defined %} maxconn {{ listener.connection_limit }} {% endif %} {% if (listener.protocol.lower() == constants.PROTOCOL_TERMINATED_HTTPS.lower()) %} redirect scheme https if !{ ssl_fc } {% endif %} {{ bind_macro(constants, listener, lb_vip_address)|trim() }} mode {{ listener.protocol_mode }} {% for l7policy in listener.l7policies if (l7policy.enabled and l7policy.l7rules|length > 0) %} {{- l7policy_macro(constants, l7policy, listener) -}} {% endfor %} {% if listener.default_pool and listener.default_pool.enabled %} default_backend {{ listener.default_pool.id }}:{{ listener.id }} {% endif %} timeout client {{ listener.timeout_client_data }} {% if listener.user_log_format is defined %} log-format {{ listener.user_log_format }} {% endif %} {% if listener.timeout_tcp_inspect %} tcp-request inspect-delay {{ listener.timeout_tcp_inspect }} {% endif %} {% endmacro %} {% macro member_macro(constants, pool, member) %} {% if pool.health_monitor and pool.health_monitor.enabled %} {% if member.monitor_address %} {% set monitor_addr_opt = " addr %s"|format(member.monitor_address) %} {% else %} {% set monitor_addr_opt = "" %} {% endif %} {% if member.monitor_port %} {% set monitor_port_opt = " port %s"|format(member.monitor_port) %} {% else %} {% set monitor_port_opt = "" %} {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %} {% set monitor_ssl_opt = " check-ssl verify none" %} {% else %} {% set monitor_ssl_opt = "" %} {% endif %} {% set hm_opt = " check%s inter %ds fall %d rise %d%s%s"|format( monitor_ssl_opt, pool.health_monitor.delay, pool.health_monitor.fall_threshold, pool.health_monitor.rise_threshold, monitor_addr_opt, monitor_port_opt) %} {% else %} {% set hm_opt = "" %} {% endif %} {% if (pool.session_persistence.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE) %} {% set persistence_opt = " cookie %s"|format(member.id) %} {% else %} {% set persistence_opt = "" %} {% endif %} {% if pool.proxy_protocol %} {% set proxy_protocol_opt = " send-proxy" %} {% else %} {% set proxy_protocol_opt = "" %} {% endif %} {% if member.backup %} {% set member_backup_opt = " backup" %} {% else %} {% set member_backup_opt = "" %} {% 
endif %} {% if member.enabled %} {% set member_enabled_opt = "" %} {% else %} {% set member_enabled_opt = " disabled" %} {% endif %} {% if pool.tls_enabled %} {% set def_opt_prefix = " ssl" %} {% set def_sni_opt = " sni ssl_fc_sni" %} {% else %} {% set def_opt_prefix = "" %} {% set def_sni_opt = "" %} {% endif %} {% if pool.client_cert and pool.tls_enabled %} {% set def_crt_opt = " crt %s"|format(pool.client_cert) %} {% else %} {% set def_crt_opt = "" %} {% endif %} {% if pool.ca_cert and pool.tls_enabled %} {% set ca_opt = " ca-file %s"|format(pool.ca_cert) %} {% set def_verify_opt = " verify required" %} {% if pool.crl %} {% set crl_opt = " crl-file %s"|format(pool.crl) %} {% else %} {% set def_verify_opt = "" %} {% endif %} {% elif pool.tls_enabled %} {% set def_verify_opt = " verify none" %} {% endif %} {% if pool.tls_ciphers is defined %} {% set ciphers_opt = " ciphers %s"|format(pool.tls_ciphers) %} {% else %} {% set ciphers_opt = "" %} {% endif %} {{ "server %s %s:%d weight %s%s%s%s%s%s%s%s%s%s%s%s%s"|e|format( member.id, member.address, member.protocol_port, member.weight, hm_opt, persistence_opt, proxy_protocol_opt, member_backup_opt, member_enabled_opt, def_opt_prefix, def_crt_opt, ca_opt, crl_opt, def_verify_opt, def_sni_opt, ciphers_opt)|trim() }} {% endmacro %} {% macro backend_macro(constants, listener, pool, loadbalancer) %} backend {{ pool.id }}:{{ listener.id }} {% if pool.protocol.lower() == constants.PROTOCOL_PROXY.lower() %} mode {{ listener.protocol_mode }} {% else %} mode {{ pool.protocol }} {% endif %} {% if pool.get(constants.HTTP_REUSE, False) and ( pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() or (pool.protocol.lower() == constants.PROTOCOL_PROXY.lower() and listener.protocol_mode.lower() == constants.PROTOCOL_HTTP.lower()))%} http-reuse safe {% endif %} balance {{ pool.lb_algorithm }} {% if pool.session_persistence %} {% if (pool.session_persistence.type == constants.SESSION_PERSISTENCE_SOURCE_IP) %} {% if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} stick-table type ip size {{ pool.stick_size }} peers {{ "%s_peers"|format(loadbalancer.id.replace("-", ""))|trim() }} {% else %} stick-table type ip size {{ pool.stick_size }} {% endif %} stick on src {% elif (pool.session_persistence.type == constants.SESSION_PERSISTENCE_APP_COOKIE) %} {% if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} stick-table type string len 64 size {{ pool.stick_size }} peers {{ "%s_peers"|format(loadbalancer.id.replace("-", ""))|trim() }} {% else %} stick-table type string len 64 size {{ pool.stick_size }} {% endif %} stick store-response res.cook({{ pool.session_persistence.cookie_name }}) stick match req.cook({{ pool.session_persistence.cookie_name }}) {% elif (pool.session_persistence.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE) %} cookie SRV insert indirect nocache {% endif %} {% endif %} {% if pool.health_monitor and pool.health_monitor.enabled %} timeout check {{ pool.health_monitor.timeout }}s {% if (pool.health_monitor.type == constants.HEALTH_MONITOR_HTTP or pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS) %} {% if (pool.health_monitor.http_version and pool.health_monitor.http_version == 1.1 and pool.health_monitor.domain_name) %} option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }} HTTP/ {{- pool.health_monitor.http_version -}}{{- "\\r\\n" | safe -}} Host:\ {{ pool.health_monitor.domain_name }} {% elif pool.health_monitor.http_version %} option httpchk {{ pool.health_monitor.http_method 
}} {{ pool.health_monitor.url_path }} HTTP/ {{- pool.health_monitor.http_version -}}{{- "\\r\\n" | safe }} {% else %} option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }} {% endif %} http-check expect rstatus {{ pool.health_monitor.expected_codes }} {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_TLS_HELLO %} option ssl-hello-chk {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_PING %} option external-check external-check command /var/lib/octavia/ping-wrapper.sh {% endif %} {% endif %} {% if pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() %} {% if listener.insert_headers.get('X-Forwarded-For', 'False').lower() == 'true' %} option forwardfor {% endif %} {% if listener.insert_headers.get('X-Forwarded-Port', 'False').lower() == 'true' %} http-request set-header X-Forwarded-Port %[dst_port] {% endif %} {% endif %} {% if listener.insert_headers.get('X-Forwarded-Proto', 'False').lower() == 'true' %} {% if listener.protocol.lower() == constants.PROTOCOL_HTTP.lower() %} http-request set-header X-Forwarded-Proto http {% elif listener.protocol.lower() == constants.PROTOCOL_TERMINATED_HTTPS.lower() %} http-request set-header X-Forwarded-Proto https {% endif %} {% endif %} {% if listener.protocol.lower() == constants.PROTOCOL_TERMINATED_HTTPS.lower() %} {% if listener.insert_headers.get('X-SSL-Client-Verify', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-Verify %[ssl_c_verify] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-Has-Cert', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-Has-Cert %[ssl_c_used] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-DN', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-DN %{+Q}[ssl_c_s_dn] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-CN', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-CN %{+Q}[ssl_c_s_dn(cn)] {% endif %} {% if listener.insert_headers.get('X-SSL-Issuer', 'False').lower() == 'true' %} http-request set-header X-SSL-Issuer %{+Q}[ssl_c_i_dn] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-SHA1', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-SHA1 %{+Q}[ssl_c_sha1,hex] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-Not-Before', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-Not-Before %{+Q}[ssl_c_notbefore] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-Not-After', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-Not-After %{+Q}[ssl_c_notafter] {% endif %} {% endif %} {% if listener.connection_limit is defined %} fullconn {{ listener.connection_limit }} {% endif %} option allbackups timeout connect {{ listener.timeout_member_connect }} timeout server {{ listener.timeout_member_data }} {% for member in pool.members %} {{- member_macro(constants, pool, member) -}} {% endfor %} {% endmacro %}
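{# Illustrative sketch (hypothetical IDs and addresses, added for clarity):
 # for a member with weight 1 in a pool that has an HTTP health monitor
 # (delay 5, fall 3, rise 2) and HTTP cookie session persistence,
 # member_macro above renders a server line along these lines:
 #
 #   server <member-id> 192.0.2.10:80 weight 1 check inter 5s fall 3 rise 2 cookie <member-id>
 #}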
octavia-6.2.2/octavia/common/jinja/haproxy/split_listeners/__init__.py
octavia-6.2.2/octavia/common/jinja/haproxy/split_listeners/jinja_cfg.py
# Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import jinja2 from octavia.common.config import cfg from octavia.common import constants from octavia.common import utils as octavia_utils PROTOCOL_MAP = { constants.PROTOCOL_TCP: 'tcp', constants.PROTOCOL_HTTP: 'http', constants.PROTOCOL_HTTPS: 'tcp', constants.PROTOCOL_PROXY: 'proxy', constants.PROTOCOL_TERMINATED_HTTPS: 'http' } BALANCE_MAP = { constants.LB_ALGORITHM_ROUND_ROBIN: 'roundrobin', constants.LB_ALGORITHM_LEAST_CONNECTIONS: 'leastconn', constants.LB_ALGORITHM_SOURCE_IP: 'source' } CLIENT_AUTH_MAP = {constants.CLIENT_AUTH_NONE: 'none', constants.CLIENT_AUTH_OPTIONAL: 'optional', constants.CLIENT_AUTH_MANDATORY: 'required'} ACTIVE_PENDING_STATUSES = constants.SUPPORTED_PROVISIONING_STATUSES + ( constants.DEGRADED,) BASE_PATH = '/var/lib/octavia' BASE_CRT_DIR = BASE_PATH + '/certs' HAPROXY_TEMPLATE = os.path.abspath( os.path.join(os.path.dirname(__file__), 'templates/haproxy.cfg.j2')) CONF = cfg.CONF JINJA_ENV = None class JinjaTemplater(object): def __init__(self, base_amp_path=None, base_crt_dir=None, haproxy_template=None, log_http=None, log_server=None, connection_logging=True): """HaProxy configuration generation :param base_amp_path: Base path for amphora data :param base_crt_dir: Base directory for certificate storage :param haproxy_template: Absolute path to Jinja template :param log_http: Haproxy HTTP logging path :param log_server: Haproxy Server logging path :param connection_logging: enable logging connections in haproxy """ self.base_amp_path = base_amp_path or BASE_PATH self.base_crt_dir = base_crt_dir or BASE_CRT_DIR self.haproxy_template = haproxy_template or HAPROXY_TEMPLATE self.log_http = log_http self.log_server = log_server self.connection_logging = connection_logging def build_config(self, host_amphora, listener, haproxy_versions, socket_path=None, client_ca_filename=None, client_crl=None, pool_tls_certs=None): """Convert a logical configuration to the HAProxy version :param host_amphora: The Amphora this configuration is hosted on :param listener: The listener configuration :param socket_path: The socket path for Haproxy process :return: Rendered configuration """ # Check for any backward compatibility items we need to handle. # This is done here for upgrade scenarios where one amp in a # pair might be running an older amphora version. feature_compatibility = {} # Is it newer than haproxy 1.5?
if not (int(haproxy_versions[0]) < 2 and int(haproxy_versions[1]) < 6): feature_compatibility[constants.HTTP_REUSE] = True return self.render_loadbalancer_obj( host_amphora, listener, socket_path=socket_path, feature_compatibility=feature_compatibility, client_ca_filename=client_ca_filename, client_crl=client_crl, pool_tls_certs=pool_tls_certs) def _get_template(self): """Returns the specified Jinja configuration template.""" global JINJA_ENV if not JINJA_ENV: template_loader = jinja2.FileSystemLoader( searchpath=os.path.dirname(self.haproxy_template)) JINJA_ENV = jinja2.Environment( autoescape=True, loader=template_loader, trim_blocks=True, lstrip_blocks=True) JINJA_ENV.filters['hash_amp_id'] = octavia_utils.base64_sha1_string return JINJA_ENV.get_template(os.path.basename(self.haproxy_template)) def _format_log_string(self, load_balancer, protocol): log_format = CONF.haproxy_amphora.user_log_format.replace( '{{ project_id }}', load_balancer.project_id) log_format = log_format.replace('{{ lb_id }}', load_balancer.id) # Order of these filters matter. # TODO(johnsom) Remove when HAProxy handles the format string # with HTTP variables in TCP listeners. # Currently it either throws an error or just fails # to log the message. if protocol not in constants.HAPROXY_HTTP_PROTOCOLS: log_format = log_format.replace('%{+Q}r', '-') log_format = log_format.replace('%r', '-') log_format = log_format.replace('%{+Q}ST', '-') log_format = log_format.replace('%ST', '-') log_format = log_format.replace(' ', '\\ ') return log_format def render_loadbalancer_obj(self, host_amphora, listener, socket_path=None, feature_compatibility=None, client_ca_filename=None, client_crl=None, pool_tls_certs=None): """Renders a templated configuration from a load balancer object :param host_amphora: The Amphora this configuration is hosted on :param listener: The listener configuration :param client_ca_filename: The CA certificate for client authorization :param socket_path: The socket path for Haproxy process :return: Rendered configuration """ feature_compatibility = feature_compatibility or {} loadbalancer = self._transform_loadbalancer( host_amphora, listener.load_balancer, listener, feature_compatibility, client_ca_filename=client_ca_filename, client_crl=client_crl, pool_tls_certs=pool_tls_certs) if not socket_path: socket_path = '%s/%s.sock' % (self.base_amp_path, listener.id) return self._get_template().render( {'loadbalancer': loadbalancer, 'stats_sock': socket_path, 'log_http': self.log_http, 'log_server': self.log_server, 'administrative_log_facility': CONF.amphora_agent.administrative_log_facility, 'user_log_facility': CONF.amphora_agent.user_log_facility, 'connection_logging': self.connection_logging}, constants=constants) def _transform_loadbalancer(self, host_amphora, loadbalancer, listener, feature_compatibility, client_ca_filename=None, client_crl=None, pool_tls_certs=None): """Transforms a load balancer into an object that will be processed by the templating system """ t_listener = self._transform_listener( listener, feature_compatibility, loadbalancer, client_ca_filename=client_ca_filename, client_crl=client_crl, pool_tls_certs=pool_tls_certs) ret_value = { 'id': loadbalancer.id, 'vip_address': loadbalancer.vip.ip_address, 'listener': t_listener, 'topology': loadbalancer.topology, 'enabled': loadbalancer.enabled, 'host_amphora': self._transform_amphora( host_amphora, feature_compatibility) } # NOTE(sbalukoff): Global connection limit should be a sum of all # listeners' connection limits. 
Since Octavia presently supports # just one listener per haproxy process, this makes determining # the global value trivial. if listener.connection_limit and listener.connection_limit > -1: ret_value['global_connection_limit'] = listener.connection_limit else: ret_value['global_connection_limit'] = ( constants.HAPROXY_MAX_MAXCONN) return ret_value def _transform_amphora(self, amphora, feature_compatibility): """Transform an amphora into an object that will be processed by the templating system. """ return { 'id': amphora.id, 'lb_network_ip': amphora.lb_network_ip, 'vrrp_ip': amphora.vrrp_ip, 'ha_ip': amphora.ha_ip, 'vrrp_port_id': amphora.vrrp_port_id, 'ha_port_id': amphora.ha_port_id, 'role': amphora.role, 'status': amphora.status, 'vrrp_interface': amphora.vrrp_interface, 'vrrp_priority': amphora.vrrp_priority } def _transform_listener(self, listener, feature_compatibility, loadbalancer, client_ca_filename=None, client_crl=None, pool_tls_certs=None): """Transforms a listener into an object that will be processed by the templating system """ ret_value = { 'id': listener.id, 'protocol_port': listener.protocol_port, 'protocol_mode': PROTOCOL_MAP[listener.protocol], 'protocol': listener.protocol, 'peer_port': listener.peer_port, 'insert_headers': listener.insert_headers, 'topology': listener.load_balancer.topology, 'amphorae': listener.load_balancer.amphorae, 'enabled': listener.enabled, 'timeout_client_data': ( listener.timeout_client_data or CONF.haproxy_amphora.timeout_client_data), 'timeout_member_connect': ( listener.timeout_member_connect or CONF.haproxy_amphora.timeout_member_connect), 'timeout_member_data': ( listener.timeout_member_data or CONF.haproxy_amphora.timeout_member_data), 'timeout_tcp_inspect': (listener.timeout_tcp_inspect or CONF.haproxy_amphora.timeout_tcp_inspect), } if self.connection_logging: ret_value['user_log_format'] = ( self._format_log_string(loadbalancer, listener.protocol)) if listener.connection_limit and listener.connection_limit > -1: ret_value['connection_limit'] = listener.connection_limit else: ret_value['connection_limit'] = constants.HAPROXY_MAX_MAXCONN if listener.tls_certificate_id: ret_value['crt_list_filename'] = os.path.join( CONF.haproxy_amphora.base_cert_dir, listener.id, '{}.pem'.format(listener.id)) if listener.client_ca_tls_certificate_id: ret_value['client_ca_tls_path'] = '%s' % ( os.path.join(self.base_crt_dir, listener.id, client_ca_filename)) ret_value['client_auth'] = CLIENT_AUTH_MAP.get( listener.client_authentication) if listener.client_crl_container_id: ret_value['client_crl_path'] = '%s' % ( os.path.join(self.base_crt_dir, listener.id, client_crl)) if (listener.default_pool and listener.default_pool.provisioning_status != constants.PENDING_DELETE): kwargs = {} if pool_tls_certs and pool_tls_certs.get(listener.default_pool.id): kwargs = {'pool_tls_certs': pool_tls_certs.get( listener.default_pool.id)} ret_value['default_pool'] = self._transform_pool( listener.default_pool, feature_compatibility, **kwargs) pools = [] pool_gen = (pool for pool in listener.pools if pool.provisioning_status != constants.PENDING_DELETE) for x in pool_gen: kwargs = {} if pool_tls_certs and pool_tls_certs.get(x.id): kwargs = {'pool_tls_certs': pool_tls_certs.get(x.id)} pools.append(self._transform_pool( x, feature_compatibility, **kwargs)) ret_value['pools'] = pools policy_gen = (policy for policy in listener.l7policies if policy.provisioning_status != constants.PENDING_DELETE) l7policies = [self._transform_l7policy( x, feature_compatibility, 
pool_tls_certs) for x in policy_gen] ret_value['l7policies'] = l7policies return ret_value def _transform_pool(self, pool, feature_compatibility, pool_tls_certs=None): """Transforms a pool into an object that will be processed by the templating system """ ret_value = { 'id': pool.id, 'protocol': PROTOCOL_MAP[pool.protocol], 'lb_algorithm': BALANCE_MAP.get(pool.lb_algorithm, 'roundrobin'), 'members': [], 'health_monitor': '', 'session_persistence': '', 'enabled': pool.enabled, 'operating_status': pool.operating_status, 'stick_size': CONF.haproxy_amphora.haproxy_stick_size, constants.HTTP_REUSE: feature_compatibility.get( constants.HTTP_REUSE, False), 'ca_tls_path': '', 'crl_path': '', 'tls_enabled': pool.tls_enabled } members_gen = (mem for mem in pool.members if mem.provisioning_status != constants.PENDING_DELETE) members = [self._transform_member(x, feature_compatibility) for x in members_gen] ret_value['members'] = members health_mon = pool.health_monitor if (health_mon and health_mon.provisioning_status != constants.PENDING_DELETE): ret_value['health_monitor'] = self._transform_health_monitor( health_mon, feature_compatibility) if pool.session_persistence: ret_value[ 'session_persistence'] = self._transform_session_persistence( pool.session_persistence, feature_compatibility) if (pool.tls_certificate_id and pool_tls_certs and pool_tls_certs.get('client_cert')): ret_value['client_cert'] = pool_tls_certs.get('client_cert') if (pool.ca_tls_certificate_id and pool_tls_certs and pool_tls_certs.get('ca_cert')): ret_value['ca_cert'] = pool_tls_certs.get('ca_cert') if (pool.crl_container_id and pool_tls_certs and pool_tls_certs.get('crl')): ret_value['crl'] = pool_tls_certs.get('crl') return ret_value @staticmethod def _transform_session_persistence(persistence, feature_compatibility): """Transforms session persistence into an object that will be processed by the templating system """ return { 'type': persistence.type, 'cookie_name': persistence.cookie_name } @staticmethod def _transform_member(member, feature_compatibility): """Transforms a member into an object that will be processed by the templating system """ return { 'id': member.id, 'address': member.ip_address, 'protocol_port': member.protocol_port, 'weight': member.weight, 'enabled': member.enabled, 'subnet_id': member.subnet_id, 'operating_status': member.operating_status, 'monitor_address': member.monitor_address, 'monitor_port': member.monitor_port, 'backup': member.backup } def _transform_health_monitor(self, monitor, feature_compatibility): """Transforms a health monitor into an object that will be processed by the templating system """ codes = None if monitor.expected_codes: codes = '|'.join(octavia_utils.expand_expected_codes( monitor.expected_codes)) return { 'id': monitor.id, 'type': monitor.type, 'delay': monitor.delay, 'timeout': monitor.timeout, 'fall_threshold': monitor.fall_threshold, 'rise_threshold': monitor.rise_threshold, 'http_method': monitor.http_method, 'url_path': monitor.url_path, 'expected_codes': codes, 'enabled': monitor.enabled, 'http_version': monitor.http_version, 'domain_name': monitor.domain_name, } def _transform_l7policy(self, l7policy, feature_compatibility, pool_tls_certs=None): """Transforms an L7 policy into an object that will be processed by the templating system """ ret_value = { 'id': l7policy.id, 'action': l7policy.action, 'redirect_url': l7policy.redirect_url, 'redirect_prefix': l7policy.redirect_prefix, 'enabled': l7policy.enabled } if (l7policy.redirect_pool and 
l7policy.redirect_pool.provisioning_status != constants.PENDING_DELETE): kwargs = {} if pool_tls_certs and pool_tls_certs.get( l7policy.redirect_pool.id): kwargs = {'pool_tls_certs': pool_tls_certs.get(l7policy.redirect_pool.id)} ret_value['redirect_pool'] = self._transform_pool( l7policy.redirect_pool, feature_compatibility, **kwargs) else: ret_value['redirect_pool'] = None if (l7policy.action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL, constants.L7POLICY_ACTION_REDIRECT_PREFIX] and l7policy.redirect_http_code): ret_value['redirect_http_code'] = l7policy.redirect_http_code else: ret_value['redirect_http_code'] = None rule_gen = (rule for rule in l7policy.l7rules if rule.enabled and rule.provisioning_status != constants.PENDING_DELETE) l7rules = [self._transform_l7rule(x, feature_compatibility) for x in rule_gen] ret_value['l7rules'] = l7rules return ret_value def _transform_l7rule(self, l7rule, feature_compatibility): """Transforms an L7 rule into an object that will be processed by the templating system """ return { 'id': l7rule.id, 'type': l7rule.type, 'compare_type': l7rule.compare_type, 'key': l7rule.key, 'value': self._escape_haproxy_config_string(l7rule.value), 'invert': l7rule.invert, 'enabled': l7rule.enabled } @staticmethod def _escape_haproxy_config_string(value): """Escapes certain characters in a given string such that haproxy will parse the string as a single value """ # Escape backslashes first value = re.sub(r'\\', r'\\\\', value) # Spaces next value = re.sub(' ', '\\ ', value) return value ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/common/jinja/haproxy/split_listeners/templates/0000775000175000017500000000000000000000000026116 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/haproxy/split_listeners/templates/base.j20000664000175000017500000000340100000000000027263 0ustar00zuulzuul00000000000000{# Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
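A quick illustration of the escaping helper above; the input values are made up, but the transformation follows directly from the two re.sub() calls (backslashes are doubled first, then spaces are escaped, so haproxy parses the result as a single token):

import re

def escape_haproxy_config_string(value):
    # Same two substitutions as _escape_haproxy_config_string above.
    value = re.sub(r'\\', r'\\\\', value)  # '\' -> '\\'
    value = re.sub(' ', '\\ ', value)      # ' ' -> '\ '
    return value

assert escape_haproxy_config_string('foo bar') == 'foo\\ bar'
assert escape_haproxy_config_string('a\\b') == 'a\\\\b'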
# #} # Configuration for loadbalancer {{ loadbalancer_id }} global daemon user nobody log {{ log_http | default('/run/rsyslog/octavia/log', true)}} local{{ user_log_facility }} log {{ log_server | default('/run/rsyslog/octavia/log', true)}} local{{ administrative_log_facility }} notice stats socket {{ sock_path }} mode 0666 level user {% if loadbalancer.global_connection_limit is defined %} maxconn {{ loadbalancer.global_connection_limit }} {% endif %} {% set found_ns = namespace(found=false) %} {% for pool in loadbalancer.listener.pools if pool.enabled %} {% if pool.health_monitor and pool.health_monitor.enabled and pool.health_monitor.type == constants.HEALTH_MONITOR_PING and found_ns.found == false %} {% set found_ns.found = true %} external-check {% endif %} {% endfor %} defaults {% if connection_logging %} log global {% else %} no log {% endif %} retries 3 option redispatch option splice-request option splice-response option http-keep-alive {% block peers %}{% endblock peers %} {% block proxies %}{% endblock proxies %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/haproxy/split_listeners/templates/haproxy.cfg.j20000664000175000017500000000245300000000000030607 0ustar00zuulzuul00000000000000{# Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # #} {% extends 'base.j2' %} {% from 'macros.j2' import frontend_macro, backend_macro %} {% from 'macros.j2' import peers_macro %} {% set loadbalancer_id = loadbalancer.id %} {% set sock_path = stats_sock %} {% block peers %} {{ peers_macro(constants, loadbalancer.listener) }} {% endblock peers %} {% block proxies %} {% if loadbalancer.enabled and loadbalancer.listener.enabled %} {{- frontend_macro(constants, loadbalancer.listener, loadbalancer.vip_address) }} {% for pool in loadbalancer.listener.pools if pool.enabled %} {{- backend_macro(constants, loadbalancer.listener, pool) }} {% endfor %} {% endif %} {% endblock proxies %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/haproxy/split_listeners/templates/macros.j20000664000175000017500000003745500000000000027655 0ustar00zuulzuul00000000000000{# Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
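As a sanity check on base.j2 and haproxy.cfg.j2 above, a sketch of the global/defaults output they render for a single enabled listener (socket path, facility numbers, and the maxconn value are illustrative assumptions):

# Configuration for loadbalancer <loadbalancer-id>
global
    daemon
    user nobody
    log /run/rsyslog/octavia/log local0
    log /run/rsyslog/octavia/log local1 notice
    stats socket /var/lib/octavia/<listener-id>.sock mode 0666 level user
    maxconn 50000

defaults
    log global
    retries 3
    option redispatch
    option splice-request
    option splice-response
    option http-keep-alive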
# #} {% macro peers_macro(constants,listener) %} {% if listener.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} peers {{ "%s_peers"|format(listener.id.replace("-", ""))|trim() }} {% for amp in listener.amphorae if ( amp.status == constants.AMPHORA_ALLOCATED) %} {# HAProxy has peer name limitations, thus the hash filter #} peer {{ amp.id|hash_amp_id|replace('=', '') }} {{ amp.vrrp_ip }}:{{ listener.peer_port }} {% endfor %} {% endif %} {% endmacro %} {% macro bind_macro(constants, listener, lb_vip_address) %} {% if listener.crt_list_filename is defined %} {% set def_crt_opt = ("ssl crt-list %s"|format( listener.crt_list_filename)|trim()) %} {% else %} {% set def_crt_opt = "" %} {% endif %} {% if listener.client_ca_tls_path and listener.client_auth %} {% set client_ca_opt = "ca-file %s verify %s"|format(listener.client_ca_tls_path, listener.client_auth)|trim() %} {% else %} {% set client_ca_opt = "" %} {% endif %} {% if listener.client_crl_path and listener.client_ca_tls_path %} {% set ca_crl_opt = "crl-file %s"|format(listener.client_crl_path)|trim() %} {% else %} {% set ca_crl_opt = "" %} {% endif %} bind {{ lb_vip_address }}:{{ listener.protocol_port }} {{ "%s %s %s"|format(def_crt_opt, client_ca_opt, ca_crl_opt)|trim() }} {% endmacro %} {% macro l7rule_compare_type_macro(constants, ctype) %} {% if ctype == constants.L7RULE_COMPARE_TYPE_REGEX %} {{- "-m reg" -}} {% elif ctype == constants.L7RULE_COMPARE_TYPE_STARTS_WITH %} {{- "-m beg" -}} {% elif ctype == constants.L7RULE_COMPARE_TYPE_ENDS_WITH %} {{- "-m end" -}} {% elif ctype == constants.L7RULE_COMPARE_TYPE_CONTAINS %} {{- "-m sub" -}} {% elif ctype == constants.L7RULE_COMPARE_TYPE_EQUAL_TO %} {{- "-m str" -}} {% endif %} {% endmacro %} {% macro l7rule_macro(constants, l7rule) %} {% if l7rule.type == constants.L7RULE_TYPE_HOST_NAME %} acl {{ l7rule.id }} req.hdr(host) -i {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_PATH %} acl {{ l7rule.id }} path {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_FILE_TYPE %} acl {{ l7rule.id }} path_end {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_HEADER %} acl {{ l7rule.id }} req.hdr({{ l7rule.key }}) {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_COOKIE %} acl {{ l7rule.id }} req.cook({{ l7rule.key }}) {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_SSL_CONN_HAS_CERT %} acl {{ l7rule.id }} ssl_c_used {% elif l7rule.type == constants.L7RULE_TYPE_SSL_VERIFY_RESULT %} acl {{ l7rule.id }} ssl_c_verify eq {{ l7rule.value }} {% elif l7rule.type == constants.L7RULE_TYPE_SSL_DN_FIELD %} acl {{ l7rule.id }} ssl_c_s_dn({{ l7rule.key }}) {{ l7rule_compare_type_macro( constants, l7rule.compare_type) }} {{ l7rule.value }} {% endif %} {% endmacro %} {% macro l7rule_invert_macro(invert) %} {% if invert %} {{- "!" 
-}} {% endif %} {% endmacro %} {% macro l7rule_list_macro(l7policy) %} {% for l7rule in l7policy.l7rules %} {{- " " -}}{{- l7rule_invert_macro(l7rule.invert) -}}{{- l7rule.id -}} {% endfor %} {% endmacro %} {% macro l7policy_macro(constants, l7policy) %} {% for l7rule in l7policy.l7rules %} {{- l7rule_macro(constants, l7rule) -}} {% endfor %} {% if l7policy.redirect_http_code %} {% set redirect_http_code_opt = " code %s"|format( l7policy.redirect_http_code) %} {% else %} {% set redirect_http_code_opt = "" %} {% endif %} {% if l7policy.action == constants.L7POLICY_ACTION_REJECT %} http-request deny if{{ l7rule_list_macro(l7policy) }} {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_TO_URL %} redirect {{- redirect_http_code_opt }} location {{ l7policy.redirect_url }} if{{ l7rule_list_macro( l7policy) }} {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_TO_POOL and l7policy.redirect_pool.enabled %} use_backend {{ l7policy.redirect_pool.id }} if{{ l7rule_list_macro( l7policy) }} {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_PREFIX %} redirect {{- redirect_http_code_opt }} prefix {{ l7policy.redirect_prefix }} if{{ l7rule_list_macro( l7policy) }} {% endif %} {% endmacro %} {% macro frontend_macro(constants, listener, lb_vip_address) %} frontend {{ listener.id }} {% if listener.connection_limit is defined %} maxconn {{ listener.connection_limit }} {% endif %} {% if (listener.protocol.lower() == constants.PROTOCOL_TERMINATED_HTTPS.lower()) %} redirect scheme https if !{ ssl_fc } {% endif %} {{ bind_macro(constants, listener, lb_vip_address)|trim() }} mode {{ listener.protocol_mode }} {% for l7policy in listener.l7policies if (l7policy.enabled and l7policy.l7rules|length > 0) %} {{- l7policy_macro(constants, l7policy) -}} {% endfor %} {% if listener.default_pool and listener.default_pool.enabled %} default_backend {{ listener.default_pool.id }} {% endif %} timeout client {{ listener.timeout_client_data }} {% if listener.user_log_format is defined %} log-format {{ listener.user_log_format }} {% endif %} {% if listener.timeout_tcp_inspect %} tcp-request inspect-delay {{ listener.timeout_tcp_inspect }} {% endif %} {% endmacro %} {% macro member_macro(constants, pool, member) %} {% if pool.health_monitor and pool.health_monitor.enabled %} {% if member.monitor_address %} {% set monitor_addr_opt = " addr %s"|format(member.monitor_address) %} {% else %} {% set monitor_addr_opt = "" %} {% endif %} {% if member.monitor_port %} {% set monitor_port_opt = " port %s"|format(member.monitor_port) %} {% else %} {% set monitor_port_opt = "" %} {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %} {% set monitor_ssl_opt = " check-ssl verify none" %} {% else %} {% set monitor_ssl_opt = "" %} {% endif %} {% set hm_opt = " check%s inter %ds fall %d rise %d%s%s"|format( monitor_ssl_opt, pool.health_monitor.delay, pool.health_monitor.fall_threshold, pool.health_monitor.rise_threshold, monitor_addr_opt, monitor_port_opt) %} {% else %} {% set hm_opt = "" %} {% endif %} {% if (pool.session_persistence.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE) %} {% set persistence_opt = " cookie %s"|format(member.id) %} {% else %} {% set persistence_opt = "" %} {% endif %} {% if pool.protocol.lower() == constants.PROTOCOL_PROXY.lower() %} {% set proxy_protocol_opt = " send-proxy" %} {% else %} {% set proxy_protocol_opt = "" %} {% endif %} {% if member.backup %} {% set member_backup_opt = " backup" %} {% else %} {% set member_backup_opt = "" %} {% endif %} {% if 
member.enabled %} {% set member_enabled_opt = "" %} {% else %} {% set member_enabled_opt = " disabled" %} {% endif %} {% if pool.tls_enabled %} {% set def_opt_prefix = " ssl" %} {% set def_sni_opt = " sni ssl_fc_sni" %} {% else %} {% set def_opt_prefix = "" %} {% set def_sni_opt = "" %} {% endif %} {% if pool.client_cert and pool.tls_enabled %} {% set def_crt_opt = " crt %s"|format(pool.client_cert) %} {% else %} {% set def_crt_opt = "" %} {% endif %} {% if pool.ca_cert and pool.tls_enabled %} {% set ca_opt = " ca-file %s"|format(pool.ca_cert) %} {% set def_verify_opt = " verify required" %} {% if pool.crl %} {% set crl_opt = " crl-file %s"|format(pool.crl) %} {% else %} {% set def_verify_opt = "" %} {% endif %} {% elif pool.tls_enabled %} {% set def_verify_opt = " verify none" %} {% endif %} {{ "server %s %s:%d weight %s%s%s%s%s%s%s%s%s%s%s%s"|e|format( member.id, member.address, member.protocol_port, member.weight, hm_opt, persistence_opt, proxy_protocol_opt, member_backup_opt, member_enabled_opt, def_opt_prefix, def_crt_opt, ca_opt, crl_opt, def_verify_opt, def_sni_opt)|trim() }} {% endmacro %} {% macro backend_macro(constants, listener, pool) %} backend {{ pool.id }} {% if pool.protocol.lower() == constants.PROTOCOL_PROXY.lower() %} mode {{ listener.protocol_mode }} {% else %} mode {{ pool.protocol }} {% endif %} {% if pool.get(constants.HTTP_REUSE, False) and ( pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() or (pool.protocol.lower() == constants.PROTOCOL_PROXY.lower() and listener.protocol_mode.lower() == constants.PROTOCOL_HTTP.lower()))%} http-reuse safe {% endif %} balance {{ pool.lb_algorithm }} {% if pool.session_persistence %} {% if (pool.session_persistence.type == constants.SESSION_PERSISTENCE_SOURCE_IP) %} {% if listener.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} stick-table type ip size {{ pool.stick_size }} peers {{ "%s_peers"|format(listener.id.replace("-", ""))|trim() }} {% else %} stick-table type ip size {{ pool.stick_size }} {% endif %} stick on src {% elif (pool.session_persistence.type == constants.SESSION_PERSISTENCE_APP_COOKIE) %} {% if listener.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} stick-table type string len 64 size {{ pool.stick_size }} peers {{ "%s_peers"|format(listener.id.replace("-", ""))|trim() }} {% else %} stick-table type string len 64 size {{ pool.stick_size }} {% endif %} stick store-response res.cook({{ pool.session_persistence.cookie_name }}) stick match req.cook({{ pool.session_persistence.cookie_name }}) {% elif (pool.session_persistence.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE) %} cookie SRV insert indirect nocache {% endif %} {% endif %} {% if pool.health_monitor and pool.health_monitor.enabled %} timeout check {{ pool.health_monitor.timeout }}s {% if (pool.health_monitor.type == constants.HEALTH_MONITOR_HTTP or pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS) %} {% if (pool.health_monitor.http_version and pool.health_monitor.http_version == 1.1 and pool.health_monitor.domain_name) %} option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }} HTTP/ {{- pool.health_monitor.http_version -}}{{- "\\r\\n" | safe -}} Host:\ {{ pool.health_monitor.domain_name }} {% elif pool.health_monitor.http_version %} option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }} HTTP/ {{- pool.health_monitor.http_version -}}{{- "\\r\\n" | safe }} {% else %} option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }} {% endif %} http-check 
expect rstatus {{ pool.health_monitor.expected_codes }} {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_TLS_HELLO %} option ssl-hello-chk {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_PING %} option external-check external-check command /var/lib/octavia/ping-wrapper.sh {% endif %} {% endif %} {% if pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() %} {% if listener.insert_headers.get('X-Forwarded-For', 'False').lower() == 'true' %} option forwardfor {% endif %} {% if listener.insert_headers.get('X-Forwarded-Port', 'False').lower() == 'true' %} http-request set-header X-Forwarded-Port %[dst_port] {% endif %} {% endif %} {% if listener.insert_headers.get('X-Forwarded-Proto', 'False').lower() == 'true' %} {% if listener.protocol.lower() == constants.PROTOCOL_HTTP.lower() %} http-request set-header X-Forwarded-Proto http {% elif listener.protocol.lower() == constants.PROTOCOL_TERMINATED_HTTPS.lower() %} http-request set-header X-Forwarded-Proto https {% endif %} {% endif %} {% if listener.protocol.lower() == constants.PROTOCOL_TERMINATED_HTTPS.lower() %} {% if listener.insert_headers.get('X-SSL-Client-Verify', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-Verify %[ssl_c_verify] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-Has-Cert', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-Has-Cert %[ssl_c_used] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-DN', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-DN %{+Q}[ssl_c_s_dn] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-CN', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-CN %{+Q}[ssl_c_s_dn(cn)] {% endif %} {% if listener.insert_headers.get('X-SSL-Issuer', 'False').lower() == 'true' %} http-request set-header X-SSL-Issuer %{+Q}[ssl_c_i_dn] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-SHA1', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-SHA1 %{+Q}[ssl_c_sha1,hex] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-Not-Before', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-Not-Before %{+Q}[ssl_c_notbefore] {% endif %} {% if listener.insert_headers.get('X-SSL-Client-Not-After', 'False').lower() == 'true' %} http-request set-header X-SSL-Client-Not-After %{+Q}[ssl_c_notafter] {% endif %} {% endif %} {% if listener.connection_limit is defined %} fullconn {{ listener.connection_limit }} {% endif %} option allbackups timeout connect {{ listener.timeout_member_connect }} timeout server {{ listener.timeout_member_data }} {% for member in pool.members %} {{- member_macro(constants, pool, member) -}} {% endfor %} {% endmacro %} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/common/jinja/logging/0000775000175000017500000000000000000000000020631 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/logging/__init__.py0000664000175000017500000000000000000000000022730 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/logging/logging_jinja_cfg.py0000664000175000017500000000506300000000000024627 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. 
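To make the l7rule/l7policy and frontend macros above concrete, a sketch of the frontend lines they emit for one enabled redirect-to-URL policy carrying a single "path starts-with" rule (every ID, address, and URL is an invented placeholder):

frontend <listener-id>
    maxconn 50000
    redirect scheme https if !{ ssl_fc }
    bind 203.0.113.10:443 ssl crt-list /var/lib/octavia/certs/<listener-id>/<listener-id>.pem
    mode http
    acl <l7rule-id> path -m beg /api
    redirect code 302 location https://api.example.com if <l7rule-id>
    default_backend <pool-id>
    timeout client 50000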
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import jinja2 from octavia.common.config import cfg from octavia.common import constants CONF = cfg.CONF TEMPLATES_DIR = (os.path.dirname(os.path.realpath(__file__)) + constants.LOGGING_TEMPLATES + '/') class LoggingJinjaTemplater(object): def __init__(self, logging_templates=None): self.logging_templates = logging_templates or TEMPLATES_DIR template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname( self.logging_templates)) jinja_env = jinja2.Environment(loader=template_loader, autoescape=True) self.logging_template = jinja_env.get_template( constants.LOGGING_CONF_TEMPLATE) def build_logging_config(self): admin_log_hosts = [] for server in CONF.amphora_agent.admin_log_targets or []: (host, port) = server.rsplit(':', 1) admin_log_hosts.append({ 'host': host, 'port': port, }) tenant_log_hosts = [] for server in CONF.amphora_agent.tenant_log_targets or []: (host, port) = server.rsplit(':', 1) tenant_log_hosts.append({ 'host': host, 'port': port, }) return self.logging_template.render( {'admin_log_hosts': admin_log_hosts, 'tenant_log_hosts': tenant_log_hosts, 'protocol': CONF.amphora_agent.log_protocol, 'retry_count': CONF.amphora_agent.log_retry_count, 'retry_interval': CONF.amphora_agent.log_retry_interval, 'queue_size': CONF.amphora_agent.log_queue_size, 'forward_all_logs': CONF.amphora_agent.forward_all_logs, 'disable_local_log_storage': CONF.amphora_agent.disable_local_log_storage, 'admin_log_facility': CONF.amphora_agent.administrative_log_facility, 'user_log_facility': CONF.amphora_agent.user_log_facility, }) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/common/jinja/logging/templates/0000775000175000017500000000000000000000000022627 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/logging/templates/10-rsyslog.conf.template0000664000175000017500000000423400000000000027233 0ustar00zuulzuul00000000000000{# Copyright 2018 Rackspace, US Inc. # Copyright 2019 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
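The log-target parsing in build_logging_config() above uses rsplit(':', 1), so only the final colon separates host from port; a small, self-contained illustration (target endpoints invented):

# Mirrors the "host:port" parsing used for admin/tenant log targets.
targets = ['192.0.2.50:514', 'logs.example.com:10514']
hosts = []
for server in targets:
    (host, port) = server.rsplit(':', 1)
    hosts.append({'host': host, 'port': port})
# hosts == [{'host': '192.0.2.50', 'port': '514'},
#           {'host': 'logs.example.com', 'port': '10514'}]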
#} {#- Tenant traffic logs -#} {%- if tenant_log_hosts -%} local{{ user_log_facility }}.=info {% for host in tenant_log_hosts %}{% if not loop.first %}&{% endif %}action(type="omfwd" target="{{ host['host'] }}" port="{{ host['port'] }}" protocol="{{ protocol }}" action.resumeRetryCount="{{ retry_count }}" action.resumeInterval="{{ retry_interval }}" queue.type="linkedList" queue.size="{{ queue_size }}" {% if not loop.first %}action.execOnlyWhenPreviousIsSuspended="on"{% endif %}){% endfor %} {%- endif -%} {#- Administrative logs -#} {%- if admin_log_hosts -%} {%- if forward_all_logs %} *.*;local{{ user_log_facility }}.none {% for host in admin_log_hosts %}{% if not loop.first %}&{% endif %}action(type="omfwd" target="{{ host['host'] }}" port="{{ host['port'] }}" protocol="{{ protocol }}" action.resumeRetryCount="{{ retry_count }}" action.resumeInterval="{{ retry_interval }}" queue.type="linkedList" queue.size="{{ queue_size }}" {% if not loop.first %}action.execOnlyWhenPreviousIsSuspended="on"{% endif %}){% endfor %} {% else %} local{{ admin_log_facility }}.* {% for host in admin_log_hosts %}{% if not loop.first %}&{% endif %}action(type="omfwd" target="{{ host['host'] }}" port="{{ host['port'] }}" protocol="{{ protocol }}" action.resumeRetryCount="{{ retry_count }}" action.resumeInterval="{{ retry_interval }}" queue.type="linkedList" queue.size="{{ queue_size }}" {% if not loop.first %}action.execOnlyWhenPreviousIsSuspended="on"{% endif %}){% endfor -%} {%- endif -%} {%- endif -%} {%- if disable_local_log_storage -%} *.* stop {%- endif -%} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/common/jinja/lvs/0000775000175000017500000000000000000000000020007 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/lvs/__init__.py0000664000175000017500000000000000000000000022106 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/lvs/jinja_cfg.py0000664000175000017500000001764200000000000022305 0ustar00zuulzuul00000000000000# Copyright (c) 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
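A sketch of one forwarding line the rsyslog template above renders for a single tenant log target (the facility number, endpoint, and tuning values are placeholders standing in for the template variables, not real defaults):

local0.=info action(type="omfwd" target="192.0.2.50" port="514" protocol="tcp" action.resumeRetryCount="5" action.resumeInterval="2" queue.type="linkedList" queue.size="10000")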
import os import jinja2 from octavia.common.config import cfg from octavia.common import constants from octavia.common import utils as octavia_utils CONF = cfg.CONF PROTOCOL_MAP = { constants.PROTOCOL_UDP: 'udp' } BALANCE_MAP = { constants.LB_ALGORITHM_ROUND_ROBIN: 'wrr', constants.LB_ALGORITHM_LEAST_CONNECTIONS: 'lc', constants.LB_ALGORITHM_SOURCE_IP: 'sh' } BASE_PATH = CONF.haproxy_amphora.base_path CHECK_SCRIPT_NAME = 'udp_check.sh' KEEPALIVED_LVS_TEMPLATE = os.path.abspath( os.path.join(os.path.dirname(__file__), 'templates/keepalivedlvs.cfg.j2')) JINJA_ENV = None class LvsJinjaTemplater(object): def __init__(self, base_amp_path=None, keepalivedlvs_template=None): """Keepalived LVS configuration generation :param base_amp_path: Base path for amphora data :param keepalivedlvs_template: Absolute path to Jinja template """ self.base_amp_path = base_amp_path or BASE_PATH self.keepalivedlvs_template = (keepalivedlvs_template or KEEPALIVED_LVS_TEMPLATE) def build_config(self, listener, **kwargs): """Convert a logical configuration to the Keepalived LVS version :param listener: The listener configuration :return: Rendered configuration """ return self.render_loadbalancer_obj(listener) def _get_template(self): """Returns the specified Jinja configuration template.""" global JINJA_ENV if not JINJA_ENV: template_loader = jinja2.FileSystemLoader( searchpath=os.path.dirname(self.keepalivedlvs_template)) JINJA_ENV = jinja2.Environment( autoescape=True, loader=template_loader, trim_blocks=True, lstrip_blocks=True, extensions=['jinja2.ext.do']) return JINJA_ENV.get_template(os.path.basename( self.keepalivedlvs_template)) def render_loadbalancer_obj(self, listener, **kwargs): """Renders a templated configuration from a load balancer object :param host_amphora: The Amphora this configuration is hosted on :param listener: The listener configuration :return: Rendered configuration """ loadbalancer = self._transform_loadbalancer( listener.load_balancer, listener) return self._get_template().render( {'loadbalancer': loadbalancer}, constants=constants) def _transform_loadbalancer(self, loadbalancer, listener): """Transforms a load balancer into an object that will be processed by the templating system """ t_listener = self._transform_listener(listener) ret_value = { 'id': loadbalancer.id, 'vip_address': loadbalancer.vip.ip_address, 'listener': t_listener, 'enabled': loadbalancer.enabled } return ret_value def _transform_listener(self, listener): """Transforms a listener into an object that will be processed by the templating system """ ret_value = { 'id': listener.id, 'protocol_port': listener.protocol_port, 'protocol_mode': PROTOCOL_MAP[listener.protocol], 'enabled': listener.enabled } if listener.connection_limit and listener.connection_limit > -1: ret_value['connection_limit'] = listener.connection_limit if (listener.default_pool and listener.default_pool.provisioning_status != constants.PENDING_DELETE): ret_value['default_pool'] = self._transform_pool( listener.default_pool) return ret_value def _transform_pool(self, pool): """Transforms a pool into an object that will be processed by the templating system """ ret_value = { 'id': pool.id, 'protocol': PROTOCOL_MAP[pool.protocol], 'lb_algorithm': BALANCE_MAP.get(pool.lb_algorithm, 'roundrobin'), 'members': [], 'health_monitor': '', 'session_persistence': '', 'enabled': pool.enabled } members_gen = (mem for mem in pool.members if mem.provisioning_status != constants.PENDING_DELETE) members = [self._transform_member(x) for x in members_gen] 
ret_value['members'] = members if (pool.health_monitor and pool.health_monitor.provisioning_status != constants.PENDING_DELETE): ret_value['health_monitor'] = self._transform_health_monitor( pool.health_monitor) if pool.session_persistence: func = self._transform_session_persistence ret_value['session_persistence'] = func( pool.session_persistence) return ret_value @staticmethod def _transform_session_persistence(persistence): """Transforms session persistence into an object that will be processed by the templating system """ return { 'type': persistence.type, 'persistence_timeout': persistence.persistence_timeout, 'persistence_granularity': persistence.persistence_granularity } @staticmethod def _transform_member(member): """Transforms a member into an object that will be processed by the templating system """ return { 'id': member.id, 'address': member.ip_address, 'protocol_port': member.protocol_port, 'weight': member.weight, 'enabled': member.enabled, 'monitor_address': member.monitor_address, 'monitor_port': member.monitor_port } def _get_default_lvs_check_script_path(self): return (CONF.haproxy_amphora.base_path + '/lvs/check/' + CHECK_SCRIPT_NAME) def _transform_health_monitor(self, monitor): """Transforms a health monitor into an object that will be processed by the templating system """ return_val = { 'id': monitor.id, 'type': monitor.type, 'delay': monitor.delay, 'timeout': monitor.timeout, 'enabled': monitor.enabled, 'fall_threshold': monitor.fall_threshold, 'check_script_path': (self._get_default_lvs_check_script_path() if monitor.type == constants.HEALTH_MONITOR_UDP_CONNECT else None) } if monitor.type == constants.HEALTH_MONITOR_HTTP: return_val.update({ 'rise_threshold': monitor.rise_threshold, 'url_path': monitor.url_path, 'http_method': (monitor.http_method if monitor.http_method == constants.HEALTH_MONITOR_HTTP_METHOD_GET else None), 'expected_codes': (sorted(list( octavia_utils.expand_expected_codes( monitor.expected_codes))) if monitor.expected_codes else [])}) return return_val ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/common/jinja/lvs/templates/0000775000175000017500000000000000000000000022005 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/lvs/templates/base.j20000664000175000017500000000166100000000000023160 0ustar00zuulzuul00000000000000{# Copyright (c) 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
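The expected_codes handling above depends on octavia.common.utils.expand_expected_codes; a minimal reimplementation sketch to show the shape of the data the transform sorts (the real helper in octavia.common.utils is authoritative):

def expand_expected_codes(codes):
    # Accepts single codes, comma-separated lists, and low-high
    # ranges, and returns a set of status-code strings.
    retval = set()
    for code in codes.replace(',', ' ').split(' '):
        code = code.strip()
        if not code:
            continue
        if '-' in code:
            low, hi = code.split('-')[:2]
            retval.update(str(i) for i in range(int(low), int(hi) + 1))
        else:
            retval.add(code)
    return retval

assert sorted(expand_expected_codes('200,202-204')) == [
    '200', '202', '203', '204']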
# #} # Configuration for Loadbalancer {{ loadbalancer.id }} {% if loadbalancer.listener.enabled %} # Configuration for Listener {{ udp_listener_id }} {% else %} # Listener {{ udp_listener_id }} is disabled {% endif %} {% block global_definitions %}{% endblock global_definitions %} {% block proxies %}{% endblock proxies %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/lvs/templates/keepalivedlvs.cfg.j20000664000175000017500000000225700000000000025644 0ustar00zuulzuul00000000000000{# Copyright (c) 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # #} {% extends 'base.j2' %} {% from 'macros.j2' import virtualserver_macro %} {% set udp_listener_id = loadbalancer.listener.id %} {% block global_definitions %} net_namespace {{ constants.AMPHORA_NAMESPACE }} {% endblock global_definitions %} {% block proxies %} {% if loadbalancer.enabled and loadbalancer.listener.enabled %} {{- virtualserver_macro(constants, loadbalancer.listener, loadbalancer.vip_address, loadbalancer.listener.get('default_pool', None)) }} {% endif %} {% endblock proxies %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/lvs/templates/macros.j20000664000175000017500000001243600000000000023534 0ustar00zuulzuul00000000000000{# Copyright (c) 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
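For reference, a sketch of the keepalived LVS configuration that base.j2, keepalivedlvs.cfg.j2, and the macros below combine into for an enabled UDP listener (all addresses and IDs are invented; the namespace line assumes constants.AMPHORA_NAMESPACE renders as amphora-haproxy):

# Configuration for Loadbalancer <lb-id>
# Configuration for Listener <listener-id>
net_namespace amphora-haproxy
virtual_server 203.0.113.10 53 {
    lb_algo wrr
    lb_kind NAT
    protocol UDP
    delay_loop 5
    delay_before_retry 5
    retry 3
    # Configuration for Pool <pool-id>
    # Configuration for Member <member-id>
    real_server 192.0.2.21 53 {
        weight 1
        MISC_CHECK {
            misc_path "/var/lib/octavia/lvs/check/udp_check.sh 192.0.2.21 53"
            misc_timeout 10
        }
    }
}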
# #} {%- macro lb_algo_macro(pool) -%} lb_algo {{ pool.lb_algorithm }} {%- endmacro -%} {% macro misc_path_macro(member, health_monitor) -%} misc_path "{{ health_monitor.check_script_path }} {{ member.monitor_address|default(member.address, true) }} {{ member.monitor_port|default(member.protocol_port, true) }}" {%- endmacro %} {%- macro misc_check_macro(pool, member, health_monitor) -%} MISC_CHECK { {{ misc_path_macro(member, health_monitor) }} misc_timeout {{ pool.health_monitor.timeout }} } {%- endmacro -%} {%- macro http_url_macro(health_monitor, health_monitor_status_code) %} url { path {{ health_monitor.url_path }} status_code {{ health_monitor_status_code }} } {% endmacro -%} {%- macro http_get_macro(pool, member, health_monitor) -%} HTTP_GET { {% for status_code in health_monitor.expected_codes %} {{ http_url_macro(health_monitor, status_code) -}} {% endfor %} connect_ip {{ member.monitor_address|default(member.address, true) }} connect_port {{ member.monitor_port|default(member.protocol_port, true) }} connect_timeout {{ health_monitor.timeout }} } {%- endmacro -%} {%- macro tcp_check_macro(pool, member, health_monitor) -%} TCP_CHECK { connect_ip {{ member.monitor_address|default(member.address, true) }} connect_port {{ member.monitor_port|default(member.protocol_port, true) }} connect_timeout {{ health_monitor.timeout }} } {%- endmacro -%} {% macro health_monitor_rs_macro(constants, pool, member) %} {% if pool.health_monitor and pool.health_monitor.enabled %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_UDP_CONNECT %} {{ misc_check_macro(pool, member, pool.health_monitor) -}} {% elif pool.health_monitor.type == constants.HEALTH_MONITOR_HTTP and pool.health_monitor.http_method == constants.HEALTH_MONITOR_HTTP_METHOD_GET %} {{ http_get_macro(pool, member, pool.health_monitor) -}} {% elif pool.health_monitor.type == constants.HEALTH_MONITOR_TCP %} {{ tcp_check_macro(pool, member, pool.health_monitor) -}} {% endif %} {% endif %} {% endmacro %} {% macro realserver_macro(constants, pool, member, listener) %} {% if member.enabled %} # Configuration for Member {{ member.id }} real_server {{ member.address }} {{ member.protocol_port }} { weight {{ member.weight }} {% if listener.connection_limit %} uthreshold {{ listener.connection_limit }} {% endif %} {{- health_monitor_rs_macro(constants, pool, member) }} } {% else %} # Member {{ member.id }} is disabled {% endif %} {% endmacro %} {% macro health_monitor_vs_macro(default_pool) %} {% if default_pool and default_pool.health_monitor and default_pool.health_monitor.enabled %} delay_loop {{ default_pool.health_monitor.delay }} delay_before_retry {{ default_pool.health_monitor.delay }} {% if default_pool.health_monitor.fall_threshold %} retry {{ default_pool.health_monitor.fall_threshold }} {% endif %} {% endif %} {% endmacro %} {% macro virtualserver_macro(constants, listener, lb_vip_address, default_pool) %} {% if default_pool %} virtual_server {{ lb_vip_address }} {{ listener.protocol_port }} { {{ lb_algo_macro(default_pool) }} lb_kind NAT protocol {{ listener.protocol_mode.upper() }} {% if default_pool.session_persistence and default_pool.session_persistence.type == constants.SESSION_PERSISTENCE_SOURCE_IP %} {# set our defined defaults as I saw this not be consistent #} {# in testing #} {% if default_pool.session_persistence.persistence_timeout %} persistence_timeout {{ default_pool.session_persistence.persistence_timeout }} {% else %} persistence_timeout 360 {% endif %} {% if 
default_pool.session_persistence.persistence_granularity %} persistence_granularity {{ default_pool.session_persistence.persistence_granularity }} {% else %} persistence_granularity 255.255.255.255 {% endif %} {% endif %} {{ health_monitor_vs_macro(default_pool) }} {% if default_pool.protocol.lower() == "udp" %} {% if default_pool.enabled %} # Configuration for Pool {{ default_pool.id }} {% else %} # Pool {{ default_pool.id }} is disabled {% endif %} {% if default_pool.health_monitor and default_pool.health_monitor.enabled %} # Configuration for HealthMonitor {{ default_pool.health_monitor.id }} {% endif %} {% for member in default_pool.members %} {{- realserver_macro(constants, default_pool, member, listener) }} {% endfor %} {% endif %} } {% endif %} {% endmacro %} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/common/jinja/templates/0000775000175000017500000000000000000000000021201 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/templates/user_data_config_drive.template0000664000175000017500000000224000000000000027421 0ustar00zuulzuul00000000000000{# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -#} #cloud-config # vim: syntax=yaml # # This configuration with take user-data dict and build a cloud-init # script utilizing the write_files module. The user-data dict should be a # Key Value pair where the Key is the path to store the file and the Value # is the data to store at that location # # Example: # {'/root/path/to/file.cfg': 'I'm a file, write things in me'} write_files: {%- for key, value in user_data.items() %} - path: {{ key }} content: | {{ value|indent(8) }} {%- endfor -%} {# restart agent now that configurations are in place #} runcmd: - service amphora-agent restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/jinja/user_data_jinja_cfg.py0000664000175000017500000000241700000000000023522 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
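A sketch of the cloud-init document the user-data template above produces for a single written file (the path and file body are placeholders):

#cloud-config
write_files:
 - path: /etc/octavia/amphora-agent.conf
   content: |
        [DEFAULT]
        debug = False
runcmd:
 - service amphora-agent restart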
import os import jinja2 from octavia.common.config import cfg from octavia.common import constants CONF = cfg.CONF TEMPLATES_DIR = (os.path.dirname(os.path.realpath(__file__)) + constants.TEMPLATES + '/') class UserDataJinjaCfg(object): def __init__(self): template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname( TEMPLATES_DIR)) jinja_env = jinja2.Environment(autoescape=True, loader=template_loader) self.agent_template = jinja_env.get_template( constants.USER_DATA_CONFIG_DRIVE_TEMPLATE) def build_user_data_config(self, user_data): return self.agent_template.render(user_data=user_data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/keystone.py0000664000175000017500000000517000000000000020326 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from keystonemiddleware import auth_token from oslo_config import cfg from oslo_log import log as logging from octavia.common import constants LOG = logging.getLogger(__name__) _NOAUTH_PATHS = ['/', '/load-balancer/', '/healthcheck', '/load-balancer/healthcheck'] class KeystoneSession(object): def __init__(self, section=constants.SERVICE_AUTH): self._session = None self._auth = None self.section = section ks_loading.register_auth_conf_options(cfg.CONF, self.section) ks_loading.register_session_conf_options(cfg.CONF, self.section) def get_session(self): """Initializes a Keystone session. :return: a Keystone Session object """ if not self._session: self._session = ks_loading.load_session_from_conf_options( cfg.CONF, self.section, auth=self.get_auth()) return self._session def get_auth(self): if not self._auth: self._auth = ks_loading.load_auth_from_conf_options( cfg.CONF, self.section) return self._auth def get_service_user_id(self): return self.get_auth().get_user_id(self.get_session()) class SkippingAuthProtocol(auth_token.AuthProtocol): """SkippingAuthProtocol to reach special endpoints Bypasses keystone authentication for special request paths, such as the api version discovery path. Note: SkippingAuthProtocol is lean customization of :py:class:`keystonemiddleware.auth_token.AuthProtocol` that disables keystone communication if the request path is in the _NOAUTH_PATHS list. """ def process_request(self, request): path = request.path if path in _NOAUTH_PATHS: LOG.debug('Request path is %s and it does not require keystone ' 'authentication', path) return None # return NONE to reach actual logic return super(SkippingAuthProtocol, self).process_request(request) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/policy.py0000664000175000017500000001363200000000000017766 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
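To illustrate the bypass decision in SkippingAuthProtocol above, a minimal stand-alone sketch (the real middleware receives a webob request from keystonemiddleware; this only models the path check):

# Paths in _NOAUTH_PATHS skip keystone token validation entirely;
# every other path goes through keystonemiddleware as usual.
NOAUTH_PATHS = ['/', '/load-balancer/', '/healthcheck',
                '/load-balancer/healthcheck']

def needs_keystone_auth(path):
    return path not in NOAUTH_PATHS

assert not needs_keystone_auth('/healthcheck')
assert needs_keystone_auth('/v2/lbaas/loadbalancers')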
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Octavia.""" from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy as oslo_policy from oslo_utils import excutils from octavia.common import exceptions from octavia import policies LOG = logging.getLogger(__name__) OCTAVIA_POLICY = None def get_enforcer(): global OCTAVIA_POLICY if OCTAVIA_POLICY is None: LOG.debug('Loading octavia policy object.') OCTAVIA_POLICY = Policy() return OCTAVIA_POLICY def reset(): global OCTAVIA_POLICY if OCTAVIA_POLICY: OCTAVIA_POLICY.clear() OCTAVIA_POLICY = None class Policy(oslo_policy.Enforcer): def __init__(self, conf=cfg.CONF, policy_file=None, rules=None, default_rule=None, use_conf=True, overwrite=True): """Init an Enforcer class. :param context: A context object. :param conf: A configuration object. :param policy_file: Custom policy file to use, if none is specified, ``conf.oslo_policy.policy_file`` will be used. :param rules: Default dictionary / Rules to use. It will be considered just in the first instantiation. If :meth:`load_rules` with ``force_reload=True``, :meth:`clear` or :meth:`set_rules` with ``overwrite=True`` is called this will be overwritten. :param default_rule: Default rule to use, conf.default_rule will be used if none is specified. :param use_conf: Whether to load rules from cache or config file. :param overwrite: Whether to overwrite existing rules when reload rules from config file. """ super(Policy, self).__init__(conf, policy_file, rules, default_rule, use_conf, overwrite) self.register_defaults(policies.list_rules()) def authorize(self, action, target, context, do_raise=True, exc=None): """Verifies that the action is valid on the target in this context. :param context: The oslo context for this request. :param action: string representing the action to be checked this should be colon separated for clarity. i.e. ``compute:create_instance``, ``compute:attach_volume``, ``volume:attach_volume`` :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :param do_raise: if True (the default), raises PolicyForbidden; if False, returns False :param exc: Class of the exceptions to raise if the check fails. Any remaining arguments passed to :meth:`enforce` (both positional and keyword arguments) will be passed to the exceptions class. If not specified, :class:`PolicyForbidden` will be used. :raises PolicyForbidden: if verification fails and do_raise is True. Or if 'exc' is specified it will raise an exceptions of that type. :return: returns a non-False value (not necessarily "True") if authorized, and the exact value False if not authorized and do_raise is False. 
""" credentials = context.to_policy_values() # Inject is_admin into the credentials to allow override via # config auth_strategy = constants.NOAUTH credentials['is_admin'] = ( credentials.get('is_admin') or context.is_admin) if not exc: exc = exceptions.PolicyForbidden try: return super(Policy, self).authorize( action, target, credentials, do_raise=do_raise, exc=exc) except oslo_policy.PolicyNotRegistered: with excutils.save_and_reraise_exception(): LOG.exception('Policy not registered') except Exception: credentials.pop('auth_token', None) with excutils.save_and_reraise_exception(): LOG.debug('Policy check for %(action)s failed with ' 'credentials %(credentials)s', {'action': action, 'credentials': credentials}) def check_is_admin(self, context): """Does roles contains 'admin' role according to policy setting. """ credentials = context.to_dict() return self.enforce('context_is_admin', credentials, credentials) def get_rules(self): return self.rules @oslo_policy.register('is_admin') class IsAdminCheck(oslo_policy.Check): """An explicit check for is_admin.""" def __init__(self, kind, match): """Initialize the check.""" self.expected = match.lower() == 'true' super(IsAdminCheck, self).__init__(kind, str(self.expected)) def __call__(self, target, creds, enforcer): """Determine whether is_admin matches the requested value.""" return creds['is_admin'] == self.expected # This is used for the oslopolicy-policy-generator tool def get_no_context_enforcer(): return Policy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/rpc.py0000664000175000017500000000404600000000000017252 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher LOG = logging.getLogger(__name__) TRANSPORT = None def init(): global TRANSPORT TRANSPORT = create_transport(get_transport_url()) def cleanup(): global TRANSPORT if TRANSPORT is not None: TRANSPORT.cleanup() TRANSPORT = None def get_transport_url(url_str=None): return messaging.TransportURL.parse(cfg.CONF, url_str) def get_client(target, version_cap=None, serializer=None, call_monitor_timeout=None): if TRANSPORT is None: init() return messaging.RPCClient(TRANSPORT, target, version_cap=version_cap, serializer=serializer, call_monitor_timeout=call_monitor_timeout) def get_server(target, endpoints, executor='threading', access_policy=dispatcher.DefaultRPCAccessPolicy, serializer=None): if TRANSPORT is None: init() return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor=executor, serializer=serializer, access_policy=access_policy) def create_transport(url): return messaging.get_rpc_transport(cfg.CONF, url=url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/service.py0000664000175000017500000000157700000000000020134 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from octavia.common import config from octavia.common import rpc def prepare_service(argv=None): """Sets global config from config file and sets up logging.""" argv = argv or [] config.init(argv[1:]) config.setup_logging(cfg.CONF) rpc.init() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/stats.py0000664000175000017500000000464500000000000017631 0ustar00zuulzuul00000000000000# Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
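# The StatsMixin below merges per-amphora listener statistics rows using
# the ListenerStatistics data model's += operator. A small sketch of that
# aggregation idea, assuming the data model fields shown here; the
# listener ID and counter values are hypothetical.
def _example_stats_aggregation():
    from octavia.common import data_models

    totals = data_models.ListenerStatistics(listener_id='example-listener')
    samples = [
        data_models.ListenerStatistics(listener_id='example-listener',
                                       bytes_in=10, bytes_out=20),
        data_models.ListenerStatistics(listener_id='example-listener',
                                       bytes_in=5, bytes_out=2),
    ]
    for sample in samples:
        # += sums the traffic counters, as get_listener_stats() does below.
        totals += sample
    return totals  # totals.bytes_in == 15, totals.bytes_out == 22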
from oslo_log import log as logging from octavia.common import constants from octavia.common import data_models from octavia.db import repositories as repo LOG = logging.getLogger(__name__) class StatsMixin(object): def __init__(self): super(StatsMixin, self).__init__() self.listener_stats_repo = repo.ListenerStatisticsRepository() self.repo_amphora = repo.AmphoraRepository() self.repo_loadbalancer = repo.LoadBalancerRepository() def get_listener_stats(self, session, listener_id): """Gets the listener statistics data_models object.""" db_ls, _ = self.listener_stats_repo.get_all( session, listener_id=listener_id) if not db_ls: LOG.warning("Listener Statistics for Listener %s was not found", listener_id) statistics = data_models.ListenerStatistics(listener_id=listener_id) for db_l in db_ls: statistics += db_l amp = self.repo_amphora.get(session, id=db_l.amphora_id) if amp and amp.status == constants.AMPHORA_ALLOCATED: statistics.active_connections += db_l.active_connections return statistics def get_loadbalancer_stats(self, session, loadbalancer_id): statistics = data_models.LoadBalancerStatistics() lb_db = self.repo_loadbalancer.get(session, id=loadbalancer_id) for listener in lb_db.listeners: data = self.get_listener_stats(session, listener.id) statistics.bytes_in += data.bytes_in statistics.bytes_out += data.bytes_out statistics.request_errors += data.request_errors statistics.active_connections += data.active_connections statistics.total_connections += data.total_connections statistics.listeners.append(data) return statistics ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/common/tls_utils/0000775000175000017500000000000000000000000020132 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/tls_utils/__init__.py0000664000175000017500000000107400000000000022245 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/tls_utils/cert_parser.py0000664000175000017500000003624700000000000023031 0ustar00zuulzuul00000000000000# # Copyright 2014 Rackspace. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
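# A short sketch of how the helpers in this module fit together when
# validating an end-entity certificate against its private key; the PEM
# arguments are caller-supplied placeholders, not real credentials.
def _example_validate_and_inspect(cert_pem, key_pem, passphrase=None):
    from octavia.common import exceptions
    from octavia.common.tls_utils import cert_parser

    try:
        cert_parser.validate_cert(cert_pem, private_key=key_pem,
                                  private_key_passphrase=passphrase)
    except exceptions.MisMatchedKey:
        return None  # the private key does not match this certificate
    # On success, expose the CN and any SAN dNSNames, e.g. for SNI.
    return cert_parser.get_host_names(cert_pem)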
import base64 import hashlib from cryptography.hazmat import backends from cryptography.hazmat.primitives import serialization from cryptography import x509 from oslo_context import context as oslo_context from oslo_log import log as logging from pyasn1.codec.der import decoder as der_decoder from pyasn1.codec.der import encoder as der_encoder from pyasn1_modules import rfc2315 from octavia.common import data_models from octavia.common import exceptions from octavia.common import utils as octavia_utils X509_BEG = b'-----BEGIN CERTIFICATE-----' X509_END = b'-----END CERTIFICATE-----' PKCS7_BEG = b'-----BEGIN PKCS7-----' PKCS7_END = b'-----END PKCS7-----' LOG = logging.getLogger(__name__) def validate_cert(certificate, private_key=None, private_key_passphrase=None, intermediates=None): """Validate that the certificate is a valid PEM encoded X509 object Optionally verify that the private key matches the certificate. Optionally verify that the intermediates are valid X509 objects. :param certificate: A PEM encoded certificate :param private_key: The private key for the certificate :param private_key_passphrase: Passphrase for accessing the private key :param intermediates: PEM or PKCS7 encoded intermediate certificates :returns: boolean """ cert = _get_x509_from_pem_bytes(certificate) if intermediates and not isinstance(intermediates, list): # If the intermediates are in a list, then they are already loaded. # Load the certificates to validate them, if they weren't already. list(get_intermediates_pems(intermediates)) if private_key: pkey = _read_private_key(private_key, passphrase=private_key_passphrase) pknum = pkey.public_key().public_numbers() certnum = cert.public_key().public_numbers() if pknum != certnum: raise exceptions.MisMatchedKey return True def _read_private_key(private_key_pem, passphrase=None): """Reads a private key PEM block and returns a RSAPrivatekey :param private_key_pem: The private key PEM block :param passphrase: Optional passphrase needed to decrypt the private key :returns: a RSAPrivatekey object """ if passphrase and isinstance(passphrase, str): passphrase = passphrase.encode("utf-8") if isinstance(private_key_pem, str): private_key_pem = private_key_pem.encode('utf-8') try: return serialization.load_pem_private_key(private_key_pem, passphrase, backends.default_backend()) except Exception: LOG.exception("Passphrase required.") raise exceptions.NeedsPassphrase def prepare_private_key(private_key, passphrase=None): """Prepares an unencrypted PEM-encoded private key for printing :param private_key: The private key in PEM format (encrypted or not) :returns: The unencrypted private key in PEM format """ pk = _read_private_key(private_key, passphrase) return pk.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption()).strip() def get_intermediates_pems(intermediates=None): """Split the input string into individual x509 text blocks :param intermediates: PEM or PKCS7 encoded intermediate certificates :returns: A list of strings where each string represents an X509 pem block surrounded by BEGIN CERTIFICATE, END CERTIFICATE block tags """ if isinstance(intermediates, str): try: intermediates = intermediates.encode("utf-8") except UnicodeDecodeError: LOG.debug("Couldn't encode intermediates string, it was probably " "in binary DER format.") if X509_BEG in intermediates: for x509Pem in _split_x509s(intermediates): yield _prepare_x509_cert(_get_x509_from_pem_bytes(x509Pem)) else: 
for x509Pem in _parse_pkcs7_bundle(intermediates): yield _prepare_x509_cert(_get_x509_from_der_bytes(x509Pem)) def _prepare_x509_cert(cert=None): """Prepares a PEM-encoded X509 certificate for printing :param cert: X509Certificate object :returns: A PEM-encoded X509 certificate """ return cert.public_bytes(encoding=serialization.Encoding.PEM).strip() def _split_x509s(xstr): """Split the input string into individual x509 text blocks :param xstr: A large multi x509 certificate block :returns: A list of strings where each string represents an X509 pem block surrounded by BEGIN CERTIFICATE, END CERTIFICATE block tags """ curr_pem_block = [] inside_x509 = False if isinstance(xstr, bytes): xstr = xstr.decode('utf-8') for line in xstr.replace("\r", "").split("\n"): if inside_x509: curr_pem_block.append(line) if line == X509_END.decode('utf-8'): yield octavia_utils.b("\n".join(curr_pem_block)) curr_pem_block = [] inside_x509 = False continue if line == X509_BEG.decode('utf-8'): curr_pem_block.append(line) inside_x509 = True def _parse_pkcs7_bundle(pkcs7): """Parse a PKCS7 certificate bundle in DER or PEM format :param pkcs7: A pkcs7 bundle in DER or PEM format :returns: A list of individual DER-encoded certificates """ # Look for PEM encoding if PKCS7_BEG in pkcs7: try: for substrate in _read_pem_blocks(pkcs7): for cert in _get_certs_from_pkcs7_substrate(substrate): yield cert except Exception: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert # If no PEM encoding, assume this is DER encoded and try to decode else: for cert in _get_certs_from_pkcs7_substrate(pkcs7): yield cert def _read_pem_blocks(data): """Parse a series of PEM-encoded blocks This method is based on pyasn1-modules.pem.readPemBlocksFromFile, but eliminates the need to operate on a file handle and is a generator. :param data: A long text string containing one or more PEM-encoded blocks :returns: An ASN1 substrate suitable for DER decoding.
""" stSpam, stHam, stDump = 0, 1, 2 startMarkers = {PKCS7_BEG.decode('utf-8'): 0} stopMarkers = {PKCS7_END.decode('utf-8'): 0} idx = -1 state = stSpam if isinstance(data, bytes): data = data.decode('utf-8') for certLine in data.replace('\r', '').split('\n'): if not certLine: continue certLine = certLine.strip() if state == stSpam: if certLine in startMarkers: certLines = [] idx = startMarkers[certLine] state = stHam continue if state == stHam: if certLine in stopMarkers and stopMarkers[certLine] == idx: state = stDump else: certLines.append(certLine) if state == stDump: yield b''.join([base64.b64decode(x) for x in certLines]) state = stSpam def _get_certs_from_pkcs7_substrate(substrate): """Extracts DER-encoded X509 certificates from a PKCS7 ASN1 DER substrate :param substrate: The substrate to be processed :returns: A list of DER-encoded X509 certificates """ try: contentInfo, _ = der_decoder.decode(substrate, asn1Spec=rfc2315.ContentInfo()) contentType = contentInfo.getComponentByName('contentType') except Exception: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert if contentType != rfc2315.signedData: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert try: content, _ = der_decoder.decode( contentInfo.getComponentByName('content'), asn1Spec=rfc2315.SignedData()) except Exception: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert for cert in content.getComponentByName('certificates'): yield der_encoder.encode(cert) def get_host_names(certificate): """Extract the host names from the Pem encoded X509 certificate :param certificate: A PEM encoded certificate :returns: A dictionary containing the following keys: ['cn', 'dns_names'] where 'cn' is the CN from the SubjectName of the certificate, and 'dns_names' is a list of dNSNames (possibly empty) from the SubjectAltNames of the certificate. 
""" if isinstance(certificate, str): certificate = certificate.encode('utf-8') try: cert = x509.load_pem_x509_certificate(certificate, backends.default_backend()) cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0] host_names = { 'cn': cn.value.lower(), 'dns_names': [] } try: ext = cert.extensions.get_extension_for_oid( x509.OID_SUBJECT_ALTERNATIVE_NAME ) host_names['dns_names'] = ext.value.get_values_for_type( x509.DNSName) except x509.ExtensionNotFound: LOG.debug("%s extension not found", x509.OID_SUBJECT_ALTERNATIVE_NAME) return host_names except Exception: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert def get_cert_expiration(certificate_pem): """Extract the expiration date from the Pem encoded X509 certificate :param certificate_pem: Certificate in PEM format :returns: Expiration date of certificate_pem """ try: cert = x509.load_pem_x509_certificate(certificate_pem, backends.default_backend()) return cert.not_valid_after except Exception: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert def _get_x509_from_pem_bytes(certificate_pem): """Parse X509 data from a PEM encoded certificate :param certificate_pem: Certificate in PEM format :returns: crypto high-level x509 data from the PEM string """ if isinstance(certificate_pem, str): certificate_pem = certificate_pem.encode('utf-8') try: x509cert = x509.load_pem_x509_certificate(certificate_pem, backends.default_backend()) except Exception: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert return x509cert def _get_x509_from_der_bytes(certificate_der): """Parse X509 data from a DER encoded certificate :param certificate_der: Certificate in DER format :returns: crypto high-level x509 data from the DER-encoded certificate """ try: x509cert = x509.load_der_x509_certificate(certificate_der, backends.default_backend()) except Exception: LOG.exception('Unreadable Certificate.') raise exceptions.UnreadableCert return x509cert def build_pem(tls_container): """Concatenate TLS container fields to create a PEM encoded certificate file :param tls_container: Object container TLS certificates :returns: Pem encoded certificate file """ pem = [tls_container.certificate] if tls_container.private_key: pem.append(tls_container.private_key) if tls_container.intermediates: pem.extend(tls_container.intermediates[:]) return b'\n'.join(pem) + b'\n' def load_certificates_data(cert_mngr, obj, context=None): """Load TLS certificate data from the listener/pool. 
return TLS_CERT and SNI_CERTS """ tls_cert = None sni_certs = [] if not context: context = oslo_context.RequestContext(project_id=obj.project_id) if obj.tls_certificate_id: try: tls_cert = _map_cert_tls_container( cert_mngr.get_cert(context, obj.tls_certificate_id, check_only=True)) except Exception as e: LOG.warning('Unable to retrieve certificate: %s due to %s.', obj.tls_certificate_id, str(e)) raise exceptions.CertificateRetrievalException( ref=obj.tls_certificate_id) if hasattr(obj, 'sni_containers') and obj.sni_containers: for sni_cont in obj.sni_containers: try: cert_container = _map_cert_tls_container( cert_mngr.get_cert(context, sni_cont.tls_container_id, check_only=True)) except Exception as e: LOG.warning('Unable to retrieve certificate: %s due to %s.', sni_cont.tls_container_id, str(e)) raise exceptions.CertificateRetrievalException( ref=sni_cont.tls_container_id) sni_certs.append(cert_container) return {'tls_cert': tls_cert, 'sni_certs': sni_certs} def _map_cert_tls_container(cert): certificate = cert.get_certificate() private_key = cert.get_private_key() private_key_passphrase = cert.get_private_key_passphrase() intermediates = cert.get_intermediates() if isinstance(certificate, str): certificate = certificate.encode('utf-8') if isinstance(private_key, str): private_key = private_key.encode('utf-8') if isinstance(private_key_passphrase, str): private_key_passphrase = private_key_passphrase.encode('utf-8') if intermediates: intermediates = [ (imd.encode('utf-8') if isinstance(imd, str) else imd) for imd in intermediates ] else: intermediates = [] return data_models.TLSContainer( # TODO(rm_work): applying nosec here because this is not intended to be # secure, it's just a way to get a consistent ID. Changing this would # break backwards compatibility with existing loadbalancers. id=hashlib.sha1(certificate).hexdigest(), # nosec primary_cn=get_primary_cn(certificate), private_key=prepare_private_key(private_key, private_key_passphrase), certificate=certificate, intermediates=intermediates) def get_primary_cn(tls_cert): """Returns primary CN for Certificate.""" return get_host_names(tls_cert)['cn'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/utils.py0000664000175000017500000001273500000000000017632 0ustar00zuulzuul00000000000000# Copyright 2011, VMware, Inc., 2014 A10 Networks # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Borrowed from nova code base, more utilities will be added/borrowed as and # when needed. """Utilities and helper functions.""" import base64 import hashlib import re import socket import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from stevedore import driver as stevedore_driver from octavia.common import constants CONF = cfg.CONF LOG = logging.getLogger(__name__) def get_hostname(): return socket.gethostname() def base64_sha1_string(string_to_hash): """Get a b64-encoded sha1 hash of a string. 
Not intended to be secure!""" # TODO(rm_work): applying nosec here because this is not intended to be # secure, it's just a way to get a consistent ID. Changing this would # break backwards compatibility with existing loadbalancers. hash_str = hashlib.sha1(string_to_hash.encode('utf-8')).digest() # nosec b64_str = base64.b64encode(hash_str, str.encode('_-', 'ascii')) b64_sha1 = b64_str.decode('UTF-8') # https://github.com/haproxy/haproxy/issues/644 return re.sub(r"^-", "x", b64_sha1) def get_amphora_driver(): amphora_driver = stevedore_driver.DriverManager( namespace='octavia.amphora.drivers', name=CONF.controller_worker.amphora_driver, invoke_on_load=True ).driver return amphora_driver def get_network_driver(): CONF.import_group('controller_worker', 'octavia.common.config') network_driver = stevedore_driver.DriverManager( namespace='octavia.network.drivers', name=CONF.controller_worker.network_driver, invoke_on_load=True ).driver return network_driver def is_ipv4(ip_address): """Check if ip address is IPv4 address.""" ip = netaddr.IPAddress(ip_address) return ip.version == 4 def is_ipv6(ip_address): """Check if ip address is IPv6 address.""" ip = netaddr.IPAddress(ip_address) return ip.version == 6 def is_cidr_ipv6(cidr): """Check if CIDR is IPv6 address with subnet prefix.""" ip = netaddr.IPNetwork(cidr) return ip.version == 6 def is_ipv6_lla(ip_address): """Check if ip address is IPv6 link local address.""" ip = netaddr.IPAddress(ip_address) return ip.version == 6 and ip.is_link_local() def ip_port_str(ip_address, port): """Return IP port as string representation depending on address family.""" ip = netaddr.IPAddress(ip_address) if ip.version == 4: return "{ip}:{port}".format(ip=ip, port=port) return "[{ip}]:{port}".format(ip=ip, port=port) def netmask_to_prefix(netmask): return netaddr.IPAddress(netmask).netmask_bits() def ip_netmask_to_cidr(ip, netmask): net = netaddr.IPNetwork("0.0.0.0/0") if ip and netmask: net = netaddr.IPNetwork( "{ip}/{netmask}".format(ip=ip, netmask=netmask) ) return "{ip}/{netmask}".format(ip=net.network, netmask=net.prefixlen) def get_vip_security_group_name(loadbalancer_id): if loadbalancer_id: return constants.VIP_SECURITY_GROUP_PREFIX + loadbalancer_id return None def get_compatible_value(value): if isinstance(value, str): value = value.encode('utf-8') return value def get_compatible_server_certs_key_passphrase(): key = CONF.certificates.server_certs_key_passphrase if isinstance(key, str): key = key.encode('utf-8') return base64.urlsafe_b64encode(key) def subnet_ip_availability(nw_ip_avail, subnet_id, req_num_ips): for subnet in nw_ip_avail.subnet_ip_availability: if subnet['subnet_id'] == subnet_id: return subnet['total_ips'] - subnet['used_ips'] >= req_num_ips return None def b(s): return s.encode('utf-8') def expand_expected_codes(codes): """Expand the expected code string in set of codes. 
200-204 -> 200, 201, 202, 203, 204 200, 203 -> 200, 203 """ retval = set() codes = re.split(', *', codes) for code in codes: if not code: continue if '-' in code: low, hi = code.split('-')[:2] retval.update( str(i) for i in range(int(low), int(hi) + 1)) else: retval.add(code) return retval class exception_logger(object): """Wrap a function and log raised exception :param logger: the logger to log the exception default is LOG.exception :returns: original value if no exception raised; re-raise the exception if any occurred """ def __init__(self, logger=None): self.logger = logger def __call__(self, func): if self.logger is None: _LOG = logging.getLogger(func.__module__) self.logger = _LOG.exception def call(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: with excutils.save_and_reraise_exception(): self.logger(e) return call ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/common/validate.py0000664000175000017500000004307400000000000020263 0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Several handy validation functions that go beyond simple type checking. Defined here so these can also be used at deeper levels than the API.
""" import ipaddress import re import netaddr from oslo_config import cfg import rfc3986 from wsme import types as wtypes from octavia.common import constants from octavia.common import exceptions from octavia.common import utils from octavia.i18n import _ CONF = cfg.CONF def url(url, require_scheme=True): """Raises an error if the url doesn't look like a URL.""" try: if not rfc3986.is_valid_uri(url, require_scheme=require_scheme): raise exceptions.InvalidURL(url=url) p_url = rfc3986.urlparse(rfc3986.normalize_uri(url)) if require_scheme: if p_url.scheme != 'http' and p_url.scheme != 'https': raise exceptions.InvalidURL(url=url) except Exception: raise exceptions.InvalidURL(url=url) return True def url_path(url_path): """Raises an error if the url_path doesn't look like a URL Path.""" try: p_url = rfc3986.urlparse(rfc3986.normalize_uri(url_path)) invalid_path = ( p_url.scheme or p_url.userinfo or p_url.host or p_url.port or p_url.path is None or not p_url.path.startswith('/') ) if invalid_path: raise exceptions.InvalidURLPath(url_path=url_path) except Exception: raise exceptions.InvalidURLPath(url_path=url_path) return True def header_name(header, what=None): """Raises an error if header does not look like an HTML header name.""" p = re.compile(constants.HTTP_HEADER_NAME_REGEX) if not p.match(header): raise exceptions.InvalidString(what=what) return True def cookie_value_string(value, what=None): """Raises an error if the value string contains invalid characters.""" p = re.compile(constants.HTTP_COOKIE_VALUE_REGEX) if not p.match(value): raise exceptions.InvalidString(what=what) return True def header_value_string(value, what=None): """Raises an error if the value string contains invalid characters.""" p = re.compile(constants.HTTP_HEADER_VALUE_REGEX) q = re.compile(constants.HTTP_QUOTED_HEADER_VALUE_REGEX) if not p.match(value) and not q.match(value): raise exceptions.InvalidString(what=what) return True def regex(regex): """Raises an error if the string given is not a valid regex.""" try: re.compile(regex) except Exception as e: raise exceptions.InvalidRegex(e=str(e)) return True # Note that we can evaluate this outside the context of any L7 Policy because # L7 rules must be internally consistent. 
def l7rule_data(l7rule): """Raises an error if the l7rule given is invalid in some way.""" if not l7rule.value: raise exceptions.InvalidL7Rule(msg=_('L7 rule type requires a value')) if l7rule.type == constants.L7RULE_TYPE_HEADER: if not l7rule.key: raise exceptions.InvalidL7Rule(msg='L7 rule type requires a key') header_name(l7rule.key, what='key') if l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: regex(l7rule.value) elif l7rule.compare_type in ( constants.L7RULE_COMPARE_TYPE_STARTS_WITH, constants.L7RULE_COMPARE_TYPE_ENDS_WITH, constants.L7RULE_COMPARE_TYPE_CONTAINS, constants.L7RULE_COMPARE_TYPE_EQUAL_TO): header_value_string(l7rule.value, what='header value') else: raise exceptions.InvalidL7Rule(msg='invalid comparison type ' 'for rule type') elif l7rule.type == constants.L7RULE_TYPE_COOKIE: if not l7rule.key: raise exceptions.InvalidL7Rule(msg='L7 rule type requires a key') header_name(l7rule.key, what='key') if l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: regex(l7rule.value) elif l7rule.compare_type in ( constants.L7RULE_COMPARE_TYPE_STARTS_WITH, constants.L7RULE_COMPARE_TYPE_ENDS_WITH, constants.L7RULE_COMPARE_TYPE_CONTAINS, constants.L7RULE_COMPARE_TYPE_EQUAL_TO): cookie_value_string(l7rule.value, what='cookie value') else: raise exceptions.InvalidL7Rule(msg='invalid comparison type ' 'for rule type') elif l7rule.type in (constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_TYPE_PATH): if l7rule.compare_type in ( constants.L7RULE_COMPARE_TYPE_STARTS_WITH, constants.L7RULE_COMPARE_TYPE_ENDS_WITH, constants.L7RULE_COMPARE_TYPE_CONTAINS, constants.L7RULE_COMPARE_TYPE_EQUAL_TO): header_value_string(l7rule.value, what='comparison value') elif l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: regex(l7rule.value) else: raise exceptions.InvalidL7Rule(msg='invalid comparison type ' 'for rule type') elif l7rule.type == constants.L7RULE_TYPE_FILE_TYPE: if l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: regex(l7rule.value) elif l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_EQUAL_TO: header_value_string(l7rule.value, what='comparison value') else: raise exceptions.InvalidL7Rule(msg='invalid comparison type ' 'for rule type') elif l7rule.type in [constants.L7RULE_TYPE_SSL_CONN_HAS_CERT, constants.L7RULE_TYPE_SSL_VERIFY_RESULT, constants.L7RULE_TYPE_SSL_DN_FIELD]: validate_l7rule_ssl_types(l7rule) else: raise exceptions.InvalidL7Rule(msg='invalid rule type') return True def validate_l7rule_ssl_types(l7rule): if not l7rule.type or l7rule.type not in [ constants.L7RULE_TYPE_SSL_CONN_HAS_CERT, constants.L7RULE_TYPE_SSL_VERIFY_RESULT, constants.L7RULE_TYPE_SSL_DN_FIELD]: return rule_type = None if l7rule.type == wtypes.Unset else l7rule.type req_key = None if l7rule.key == wtypes.Unset else l7rule.key req_value = None if l7rule.value == wtypes.Unset else l7rule.value compare_type = (None if l7rule.compare_type == wtypes.Unset else l7rule.compare_type) msg = None if rule_type == constants.L7RULE_TYPE_SSL_CONN_HAS_CERT: # key and value are not allowed if req_key: # log error or raise msg = 'L7rule type {0} does not use the "key" field.'.format( rule_type) elif req_value.lower() != 'true': msg = 'L7rule value {0} is not a boolean True string.'.format( req_value) elif compare_type != constants.L7RULE_COMPARE_TYPE_EQUAL_TO: msg = 'L7rule type {0} only supports the {1} compare type.'.format( rule_type, constants.L7RULE_COMPARE_TYPE_EQUAL_TO) if rule_type == constants.L7RULE_TYPE_SSL_VERIFY_RESULT: if req_key: # log or raise req_key not used msg = 
'L7rule type {0} does not use the "key" field.'.format( rule_type) elif not req_value.isdigit() or int(req_value) < 0: # log or raise req_value must be int msg = 'L7rule type {0} needs a int value, which is >= 0'.format( rule_type) elif compare_type != constants.L7RULE_COMPARE_TYPE_EQUAL_TO: msg = 'L7rule type {0} only supports the {1} compare type.'.format( rule_type, constants.L7RULE_COMPARE_TYPE_EQUAL_TO) if rule_type == constants.L7RULE_TYPE_SSL_DN_FIELD: dn_regex = re.compile(constants.DISTINGUISHED_NAME_FIELD_REGEX) if compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: regex(l7rule.value) if not req_key or not req_value: # log or raise key and value must be specified. msg = 'L7rule type {0} needs to specify a key and a value.'.format( rule_type) # log or raise the key must be split by '-' elif not dn_regex.match(req_key): msg = ('Invalid L7rule distinguished name field.') if msg: raise exceptions.InvalidL7Rule(msg=msg) def sanitize_l7policy_api_args(l7policy, create=False): """Validate and make consistent L7Policy API arguments. This method is mainly meant to sanitize L7 Policy create and update API dictionaries, so that we strip 'None' values that don't apply for our particular update. This method does *not* verify that any redirect_pool_id exists in the database, but will raise an error if a redirect_url doesn't look like a URL. :param l7policy: The L7 Policy dictionary we are sanitizing / validating """ if 'action' in l7policy.keys(): if l7policy['action'] == constants.L7POLICY_ACTION_REJECT: l7policy.update({'redirect_url': None}) l7policy.update({'redirect_pool_id': None}) l7policy.pop('redirect_pool', None) elif l7policy['action'] == constants.L7POLICY_ACTION_REDIRECT_TO_URL: if not l7policy.get('redirect_url'): raise exceptions.InvalidL7PolicyArgs( msg='redirect_url must not be None') l7policy.update({'redirect_pool_id': None}) l7policy.pop('redirect_pool', None) elif l7policy['action'] == constants.L7POLICY_ACTION_REDIRECT_TO_POOL: if (not l7policy.get('redirect_pool_id') and not l7policy.get('redirect_pool')): raise exceptions.InvalidL7PolicyArgs( msg='redirect_pool_id or redirect_pool must not be None') l7policy.update({'redirect_url': None}) elif l7policy['action'] == constants.L7POLICY_ACTION_REDIRECT_PREFIX: if not l7policy.get('redirect_prefix'): raise exceptions.InvalidL7PolicyArgs( msg='redirect_prefix must not be None') else: raise exceptions.InvalidL7PolicyAction( action=l7policy['action']) if ((l7policy.get('redirect_pool_id') or l7policy.get('redirect_pool')) and (l7policy.get('redirect_url') or l7policy.get('redirect_prefix'))): raise exceptions.InvalidL7PolicyArgs( msg='Cannot specify redirect_pool_id and redirect_url or ' 'redirect_prefix at the same time') if l7policy.get('redirect_pool_id'): l7policy.update({ 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL}) l7policy.update({'redirect_url': None}) l7policy.pop('redirect_pool', None) l7policy.update({'redirect_prefix': None}) l7policy.update({'redirect_http_code': None}) if l7policy.get('redirect_pool'): l7policy.update({ 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL}) l7policy.update({'redirect_url': None}) l7policy.pop('redirect_pool_id', None) l7policy.update({'redirect_prefix': None}) l7policy.update({'redirect_http_code': None}) if l7policy.get('redirect_url'): url(l7policy['redirect_url']) l7policy.update({ 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL}) l7policy.update({'redirect_pool_id': None}) l7policy.update({'redirect_prefix': None}) l7policy.pop('redirect_pool', None) if not
l7policy.get('redirect_http_code'): l7policy.update({'redirect_http_code': 302}) if l7policy.get('redirect_prefix'): url(l7policy['redirect_prefix']) l7policy.update({ 'action': constants.L7POLICY_ACTION_REDIRECT_PREFIX}) l7policy.update({'redirect_pool_id': None}) l7policy.update({'redirect_url': None}) l7policy.pop('redirect_pool', None) if not l7policy.get('redirect_http_code'): l7policy.update({'redirect_http_code': 302}) # If we are creating, we need an action at this point if create and 'action' not in l7policy.keys(): raise exceptions.InvalidL7PolicyAction(action='None') # See if we have anything left after that... if not l7policy.keys(): raise exceptions.InvalidL7PolicyArgs(msg='Invalid update options') return l7policy def port_exists(port_id, context=None): """Raises an exception when a port does not exist.""" network_driver = utils.get_network_driver() try: port = network_driver.get_port(port_id, context=context) except Exception: raise exceptions.InvalidSubresource(resource='Port', id=port_id) return port def check_port_in_use(port): """Raise an exception when a port is used.""" if port.device_id: raise exceptions.ValidationException(detail=_( "Port %(port_id)s is already used by device %(device_id)s ") % {'port_id': port.id, 'device_id': port.device_id}) return False def subnet_exists(subnet_id, context=None): """Raises an exception when a subnet does not exist.""" network_driver = utils.get_network_driver() try: subnet = network_driver.get_subnet(subnet_id, context=context) except Exception: raise exceptions.InvalidSubresource(resource='Subnet', id=subnet_id) return subnet def qos_policy_exists(qos_policy_id): network_driver = utils.get_network_driver() qos_extension_enabled(network_driver) try: qos_policy = network_driver.get_qos_policy(qos_policy_id) except Exception: raise exceptions.InvalidSubresource(resource='qos_policy', id=qos_policy_id) return qos_policy def qos_extension_enabled(network_driver): if not network_driver.qos_enabled(): raise exceptions.ValidationException(detail=_( "VIP QoS policy is not allowed in this deployment.")) def network_exists_optionally_contains_subnet(network_id, subnet_id=None, context=None): """Raises an exception when a network does not exist. If a subnet is provided, also validate the network contains that subnet. 
""" network_driver = utils.get_network_driver() try: network = network_driver.get_network(network_id, context=context) except Exception: raise exceptions.InvalidSubresource(resource='Network', id=network_id) if subnet_id: if not network.subnets or subnet_id not in network.subnets: raise exceptions.InvalidSubresource(resource='Subnet', id=subnet_id) return network def network_allowed_by_config(network_id, valid_networks=None): if CONF.networking.valid_vip_networks and not valid_networks: valid_networks = CONF.networking.valid_vip_networks if valid_networks: valid_networks = map(str.lower, valid_networks) if network_id.lower() not in valid_networks: raise exceptions.ValidationException(detail=_( 'Supplied VIP network_id is not allowed by the configuration ' 'of this deployment.')) def is_ip_member_of_cidr(address, cidr): if netaddr.IPAddress(address) in netaddr.IPNetwork(cidr): return True return False def check_session_persistence(SP_dict): try: if SP_dict['cookie_name']: if SP_dict['type'] != constants.SESSION_PERSISTENCE_APP_COOKIE: raise exceptions.ValidationException(detail=_( 'Field "cookie_name" can only be specified with session ' 'persistence of type "APP_COOKIE".')) bad_cookie_name = re.compile(r'[\x00-\x20\x22\x28-\x29\x2c\x2f' r'\x3a-\x40\x5b-\x5d\x7b\x7d\x7f]+') valid_chars = re.compile(r'[\x00-\xff]+') if (bad_cookie_name.search(SP_dict['cookie_name']) or not valid_chars.search(SP_dict['cookie_name'])): raise exceptions.ValidationException(detail=_( 'Supplied "cookie_name" is invalid.')) if (SP_dict['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and not SP_dict['cookie_name']): raise exceptions.ValidationException(detail=_( 'Field "cookie_name" must be specified when using the ' '"APP_COOKIE" session persistence type.')) except exceptions.ValidationException: raise except Exception: raise exceptions.ValidationException(detail=_( 'Invalid session_persistence provided.')) def ip_not_reserved(ip_address): ip_address = ( ipaddress.ip_address(ip_address).exploded.upper()) if ip_address in CONF.networking.reserved_ips: raise exceptions.InvalidOption(value=ip_address, option='member address') def is_flavor_spares_compatible(flavor): if flavor: # If a compute flavor is specified, the flavor is not spares compatible if flavor.get(constants.COMPUTE_FLAVOR, None): return False return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/compute/0000775000175000017500000000000000000000000016274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/compute/__init__.py0000664000175000017500000000107400000000000020407 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/compute/compute_base.py0000664000175000017500000001207000000000000021314 0ustar00zuulzuul00000000000000# Copyright 2011-2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc class ComputeBase(object, metaclass=abc.ABCMeta): @abc.abstractmethod def build(self, name="amphora_name", amphora_flavor=None, image_id=None, image_tag=None, image_owner=None, key_name=None, sec_groups=None, network_ids=None, config_drive_files=None, user_data=None, server_group_id=None, availability_zone=None): """Build a new amphora. :param name: Optional name for Amphora :param amphora_flavor: Optionally specify a flavor :param image_id: ID of the base image for the amphora instance :param image_tag: tag of the base image for the amphora instance :param image_owner: Optional owner (project) ID of the base image for the amphora instance :param key_name: Optionally specify a keypair :param sec_groups: Optionally specify list of security groups :param network_ids: A list of network IDs to attach to the amphora :param config_drive_files: An optional dict of files to overwrite on the server upon boot. Keys are file names (e.g. /etc/passwd) and values are the file contents (either as a string or as a file-like object). A maximum of five entries is allowed, and each file must be 10k or less. :param user_data: Optional user data to be exposed by the metadata server; this can be a file-like object or a string :param server_group_id: Optional server group id (uuid) which is used for the anti-affinity feature :param availability_zone: Name of the compute availability zone. :raises ComputeBuildException: if compute failed to build amphora :returns: UUID of amphora """ @abc.abstractmethod def delete(self, compute_id): """Delete the specified amphora :param compute_id: The id of the amphora to delete """ @abc.abstractmethod def status(self, compute_id): """Check whether the specified amphora is up :param compute_id: the ID of the desired amphora :returns: The compute "status" response ("ONLINE" or "OFFLINE") """ @abc.abstractmethod def get_amphora(self, compute_id, management_network_id=None): """Retrieve an amphora object :param compute_id: the compute id of the desired amphora :param management_network_id: ID of the management network :returns: the amphora object :returns: fault message or None """ @abc.abstractmethod def create_server_group(self, name, policy): """Create a server group object :param name: the name of the server group :param policy: the policy of the server group :returns: the server group object """ @abc.abstractmethod def delete_server_group(self, server_group_id): """Delete a server group object :param server_group_id: the uuid of a server group """ @abc.abstractmethod def attach_network_or_port(self, compute_id, network_id=None, ip_address=None, port_id=None): """Connects an existing amphora to an existing network.
:param compute_id: id of an amphora in the compute service :param network_id: id of a network :param ip_address: ip address to attempt to be assigned to interface :param port_id: id of the neutron port :return: nova interface :raises: Exception """ @abc.abstractmethod def detach_port(self, compute_id, port_id): """Disconnects an existing amphora from an existing port. :param compute_id: id of an amphora in the compute service :param port_id: id of the port :return: None :raises: Exception """ @abc.abstractmethod def validate_flavor(self, flavor_id): """Validates that a compute flavor exists. :param flavor_id: ID of the compute flavor. :return: None :raises: NotFound :raises: NotImplementedError """ @abc.abstractmethod def validate_availability_zone(self, availability_zone): """Validates that a compute availability zone exists. :param availability_zone: Name of the compute availability zone. :return: None :raises: NotFound :raises: NotImplementedError """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/compute/drivers/0000775000175000017500000000000000000000000017752 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/compute/drivers/__init__.py0000664000175000017500000000107400000000000022065 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/compute/drivers/noop_driver/0000775000175000017500000000000000000000000022300 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/compute/drivers/noop_driver/__init__.py0000664000175000017500000000107400000000000024413 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/compute/drivers/noop_driver/driver.py0000664000175000017500000001646400000000000024160 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import uuidutils from octavia.common import constants from octavia.common import data_models from octavia.compute import compute_base as driver_base from octavia.network import data_models as network_models LOG = logging.getLogger(__name__) class NoopManager(object): def __init__(self): super(NoopManager, self).__init__() self.computeconfig = {} def build(self, name="amphora_name", amphora_flavor=None, image_id=None, image_tag=None, image_owner=None, key_name=None, sec_groups=None, network_ids=None, config_drive_files=None, user_data=None, port_ids=None, server_group_id=None, availability_zone=None): LOG.debug("Compute %s no-op, build name %s, amphora_flavor %s, " "image_id %s, image_tag %s, image_owner %s, key_name %s, " "sec_groups %s, network_ids %s, config_drive_files %s, " "user_data %s, port_ids %s, server_group_id %s, " "availability_zone %s", self.__class__.__name__, name, amphora_flavor, image_id, image_tag, image_owner, key_name, sec_groups, network_ids, config_drive_files, user_data, port_ids, server_group_id, availability_zone) self.computeconfig[(name, amphora_flavor, image_id, image_tag, image_owner, key_name, user_data, server_group_id)] = ( name, amphora_flavor, image_id, image_tag, image_owner, key_name, sec_groups, network_ids, config_drive_files, user_data, port_ids, server_group_id, 'build') compute_id = uuidutils.generate_uuid() return compute_id def delete(self, compute_id): LOG.debug("Compute %s no-op, compute_id %s", self.__class__.__name__, compute_id) self.computeconfig[compute_id] = (compute_id, 'delete') def status(self, compute_id): LOG.debug("Compute %s no-op, compute_id %s", self.__class__.__name__, compute_id) self.computeconfig[compute_id] = (compute_id, 'status') return constants.UP def get_amphora(self, compute_id, management_network_id=None): LOG.debug("Compute %s no-op, compute_id %s, management_network_id %s", self.__class__.__name__, compute_id, management_network_id) self.computeconfig[(compute_id, management_network_id)] = ( compute_id, management_network_id, 'get_amphora') return data_models.Amphora( compute_id=compute_id, status=constants.ACTIVE, lb_network_ip='192.0.2.1' ), None def create_server_group(self, name, policy): LOG.debug("Create Server Group %s no-op, name %s, policy %s ", self.__class__.__name__, name, policy) self.computeconfig[(name, policy)] = (name, policy, 'create') def delete_server_group(self, server_group_id): LOG.debug("Delete Server Group %s no-op, id %s ", self.__class__.__name__, server_group_id) self.computeconfig[server_group_id] = (server_group_id, 'delete') def attach_network_or_port(self, compute_id, network_id=None, ip_address=None, port_id=None): LOG.debug("Compute %s no-op, attach_network_or_port compute_id %s," "network_id %s, ip_address %s, port_id %s", self.__class__.__name__, compute_id, network_id, ip_address, port_id) self.computeconfig[(compute_id, network_id, ip_address, port_id)] = ( compute_id, network_id, ip_address, port_id, 'attach_network_or_port') return network_models.Interface( id=uuidutils.generate_uuid(), compute_id=compute_id, 
network_id=network_id, fixed_ips=[], port_id=uuidutils.generate_uuid() ) def detach_port(self, compute_id, port_id): LOG.debug("Compute %s no-op, detach_network compute_id %s, " "port_id %s", self.__class__.__name__, compute_id, port_id) self.computeconfig[(compute_id, port_id)] = ( compute_id, port_id, 'detach_port') def validate_flavor(self, flavor_id): LOG.debug("Compute %s no-op, validate_flavor flavor_id %s", self.__class__.__name__, flavor_id) self.computeconfig[flavor_id] = (flavor_id, 'validate_flavor') def validate_availability_zone(self, availability_zone): LOG.debug("Compute %s no-op, validate_availability_zone name %s", self.__class__.__name__, availability_zone) self.computeconfig[availability_zone] = ( availability_zone, 'validate_availability_zone') class NoopComputeDriver(driver_base.ComputeBase): def __init__(self): super(NoopComputeDriver, self).__init__() self.driver = NoopManager() def build(self, name="amphora_name", amphora_flavor=None, image_id=None, image_tag=None, image_owner=None, key_name=None, sec_groups=None, network_ids=None, config_drive_files=None, user_data=None, port_ids=None, server_group_id=None, availability_zone=None): compute_id = self.driver.build(name, amphora_flavor, image_id, image_tag, image_owner, key_name, sec_groups, network_ids, config_drive_files, user_data, port_ids, server_group_id, availability_zone) return compute_id def delete(self, compute_id): self.driver.delete(compute_id) def status(self, compute_id): return self.driver.status(compute_id) def get_amphora(self, compute_id, management_network_id=None): return self.driver.get_amphora(compute_id, management_network_id) def create_server_group(self, name, policy): return self.driver.create_server_group(name, policy) def delete_server_group(self, server_group_id): self.driver.delete_server_group(server_group_id) def attach_network_or_port(self, compute_id, network_id=None, ip_address=None, port_id=None): self.driver.attach_network_or_port(compute_id, network_id, ip_address, port_id) def detach_port(self, compute_id, port_id): self.driver.detach_port(compute_id, port_id) def validate_flavor(self, flavor_id): self.driver.validate_flavor(flavor_id) def validate_availability_zone(self, availability_zone): self.driver.validate_availability_zone(availability_zone) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/compute/drivers/nova_driver.py0000664000175000017500000004551700000000000022656 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
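# Like the amphora, network, and volume driver lookups seen earlier, the
# compute driver is normally resolved through stevedore at runtime. A
# rough sketch of that lookup; treat the entry-point namespace and config
# option name as assumptions mirroring the other driver loaders.
def _example_load_compute_driver():
    from oslo_config import cfg
    from stevedore import driver as stevedore_driver

    compute_driver = stevedore_driver.DriverManager(
        namespace='octavia.compute.drivers',
        name=cfg.CONF.controller_worker.compute_driver,
        invoke_on_load=True
    ).driver
    # The loaded object implements ComputeBase: build(), delete(), etc.
    return compute_driver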
import random import string from novaclient import exceptions as nova_exceptions from oslo_config import cfg from oslo_log import log as logging from stevedore import driver as stevedore_driver from octavia.common import clients from octavia.common import constants from octavia.common import data_models as models from octavia.common import exceptions from octavia.compute import compute_base LOG = logging.getLogger(__name__) CONF = cfg.CONF def _extract_amp_image_id_by_tag(client, image_tag, image_owner): if image_owner: images = list(client.images.list( filters={'tag': [image_tag], 'owner': image_owner, 'status': constants.GLANCE_IMAGE_ACTIVE}, sort='created_at:desc', limit=2)) else: images = list(client.images.list( filters={'tag': [image_tag], 'status': constants.GLANCE_IMAGE_ACTIVE}, sort='created_at:desc', limit=2)) if not images: raise exceptions.GlanceNoTaggedImages(tag=image_tag) image_id = images[0]['id'] num_images = len(images) if num_images > 1: LOG.warning("A single Glance image should be tagged with %(tag)s tag, " "but at least two were found. Using %(image_id)s.", {'tag': image_tag, 'image_id': image_id}) return image_id def _get_image_uuid(client, image_id, image_tag, image_owner): if image_id: if image_tag: LOG.warning("Both amp_image_id and amp_image_tag options defined. " "Using the amp_image_id.") return image_id return _extract_amp_image_id_by_tag(client, image_tag, image_owner) class VirtualMachineManager(compute_base.ComputeBase): '''Compute implementation of virtual machines via nova.''' def __init__(self): super(VirtualMachineManager, self).__init__() # Must initialize nova api self._nova_client = clients.NovaAuth.get_nova_client( endpoint=CONF.nova.endpoint, region=CONF.nova.region_name, endpoint_type=CONF.nova.endpoint_type, insecure=CONF.nova.insecure, cacert=CONF.nova.ca_certificates_file) self._glance_client = clients.GlanceAuth.get_glance_client( service_name=CONF.glance.service_name, endpoint=CONF.glance.endpoint, region=CONF.glance.region_name, endpoint_type=CONF.glance.endpoint_type, insecure=CONF.glance.insecure, cacert=CONF.glance.ca_certificates_file) self.manager = self._nova_client.servers self.server_groups = self._nova_client.server_groups self.flavor_manager = self._nova_client.flavors self.availability_zone_manager = self._nova_client.availability_zones self.volume_driver = stevedore_driver.DriverManager( namespace='octavia.volume.drivers', name=CONF.controller_worker.volume_driver, invoke_on_load=True ).driver def build(self, name="amphora_name", amphora_flavor=None, image_id=None, image_tag=None, image_owner=None, key_name=None, sec_groups=None, network_ids=None, port_ids=None, config_drive_files=None, user_data=None, server_group_id=None, availability_zone=None): '''Create a new virtual machine. :param name: optional name for amphora :param amphora_flavor: image flavor for virtual machine :param image_id: image ID for virtual machine :param image_tag: image tag for virtual machine :param key_name: keypair to add to the virtual machine :param sec_groups: Security group IDs for virtual machine :param network_ids: Network IDs to include on virtual machine :param port_ids: Port IDs to include on virtual machine :param config_drive_files: An optional dict of files to overwrite on the server upon boot. Keys are file names (i.e. /etc/passwd) and values are the file contents (either as a string or as a file-like object). A maximum of five entries is allowed, and each file must be 10k or less. 
:param user_data: Optional user data to pass to be exposed by the metadata server this can be a file type object as well or a string :param server_group_id: Optional server group id(uuid) which is used for anti_affinity feature :param availability_zone: Name of the compute availability zone. :raises ComputeBuildException: if nova failed to build virtual machine :returns: UUID of amphora ''' volume_id = None try: network_ids = network_ids or [] port_ids = port_ids or [] nics = [] if network_ids: nics.extend([{"net-id": net_id} for net_id in network_ids]) if port_ids: nics.extend([{"port-id": port_id} for port_id in port_ids]) server_group = None if server_group_id is None else { "group": server_group_id} az_name = availability_zone or CONF.nova.availability_zone image_id = _get_image_uuid( self._glance_client, image_id, image_tag, image_owner) if CONF.nova.random_amphora_name_length: r = random.SystemRandom() name = "a{}".format("".join( [r.choice(string.ascii_uppercase + string.digits) for i in range(CONF.nova.random_amphora_name_length - 1)] )) block_device_mapping = {} if (CONF.controller_worker.volume_driver != constants.VOLUME_NOOP_DRIVER): # creating volume LOG.debug('Creating volume for amphora from image %s', image_id) volume_id = self.volume_driver.create_volume_from_image( image_id) LOG.debug('Created boot volume %s for amphora', volume_id) # If use volume based, does not require image ID anymore image_id = None # Boot from volume with parameters: target device name = vda, # device id = volume_id, device type and size unspecified, # delete-on-terminate = true (volume will be deleted by Nova # on instance termination) block_device_mapping = {'vda': '%s:::true' % volume_id} amphora = self.manager.create( name=name, image=image_id, flavor=amphora_flavor, block_device_mapping=block_device_mapping, key_name=key_name, security_groups=sec_groups, nics=nics, files=config_drive_files, userdata=user_data, config_drive=True, scheduler_hints=server_group, availability_zone=az_name ) return amphora.id except Exception as e: if (CONF.controller_worker.volume_driver != constants.VOLUME_NOOP_DRIVER): self.volume_driver.delete_volume(volume_id) LOG.exception("Nova failed to build the instance due to: %s", str(e)) raise exceptions.ComputeBuildException(fault=e) def delete(self, compute_id): '''Delete a virtual machine. :param compute_id: virtual machine UUID ''' try: self.manager.delete(server=compute_id) except nova_exceptions.NotFound: LOG.warning("Nova instance with id: %s not found. " "Assuming already deleted.", compute_id) except Exception as e: LOG.exception("Error deleting nova virtual machine.") raise exceptions.ComputeDeleteException(compute_msg=str(e)) def status(self, compute_id): '''Retrieve the status of a virtual machine. :param compute_id: virtual machine UUID :returns: constant of amphora status ''' try: amphora, fault = self.get_amphora(compute_id) if amphora and amphora.status == 'ACTIVE': return constants.UP except Exception: LOG.exception("Error retrieving nova virtual machine status.") raise exceptions.ComputeStatusException() return constants.DOWN def get_amphora(self, compute_id, management_network_id=None): '''Retrieve the information in nova of a virtual machine. 
:param compute_id: virtual machine UUID :param management_network_id: ID of the management network :returns: an amphora object :returns: fault message or None ''' # utilize nova client ServerManager 'get' method to retrieve info try: amphora = self.manager.get(compute_id) except Exception: LOG.exception("Error retrieving nova virtual machine.") raise exceptions.ComputeGetException() return self._translate_amphora(amphora, management_network_id) def _translate_amphora(self, nova_response, management_network_id=None): '''Convert a nova virtual machine into an amphora object. :param nova_response: JSON response from nova :param management_network_id: ID of the management network :returns: an amphora object :returns: fault message or None ''' # Extract interfaces of virtual machine to populate desired amphora # fields lb_network_ip = None availability_zone = None image_id = None if management_network_id: boot_networks = [management_network_id] else: boot_networks = CONF.controller_worker.amp_boot_network_list try: inf_list = nova_response.interface_list() for interface in inf_list: net_id = interface.net_id # Pick the first fixed_ip if this is a boot network or if # there are no boot networks configured (use default network) if net_id in boot_networks or not boot_networks: lb_network_ip = interface.fixed_ips[0]['ip_address'] break try: availability_zone = getattr( nova_response, 'OS-EXT-AZ:availability_zone') except AttributeError: LOG.info('No availability zone listed for server %s', nova_response.id) except Exception: LOG.debug('Extracting virtual interfaces through nova ' 'os-interfaces extension failed.') fault = getattr(nova_response, 'fault', None) if (CONF.controller_worker.volume_driver == constants.VOLUME_NOOP_DRIVER): image_id = nova_response.image.get("id") else: try: volumes = self._nova_client.volumes.get_server_volumes( nova_response.id) except Exception: LOG.debug('Extracting volumes through nova ' 'os-volumes extension failed.') volumes = [] if not volumes: LOG.warning('Boot volume not found for volume-backed ' 'amphora instance %s', nova_response.id) else: if len(volumes) > 1: LOG.warning('Found more than one volume (%s) ' 'for amphora instance %s', len(volumes), nova_response.id) volume_id = volumes[0].volumeId image_id = self.volume_driver.get_image_from_volume(volume_id) response = models.Amphora( compute_id=nova_response.id, status=nova_response.status, lb_network_ip=lb_network_ip, cached_zone=availability_zone, image_id=image_id, compute_flavor=nova_response.flavor.get("id") ) return response, fault def create_server_group(self, name, policy): """Create a server group object :param name: the name of the server group :param policy: the policy of the server group :raises: Generic exception if the server group is not created :returns: the server group object """ kwargs = {'name': name, 'policies': [policy]} try: server_group_obj = self.server_groups.create(**kwargs) return server_group_obj except Exception: LOG.exception("Error creating server group instance.") raise exceptions.ServerGroupObjectCreateException() def delete_server_group(self, server_group_id): """Delete a server group object :raises: Generic exception if the server group is not deleted :param server_group_id: the uuid of a server group """ try: self.server_groups.delete(server_group_id) except nova_exceptions.NotFound: LOG.warning("Server group instance with id: %s not found. 
" "Assuming already deleted.", server_group_id) except Exception: LOG.exception("Error deleting server group instance.") raise exceptions.ServerGroupObjectDeleteException() def attach_network_or_port(self, compute_id, network_id=None, ip_address=None, port_id=None): """Attach a port or a network to an existing amphora :param compute_id: id of an amphora in the compute service :param network_id: id of a network :param ip_address: ip address to attempt to be assigned to interface :param port_id: id of the neutron port :return: nova interface instance :raises ComputePortInUseException: The port is in use somewhere else :raises ComputeUnknownException: Unknown nova error """ try: interface = self.manager.interface_attach( server=compute_id, net_id=network_id, fixed_ip=ip_address, port_id=port_id) except nova_exceptions.Conflict as e: # The port is already in use. if port_id: # Check if the port we want is already attached try: interfaces = self.manager.interface_list(compute_id) for interface in interfaces: if interface.id == port_id: return interface except Exception as e: raise exceptions.ComputeUnknownException(exc=str(e)) raise exceptions.ComputePortInUseException(port=port_id) # Nova should have created the port, so something is really # wrong in nova if we get here. raise exceptions.ComputeUnknownException(exc=str(e)) except nova_exceptions.NotFound as e: if 'Instance' in str(e): raise exceptions.NotFound(resource='Instance', id=compute_id) if 'Network' in str(e): raise exceptions.NotFound(resource='Network', id=network_id) if 'Port' in str(e): raise exceptions.NotFound(resource='Port', id=port_id) raise exceptions.NotFound(resource=str(e), id=compute_id) except Exception as e: LOG.error('Error attaching network %(network_id)s with ip ' '%(ip_address)s and port %(port)s to amphora ' '(compute_id: %(compute_id)s) ', { 'compute_id': compute_id, 'network_id': network_id, 'ip_address': ip_address, 'port': port_id }) raise exceptions.ComputeUnknownException(exc=str(e)) return interface def detach_port(self, compute_id, port_id): """Detaches a port from an existing amphora. :param compute_id: id of an amphora in the compute service :param port_id: id of the port :return: None """ try: self.manager.interface_detach(server=compute_id, port_id=port_id) except Exception: LOG.error('Error detaching port %(port_id)s from amphora ' 'with compute ID %(compute_id)s. ' 'Skipping.', { 'port_id': port_id, 'compute_id': compute_id }) def validate_flavor(self, flavor_id): """Validates that a flavor exists in nova. :param flavor_id: ID of the flavor to lookup in nova. :raises: NotFound :returns: None """ try: self.flavor_manager.get(flavor_id) except nova_exceptions.NotFound: LOG.info('Flavor %s was not found in nova.', flavor_id) raise exceptions.InvalidSubresource(resource='Nova flavor', id=flavor_id) except Exception as e: LOG.exception('Nova reports a failure getting flavor details for ' 'flavor ID %s: %s', flavor_id, str(e)) raise def validate_availability_zone(self, availability_zone): """Validates that an availability zone exists in nova. :param availability_zone: Name of the availability zone to lookup. :raises: NotFound :returns: None """ try: compute_zones = [ a.zoneName for a in self.availability_zone_manager.list( detailed=False)] if availability_zone not in compute_zones: LOG.info('Availability zone %s was not found in nova. 
%s', availability_zone, compute_zones) raise exceptions.InvalidSubresource( resource='Nova availability zone', id=availability_zone) except Exception as e: LOG.exception('Nova reports a failure listing ' 'availability zones: %s', str(e)) raise ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3862166 octavia-6.2.2/octavia/controller/0000775000175000017500000000000000000000000017003 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/__init__.py0000664000175000017500000000107400000000000021116 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/healthmanager/0000775000175000017500000000000000000000000021603 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/healthmanager/__init__.py0000664000175000017500000000107400000000000023716 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/healthmanager/health_drivers/0000775000175000017500000000000000000000000024606 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/healthmanager/health_drivers/__init__.py0000664000175000017500000000000000000000000026705 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/healthmanager/health_drivers/update_base.py0000664000175000017500000000154100000000000027435 0ustar00zuulzuul00000000000000# Copyright 2018 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc class HealthUpdateBase(object): @abc.abstractmethod def update_health(self, health, srcaddr): raise NotImplementedError() class StatsUpdateBase(object): @abc.abstractmethod def update_stats(self, health_message, srcaddr): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/healthmanager/health_drivers/update_db.py0000664000175000017500000005650300000000000027120 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import time import timeit from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import sqlalchemy from stevedore import driver as stevedore_driver from octavia.common import constants from octavia.common import stats from octavia.controller.healthmanager.health_drivers import update_base from octavia.db import api as db_api from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) class UpdateHealthDb(update_base.HealthUpdateBase): def __init__(self): super(UpdateHealthDb, self).__init__() # first set up the amphora, listener, member (nodes) and pool repos self.amphora_repo = repo.AmphoraRepository() self.amphora_health_repo = repo.AmphoraHealthRepository() self.listener_repo = repo.ListenerRepository() self.loadbalancer_repo = repo.LoadBalancerRepository() self.member_repo = repo.MemberRepository() self.pool_repo = repo.PoolRepository() def _update_status(self, session, repo, entity_type, entity_id, new_op_status, old_op_status): message = {} if old_op_status.lower() != new_op_status.lower(): LOG.debug("%s %s status has changed from %s to " "%s, updating db.", entity_type, entity_id, old_op_status, new_op_status) repo.update(session, entity_id, operating_status=new_op_status) # Map the status for neutron-lbaas compatibility if new_op_status == constants.DRAINING: new_op_status = constants.ONLINE message.update({constants.OPERATING_STATUS: new_op_status}) def update_health(self, health, srcaddr): # The executor will eat any exceptions from the update_health code # so we need to wrap it and log the unhandled exception start_time = timeit.default_timer() try: self._update_health(health, srcaddr) except Exception as e: LOG.exception('Health update for amphora %(amp)s encountered ' 'error %(err)s. Skipping health update.', {'amp': health['id'], 'err': e}) # TODO(johnsom) We need to set a warning threshold here LOG.debug('Health Update finished in: %s seconds', timeit.default_timer() - start_time) # Health heartbeat messages pre-dating versioning, with UDP listeners, # need to adjust the expected listener count. # This is for backward compatibility with Rocky pre-versioning # heartbeat amphorae. 
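# Example (illustrative sketch, not part of the octavia-6.2.2 tree):
# _update_status above only writes to the database when the operating
# status actually changed, and maps DRAINING to ONLINE for
# neutron-lbaas compatibility. The same idempotent-update pattern in
# isolation; FakeRepo is a hypothetical stand-in for the repository:

class FakeRepo(object):
    def __init__(self):
        self.writes = []

    def update(self, session, entity_id, **fields):
        self.writes.append((entity_id, fields))


def update_if_changed(repo, session, entity_id, new_status, old_status):
    # Case-insensitive compare, mirroring the .lower() check above.
    if old_status.lower() == new_status.lower():
        return False  # no-op: nothing to persist
    repo.update(session, entity_id, operating_status=new_status)
    return True


fake_repo = FakeRepo()
update_if_changed(fake_repo, None, 'listener-1', 'ONLINE', 'online')  # skipped
update_if_changed(fake_repo, None, 'listener-1', 'ERROR', 'ONLINE')   # written
assert fake_repo.writes == [('listener-1', {'operating_status': 'ERROR'})]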
def _update_listener_count_for_UDP(self, session, db_lb, expected_listener_count): # For udp listener, the udp health won't send out by amp agent. # Once the default_pool of udp listener have the first enabled # member, then the health will be sent out. So during this # period, need to figure out the udp listener and ignore them # by changing expected_listener_count. for list_id, list_db in db_lb.get('listeners', {}).items(): need_remove = False if list_db['protocol'] == constants.PROTOCOL_UDP: listener = self.listener_repo.get(session, id=list_id) enabled_members = ([member for member in listener.default_pool.members if member.enabled] if listener.default_pool else []) if listener.default_pool: if not listener.default_pool.members: need_remove = True elif not enabled_members: need_remove = True else: need_remove = True if need_remove: expected_listener_count = expected_listener_count - 1 return expected_listener_count def _update_health(self, health, srcaddr): """This function is to update db info based on amphora status :param health: map object that contains amphora, listener, member info :type map: string :returns: null The input v1 health data structure is shown as below:: health = { "id": self.FAKE_UUID_1, "listeners": { "listener-id-1": {"status": constants.OPEN, "pools": { "pool-id-1": {"status": constants.UP, "members": { "member-id-1": constants.ONLINE} } } } } } Example V2 message:: {"id": "", "seq": 67, "listeners": { "": { "status": "OPEN", "stats": { "tx": 0, "rx": 0, "conns": 0, "totconns": 0, "ereq": 0 } } }, "pools": { ":": { "status": "UP", "members": { "": "no check" } } }, "ver": 2 } """ session = db_api.get_session() # We need to see if all of the listeners are reporting in db_lb = self.amphora_repo.get_lb_for_health_update(session, health['id']) ignore_listener_count = False if db_lb: expected_listener_count = 0 if ('PENDING' in db_lb['provisioning_status'] or not db_lb['enabled']): ignore_listener_count = True else: for key, listener in db_lb.get('listeners', {}).items(): # disabled listeners don't report from the amphora if listener['enabled']: expected_listener_count += 1 # If this is a heartbeat older than versioning, handle # UDP special for backward compatibility. if 'ver' not in health: udp_listeners = [ l for k, l in db_lb.get('listeners', {}).items() if l['protocol'] == constants.PROTOCOL_UDP] if udp_listeners: expected_listener_count = ( self._update_listener_count_for_UDP( session, db_lb, expected_listener_count)) else: # If this is not a spare amp, log and skip it. amp = self.amphora_repo.get(session, id=health['id']) if not amp or amp.load_balancer_id: # This is debug and not warning because this can happen under # normal deleting operations. LOG.debug('Received a health heartbeat from amphora %s with ' 'IP %s that should not exist. This amphora may be ' 'in the process of being deleted, in which case you ' 'will only see this message a few ' 'times', health['id'], srcaddr) if not amp: LOG.warning('The amphora %s with IP %s is missing from ' 'the DB, so it cannot be automatically ' 'deleted (the compute_id is unknown). 
An ' 'operator must manually delete it from the ' 'compute service.', health['id'], srcaddr) return # delete the amp right there try: compute = stevedore_driver.DriverManager( namespace='octavia.compute.drivers', name=CONF.controller_worker.compute_driver, invoke_on_load=True ).driver compute.delete(amp.compute_id) return except Exception as e: LOG.info("Error deleting amp %s with IP %s. Error: %s", health['id'], srcaddr, e) expected_listener_count = 0 listeners = health['listeners'] # Do not update amphora health if the reporting listener count # does not match the expected listener count if len(listeners) == expected_listener_count or ignore_listener_count: lock_session = db_api.get_session(autocommit=False) # if we're running too far behind, warn and bail proc_delay = time.time() - health['recv_time'] hb_interval = CONF.health_manager.heartbeat_interval # TODO(johnsom) We need to set a warning threshold here, and # escalate to critical when it reaches the # heartbeat_interval if proc_delay >= hb_interval: LOG.warning('Amphora %(id)s health message was processed too ' 'slowly: %(delay)ss! The system may be overloaded ' 'or otherwise malfunctioning. This heartbeat has ' 'been ignored and no update was made to the ' 'amphora health entry. THIS IS NOT GOOD.', {'id': health['id'], 'delay': proc_delay}) return # if the input amphora is healthy, we update its db info try: self.amphora_health_repo.replace( lock_session, health['id'], last_update=(datetime.datetime.utcnow())) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): lock_session.rollback() else: LOG.warning('Amphora %(id)s health message reports %(found)i ' 'listeners when %(expected)i expected', {'id': health['id'], 'found': len(listeners), 'expected': expected_listener_count}) # Don't try to update status for spares pool amphora if not db_lb: return processed_pools = [] potential_offline_pools = {} # We got a heartbeat so lb is healthy until proven otherwise if db_lb[constants.ENABLED] is False: lb_status = constants.OFFLINE else: lb_status = constants.ONLINE health_msg_version = health.get('ver', 0) for listener_id in db_lb.get(constants.LISTENERS, {}): db_listener = db_lb[constants.LISTENERS][listener_id] db_op_status = db_listener[constants.OPERATING_STATUS] listener_status = None listener = None if listener_id not in listeners: if (db_listener[constants.ENABLED] and db_lb[constants.PROVISIONING_STATUS] == constants.ACTIVE): listener_status = constants.ERROR else: listener_status = constants.OFFLINE else: listener = listeners[listener_id] # OPEN = HAProxy listener status nbconn < maxconn if listener.get('status') == constants.OPEN: listener_status = constants.ONLINE # FULL = HAProxy listener status not nbconn < maxconn elif listener.get('status') == constants.FULL: listener_status = constants.DEGRADED if lb_status == constants.ONLINE: lb_status = constants.DEGRADED else: LOG.warning(('Listener %(list)s reported status of ' '%(status)s'), {'list': listener_id, 'status': listener.get('status')}) try: if (listener_status is not None and listener_status != db_op_status): self._update_status( session, self.listener_repo, constants.LISTENER, listener_id, listener_status, db_op_status) except sqlalchemy.orm.exc.NoResultFound: LOG.error("Listener %s is not in DB", listener_id) if not listener: continue if health_msg_version < 2: raw_pools = listener['pools'] # normalize the pool IDs. Single process listener pools # have the listener id appended with an ':' separator. 
# Old multi-process listener pools only have a pool ID. # This makes sure the keys are only pool IDs. pools = {(k + ' ')[:k.rfind(':')]: v for k, v in raw_pools.items()} for db_pool_id in db_lb.get('pools', {}): # If we saw this pool already on another listener, skip it. if db_pool_id in processed_pools: continue db_pool_dict = db_lb['pools'][db_pool_id] lb_status = self._process_pool_status( session, db_pool_id, db_pool_dict, pools, lb_status, processed_pools, potential_offline_pools) if health_msg_version >= 2: raw_pools = health['pools'] # normalize the pool IDs. Single process listener pools # have the listener id appended with an ':' separator. # Old multi-process listener pools only have a pool ID. # This makes sure the keys are only pool IDs. pools = {(k + ' ')[:k.rfind(':')]: v for k, v in raw_pools.items()} for db_pool_id in db_lb.get('pools', {}): # If we saw this pool already, skip it. if db_pool_id in processed_pools: continue db_pool_dict = db_lb['pools'][db_pool_id] lb_status = self._process_pool_status( session, db_pool_id, db_pool_dict, pools, lb_status, processed_pools, potential_offline_pools) for pool_id in potential_offline_pools: # Skip if we eventually found a status for this pool if pool_id in processed_pools: continue try: # If the database doesn't already show the pool offline, update if potential_offline_pools[pool_id] != constants.OFFLINE: self._update_status( session, self.pool_repo, constants.POOL, pool_id, constants.OFFLINE, potential_offline_pools[pool_id]) except sqlalchemy.orm.exc.NoResultFound: LOG.error("Pool %s is not in DB", pool_id) # Update the load balancer status last try: if lb_status != db_lb['operating_status']: self._update_status( session, self.loadbalancer_repo, constants.LOADBALANCER, db_lb['id'], lb_status, db_lb[constants.OPERATING_STATUS]) except sqlalchemy.orm.exc.NoResultFound: LOG.error("Load balancer %s is not in DB", db_lb['id']) def _process_pool_status( self, session, pool_id, db_pool_dict, pools, lb_status, processed_pools, potential_offline_pools): pool_status = None if pool_id not in pools: # If we don't have a status update for this pool_id # add it to the list of potential offline pools and continue. # We will check the potential offline pool list after we # finish processing the status updates from all of the listeners. 
potential_offline_pools[pool_id] = db_pool_dict['operating_status'] return lb_status pool = pools[pool_id] processed_pools.append(pool_id) # UP = HAProxy backend has working or no servers if pool.get('status') == constants.UP: pool_status = constants.ONLINE # DOWN = HAProxy backend has no working servers elif pool.get('status') == constants.DOWN: pool_status = constants.ERROR lb_status = constants.ERROR else: LOG.warning(('Pool %(pool)s reported status of ' '%(status)s'), {'pool': pool_id, 'status': pool.get('status')}) # Deal with the members that are reporting from # the Amphora members = pool['members'] for member_id in db_pool_dict.get('members', {}): member_status = None member_db_status = ( db_pool_dict['members'][member_id]['operating_status']) if member_id not in members: if member_db_status != constants.NO_MONITOR: member_status = constants.OFFLINE else: status = members[member_id] # Member status can be "UP" or "UP #/#" # (transitional) if status.startswith(constants.UP): member_status = constants.ONLINE # Member status can be "DOWN" or "DOWN #/#" # (transitional) elif status.startswith(constants.DOWN): member_status = constants.ERROR if pool_status == constants.ONLINE: pool_status = constants.DEGRADED if lb_status == constants.ONLINE: lb_status = constants.DEGRADED elif status == constants.DRAIN: member_status = constants.DRAINING elif status == constants.MAINT: member_status = constants.OFFLINE elif status == constants.NO_CHECK: member_status = constants.NO_MONITOR elif status == constants.RESTARTING: # RESTARTING means that keepalived is restarting and a down # member has been detected, the real status of the member # is not clear, it might mean that the checker hasn't run # yet. # In this case, keep previous member_status, and wait for a # non-transitional status. 
pass else: LOG.warning('Member %(mem)s reported ' 'status of %(status)s', {'mem': member_id, 'status': status}) try: if (member_status is not None and member_status != member_db_status): self._update_status( session, self.member_repo, constants.MEMBER, member_id, member_status, member_db_status) except sqlalchemy.orm.exc.NoResultFound: LOG.error("Member %s is not able to update " "in DB", member_id) try: if (pool_status is not None and pool_status != db_pool_dict['operating_status']): self._update_status( session, self.pool_repo, constants.POOL, pool_id, pool_status, db_pool_dict['operating_status']) except sqlalchemy.orm.exc.NoResultFound: LOG.error("Pool %s is not in DB", pool_id) return lb_status class UpdateStatsDb(update_base.StatsUpdateBase, stats.StatsMixin): def __init__(self): super(UpdateStatsDb, self).__init__() self.repo_listener = repo.ListenerRepository() def update_stats(self, health_message, srcaddr): # The executor will eat any exceptions from the update_stats code # so we need to wrap it and log the unhandled exception try: self._update_stats(health_message, srcaddr) except Exception: LOG.exception('update_stats encountered an unknown error ' 'processing stats for amphora %s with IP ' '%s', health_message['id'], srcaddr) def _update_stats(self, health_message, srcaddr): """This function is to update the db with listener stats :param health_message: The health message containing the listener stats :type map: string :returns: null Example V1 message:: health = { "id": self.FAKE_UUID_1, "listeners": { "listener-id-1": { "status": constants.OPEN, "stats": { "ereq":0, "conns": 0, "totconns": 0, "rx": 0, "tx": 0, }, "pools": { "pool-id-1": { "status": constants.UP, "members": {"member-id-1": constants.ONLINE} } } } } } Example V2 message:: {"id": "", "seq": 67, "listeners": { "": { "status": "OPEN", "stats": { "tx": 0, "rx": 0, "conns": 0, "totconns": 0, "ereq": 0 } } }, "pools": { ":": { "status": "UP", "members": { "": "no check" } } }, "ver": 2 } """ session = db_api.get_session() amphora_id = health_message['id'] listeners = health_message['listeners'] for listener_id, listener in listeners.items(): stats = listener.get('stats') stats = {'bytes_in': stats['rx'], 'bytes_out': stats['tx'], 'active_connections': stats['conns'], 'total_connections': stats['totconns'], 'request_errors': stats['ereq']} LOG.debug("Updating listener stats in db and sending event.") LOG.debug("Listener %s / Amphora %s stats: %s", listener_id, amphora_id, stats) self.listener_stats_repo.replace( session, listener_id, amphora_id, **stats) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/healthmanager/health_drivers/update_logging.py0000664000175000017500000000206000000000000030146 0ustar00zuulzuul00000000000000# Copyright 2018 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
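# Example (illustrative sketch, not part of the octavia-6.2.2 tree):
# _update_stats above translates the heartbeat counter names (rx, tx,
# conns, totconns, ereq) into the listener statistics columns before
# calling listener_stats_repo.replace(). The translation for a single
# listener entry from the V2 message format documented above (values
# are illustrative):

heartbeat_listener = {
    'status': 'OPEN',
    'stats': {'tx': 100, 'rx': 250, 'conns': 3, 'totconns': 17, 'ereq': 1},
}

raw_stats = heartbeat_listener['stats']
stats_row = {
    'bytes_in': raw_stats['rx'],    # bytes received by the listener
    'bytes_out': raw_stats['tx'],   # bytes sent by the listener
    'active_connections': raw_stats['conns'],
    'total_connections': raw_stats['totconns'],
    'request_errors': raw_stats['ereq'],
}
assert stats_row['bytes_in'] == 250
assert stats_row['request_errors'] == 1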
from oslo_log import log as logging from octavia.controller.healthmanager.health_drivers import update_base LOG = logging.getLogger(__name__) class HealthUpdateLogger(update_base.HealthUpdateBase): def update_health(self, health, srcaddr): LOG.info("Health update triggered for: %s", health.get('id')) class StatsUpdateLogger(update_base.StatsUpdateBase): def update_stats(self, health_message, srcaddr): LOG.info("Stats update triggered for: %s", health_message.get('id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/healthmanager/health_manager.py0000664000175000017500000001457400000000000025127 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from concurrent import futures import functools import time from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from octavia.common import constants from octavia.controller.worker.v1 import controller_worker as cw1 from octavia.controller.worker.v2 import controller_worker as cw2 from octavia.db import api as db_api from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) def wait_done_or_dead(futs, dead, check_timeout=1): while True: _done, not_done = futures.wait(futs, timeout=check_timeout) if not not_done: break if dead.is_set(): for fut in not_done: # This may not actually be able to cancel, but try to # if we can. fut.cancel() def update_stats_on_done(stats, fut): # This utilizes the fact that python, non-primitive types are # passed by reference (not by value)... stats['failover_attempted'] += 1 try: fut.result() except futures.CancelledError: stats['failover_cancelled'] += 1 except Exception: stats['failover_failed'] += 1 class HealthManager(object): def __init__(self, exit_event): if CONF.api_settings.default_provider_driver == constants.AMPHORAV2: self.cw = cw2.ControllerWorker() else: self.cw = cw1.ControllerWorker() self.threads = CONF.health_manager.failover_threads self.executor = futures.ThreadPoolExecutor(max_workers=self.threads) self.amp_repo = repo.AmphoraRepository() self.amp_health_repo = repo.AmphoraHealthRepository() self.lb_repo = repo.LoadBalancerRepository() self.dead = exit_event def _test_and_set_failover_prov_status(self, lock_session, lb_id): if self.lb_repo.set_status_for_failover(lock_session, lb_id, constants.PENDING_UPDATE): return True db_lb = self.lb_repo.get(lock_session, id=lb_id) prov_status = db_lb.provisioning_status LOG.warning("Load balancer %(id)s is in immutable state " "%(state)s. 
Skipping failover.", {"state": prov_status, "id": db_lb.id}) return False def health_check(self): stats = { 'failover_attempted': 0, 'failover_failed': 0, 'failover_cancelled': 0, } futs = [] while not self.dead.is_set(): amp_health = None lock_session = None try: lock_session = db_api.get_session(autocommit=False) amp = None amp_health = self.amp_health_repo.get_stale_amphora( lock_session) if amp_health: amp = self.amp_repo.get(lock_session, id=amp_health.amphora_id) # If there is an associated LB, attempt to set it to # PENDING_UPDATE. If it is already immutable, skip the # amphora on this cycle if amp and amp.load_balancer_id: if not self._test_and_set_failover_prov_status( lock_session, amp.load_balancer_id): lock_session.rollback() break lock_session.commit() except db_exc.DBDeadlock: LOG.debug('Database reports deadlock. Skipping.') lock_session.rollback() amp_health = None except db_exc.RetryRequest: LOG.debug('Database is requesting a retry. Skipping.') lock_session.rollback() amp_health = None except db_exc.DBConnectionError: db_api.wait_for_connection(self.dead) lock_session.rollback() amp_health = None if not self.dead.is_set(): # amphora heartbeat timestamps should also be outdated # while DB is unavailable and soon after DB comes back # online. Sleeping off the full "heartbeat_timeout" # interval to give the amps a chance to check in before # we start failovers. time.sleep(CONF.health_manager.heartbeat_timeout) except Exception: with excutils.save_and_reraise_exception(): if lock_session: lock_session.rollback() if amp_health is None: break LOG.info("Stale amphora's id is: %s", amp_health.amphora_id) fut = self.executor.submit( self.cw.failover_amphora, amp_health.amphora_id) fut.add_done_callback( functools.partial(update_stats_on_done, stats) ) futs.append(fut) if len(futs) == self.threads: break if futs: LOG.info("Waiting for %s failovers to finish", len(futs)) wait_done_or_dead(futs, self.dead) if stats['failover_attempted'] > 0: LOG.info("Attempted %s failovers of amphora", stats['failover_attempted']) LOG.info("Failed at %s failovers of amphora", stats['failover_failed']) LOG.info("Cancelled %s failovers of amphora", stats['failover_cancelled']) happy_failovers = stats['failover_attempted'] happy_failovers -= stats['failover_cancelled'] happy_failovers -= stats['failover_failed'] LOG.info("Successfully completed %s failovers of amphora", happy_failovers) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/housekeeping/0000775000175000017500000000000000000000000021471 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/housekeeping/__init__.py0000664000175000017500000000107400000000000023604 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
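# Example (illustrative sketch, not part of the octavia-6.2.2 tree):
# health_check above fans failovers out to a ThreadPoolExecutor and
# tallies the outcomes with update_stats_on_done, which mutates a
# shared dict because a done-callback only receives the future. The
# callback pattern in isolation; fake_failover is hypothetical:

from concurrent import futures
import functools


def tally_on_done(counters, fut):
    counters['attempted'] += 1
    try:
        fut.result()  # re-raises any exception from the worker thread
    except Exception:
        counters['failed'] += 1


def fake_failover(amphora_id):
    if amphora_id == 'bad-amp':
        raise RuntimeError('failover failed')


counters = {'attempted': 0, 'failed': 0}
with futures.ThreadPoolExecutor(max_workers=2) as executor:
    for amphora_id in ('good-amp', 'bad-amp'):
        fut = executor.submit(fake_failover, amphora_id)
        fut.add_done_callback(functools.partial(tally_on_done, counters))
assert counters == {'attempted': 2, 'failed': 1}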
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/housekeeping/house_keeping.py0000664000175000017500000002015600000000000024674 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from concurrent import futures import datetime from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from sqlalchemy.orm import exc as sqlalchemy_exceptions from octavia.common import constants from octavia.controller.worker.v1 import controller_worker as cw1 from octavia.controller.worker.v2 import controller_worker as cw2 from octavia.db import api as db_api from octavia.db import repositories as repo LOG = logging.getLogger(__name__) CONF = cfg.CONF class SpareAmphora(object): def __init__(self): self.amp_repo = repo.AmphoraRepository() self.spares_repo = repo.SparesPoolRepository() self.az_repo = repo.AvailabilityZoneRepository() if CONF.api_settings.default_provider_driver == constants.AMPHORAV2: self.cw = cw2.ControllerWorker() self.check_booting_amphora = True else: self.cw = cw1.ControllerWorker() self.check_booting_amphora = False def spare_check(self): """Checks the DB for the Spare amphora count. If it's less than the requirement, starts new amphora. """ lock_session = db_api.get_session(autocommit=False) session = db_api.get_session() try: # Lock the spares_pool record for read and write spare_amp_row = self.spares_repo.get_for_update(lock_session) conf_spare_cnt = CONF.house_keeping.spare_amphora_pool_size LOG.debug("Required Spare Amphora count : %d", conf_spare_cnt) availability_zones, links = self.az_repo.get_all(session, enabled=True) compute_zones = set() for az in availability_zones: az_meta = self.az_repo.get_availability_zone_metadata_dict( session, az.name) compute_zones.add(az_meta.get(constants.COMPUTE_ZONE)) # If no AZs objects then build in the configured AZ (even if None) # Also if configured AZ is not None then also build in there # as could be different to the current AZs objects. if CONF.nova.availability_zone or not compute_zones: compute_zones.add(CONF.nova.availability_zone) amp_booting = [] for az_name in compute_zones: # TODO(rm_work): If az_name is None, this will get ALL spares # across all AZs. This is the safest/most backwards compatible # way I can think of, as cached_zone on the amphora records # won't ever match. This should not impact any existing deploys # with no AZ configured, as the behavior should be identical # in that case. In the case of multiple AZs configured, it will # simply ensure there are at least spares *somewhere*, but # will function more accurately if the operator actually # configures the AZ setting properly. 
curr_spare_cnt = self.amp_repo.get_spare_amphora_count( session, availability_zone=az_name, check_booting_amphora=self.check_booting_amphora) LOG.debug("Current Spare Amphora count for AZ %s: %d", az_name, curr_spare_cnt) diff_count = conf_spare_cnt - curr_spare_cnt # When the current spare amphora count is less than required if diff_count > 0: LOG.info("Initiating creation of %d spare amphora " "for az %s.", diff_count, az_name) # Call Amphora Create Flow diff_count times with futures.ThreadPoolExecutor( max_workers=conf_spare_cnt) as executor: for i in range(1, diff_count + 1): LOG.debug("Starting amphora number %d ...", i) amp_booting.append(executor.submit( self.cw.create_amphora, az_name)) else: LOG.debug("Current spare amphora count for AZ %s " "satisfies the requirement", az_name) # Wait for the amphora boot threads to finish futures.wait(amp_booting) spare_amp_row.updated_at = timeutils.utcnow() lock_session.commit() except Exception: lock_session.rollback() class DatabaseCleanup(object): def __init__(self): self.amp_repo = repo.AmphoraRepository() self.amp_health_repo = repo.AmphoraHealthRepository() self.lb_repo = repo.LoadBalancerRepository() def delete_old_amphorae(self): """Checks the DB for old amphorae and deletes them based on their age.""" exp_age = datetime.timedelta( seconds=CONF.house_keeping.amphora_expiry_age) session = db_api.get_session() amp_ids = self.amp_repo.get_all_deleted_expiring(session, exp_age=exp_age) for amp_id in amp_ids: # If we're here, we already think the amp is expiring according to # the amphora table. Now check it is expired in the health table. # In this way, we ensure that amps aren't deleted unless they are # both expired AND no longer receiving zombie heartbeats. if self.amp_health_repo.check_amphora_health_expired( session, amp_id, exp_age): LOG.debug('Attempting to purge db record for Amphora ID: %s', amp_id) self.amp_repo.delete(session, id=amp_id) try: self.amp_health_repo.delete(session, amphora_id=amp_id) except sqlalchemy_exceptions.NoResultFound: pass # Best effort delete, this record might not exist LOG.info('Purged db record for Amphora ID: %s', amp_id) def cleanup_load_balancers(self): """Checks the DB for old load balancers and triggers their removal.""" exp_age = datetime.timedelta( seconds=CONF.house_keeping.load_balancer_expiry_age) session = db_api.get_session() lb_ids = self.lb_repo.get_all_deleted_expiring(session, exp_age=exp_age) for lb_id in lb_ids: LOG.info('Attempting to delete load balancer id: %s', lb_id) self.lb_repo.delete(session, id=lb_id) LOG.info('Deleted load balancer id: %s', lb_id) class CertRotation(object): def __init__(self): self.threads = CONF.house_keeping.cert_rotate_threads if CONF.api_settings.default_provider_driver == constants.AMPHORAV2: self.cw = cw2.ControllerWorker() else: self.cw = cw1.ControllerWorker() def rotate(self): """Check the amphora db table for expiring auth certs.""" amp_repo = repo.AmphoraRepository() with futures.ThreadPoolExecutor(max_workers=self.threads) as executor: session = db_api.get_session() rotation_count = 0 while True: amp = amp_repo.get_cert_expiring_amphora(session) if not amp: break rotation_count += 1 LOG.debug("Cert-expired amphora id is: %s", amp.id) executor.submit(self.cw.amphora_cert_rotation, amp.id) if rotation_count > 0: LOG.info("Rotated certificates for %s amphorae", rotation_count) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 
octavia-6.2.2/octavia/controller/queue/0000775000175000017500000000000000000000000020127 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/queue/__init__.py0000664000175000017500000000107400000000000022242 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/queue/v1/0000775000175000017500000000000000000000000020455 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/queue/v1/__init__.py0000664000175000017500000000107400000000000022570 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/queue/v1/consumer.py0000664000175000017500000000436500000000000022672 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
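# Example (illustrative sketch, not part of this file): ConsumerService
# below is a cotyledon.Service, so a process manager instantiates it
# with (worker_id, conf) and calls run(). A minimal sketch of hosting
# it under cotyledon; the real wiring typically lives in Octavia's
# worker command, not here, and the worker count is illustrative:

import cotyledon
from oslo_config import cfg

from octavia.controller.queue.v1 import consumer


def main():
    # cotyledon forks one process per worker and passes our extra args
    # (the config object) after the generated worker_id.
    service_manager = cotyledon.ServiceManager()
    service_manager.add(consumer.ConsumerService, workers=1,
                        args=(cfg.CONF,))
    service_manager.run()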
import cotyledon from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from octavia.common import rpc from octavia.controller.queue.v1 import endpoints LOG = logging.getLogger(__name__) class ConsumerService(cotyledon.Service): def __init__(self, worker_id, conf): super(ConsumerService, self).__init__(worker_id) self.conf = conf self.topic = conf.oslo_messaging.topic self.server = conf.host self.endpoints = [] self.access_policy = dispatcher.DefaultRPCAccessPolicy self.message_listener = None def run(self): LOG.info('Starting consumer...') target = messaging.Target(topic=self.topic, server=self.server, fanout=False) self.endpoints = [endpoints.Endpoints()] self.message_listener = rpc.get_server( target, self.endpoints, executor='threading', access_policy=self.access_policy ) self.message_listener.start() def terminate(self): if self.message_listener: LOG.info('Stopping consumer...') self.message_listener.stop() LOG.info('Consumer successfully stopped. Waiting for final ' 'messages to be processed...') self.message_listener.wait() if self.endpoints: LOG.info('Shutting down endpoint worker executors...') for e in self.endpoints: try: e.worker.executor.shutdown() except AttributeError: pass super(ConsumerService, self).terminate() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/queue/v1/endpoints.py0000664000175000017500000001450500000000000023037 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from stevedore import driver as stevedore_driver from octavia.common import constants CONF = cfg.CONF LOG = logging.getLogger(__name__) class Endpoints(object): # API version history: # 1.0 - Initial version. 
target = messaging.Target( namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, version='1.1') def __init__(self): self.worker = stevedore_driver.DriverManager( namespace='octavia.plugins', name=CONF.octavia_plugins, invoke_on_load=True ).driver def create_load_balancer(self, context, load_balancer_id, flavor=None, availability_zone=None): LOG.info('Creating load balancer \'%s\'...', load_balancer_id) self.worker.create_load_balancer(load_balancer_id, flavor, availability_zone) def update_load_balancer(self, context, load_balancer_id, load_balancer_updates): LOG.info('Updating load balancer \'%s\'...', load_balancer_id) self.worker.update_load_balancer(load_balancer_id, load_balancer_updates) def delete_load_balancer(self, context, load_balancer_id, cascade=False): LOG.info('Deleting load balancer \'%s\'...', load_balancer_id) self.worker.delete_load_balancer(load_balancer_id, cascade) def failover_load_balancer(self, context, load_balancer_id): LOG.info('Failing over amphora in load balancer \'%s\'...', load_balancer_id) self.worker.failover_loadbalancer(load_balancer_id) def failover_amphora(self, context, amphora_id): LOG.info('Failing over amphora \'%s\'...', amphora_id) self.worker.failover_amphora(amphora_id) def create_listener(self, context, listener_id): LOG.info('Creating listener \'%s\'...', listener_id) self.worker.create_listener(listener_id) def update_listener(self, context, listener_id, listener_updates): LOG.info('Updating listener \'%s\'...', listener_id) self.worker.update_listener(listener_id, listener_updates) def delete_listener(self, context, listener_id): LOG.info('Deleting listener \'%s\'...', listener_id) self.worker.delete_listener(listener_id) def create_pool(self, context, pool_id): LOG.info('Creating pool \'%s\'...', pool_id) self.worker.create_pool(pool_id) def update_pool(self, context, pool_id, pool_updates): LOG.info('Updating pool \'%s\'...', pool_id) self.worker.update_pool(pool_id, pool_updates) def delete_pool(self, context, pool_id): LOG.info('Deleting pool \'%s\'...', pool_id) self.worker.delete_pool(pool_id) def create_health_monitor(self, context, health_monitor_id): LOG.info('Creating health monitor \'%s\'...', health_monitor_id) self.worker.create_health_monitor(health_monitor_id) def update_health_monitor(self, context, health_monitor_id, health_monitor_updates): LOG.info('Updating health monitor \'%s\'...', health_monitor_id) self.worker.update_health_monitor(health_monitor_id, health_monitor_updates) def delete_health_monitor(self, context, health_monitor_id): LOG.info('Deleting health monitor \'%s\'...', health_monitor_id) self.worker.delete_health_monitor(health_monitor_id) def create_member(self, context, member_id): LOG.info('Creating member \'%s\'...', member_id) self.worker.create_member(member_id) def update_member(self, context, member_id, member_updates): LOG.info('Updating member \'%s\'...', member_id) self.worker.update_member(member_id, member_updates) def batch_update_members(self, context, old_member_ids, new_member_ids, updated_members): updated_member_ids = [m.get('id') for m in updated_members] LOG.info( 'Batch updating members: old=\'%(old)s\', new=\'%(new)s\', ' 'updated=\'%(updated)s\'...', {'old': old_member_ids, 'new': new_member_ids, 'updated': updated_member_ids}) self.worker.batch_update_members( old_member_ids, new_member_ids, updated_members) def delete_member(self, context, member_id): LOG.info('Deleting member \'%s\'...', member_id) self.worker.delete_member(member_id) def create_l7policy(self, context, 
l7policy_id): LOG.info('Creating l7policy \'%s\'...', l7policy_id) self.worker.create_l7policy(l7policy_id) def update_l7policy(self, context, l7policy_id, l7policy_updates): LOG.info('Updating l7policy \'%s\'...', l7policy_id) self.worker.update_l7policy(l7policy_id, l7policy_updates) def delete_l7policy(self, context, l7policy_id): LOG.info('Deleting l7policy \'%s\'...', l7policy_id) self.worker.delete_l7policy(l7policy_id) def create_l7rule(self, context, l7rule_id): LOG.info('Creating l7rule \'%s\'...', l7rule_id) self.worker.create_l7rule(l7rule_id) def update_l7rule(self, context, l7rule_id, l7rule_updates): LOG.info('Updating l7rule \'%s\'...', l7rule_id) self.worker.update_l7rule(l7rule_id, l7rule_updates) def delete_l7rule(self, context, l7rule_id): LOG.info('Deleting l7rule \'%s\'...', l7rule_id) self.worker.delete_l7rule(l7rule_id) def update_amphora_agent_config(self, context, amphora_id): LOG.info('Updating amphora \'%s\' agent configuration...', amphora_id) self.worker.update_amphora_agent_config(amphora_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/queue/v2/0000775000175000017500000000000000000000000020456 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/queue/v2/__init__.py0000664000175000017500000000107400000000000022571 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/queue/v2/consumer.py0000664000175000017500000000521200000000000022663 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
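# Example (illustrative sketch, not part of this file): the v2 consumer
# below listens on constants.TOPIC_AMPHORA_V2 and dispatches one-way
# casts to the v2 Endpoints class (controller-agent RPC namespace,
# version 2.0). A sketch of the caller side, assuming
# octavia.common.rpc.get_client() mirroring the rpc.get_server() call
# used in the consumers; the payload dict is illustrative:

import oslo_messaging as messaging

from octavia.common import constants
from octavia.common import rpc

target = messaging.Target(
    namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT,
    topic=constants.TOPIC_AMPHORA_V2, version='2.0', fanout=False)
client = rpc.get_client(target)
# cast() returns immediately; the consumer routes the method name to
# the matching Endpoints method with the keyword arguments below.
client.cast({}, 'create_load_balancer',
            loadbalancer={constants.LOADBALANCER_ID: 'lb-uuid'},
            flavor=None, availability_zone=None)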
import cotyledon from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_utils import uuidutils from octavia.common import constants from octavia.common import rpc from octavia.controller.queue.v2 import endpoints LOG = logging.getLogger(__name__) CONF = cfg.CONF class ConsumerService(cotyledon.Service): def __init__(self, worker_id, conf): super(ConsumerService, self).__init__(worker_id) self.conf = conf self.topic = constants.TOPIC_AMPHORA_V2 self.server = conf.host self.endpoints = [] self.access_policy = dispatcher.DefaultRPCAccessPolicy self.message_listener = None def run(self): LOG.info('Starting V2 consumer...') target = messaging.Target(topic=self.topic, server=self.server, fanout=False) self.endpoints = [endpoints.Endpoints()] self.message_listener = rpc.get_server( target, self.endpoints, executor='threading', access_policy=self.access_policy ) self.message_listener.start() if constants.AMPHORAV2 in CONF.api_settings.enabled_provider_drivers: for e in self.endpoints: e.worker.services_controller.run_conductor( 'octavia-task-flow-conductor-%s' % uuidutils.generate_uuid()) def terminate(self): if self.message_listener: LOG.info('Stopping V2 consumer...') self.message_listener.stop() LOG.info('V2 Consumer successfully stopped. Waiting for ' 'final messages to be processed...') self.message_listener.wait() if self.endpoints: LOG.info('Shutting down V2 endpoint worker executors...') for e in self.endpoints: try: e.worker.executor.shutdown() except AttributeError: pass super(ConsumerService, self).terminate() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/queue/v2/endpoints.py0000664000175000017500000001621000000000000023033 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from octavia.common import constants from octavia.controller.worker.v2 import controller_worker CONF = cfg.CONF LOG = logging.getLogger(__name__) class Endpoints(object): # API version history: # 1.0 - Initial version. # 2.0 - Provider driver format. 
target = messaging.Target( namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, version='2.0') def __init__(self): self.worker = controller_worker.ControllerWorker() def create_load_balancer(self, context, loadbalancer, flavor=None, availability_zone=None): LOG.info('Creating load balancer \'%s\'...', loadbalancer[constants.LOADBALANCER_ID]) self.worker.create_load_balancer(loadbalancer, flavor, availability_zone) def update_load_balancer(self, context, original_load_balancer, load_balancer_updates): LOG.info('Updating load balancer \'%s\'...', original_load_balancer.get(constants.LOADBALANCER_ID)) self.worker.update_load_balancer(original_load_balancer, load_balancer_updates) def delete_load_balancer(self, context, loadbalancer, cascade=False): LOG.info('Deleting load balancer \'%s\'...', loadbalancer.get(constants.LOADBALANCER_ID)) self.worker.delete_load_balancer(loadbalancer, cascade) def failover_load_balancer(self, context, load_balancer_id): LOG.info('Failing over amphora in load balancer \'%s\'...', load_balancer_id) self.worker.failover_loadbalancer(load_balancer_id) def failover_amphora(self, context, amphora_id): LOG.info('Failing over amphora \'%s\'...', amphora_id) self.worker.failover_amphora(amphora_id) def create_listener(self, context, listener): LOG.info('Creating listener \'%s\'...', listener[constants.LISTENER_ID]) self.worker.create_listener(listener) def update_listener(self, context, original_listener, listener_updates): LOG.info('Updating listener \'%s\'...', original_listener[constants.LISTENER_ID]) self.worker.update_listener(original_listener, listener_updates) def delete_listener(self, context, listener): LOG.info('Deleting listener \'%s\'...', listener[constants.LISTENER_ID]) self.worker.delete_listener(listener) def create_pool(self, context, pool): LOG.info('Creating pool \'%s\'...', pool.get(constants.POOL_ID)) self.worker.create_pool(pool) def update_pool(self, context, original_pool, pool_updates): LOG.info('Updating pool \'%s\'...', original_pool.get(constants.POOL_ID)) self.worker.update_pool(original_pool, pool_updates) def delete_pool(self, context, pool): LOG.info('Deleting pool \'%s\'...', pool.get(constants.POOL_ID)) self.worker.delete_pool(pool) def create_health_monitor(self, context, health_monitor): LOG.info('Creating health monitor \'%s\'...', health_monitor.get( constants.HEALTHMONITOR_ID)) self.worker.create_health_monitor(health_monitor) def update_health_monitor(self, context, original_health_monitor, health_monitor_updates): LOG.info('Updating health monitor \'%s\'...', original_health_monitor.get(constants.HEALTHMONITOR_ID)) self.worker.update_health_monitor(original_health_monitor, health_monitor_updates) def delete_health_monitor(self, context, health_monitor): LOG.info('Deleting health monitor \'%s\'...', health_monitor.get( constants.HEALTHMONITOR_ID)) self.worker.delete_health_monitor(health_monitor) def create_member(self, context, member): LOG.info('Creating member \'%s\'...', member.get(constants.MEMBER_ID)) self.worker.create_member(member) def update_member(self, context, original_member, member_updates): LOG.info('Updating member \'%s\'...', original_member.get( constants.MEMBER_ID)) self.worker.update_member(original_member, member_updates) def batch_update_members(self, context, old_members, new_members, updated_members): updated_member_ids = [m.get(constants.ID) for m in updated_members] new_member_ids = [m.get(constants.ID) for m in new_members] old_member_ids = [m.get(constants.ID) for m in old_members] LOG.info( 'Batch 
updating members: old=\'%(old)s\', new=\'%(new)s\', ' 'updated=\'%(updated)s\'...', {'old': old_member_ids, 'new': new_member_ids, 'updated': updated_member_ids}) self.worker.batch_update_members( old_members, new_members, updated_members) def delete_member(self, context, member): LOG.info('Deleting member \'%s\'...', member.get(constants.MEMBER_ID)) self.worker.delete_member(member) def create_l7policy(self, context, l7policy): LOG.info('Creating l7policy \'%s\'...', l7policy.get(constants.L7POLICY_ID)) self.worker.create_l7policy(l7policy) def update_l7policy(self, context, original_l7policy, l7policy_updates): LOG.info('Updating l7policy \'%s\'...', original_l7policy.get( constants.L7POLICY_ID)) self.worker.update_l7policy(original_l7policy, l7policy_updates) def delete_l7policy(self, context, l7policy): LOG.info('Deleting l7policy \'%s\'...', l7policy.get( constants.L7POLICY_ID)) self.worker.delete_l7policy(l7policy) def create_l7rule(self, context, l7rule): LOG.info('Creating l7rule \'%s\'...', l7rule.get(constants.L7RULE_ID)) self.worker.create_l7rule(l7rule) def update_l7rule(self, context, original_l7rule, l7rule_updates): LOG.info('Updating l7rule \'%s\'...', original_l7rule.get( constants.L7RULE_ID)) self.worker.update_l7rule(original_l7rule, l7rule_updates) def delete_l7rule(self, context, l7rule): LOG.info('Deleting l7rule \'%s\'...', l7rule.get(constants.L7RULE_ID)) self.worker.delete_l7rule(l7rule) def update_amphora_agent_config(self, context, amphora_id): LOG.info('Updating amphora \'%s\' agent configuration...', amphora_id) self.worker.update_amphora_agent_config(amphora_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/worker/0000775000175000017500000000000000000000000020314 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/__init__.py0000664000175000017500000000107400000000000022427 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/amphora_rate_limit.py0000664000175000017500000001001200000000000024520 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
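
# Illustrative sketch (not part of the original module): the rate limiter
# below serializes amphora builds through two DB-backed tables, a queue of
# build requests ordered by priority and a counter of in-use build slots
# capped by [haproxy_amphora] build_rate_limit. A task that wants to boot an
# amphora would use it roughly as follows; boot_the_amphora() is a
# hypothetical placeholder, not a real call site in this tree:
#
#     rate_limit = AmphoraBuildRateLimit()
#     # add_to_build_request_queue() blocks in wait_for_build_slot() until
#     # this amphora is the highest-priority queued request and a slot is
#     # free, or raises ComputeBuildQueueTimeoutException after
#     # build_active_retries attempts.
#     rate_limit.add_to_build_request_queue(
#         amphora_id, constants.LB_CREATE_NORMAL_PRIORITY)
#     try:
#         boot_the_amphora(amphora_id)   # hypothetical placeholder
#     finally:
#         rate_limit.remove_from_build_req_queue(amphora_id)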
# import time from oslo_config import cfg from oslo_log import log as logging from octavia.common import exceptions from octavia.db import api as db_apis from octavia.db import repositories as repo LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_group('haproxy_amphora', 'octavia.common.config') class AmphoraBuildRateLimit(object): def __init__(self): self.amp_build_slots_repo = repo.AmphoraBuildSlotsRepository() self.amp_build_req_repo = repo.AmphoraBuildReqRepository() def add_to_build_request_queue(self, amphora_id, build_priority): self.amp_build_req_repo.add_to_build_queue( db_apis.get_session(), amphora_id=amphora_id, priority=build_priority) LOG.debug("Added build request for %s to the queue", amphora_id) self.wait_for_build_slot(amphora_id) def has_build_slot(self): build_rate_limit = CONF.haproxy_amphora.build_rate_limit session = db_apis.get_session() with session.begin(subtransactions=True): used_build_slots = (self.amp_build_slots_repo .get_used_build_slots_count(session)) available_build_slots = build_rate_limit - used_build_slots LOG.debug("Available build slots %d", available_build_slots) return available_build_slots > 0 def has_highest_priority(self, amphora_id): session = db_apis.get_session() with session.begin(subtransactions=True): highest_priority_build_req = ( self.amp_build_req_repo.get_highest_priority_build_req( session)) LOG.debug("Highest priority req: %s, Current req: %s", highest_priority_build_req, amphora_id) return amphora_id == highest_priority_build_req def update_build_status_and_available_build_slots(self, amphora_id): session = db_apis.get_session() with session.begin(subtransactions=True): self.amp_build_slots_repo.update_count(session, action='increment') self.amp_build_req_repo.update_req_status(session, amphora_id) def remove_from_build_req_queue(self, amphora_id): session = db_apis.get_session() with session.begin(subtransactions=True): self.amp_build_req_repo.delete(session, amphora_id=amphora_id) self.amp_build_slots_repo.update_count(session, action='decrement') LOG.debug("Removed request for %s from queue" " and released the build slot", amphora_id) def remove_all_from_build_req_queue(self): session = db_apis.get_session() with session.begin(subtransactions=True): self.amp_build_req_repo.delete_all(session) self.amp_build_slots_repo.update_count(session, action='reset') LOG.debug("Removed all the build requests and " "released the build slots") def wait_for_build_slot(self, amphora_id): LOG.debug("Waiting for a build slot") for i in range(CONF.haproxy_amphora.build_active_retries): if (self.has_build_slot() and self.has_highest_priority(amphora_id)): self.update_build_status_and_available_build_slots(amphora_id) return time.sleep(CONF.haproxy_amphora.build_retry_interval) self.remove_all_from_build_req_queue() raise exceptions.ComputeBuildQueueTimeoutException() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/task_utils.py0000664000175000017500000002612400000000000023055 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Methods common to the controller work tasks.""" from oslo_log import log as logging from octavia.common import constants from octavia.db import api as db_apis from octavia.db import repositories as repo LOG = logging.getLogger(__name__) class TaskUtils(object): """Class of helper/utility methods used by tasks.""" def __init__(self, **kwargs): self.amphora_repo = repo.AmphoraRepository() self.health_mon_repo = repo.HealthMonitorRepository() self.listener_repo = repo.ListenerRepository() self.loadbalancer_repo = repo.LoadBalancerRepository() self.member_repo = repo.MemberRepository() self.pool_repo = repo.PoolRepository() self.amp_health_repo = repo.AmphoraHealthRepository() self.l7policy_repo = repo.L7PolicyRepository() self.l7rule_repo = repo.L7RuleRepository() super(TaskUtils, self).__init__(**kwargs) def unmark_amphora_health_busy(self, amphora_id): """Unmark the amphora_health record busy for an amphora. NOTE: This should only be called from revert methods. :param amphora_id: The amphora id to unmark busy """ LOG.debug('Unmarking health monitoring busy on amphora: %s', amphora_id) try: self.amp_health_repo.update(db_apis.get_session(), amphora_id=amphora_id, busy=False) except Exception as e: LOG.debug('Failed to update amphora health record %(amp)s ' 'due to: %(except)s', {'amp': amphora_id, 'except': str(e)}) def mark_amphora_status_error(self, amphora_id): """Sets an amphora status to ERROR. NOTE: This should only be called from revert methods. :param amphora_id: Amphora ID to set the status to ERROR """ try: self.amphora_repo.update(db_apis.get_session(), id=amphora_id, status=constants.ERROR) except Exception as e: LOG.error("Failed to update amphora %(amp)s " "status to ERROR due to: " "%(except)s", {'amp': amphora_id, 'except': str(e)}) def mark_health_mon_prov_status_error(self, health_mon_id): """Sets a health monitor provisioning status to ERROR. NOTE: This should only be called from revert methods. :param health_mon_id: Health Monitor ID to set prov status to ERROR """ try: self.health_mon_repo.update(db_apis.get_session(), id=health_mon_id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update health monitor %(health)s " "provisioning status to ERROR due to: " "%(except)s", {'health': health_mon_id, 'except': str(e)}) def mark_l7policy_prov_status_active(self, l7policy_id): """Sets a L7 policy provisioning status to ACTIVE. NOTE: This should only be called from revert methods. :param l7policy_id: L7 Policy ID to set provisioning status to ACTIVE """ try: self.l7policy_repo.update(db_apis.get_session(), id=l7policy_id, provisioning_status=constants.ACTIVE) except Exception as e: LOG.error("Failed to update l7policy %(l7p)s " "provisioning status to ACTIVE due to: " "%(except)s", {'l7p': l7policy_id, 'except': str(e)}) def mark_l7policy_prov_status_error(self, l7policy_id): """Sets a L7 policy provisioning status to ERROR. NOTE: This should only be called from revert methods. 
:param l7policy_id: L7 Policy ID to set provisioning status to ERROR """ try: self.l7policy_repo.update(db_apis.get_session(), id=l7policy_id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update l7policy %(l7p)s " "provisioning status to ERROR due to: " "%(except)s", {'l7p': l7policy_id, 'except': str(e)}) def mark_l7rule_prov_status_error(self, l7rule_id): """Sets a L7 rule provisioning status to ERROR. NOTE: This should only be called from revert methods. :param l7rule_id: L7 Rule ID to set provisioning status to ERROR """ try: self.l7rule_repo.update(db_apis.get_session(), id=l7rule_id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update l7rule %(l7r)s " "provisioning status to ERROR due to: " "%(except)s", {'l7r': l7rule_id, 'except': str(e)}) def mark_listener_prov_status_error(self, listener_id): """Sets a listener provisioning status to ERROR. NOTE: This should only be called from revert methods. :param listener_id: Listener ID to set provisioning status to ERROR """ try: self.listener_repo.update(db_apis.get_session(), id=listener_id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update listener %(list)s " "provisioning status to ERROR due to: " "%(except)s", {'list': listener_id, 'except': str(e)}) def mark_loadbalancer_prov_status_error(self, loadbalancer_id): """Sets a load balancer provisioning status to ERROR. NOTE: This should only be called from revert methods. :param loadbalancer_id: Load balancer ID to set provisioning status to ERROR """ try: self.loadbalancer_repo.update(db_apis.get_session(), id=loadbalancer_id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update load balancer %(lb)s " "provisioning status to ERROR due to: " "%(except)s", {'lb': loadbalancer_id, 'except': str(e)}) def mark_listener_prov_status_active(self, listener_id): """Sets a listener provisioning status to ACTIVE. NOTE: This should only be called from revert methods. :param listener_id: Listener ID to set provisioning status to ACTIVE """ try: self.listener_repo.update(db_apis.get_session(), id=listener_id, provisioning_status=constants.ACTIVE) except Exception as e: LOG.error("Failed to update listener %(list)s " "provisioning status to ACTIVE due to: " "%(except)s", {'list': listener_id, 'except': str(e)}) def mark_pool_prov_status_active(self, pool_id): """Sets a pool provisioning status to ACTIVE. NOTE: This should only be called from revert methods. :param pool_id: Pool ID to set provisioning status to ACTIVE """ try: self.pool_repo.update(db_apis.get_session(), id=pool_id, provisioning_status=constants.ACTIVE) except Exception as e: LOG.error("Failed to update pool %(pool)s provisioning status " "to ACTIVE due to: %(except)s", {'pool': pool_id, 'except': str(e)}) def mark_loadbalancer_prov_status_active(self, loadbalancer_id): """Sets a load balancer provisioning status to ACTIVE. NOTE: This should only be called from revert methods. :param loadbalancer_id: Load balancer ID to set provisioning status to ACTIVE """ try: self.loadbalancer_repo.update(db_apis.get_session(), id=loadbalancer_id, provisioning_status=constants.ACTIVE) except Exception as e: LOG.error("Failed to update load balancer %(lb)s " "provisioning status to ACTIVE due to: " "%(except)s", {'lb': loadbalancer_id, 'except': str(e)}) def mark_member_prov_status_error(self, member_id): """Sets a member provisioning status to ERROR. NOTE: This should only be called from revert methods. 
:param member_id: Member ID to set provisioning status to ERROR """ try: self.member_repo.update(db_apis.get_session(), id=member_id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update member %(member)s " "provisioning status to ERROR due to: " "%(except)s", {'member': member_id, 'except': str(e)}) def mark_pool_prov_status_error(self, pool_id): """Sets a pool provisioning status to ERROR. NOTE: This should only be called from revert methods. :param pool_id: Pool ID to set provisioning status to ERROR """ try: self.pool_repo.update(db_apis.get_session(), id=pool_id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update pool %(pool)s " "provisioning status to ERROR due to: " "%(except)s", {'pool': pool_id, 'except': str(e)}) def get_current_loadbalancer_from_db(self, loadbalancer_id): """Gets a Loadbalancer from db. :param: loadbalancer_id: Load balancer ID which to get from db """ try: return self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) except Exception as e: LOG.error("Failed to get loadbalancer %(loadbalancer)s " "due to: %(except)s", {'loadbalancer': loadbalancer_id, 'except': str(e)}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/worker/v1/0000775000175000017500000000000000000000000020642 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/__init__.py0000664000175000017500000000107400000000000022755 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/controller_worker.py0000664000175000017500000014472500000000000025005 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
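
# NOTE: every public method in the ControllerWorker below follows the same
# pattern: fetch objects from the DB, build a taskflow flow, load it with a
# storage 'store' dict, and run it under a logging listener. Reduced to its
# skeleton (the names in this sketch are placeholders, not real flows):
#
#     flow = some_flows.get_some_flow()
#     engine = self._taskflow_load(flow, store={constants.FOO: foo})
#     with tf_logging.DynamicLoggingListener(engine, log=LOG):
#         engine.run()
#
# _taskflow_load() comes from base_taskflow.BaseTaskFlowEngine and wraps
# taskflow.engines.load(); DynamicLoggingListener logs task state
# transitions through this module's logger.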
# from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from sqlalchemy.orm import exc as db_exceptions from taskflow.listeners import logging as tf_logging import tenacity from octavia.common import base_taskflow from octavia.common import constants from octavia.common import exceptions from octavia.common import utils from octavia.controller.worker.v1.flows import amphora_flows from octavia.controller.worker.v1.flows import health_monitor_flows from octavia.controller.worker.v1.flows import l7policy_flows from octavia.controller.worker.v1.flows import l7rule_flows from octavia.controller.worker.v1.flows import listener_flows from octavia.controller.worker.v1.flows import load_balancer_flows from octavia.controller.worker.v1.flows import member_flows from octavia.controller.worker.v1.flows import pool_flows from octavia.db import api as db_apis from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) def _is_provisioning_status_pending_update(lb_obj): return not lb_obj.provisioning_status == constants.PENDING_UPDATE class ControllerWorker(base_taskflow.BaseTaskFlowEngine): def __init__(self): self._amphora_flows = amphora_flows.AmphoraFlows() self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows() self._lb_flows = load_balancer_flows.LoadBalancerFlows() self._listener_flows = listener_flows.ListenerFlows() self._member_flows = member_flows.MemberFlows() self._pool_flows = pool_flows.PoolFlows() self._l7policy_flows = l7policy_flows.L7PolicyFlows() self._l7rule_flows = l7rule_flows.L7RuleFlows() self._amphora_repo = repo.AmphoraRepository() self._amphora_health_repo = repo.AmphoraHealthRepository() self._health_mon_repo = repo.HealthMonitorRepository() self._lb_repo = repo.LoadBalancerRepository() self._listener_repo = repo.ListenerRepository() self._member_repo = repo.MemberRepository() self._pool_repo = repo.PoolRepository() self._l7policy_repo = repo.L7PolicyRepository() self._l7rule_repo = repo.L7RuleRepository() self._flavor_repo = repo.FlavorRepository() self._az_repo = repo.AvailabilityZoneRepository() super(ControllerWorker, self).__init__() @tenacity.retry( retry=( tenacity.retry_if_result(_is_provisioning_status_pending_update) | tenacity.retry_if_exception_type()), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def _get_db_obj_until_pending_update(self, repo, id): return repo.get(db_apis.get_session(), id=id) def create_amphora(self, availability_zone=None): """Creates an Amphora. This is used to create spare amphora. 
        :returns: amphora_id
        """
        try:
            store = {constants.BUILD_TYPE_PRIORITY:
                     constants.LB_CREATE_SPARES_POOL_PRIORITY,
                     constants.FLAVOR: None,
                     constants.SERVER_GROUP_ID: None,
                     constants.AVAILABILITY_ZONE: None}
            if availability_zone:
                store[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), availability_zone))
            create_amp_tf = self._taskflow_load(
                self._amphora_flows.get_create_amphora_flow(),
                store=store)
            with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG):
                create_amp_tf.run()
            return create_amp_tf.storage.fetch('amphora')
        except Exception as e:
            LOG.error('Failed to create an amphora due to: %s', str(e))

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
            CONF.haproxy_amphora.api_db_commit_retry_backoff,
            CONF.haproxy_amphora.api_db_commit_retry_max),
        stop=tenacity.stop_after_attempt(
            CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_health_monitor(self, health_monitor_id):
        """Creates a health monitor.

        :param health_monitor_id: ID of the health monitor to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)
        if not health_mon:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'health_monitor', health_monitor_id)
            raise db_exceptions.NoResultFound

        pool = health_mon.pool
        listeners = pool.listeners
        pool.health_monitor = health_mon
        load_balancer = pool.load_balancer

        create_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_create_health_monitor_flow(),
            store={constants.HEALTH_MON: health_mon,
                   constants.POOL: pool,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(create_hm_tf, log=LOG):
            create_hm_tf.run()

    def delete_health_monitor(self, health_monitor_id):
        """Deletes a health monitor.

        :param health_monitor_id: ID of the health monitor to delete
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                               id=health_monitor_id)

        pool = health_mon.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        delete_hm_tf = self._taskflow_load(
            self._health_monitor_flows.get_delete_health_monitor_flow(),
            store={constants.HEALTH_MON: health_mon,
                   constants.POOL: pool,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(delete_hm_tf, log=LOG):
            delete_hm_tf.run()

    def update_health_monitor(self, health_monitor_id,
                              health_monitor_updates):
        """Updates a health monitor.

        :param health_monitor_id: ID of the health monitor to update
        :param health_monitor_updates: Dict containing updated health monitor
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        health_mon = None
        try:
            health_mon = self._get_db_obj_until_pending_update(
                self._health_mon_repo, health_monitor_id)
        except tenacity.RetryError as e:
            LOG.warning('Health monitor did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia '
                        'upgrade or an overloaded and failing database.
Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) health_mon = e.last_attempt.result() pool = health_mon.pool listeners = pool.listeners pool.health_monitor = health_mon load_balancer = pool.load_balancer update_hm_tf = self._taskflow_load( self._health_monitor_flows.get_update_health_monitor_flow(), store={constants.HEALTH_MON: health_mon, constants.POOL: pool, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer, constants.UPDATE_DICT: health_monitor_updates}) with tf_logging.DynamicLoggingListener(update_hm_tf, log=LOG): update_hm_tf.run() @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_listener(self, listener_id): """Creates a listener. :param listener_id: ID of the listener to create :returns: None :raises NoResultFound: Unable to find the object """ listener = self._listener_repo.get(db_apis.get_session(), id=listener_id) if not listener: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'listener', listener_id) raise db_exceptions.NoResultFound load_balancer = listener.load_balancer listeners = load_balancer.listeners create_listener_tf = self._taskflow_load(self._listener_flows. get_create_listener_flow(), store={constants.LOADBALANCER: load_balancer, constants.LISTENERS: listeners}) with tf_logging.DynamicLoggingListener(create_listener_tf, log=LOG): create_listener_tf.run() def delete_listener(self, listener_id): """Deletes a listener. :param listener_id: ID of the listener to delete :returns: None :raises ListenerNotFound: The referenced listener was not found """ listener = self._listener_repo.get(db_apis.get_session(), id=listener_id) load_balancer = listener.load_balancer delete_listener_tf = self._taskflow_load( self._listener_flows.get_delete_listener_flow(), store={constants.LOADBALANCER: load_balancer, constants.LISTENER: listener}) with tf_logging.DynamicLoggingListener(delete_listener_tf, log=LOG): delete_listener_tf.run() def update_listener(self, listener_id, listener_updates): """Updates a listener. :param listener_id: ID of the listener to update :param listener_updates: Dict containing updated listener attributes :returns: None :raises ListenerNotFound: The referenced listener was not found """ listener = None try: listener = self._get_db_obj_until_pending_update( self._listener_repo, listener_id) except tenacity.RetryError as e: LOG.warning('Listener did not go into %s in 60 seconds. ' 'This either due to an in-progress Octavia upgrade ' 'or an overloaded and failing database. Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) listener = e.last_attempt.result() load_balancer = listener.load_balancer update_listener_tf = self._taskflow_load(self._listener_flows. 
get_update_listener_flow(), store={constants.LISTENER: listener, constants.LOADBALANCER: load_balancer, constants.UPDATE_DICT: listener_updates, constants.LISTENERS: [listener]}) with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG): update_listener_tf.run() @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_load_balancer(self, load_balancer_id, flavor=None, availability_zone=None): """Creates a load balancer by allocating Amphorae. First tries to allocate an existing Amphora in READY state. If none are available it will attempt to build one specifically for this load balancer. :param load_balancer_id: ID of the load balancer to create :returns: None :raises NoResultFound: Unable to find the object """ lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id) if not lb: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'load_balancer', load_balancer_id) raise db_exceptions.NoResultFound # TODO(johnsom) convert this to octavia_lib constant flavor # once octavia is transitioned to use octavia_lib store = {constants.LOADBALANCER_ID: load_balancer_id, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, constants.FLAVOR: flavor, constants.AVAILABILITY_ZONE: availability_zone} topology = lb.topology if (not CONF.nova.enable_anti_affinity or topology == constants.TOPOLOGY_SINGLE): store[constants.SERVER_GROUP_ID] = None store[constants.UPDATE_DICT] = { constants.TOPOLOGY: topology } create_lb_flow = self._lb_flows.get_create_load_balancer_flow( topology=topology, listeners=lb.listeners) create_lb_tf = self._taskflow_load(create_lb_flow, store=store) with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG): create_lb_tf.run() def delete_load_balancer(self, load_balancer_id, cascade=False): """Deletes a load balancer by de-allocating Amphorae. :param load_balancer_id: ID of the load balancer to delete :returns: None :raises LBNotFound: The referenced load balancer was not found """ lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id) if cascade: (flow, store) = self._lb_flows.get_cascade_delete_load_balancer_flow(lb) else: (flow, store) = self._lb_flows.get_delete_load_balancer_flow(lb) store.update({constants.LOADBALANCER: lb, constants.SERVER_GROUP_ID: lb.server_group_id}) delete_lb_tf = self._taskflow_load(flow, store=store) with tf_logging.DynamicLoggingListener(delete_lb_tf, log=LOG): delete_lb_tf.run() def update_load_balancer(self, load_balancer_id, load_balancer_updates): """Updates a load balancer. :param load_balancer_id: ID of the load balancer to update :param load_balancer_updates: Dict containing updated load balancer :returns: None :raises LBNotFound: The referenced load balancer was not found """ lb = None try: lb = self._get_db_obj_until_pending_update( self._lb_repo, load_balancer_id) except tenacity.RetryError as e: LOG.warning('Load balancer did not go into %s in 60 seconds. ' 'This either due to an in-progress Octavia upgrade ' 'or an overloaded and failing database. 
Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) lb = e.last_attempt.result() listeners, _ = self._listener_repo.get_all( db_apis.get_session(), load_balancer_id=load_balancer_id) update_lb_tf = self._taskflow_load( self._lb_flows.get_update_load_balancer_flow(), store={constants.LOADBALANCER: lb, constants.LISTENERS: listeners, constants.UPDATE_DICT: load_balancer_updates}) with tf_logging.DynamicLoggingListener(update_lb_tf, log=LOG): update_lb_tf.run() @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_member(self, member_id): """Creates a pool member. :param member_id: ID of the member to create :returns: None :raises NoSuitablePool: Unable to find the node pool """ member = self._member_repo.get(db_apis.get_session(), id=member_id) if not member: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'member', member_id) raise db_exceptions.NoResultFound pool = member.pool listeners = pool.listeners load_balancer = pool.load_balancer store = { constants.MEMBER: member, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer, constants.POOL: pool} if load_balancer.availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), load_balancer.availability_zone)) else: store[constants.AVAILABILITY_ZONE] = {} create_member_tf = self._taskflow_load( self._member_flows.get_create_member_flow(), store=store) with tf_logging.DynamicLoggingListener(create_member_tf, log=LOG): create_member_tf.run() def delete_member(self, member_id): """Deletes a pool member. :param member_id: ID of the member to delete :returns: None :raises MemberNotFound: The referenced member was not found """ member = self._member_repo.get(db_apis.get_session(), id=member_id) pool = member.pool listeners = pool.listeners load_balancer = pool.load_balancer store = { constants.MEMBER: member, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer, constants.POOL: pool} if load_balancer.availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), load_balancer.availability_zone)) else: store[constants.AVAILABILITY_ZONE] = {} delete_member_tf = self._taskflow_load( self._member_flows.get_delete_member_flow(), store=store ) with tf_logging.DynamicLoggingListener(delete_member_tf, log=LOG): delete_member_tf.run() @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def batch_update_members(self, old_member_ids, new_member_ids, updated_members): new_members = [self._member_repo.get(db_apis.get_session(), id=mid) for mid in new_member_ids] # The API may not have commited all of the new member records yet. # Make sure we retry looking them up. if None in new_members or len(new_members) != len(new_member_ids): LOG.warning('Failed to fetch one of the new members from DB. 
' 'Retrying for up to 60 seconds.') raise db_exceptions.NoResultFound old_members = [self._member_repo.get(db_apis.get_session(), id=mid) for mid in old_member_ids] updated_members = [ (self._member_repo.get(db_apis.get_session(), id=m.get('id')), m) for m in updated_members] if old_members: pool = old_members[0].pool elif new_members: pool = new_members[0].pool else: pool = updated_members[0][0].pool listeners = pool.listeners load_balancer = pool.load_balancer store = { constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer, constants.POOL: pool} if load_balancer.availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), load_balancer.availability_zone)) else: store[constants.AVAILABILITY_ZONE] = {} batch_update_members_tf = self._taskflow_load( self._member_flows.get_batch_update_members_flow( old_members, new_members, updated_members), store=store) with tf_logging.DynamicLoggingListener(batch_update_members_tf, log=LOG): batch_update_members_tf.run() def update_member(self, member_id, member_updates): """Updates a pool member. :param member_id: ID of the member to update :param member_updates: Dict containing updated member attributes :returns: None :raises MemberNotFound: The referenced member was not found """ try: member = self._get_db_obj_until_pending_update( self._member_repo, member_id) except tenacity.RetryError as e: LOG.warning('Member did not go into %s in 60 seconds. ' 'This either due to an in-progress Octavia upgrade ' 'or an overloaded and failing database. Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) member = e.last_attempt.result() pool = member.pool listeners = pool.listeners load_balancer = pool.load_balancer store = { constants.MEMBER: member, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer, constants.POOL: pool, constants.UPDATE_DICT: member_updates} if load_balancer.availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), load_balancer.availability_zone)) else: store[constants.AVAILABILITY_ZONE] = {} update_member_tf = self._taskflow_load( self._member_flows.get_update_member_flow(), store=store) with tf_logging.DynamicLoggingListener(update_member_tf, log=LOG): update_member_tf.run() @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_pool(self, pool_id): """Creates a node pool. :param pool_id: ID of the pool to create :returns: None :raises NoResultFound: Unable to find the object """ pool = self._pool_repo.get(db_apis.get_session(), id=pool_id) if not pool: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'pool', pool_id) raise db_exceptions.NoResultFound listeners = pool.listeners load_balancer = pool.load_balancer create_pool_tf = self._taskflow_load(self._pool_flows. get_create_pool_flow(), store={constants.POOL: pool, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer}) with tf_logging.DynamicLoggingListener(create_pool_tf, log=LOG): create_pool_tf.run() def delete_pool(self, pool_id): """Deletes a node pool. 
:param pool_id: ID of the pool to delete :returns: None :raises PoolNotFound: The referenced pool was not found """ pool = self._pool_repo.get(db_apis.get_session(), id=pool_id) load_balancer = pool.load_balancer listeners = pool.listeners delete_pool_tf = self._taskflow_load( self._pool_flows.get_delete_pool_flow(), store={constants.POOL: pool, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer}) with tf_logging.DynamicLoggingListener(delete_pool_tf, log=LOG): delete_pool_tf.run() def update_pool(self, pool_id, pool_updates): """Updates a node pool. :param pool_id: ID of the pool to update :param pool_updates: Dict containing updated pool attributes :returns: None :raises PoolNotFound: The referenced pool was not found """ pool = None try: pool = self._get_db_obj_until_pending_update( self._pool_repo, pool_id) except tenacity.RetryError as e: LOG.warning('Pool did not go into %s in 60 seconds. ' 'This either due to an in-progress Octavia upgrade ' 'or an overloaded and failing database. Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) pool = e.last_attempt.result() listeners = pool.listeners load_balancer = pool.load_balancer update_pool_tf = self._taskflow_load(self._pool_flows. get_update_pool_flow(), store={constants.POOL: pool, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer, constants.UPDATE_DICT: pool_updates}) with tf_logging.DynamicLoggingListener(update_pool_tf, log=LOG): update_pool_tf.run() @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_l7policy(self, l7policy_id): """Creates an L7 Policy. :param l7policy_id: ID of the l7policy to create :returns: None :raises NoResultFound: Unable to find the object """ l7policy = self._l7policy_repo.get(db_apis.get_session(), id=l7policy_id) if not l7policy: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'l7policy', l7policy_id) raise db_exceptions.NoResultFound listeners = [l7policy.listener] load_balancer = l7policy.listener.load_balancer create_l7policy_tf = self._taskflow_load( self._l7policy_flows.get_create_l7policy_flow(), store={constants.L7POLICY: l7policy, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer}) with tf_logging.DynamicLoggingListener(create_l7policy_tf, log=LOG): create_l7policy_tf.run() def delete_l7policy(self, l7policy_id): """Deletes an L7 policy. :param l7policy_id: ID of the l7policy to delete :returns: None :raises L7PolicyNotFound: The referenced l7policy was not found """ l7policy = self._l7policy_repo.get(db_apis.get_session(), id=l7policy_id) load_balancer = l7policy.listener.load_balancer listeners = [l7policy.listener] delete_l7policy_tf = self._taskflow_load( self._l7policy_flows.get_delete_l7policy_flow(), store={constants.L7POLICY: l7policy, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer}) with tf_logging.DynamicLoggingListener(delete_l7policy_tf, log=LOG): delete_l7policy_tf.run() def update_l7policy(self, l7policy_id, l7policy_updates): """Updates an L7 policy. 
:param l7policy_id: ID of the l7policy to update :param l7policy_updates: Dict containing updated l7policy attributes :returns: None :raises L7PolicyNotFound: The referenced l7policy was not found """ l7policy = None try: l7policy = self._get_db_obj_until_pending_update( self._l7policy_repo, l7policy_id) except tenacity.RetryError as e: LOG.warning('L7 policy did not go into %s in 60 seconds. ' 'This either due to an in-progress Octavia upgrade ' 'or an overloaded and failing database. Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) l7policy = e.last_attempt.result() listeners = [l7policy.listener] load_balancer = l7policy.listener.load_balancer update_l7policy_tf = self._taskflow_load( self._l7policy_flows.get_update_l7policy_flow(), store={constants.L7POLICY: l7policy, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer, constants.UPDATE_DICT: l7policy_updates}) with tf_logging.DynamicLoggingListener(update_l7policy_tf, log=LOG): update_l7policy_tf.run() @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_l7rule(self, l7rule_id): """Creates an L7 Rule. :param l7rule_id: ID of the l7rule to create :returns: None :raises NoResultFound: Unable to find the object """ l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id) if not l7rule: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'l7rule', l7rule_id) raise db_exceptions.NoResultFound l7policy = l7rule.l7policy listeners = [l7policy.listener] load_balancer = l7policy.listener.load_balancer create_l7rule_tf = self._taskflow_load( self._l7rule_flows.get_create_l7rule_flow(), store={constants.L7RULE: l7rule, constants.L7POLICY: l7policy, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer}) with tf_logging.DynamicLoggingListener(create_l7rule_tf, log=LOG): create_l7rule_tf.run() def delete_l7rule(self, l7rule_id): """Deletes an L7 rule. :param l7rule_id: ID of the l7rule to delete :returns: None :raises L7RuleNotFound: The referenced l7rule was not found """ l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule_id) l7policy = l7rule.l7policy load_balancer = l7policy.listener.load_balancer listeners = [l7policy.listener] delete_l7rule_tf = self._taskflow_load( self._l7rule_flows.get_delete_l7rule_flow(), store={constants.L7RULE: l7rule, constants.L7POLICY: l7policy, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer}) with tf_logging.DynamicLoggingListener(delete_l7rule_tf, log=LOG): delete_l7rule_tf.run() def update_l7rule(self, l7rule_id, l7rule_updates): """Updates an L7 rule. :param l7rule_id: ID of the l7rule to update :param l7rule_updates: Dict containing updated l7rule attributes :returns: None :raises L7RuleNotFound: The referenced l7rule was not found """ l7rule = None try: l7rule = self._get_db_obj_until_pending_update( self._l7rule_repo, l7rule_id) except tenacity.RetryError as e: LOG.warning('L7 rule did not go into %s in 60 seconds. ' 'This either due to an in-progress Octavia upgrade ' 'or an overloaded and failing database. 
Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) l7rule = e.last_attempt.result() l7policy = l7rule.l7policy listeners = [l7policy.listener] load_balancer = l7policy.listener.load_balancer update_l7rule_tf = self._taskflow_load( self._l7rule_flows.get_update_l7rule_flow(), store={constants.L7RULE: l7rule, constants.L7POLICY: l7policy, constants.LISTENERS: listeners, constants.LOADBALANCER: load_balancer, constants.UPDATE_DICT: l7rule_updates}) with tf_logging.DynamicLoggingListener(update_l7rule_tf, log=LOG): update_l7rule_tf.run() def failover_amphora(self, amphora_id): """Perform failover operations for an amphora. Note: This expects the load balancer to already be in provisioning_status=PENDING_UPDATE state. :param amphora_id: ID for amphora to failover :returns: None :raises octavia.common.exceptions.NotFound: The referenced amphora was not found """ amphora = None try: amphora = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) if amphora is None: LOG.error('Amphora failover for amphora %s failed because ' 'there is no record of this amphora in the ' 'database. Check that the [house_keeping] ' 'amphora_expiry_age configuration setting is not ' 'too short. Skipping failover.', amphora_id) raise exceptions.NotFound(resource=constants.AMPHORA, id=amphora_id) if amphora.status == constants.DELETED: LOG.warning('Amphora %s is marked DELETED in the database but ' 'was submitted for failover. Deleting it from the ' 'amphora health table to exclude it from health ' 'checks and skipping the failover.', amphora.id) self._amphora_health_repo.delete(db_apis.get_session(), amphora_id=amphora.id) return loadbalancer = None if amphora.load_balancer_id: loadbalancer = self._lb_repo.get(db_apis.get_session(), id=amphora.load_balancer_id) lb_amp_count = None if loadbalancer: if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: lb_amp_count = 2 elif loadbalancer.topology == constants.TOPOLOGY_SINGLE: lb_amp_count = 1 amp_failover_flow = self._amphora_flows.get_failover_amphora_flow( amphora, lb_amp_count) az_metadata = {} flavor = {} lb_id = None vip = None server_group_id = None if loadbalancer: lb_id = loadbalancer.id if loadbalancer.flavor_id: flavor = self._flavor_repo.get_flavor_metadata_dict( db_apis.get_session(), loadbalancer.flavor_id) flavor[constants.LOADBALANCER_TOPOLOGY] = ( loadbalancer.topology) else: flavor = {constants.LOADBALANCER_TOPOLOGY: loadbalancer.topology} if loadbalancer.availability_zone: az_metadata = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), loadbalancer.availability_zone)) vip = loadbalancer.vip server_group_id = loadbalancer.server_group_id stored_params = {constants.AVAILABILITY_ZONE: az_metadata, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, constants.FLAVOR: flavor, constants.LOADBALANCER: loadbalancer, constants.SERVER_GROUP_ID: server_group_id, constants.LOADBALANCER_ID: lb_id, constants.VIP: vip} failover_amphora_tf = self._taskflow_load(amp_failover_flow, store=stored_params) with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG): failover_amphora_tf.run() LOG.info("Successfully completed the failover for an amphora: %s", {"id": amphora_id, "load_balancer_id": lb_id, "lb_network_ip": amphora.lb_network_ip, "compute_id": amphora.compute_id, "role": amphora.role}) except Exception as e: with excutils.save_and_reraise_exception(reraise=False): LOG.exception("Amphora %s failover exception: %s", amphora_id, str(e)) 
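                # NOTE: reraise=False means this context manager swallows the
                # exception on exit; the failover simply marks the amphora
                # (and its load balancer, if any) as ERROR below and returns
                # instead of propagating the error to the caller.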
self._amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) if amphora and amphora.load_balancer_id: self._lb_repo.update( db_apis.get_session(), amphora.load_balancer_id, provisioning_status=constants.ERROR) @staticmethod def _get_amphorae_for_failover(load_balancer): """Returns an ordered list of amphora to failover. :param load_balancer: The load balancer being failed over. :returns: An ordered list of amphora to failover, first amp to failover is last in the list :raises octavia.common.exceptions.InvalidTopology: LB has an unknown topology. """ if load_balancer.topology == constants.TOPOLOGY_SINGLE: # In SINGLE topology, amp failover order does not matter return [a for a in load_balancer.amphorae if a.status != constants.DELETED] if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: # In Active/Standby we should preference the standby amp # for failover first in case the Active is still able to pass # traffic. # Note: The active amp can switch at any time and in less than a # second, so this is "best effort". amphora_driver = utils.get_amphora_driver() timeout_dict = { constants.CONN_MAX_RETRIES: CONF.haproxy_amphora.failover_connection_max_retries, constants.CONN_RETRY_INTERVAL: CONF.haproxy_amphora.failover_connection_retry_interval} amps = [] selected_amp = None for amp in load_balancer.amphorae: if amp.status == constants.DELETED: continue if selected_amp is None: try: if amphora_driver.get_interface_from_ip( amp, load_balancer.vip.ip_address, timeout_dict): # This is a potential ACTIVE, add it to the list amps.append(amp) else: # This one doesn't have the VIP IP, so start # failovers here. selected_amp = amp LOG.debug("Selected amphora %s as the initial " "failover amphora.", amp.id) except Exception: # This amphora is broken, so start failovers here. selected_amp = amp else: # We have already found a STANDBY, so add the rest to the # list without querying them. amps.append(amp) # Put the selected amphora at the end of the list so it is # first to failover. if selected_amp: amps.append(selected_amp) return amps LOG.error('Unknown load balancer topology found: %s, aborting ' 'failover.', load_balancer.topology) raise exceptions.InvalidTopology(topology=load_balancer.topology) def failover_loadbalancer(self, load_balancer_id): """Perform failover operations for a load balancer. Note: This expects the load balancer to already be in provisioning_status=PENDING_UPDATE state. :param load_balancer_id: ID for load balancer to failover :returns: None :raises octavia.commom.exceptions.NotFound: The load balancer was not found. """ try: lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id) if lb is None: raise exceptions.NotFound(resource=constants.LOADBALANCER, id=load_balancer_id) # Get the ordered list of amphorae to failover for this LB. amps = self._get_amphorae_for_failover(lb) if lb.topology == constants.TOPOLOGY_SINGLE: if len(amps) != 1: LOG.warning('%d amphorae found on load balancer %s where ' 'one should exist. Repairing.', len(amps), load_balancer_id) elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: if len(amps) != 2: LOG.warning('%d amphorae found on load balancer %s where ' 'two should exist. Repairing.', len(amps), load_balancer_id) else: LOG.error('Unknown load balancer topology found: %s, aborting ' 'failover!', lb.topology) raise exceptions.InvalidTopology(topology=lb.topology) # Build our failover flow. 
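            # NOTE: 'amps' is ordered by _get_amphorae_for_failover() so that
            # the amphora to fail over first (the standby, or a broken
            # amphora) is last in the list; the flow builder consumes it in
            # that order.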
lb_failover_flow = self._lb_flows.get_failover_LB_flow(amps, lb) # We must provide a topology in the flavor definition # here for the amphora to be created with the correct # configuration. if lb.flavor_id: flavor = self._flavor_repo.get_flavor_metadata_dict( db_apis.get_session(), lb.flavor_id) flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology else: flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology} stored_params = {constants.LOADBALANCER: lb, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, constants.SERVER_GROUP_ID: lb.server_group_id, constants.LOADBALANCER_ID: lb.id, constants.FLAVOR: flavor} if lb.availability_zone: stored_params[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), lb.availability_zone)) else: stored_params[constants.AVAILABILITY_ZONE] = {} failover_lb_tf = self._taskflow_load(lb_failover_flow, store=stored_params) with tf_logging.DynamicLoggingListener(failover_lb_tf, log=LOG): failover_lb_tf.run() LOG.info('Failover of load balancer %s completed successfully.', lb.id) except Exception as e: with excutils.save_and_reraise_exception(reraise=False): LOG.exception("LB %(lbid)s failover exception: %(exc)s", {'lbid': load_balancer_id, 'exc': str(e)}) self._lb_repo.update( db_apis.get_session(), load_balancer_id, provisioning_status=constants.ERROR) def amphora_cert_rotation(self, amphora_id): """Perform cert rotation for an amphora. :param amphora_id: ID for amphora to rotate :returns: None :raises AmphoraNotFound: The referenced amphora was not found """ amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) LOG.info("Start amphora cert rotation, amphora's id is: %s", amphora_id) certrotation_amphora_tf = self._taskflow_load( self._amphora_flows.cert_rotate_amphora_flow(), store={constants.AMPHORA: amp, constants.AMPHORA_ID: amp.id}) with tf_logging.DynamicLoggingListener(certrotation_amphora_tf, log=LOG): certrotation_amphora_tf.run() LOG.info("Finished amphora cert rotation, amphora's id was: %s", amphora_id) def update_amphora_agent_config(self, amphora_id): """Update the amphora agent configuration. Note: This will update the amphora agent configuration file and update the running configuration for mutatable configuration items. :param amphora_id: ID of the amphora to update. 
:returns: None """ LOG.info("Start amphora agent configuration update, amphora's id " "is: %s", amphora_id) amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(), amphora_id) flavor = {} if lb.flavor_id: flavor = self._flavor_repo.get_flavor_metadata_dict( db_apis.get_session(), lb.flavor_id) update_amphora_tf = self._taskflow_load( self._amphora_flows.update_amphora_config_flow(), store={constants.AMPHORA: amp, constants.FLAVOR: flavor}) with tf_logging.DynamicLoggingListener(update_amphora_tf, log=LOG): update_amphora_tf.run() LOG.info("Finished amphora agent configuration update, amphora's id " "was: %s", amphora_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/worker/v1/flows/0000775000175000017500000000000000000000000021774 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/flows/__init__.py0000664000175000017500000000107400000000000024107 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/flows/amphora_flows.py0000664000175000017500000007635500000000000025227 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from oslo_log import log as logging from taskflow.patterns import graph_flow from taskflow.patterns import linear_flow from taskflow.patterns import unordered_flow from octavia.common import constants from octavia.common import utils from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.controller.worker.v1.tasks import cert_task from octavia.controller.worker.v1.tasks import compute_tasks from octavia.controller.worker.v1.tasks import database_tasks from octavia.controller.worker.v1.tasks import lifecycle_tasks from octavia.controller.worker.v1.tasks import network_tasks from octavia.controller.worker.v1.tasks import retry_tasks CONF = cfg.CONF LOG = logging.getLogger(__name__) class AmphoraFlows(object): def get_create_amphora_flow(self): """Creates a flow to create an amphora. 
:returns: The flow for creating the amphora """ create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW) create_amphora_flow.add(database_tasks.CreateAmphoraInDB( provides=constants.AMPHORA_ID)) create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask( requires=constants.AMPHORA_ID)) create_amphora_flow.add(cert_task.GenerateServerPEMTask( provides=constants.SERVER_PEM)) create_amphora_flow.add( database_tasks.UpdateAmphoraDBCertExpiration( requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) create_amphora_flow.add(compute_tasks.CertComputeCreate( requires=(constants.AMPHORA_ID, constants.SERVER_PEM, constants.SERVER_GROUP_ID, constants.BUILD_TYPE_PRIORITY, constants.FLAVOR, constants.AVAILABILITY_ZONE), provides=constants.COMPUTE_ID)) create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB( requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) create_amphora_flow.add(compute_tasks.ComputeActiveWait( requires=(constants.COMPUTE_ID, constants.AMPHORA_ID), provides=constants.COMPUTE_OBJ)) create_amphora_flow.add(database_tasks.UpdateAmphoraInfo( requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), provides=constants.AMPHORA)) create_amphora_flow.add( amphora_driver_tasks.AmphoraComputeConnectivityWait( requires=constants.AMPHORA)) create_amphora_flow.add(database_tasks.ReloadAmphora( requires=constants.AMPHORA_ID, provides=constants.AMPHORA)) create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize( requires=constants.AMPHORA)) create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB( requires=constants.AMPHORA)) return create_amphora_flow def _get_post_map_lb_subflow(self, prefix, role): """Set amphora type after mapped to lb.""" sf_name = prefix + '-' + constants.POST_MAP_AMP_TO_LB_SUBFLOW post_map_amp_to_lb = linear_flow.Flow( sf_name) post_map_amp_to_lb.add(database_tasks.ReloadAmphora( name=sf_name + '-' + constants.RELOAD_AMPHORA, requires=constants.AMPHORA_ID, provides=constants.AMPHORA)) post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate( name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK, requires=(constants.AMPHORA, constants.FLAVOR))) if role == constants.ROLE_MASTER: post_map_amp_to_lb.add(database_tasks.MarkAmphoraMasterInDB( name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_BACKUP: post_map_amp_to_lb.add(database_tasks.MarkAmphoraBackupInDB( name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_STANDALONE: post_map_amp_to_lb.add(database_tasks.MarkAmphoraStandAloneInDB( name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, requires=constants.AMPHORA)) return post_map_amp_to_lb def _get_create_amp_for_lb_subflow(self, prefix, role, is_spare=False): """Create a new amphora for lb.""" sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW create_amp_for_lb_subflow = linear_flow.Flow(sf_name) create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB( name=sf_name + '-' + constants.CREATE_AMPHORA_INDB, requires=constants.LOADBALANCER_ID, provides=constants.AMPHORA_ID)) create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask( name=sf_name + '-' + constants.GENERATE_SERVER_PEM, provides=constants.SERVER_PEM)) create_amp_for_lb_subflow.add( database_tasks.UpdateAmphoraDBCertExpiration( name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION, requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( name=sf_name + '-' + 
constants.CERT_COMPUTE_CREATE, requires=(constants.AMPHORA_ID, constants.SERVER_PEM, constants.BUILD_TYPE_PRIORITY, constants.SERVER_GROUP_ID, constants.FLAVOR, constants.AVAILABILITY_ZONE), provides=constants.COMPUTE_ID)) create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId( name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID, requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB( name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB, requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) create_amp_for_lb_subflow.add(compute_tasks.ComputeActiveWait( name=sf_name + '-' + constants.COMPUTE_WAIT, requires=(constants.COMPUTE_ID, constants.AMPHORA_ID, constants.AVAILABILITY_ZONE), provides=constants.COMPUTE_OBJ)) create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo( name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO, requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), provides=constants.AMPHORA)) create_amp_for_lb_subflow.add( amphora_driver_tasks.AmphoraComputeConnectivityWait( name=sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT, requires=constants.AMPHORA)) create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize( name=sf_name + '-' + constants.AMPHORA_FINALIZE, requires=constants.AMPHORA)) if is_spare: create_amp_for_lb_subflow.add( database_tasks.MarkAmphoraReadyInDB( name=sf_name + '-' + constants.MARK_AMPHORA_READY_INDB, requires=constants.AMPHORA)) else: create_amp_for_lb_subflow.add( database_tasks.MarkAmphoraAllocatedInDB( name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB, requires=(constants.AMPHORA, constants.LOADBALANCER_ID))) create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora( name=sf_name + '-' + constants.RELOAD_AMPHORA, requires=constants.AMPHORA_ID, provides=constants.AMPHORA)) if role == constants.ROLE_MASTER: create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB( name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_BACKUP: create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB( name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_STANDALONE: create_amp_for_lb_subflow.add( database_tasks.MarkAmphoraStandAloneInDB( name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, requires=constants.AMPHORA)) return create_amp_for_lb_subflow def _allocate_amp_to_lb_decider(self, history): """Decides if the lb shall be mapped to a spare amphora :return: True if a spare amphora exists in DB """ return list(history.values())[0] is not None def _create_new_amp_for_lb_decider(self, history): """Decides if a new amphora must be created for the lb :return: True if there is no spare amphora """ return list(history.values())[0] is None def get_amphora_for_lb_subflow( self, prefix, role=constants.ROLE_STANDALONE, is_spare=False): """Tries to allocate a spare amphora to a loadbalancer; if none exists, creates a new amphora. """ sf_name = prefix + '-' + constants.GET_AMPHORA_FOR_LB_SUBFLOW
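# For reference, a minimal, self-contained sketch of the graph-flow plus
# decider pattern the two deciders above implement. All names here are
# illustrative stand-ins, not Octavia code: one task attempts an
# allocation and may return None, and two downstream branches are linked
# with deciders that inspect that result, so exactly one branch runs.
from taskflow import engines
from taskflow import task
from taskflow.patterns import graph_flow


class TryAllocate(task.Task):
    default_provides = 'resource_id'

    def execute(self):
        return None  # Pretend no spare resource was available.


class UseExisting(task.Task):
    def execute(self, resource_id):
        print('mapped existing resource', resource_id)


class CreateNew(task.Task):
    def execute(self):
        print('created a new resource')


sketch_flow = graph_flow.Flow('allocate-or-create')
allocate = TryAllocate()
use_existing = UseExisting()
create_new = CreateNew()
sketch_flow.add(allocate, use_existing, create_new)
# Run 'use_existing' only when the allocation produced a result...
sketch_flow.link(allocate, use_existing,
                 decider=lambda history: list(history.values())[0] is not None,
                 decider_depth='flow')
# ...and 'create_new' only when it did not.
sketch_flow.link(allocate, create_new,
                 decider=lambda history: list(history.values())[0] is None,
                 decider_depth='flow')
engines.run(sketch_flow)  # Prints: created a new resource
# Don't replace a spare with another spare, just build a fresh one.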
if is_spare: get_spare_amp_flow = linear_flow.Flow(sf_name) get_spare_amp_flow.add(self._get_create_amp_for_lb_subflow( prefix, role, is_spare=is_spare)) return get_spare_amp_flow # We need a graph flow here for a conditional flow amp_for_lb_flow = graph_flow.Flow(sf_name) # Setup the task that maps an amphora to a load balancer allocate_and_associate_amp = database_tasks.MapLoadbalancerToAmphora( name=sf_name + '-' + constants.MAP_LOADBALANCER_TO_AMPHORA, requires=(constants.LOADBALANCER_ID, constants.FLAVOR, constants.AVAILABILITY_ZONE), provides=constants.AMPHORA_ID) # Define a subflow for when we successfully map an amphora map_lb_to_amp = self._get_post_map_lb_subflow(prefix, role) # Define a subflow for when we can't map an amphora create_amp = self._get_create_amp_for_lb_subflow(prefix, role) # Add them to the graph flow amp_for_lb_flow.add(allocate_and_associate_amp, map_lb_to_amp, create_amp) # Setup the decider for the path if we can map an amphora amp_for_lb_flow.link(allocate_and_associate_amp, map_lb_to_amp, decider=self._allocate_amp_to_lb_decider, decider_depth='flow') # Setup the decider for the path if we can't map an amphora amp_for_lb_flow.link(allocate_and_associate_amp, create_amp, decider=self._create_new_amp_for_lb_decider, decider_depth='flow') return amp_for_lb_flow def get_delete_amphora_flow( self, amphora, retry_attempts=CONF.controller_worker.amphora_delete_retries, retry_interval=( CONF.controller_worker.amphora_delete_retry_interval)): """Creates a subflow to delete an amphora and its port. This flow is idempotent and safe to retry. :param amphora: An amphora object. :param retry_attempts: The number of times the flow is retried. :param retry_interval: The time to wait, in seconds, between retries. :returns: The subflow for deleting the amphora. :raises AmphoraNotFound: The referenced Amphora was not found. """ delete_amphora_flow = linear_flow.Flow( name=constants.DELETE_AMPHORA_FLOW + '-' + amphora.id, retry=retry_tasks.SleepingRetryTimesController( name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' + amphora.id, attempts=retry_attempts, interval=retry_interval)) delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora.id, inject={constants.AMPHORA: amphora})) delete_amphora_flow.add( database_tasks.MarkAmphoraPendingDeleteInDB( name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora.id, inject={constants.AMPHORA: amphora})) delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy( name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora.id, inject={constants.AMPHORA: amphora})) delete_amphora_flow.add(compute_tasks.ComputeDelete( name=constants.DELETE_AMPHORA + '-' + amphora.id, inject={constants.AMPHORA: amphora, constants.PASSIVE_FAILURE: True})) delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring( name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora.id, inject={constants.AMPHORA: amphora})) delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB( name=constants.MARK_AMPHORA_DELETED + '-' + amphora.id, inject={constants.AMPHORA: amphora})) if amphora.vrrp_port_id: delete_amphora_flow.add(network_tasks.DeletePort( name=(constants.DELETE_PORT + '-' + str(amphora.id) + '-' + str(amphora.vrrp_port_id)), inject={constants.PORT_ID: amphora.vrrp_port_id, constants.PASSIVE_FAILURE: True})) # TODO(johnsom) What about cleaning up any member ports? # maybe we should get the list of attached ports prior to delete # and call delete on them here.
Fix this as part of # https://storyboard.openstack.org/#!/story/2007077 return delete_amphora_flow def get_vrrp_subflow(self, prefix, timeout_dict=None, create_vrrp_group=True): sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW vrrp_subflow = linear_flow.Flow(sf_name) # Optimization for failover flow. No reason to call this # when configuring the secondary amphora. if create_vrrp_group: vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB( name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, requires=constants.LOADBALANCER_ID)) vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE_NETWORK_CONFIG)) # VRRP update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow') # We have three tasks to run in order, per amphora amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow') amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF, requires=constants.AMPHORAE, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict}, provides=constants.AMP_VRRP_INT)) amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, constants.AMP_VRRP_INT), inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( name=sf_name + '-0-' + constants.AMP_VRRP_START, requires=constants.AMPHORAE, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow') amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF, requires=constants.AMPHORAE, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict}, provides=constants.AMP_VRRP_INT)) amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, constants.AMP_VRRP_INT), inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( name=sf_name + '-1-' + constants.AMP_VRRP_START, requires=constants.AMPHORAE, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) update_amps_subflow.add(amp_0_subflow) update_amps_subflow.add(amp_1_subflow) vrrp_subflow.add(update_amps_subflow) return vrrp_subflow def cert_rotate_amphora_flow(self): """Implement rotation for amphora's cert. 1. Create a new certificate 2. Upload the cert to amphora 3. update the newly created certificate info to amphora 4. 
update the cert_busy flag to be false after rotation :returns: The flow for updating an amphora """ rotated_amphora_flow = linear_flow.Flow( constants.CERT_ROTATE_AMPHORA_FLOW) rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( requires=constants.AMPHORA)) # create a new certificate, the returned value is the newly created # certificate rotated_amphora_flow.add(cert_task.GenerateServerPEMTask( provides=constants.SERVER_PEM)) # update it in amphora task rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload( requires=(constants.AMPHORA, constants.SERVER_PEM))) # update the newly created certificate info to amphora rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration( requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) # update the cert_busy flag to be false after rotation rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse( requires=constants.AMPHORA)) return rotated_amphora_flow def update_amphora_config_flow(self): """Creates a flow to update the amphora agent configuration. :returns: The flow for updating an amphora """ update_amphora_flow = linear_flow.Flow( constants.UPDATE_AMPHORA_CONFIG_FLOW) update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( requires=constants.AMPHORA)) update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate( requires=(constants.AMPHORA, constants.FLAVOR))) return update_amphora_flow def get_amphora_for_lb_failover_subflow( self, prefix, role=constants.ROLE_STANDALONE, failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False, is_spare=False): """Creates a new amphora that will be used in a failover flow. :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer :provides: amphora_id, amphora :param prefix: The flow name prefix to use on the flow and tasks. :param role: The role this amphora will have in the topology. :param failed_amp_vrrp_port_id: The base port ID of the failed amp. :param is_vrrp_ipv6: True if the base port IP is IPv6. :param is_spare: True if we are getting a spare amphora. :return: A Taskflow sub-flow that will create the amphora. """ sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW amp_for_failover_flow = linear_flow.Flow(sf_name) # Try to allocate or boot an amphora instance (unconfigured) amp_for_failover_flow.add(self.get_amphora_for_lb_subflow( prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW, role=role, is_spare=is_spare)) # If we are getting a spare amphora, this is all we need to do. if is_spare: return amp_for_failover_flow # Create the VIP base (aka VRRP) port for the amphora. amp_for_failover_flow.add(network_tasks.CreateVIPBasePort( name=prefix + '-' + constants.CREATE_VIP_BASE_PORT, requires=(constants.VIP, constants.VIP_SG_ID, constants.AMPHORA_ID), provides=constants.BASE_PORT)) # Attach the VIP base (aka VRRP) port to the amphora. amp_for_failover_flow.add(compute_tasks.AttachPort( name=prefix + '-' + constants.ATTACH_PORT, requires=(constants.AMPHORA, constants.PORT), rebind={constants.PORT: constants.BASE_PORT})) # Update the amphora database record with the VIP base port info.
amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails( name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS, requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT))) # Make sure the amphora in the flow storage is up to date # or the vrrp_ip will be empty amp_for_failover_flow.add(database_tasks.ReloadAmphora( name=prefix + '-' + constants.RELOAD_AMPHORA, requires=constants.AMPHORA_ID, provides=constants.AMPHORA)) # Update the amphora networking for the plugged VIP port amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID, requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), provides=constants.AMPHORAE_NETWORK_CONFIG)) # Disable the base (vrrp) port on the failed amphora # This prevents a DAD failure when bringing up the new amphora. # Keepalived will handle this for act/stdby. if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and is_vrrp_ipv6): amp_for_failover_flow.add(network_tasks.AdminDownPort( name=prefix + '-' + constants.ADMIN_DOWN_PORT, inject={constants.PORT_ID: failed_amp_vrrp_port_id})) amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug( name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG, requires=(constants.AMPHORA, constants.LOADBALANCER, constants.AMPHORAE_NETWORK_CONFIG))) # Plug member ports amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta( name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA, requires=(constants.LOADBALANCER, constants.AMPHORA, constants.AVAILABILITY_ZONE, constants.VRRP_PORT), rebind={constants.VRRP_PORT: constants.BASE_PORT}, provides=constants.DELTA)) amp_for_failover_flow.add(network_tasks.HandleNetworkDelta( name=prefix + '-' + constants.HANDLE_NETWORK_DELTA, requires=(constants.AMPHORA, constants.DELTA), provides=constants.ADDED_PORTS)) amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG, requires=(constants.LOADBALANCER, constants.ADDED_PORTS))) return amp_for_failover_flow def get_failover_amphora_flow(self, failed_amphora, lb_amp_count): """Get a Taskflow flow to failover an amphora. 1. Build a replacement amphora. 2. Delete the old amphora. 3. Update the amphorae listener configurations. 4. Update the VRRP configurations if needed. :param failed_amphora: The amphora object to failover. :param lb_amp_count: The number of amphora on this load balancer. :returns: The flow that will provide the failover. 
""" failover_amp_flow = linear_flow.Flow( constants.FAILOVER_AMPHORA_FLOW) # Revert amphora to status ERROR if this flow goes wrong failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amphora})) if failed_amphora.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP): amp_role = 'master_or_backup' elif failed_amphora.role == constants.ROLE_STANDALONE: amp_role = 'standalone' elif failed_amphora.role is None: amp_role = 'spare' else: amp_role = 'undefined' LOG.info("Performing failover for amphora: %s", {"id": failed_amphora.id, "load_balancer_id": failed_amphora.load_balancer_id, "lb_network_ip": failed_amphora.lb_network_ip, "compute_id": failed_amphora.compute_id, "role": amp_role}) failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amphora})) failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amphora})) failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID( requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID)) is_spare = True is_vrrp_ipv6 = False if failed_amphora.load_balancer_id: is_spare = False if failed_amphora.vrrp_ip: is_vrrp_ipv6 = utils.is_ipv6(failed_amphora.vrrp_ip) # Get a replacement amphora and plug all of the networking. # # Do this early as the compute services have been observed to be # unreliable. The community decided the chance that deleting first # would open resources for an instance is less likely than the # compute service failing to boot an instance for other reasons. # TODO(johnsom) Move this back out to run for spares after # delete amphora API is available. failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow( prefix=constants.FAILOVER_LOADBALANCER_FLOW, role=failed_amphora.role, failed_amp_vrrp_port_id=failed_amphora.vrrp_port_id, is_vrrp_ipv6=is_vrrp_ipv6, is_spare=is_spare)) failover_amp_flow.add( self.get_delete_amphora_flow( failed_amphora, retry_attempts=CONF.controller_worker.amphora_delete_retries, retry_interval=( CONF.controller_worker.amphora_delete_retry_interval))) failover_amp_flow.add( database_tasks.DisableAmphoraHealthMonitoring( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amphora})) if not failed_amphora.load_balancer_id: # This is an unallocated amphora (spares pool), we are done. return failover_amp_flow failover_amp_flow.add(database_tasks.GetLoadBalancer( requires=constants.LOADBALANCER_ID, inject={constants.LOADBALANCER_ID: failed_amphora.load_balancer_id}, provides=constants.LOADBALANCER)) failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( name=constants.GET_AMPHORAE_FROM_LB, requires=constants.LOADBALANCER_ID, inject={constants.LOADBALANCER_ID: failed_amphora.load_balancer_id}, provides=constants.AMPHORAE)) # Setup timeouts for our requests to the amphorae timeout_dict = { constants.CONN_MAX_RETRIES: CONF.haproxy_amphora.active_connection_max_retries, constants.CONN_RETRY_INTERVAL: CONF.haproxy_amphora.active_connection_rety_interval} # Listeners update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. 
update_amps_subflow = unordered_flow.Flow( constants.UPDATE_AMPS_SUBFLOW) for amp_index in range(0, lb_amp_count): update_amps_subflow.add( amphora_driver_tasks.AmphoraIndexListenerUpdate( name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE, requires=(constants.LOADBALANCER, constants.AMPHORAE), inject={constants.AMPHORA_INDEX: amp_index, constants.TIMEOUT_DICT: timeout_dict})) failover_amp_flow.add(update_amps_subflow) # Configure and enable keepalived in the amphora if lb_amp_count == 2: failover_amp_flow.add( self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW, timeout_dict, create_vrrp_group=False)) # Reload the listener. This needs to be done here because # it will create the required haproxy check scripts for # the VRRP deployed above. # A "U" or newer amphora-agent will remove the need for this # task here. # TODO(johnsom) Remove this in the "W" cycle reload_listener_subflow = unordered_flow.Flow( constants.AMPHORA_LISTENER_RELOAD_SUBFLOW) for amp_index in range(0, lb_amp_count): reload_listener_subflow.add( amphora_driver_tasks.AmphoraIndexListenersReload( name=(str(amp_index) + '-' + constants.AMPHORA_RELOAD_LISTENER), requires=(constants.LOADBALANCER, constants.AMPHORAE), inject={constants.AMPHORA_INDEX: amp_index, constants.TIMEOUT_DICT: timeout_dict})) failover_amp_flow.add(reload_listener_subflow) # Remove any extraneous ports # Note: Nova sometimes fails to delete ports attached to an instance. # For example, if you create an LB with a listener, then # 'openstack server delete' the amphora, you will see the vrrp # port attached to that instance will remain after the instance # is deleted. # TODO(johnsom) Fix this as part of # https://storyboard.openstack.org/#!/story/2007077 # Mark LB ACTIVE failover_amp_flow.add( database_tasks.MarkLBActiveInDB(mark_subobjects=True, requires=constants.LOADBALANCER)) return failover_amp_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/flows/health_monitor_flows.py0000664000175000017500000001137000000000000026576 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
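# Before the health monitor flows, a standalone sketch (illustrative
# names, not Octavia code) of the unordered-flow fan-out used by the
# amphora flows above: per-amphora tasks are grouped in an unordered
# flow so the engine may run them concurrently, each task receiving its
# index through inject= and the shared amphora list through requires=.
from taskflow import engines
from taskflow import task
from taskflow.patterns import unordered_flow


class UpdatePeerConfig(task.Task):
    def execute(self, amphorae, amphora_index):
        print('updating peer configuration on', amphorae[amphora_index])


fanout = unordered_flow.Flow('update-all-peers')
for idx in range(2):
    fanout.add(UpdatePeerConfig(name='update-peer-%d' % idx,
                                inject={'amphora_index': idx}))
engines.run(fanout, store={'amphorae': ['amp-a', 'amp-b']},
            engine='parallel')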
# from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.controller.worker.v1.tasks import database_tasks from octavia.controller.worker.v1.tasks import lifecycle_tasks from octavia.controller.worker.v1.tasks import model_tasks class HealthMonitorFlows(object): def get_create_health_monitor_flow(self): """Create a flow to create a health monitor :returns: The flow for creating a health monitor """ create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW) create_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( requires=[constants.HEALTH_MON, constants.LISTENERS, constants.LOADBALANCER])) create_hm_flow.add(database_tasks.MarkHealthMonitorPendingCreateInDB( requires=constants.HEALTH_MON)) create_hm_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) create_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( requires=constants.HEALTH_MON)) create_hm_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) create_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return create_hm_flow def get_delete_health_monitor_flow(self): """Create a flow to delete a health monitor :returns: The flow for deleting a health monitor """ delete_hm_flow = linear_flow.Flow(constants.DELETE_HEALTH_MONITOR_FLOW) delete_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( requires=[constants.HEALTH_MON, constants.LISTENERS, constants.LOADBALANCER])) delete_hm_flow.add(database_tasks.MarkHealthMonitorPendingDeleteInDB( requires=constants.HEALTH_MON)) delete_hm_flow.add(model_tasks. DeleteModelObject(rebind={constants.OBJECT: constants.HEALTH_MON})) delete_hm_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB( requires=constants.HEALTH_MON)) delete_hm_flow.add(database_tasks.DecrementHealthMonitorQuota( requires=constants.HEALTH_MON)) delete_hm_flow.add( database_tasks.UpdatePoolMembersOperatingStatusInDB( requires=constants.POOL, inject={constants.OPERATING_STATUS: constants.NO_MONITOR})) delete_hm_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return delete_hm_flow def get_update_health_monitor_flow(self): """Create a flow to update a health monitor :returns: The flow for updating a health monitor """ update_hm_flow = linear_flow.Flow(constants.UPDATE_HEALTH_MONITOR_FLOW) update_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( requires=[constants.HEALTH_MON, constants.LISTENERS, constants.LOADBALANCER])) update_hm_flow.add(database_tasks.MarkHealthMonitorPendingUpdateInDB( requires=constants.HEALTH_MON)) update_hm_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) update_hm_flow.add(database_tasks.UpdateHealthMonInDB( requires=[constants.HEALTH_MON, constants.UPDATE_DICT])) update_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( requires=constants.HEALTH_MON)) update_hm_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) update_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return update_hm_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 
octavia-6.2.2/octavia/controller/worker/v1/flows/l7policy_flows.py0000664000175000017500000001006700000000000025326 0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.controller.worker.v1.tasks import database_tasks from octavia.controller.worker.v1.tasks import lifecycle_tasks from octavia.controller.worker.v1.tasks import model_tasks class L7PolicyFlows(object): def get_create_l7policy_flow(self): """Create a flow to create an L7 policy :returns: The flow for creating an L7 policy """ create_l7policy_flow = linear_flow.Flow(constants.CREATE_L7POLICY_FLOW) create_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( requires=[constants.L7POLICY, constants.LISTENERS, constants.LOADBALANCER])) create_l7policy_flow.add(database_tasks.MarkL7PolicyPendingCreateInDB( requires=constants.L7POLICY)) create_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) create_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) create_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return create_l7policy_flow def get_delete_l7policy_flow(self): """Create a flow to delete an L7 policy :returns: The flow for deleting an L7 policy """ delete_l7policy_flow = linear_flow.Flow(constants.DELETE_L7POLICY_FLOW) delete_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( requires=[constants.L7POLICY, constants.LISTENERS, constants.LOADBALANCER])) delete_l7policy_flow.add(database_tasks.MarkL7PolicyPendingDeleteInDB( requires=constants.L7POLICY)) delete_l7policy_flow.add(model_tasks.DeleteModelObject( rebind={constants.OBJECT: constants.L7POLICY})) delete_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) delete_l7policy_flow.add(database_tasks.DeleteL7PolicyInDB( requires=constants.L7POLICY)) delete_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return delete_l7policy_flow def get_update_l7policy_flow(self): """Create a flow to update an L7 policy :returns: The flow for updating an L7 policy """ update_l7policy_flow = linear_flow.Flow(constants.UPDATE_L7POLICY_FLOW) update_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( requires=[constants.L7POLICY, constants.LISTENERS, constants.LOADBALANCER])) update_l7policy_flow.add(database_tasks.MarkL7PolicyPendingUpdateInDB( requires=constants.L7POLICY)) update_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) update_l7policy_flow.add(database_tasks.UpdateL7PolicyInDB( requires=[constants.L7POLICY, constants.UPDATE_DICT])) update_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) 
update_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return update_l7policy_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/flows/l7rule_flows.py0000664000175000017500000001042500000000000024774 0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.controller.worker.v1.tasks import database_tasks from octavia.controller.worker.v1.tasks import lifecycle_tasks from octavia.controller.worker.v1.tasks import model_tasks class L7RuleFlows(object): def get_create_l7rule_flow(self): """Create a flow to create an L7 rule :returns: The flow for creating an L7 rule """ create_l7rule_flow = linear_flow.Flow(constants.CREATE_L7RULE_FLOW) create_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( requires=[constants.L7RULE, constants.LISTENERS, constants.LOADBALANCER])) create_l7rule_flow.add(database_tasks.MarkL7RulePendingCreateInDB( requires=constants.L7RULE)) create_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) create_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( requires=constants.L7RULE)) create_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) create_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return create_l7rule_flow def get_delete_l7rule_flow(self): """Create a flow to delete an L7 rule :returns: The flow for deleting an L7 rule """ delete_l7rule_flow = linear_flow.Flow(constants.DELETE_L7RULE_FLOW) delete_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( requires=[constants.L7RULE, constants.LISTENERS, constants.LOADBALANCER])) delete_l7rule_flow.add(database_tasks.MarkL7RulePendingDeleteInDB( requires=constants.L7RULE)) delete_l7rule_flow.add(model_tasks.DeleteModelObject( rebind={constants.OBJECT: constants.L7RULE})) delete_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) delete_l7rule_flow.add(database_tasks.DeleteL7RuleInDB( requires=constants.L7RULE)) delete_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) delete_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return delete_l7rule_flow def get_update_l7rule_flow(self): """Create a flow to update an L7 rule :returns: The flow for updating an L7 rule """ update_l7rule_flow = linear_flow.Flow(constants.UPDATE_L7RULE_FLOW) update_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( requires=[constants.L7RULE, constants.LISTENERS, constants.LOADBALANCER])) update_l7rule_flow.add(database_tasks.MarkL7RulePendingUpdateInDB( 
requires=constants.L7RULE)) update_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) update_l7rule_flow.add(database_tasks.UpdateL7RuleInDB( requires=[constants.L7RULE, constants.UPDATE_DICT])) update_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( requires=constants.L7RULE)) update_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) update_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return update_l7rule_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/flows/listener_flows.py0000664000175000017500000001322500000000000025410 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.controller.worker.v1.tasks import database_tasks from octavia.controller.worker.v1.tasks import lifecycle_tasks from octavia.controller.worker.v1.tasks import network_tasks class ListenerFlows(object): def get_create_listener_flow(self): """Create a flow to create a listener :returns: The flow for creating a listener """ create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW) create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask( requires=[constants.LOADBALANCER, constants.LISTENERS])) create_listener_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) create_listener_flow.add(network_tasks.UpdateVIP( requires=constants.LOADBALANCER)) create_listener_flow.add(database_tasks. 
MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return create_listener_flow def get_create_all_listeners_flow(self): """Create a flow to create all listeners :returns: The flow for creating all listeners """ create_all_listeners_flow = linear_flow.Flow( constants.CREATE_LISTENERS_FLOW) create_all_listeners_flow.add( database_tasks.GetListenersFromLoadbalancer( requires=constants.LOADBALANCER, provides=constants.LISTENERS)) create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer( requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) create_all_listeners_flow.add(network_tasks.UpdateVIP( requires=constants.LOADBALANCER)) return create_all_listeners_flow def get_delete_listener_flow(self): """Create a flow to delete a listener :returns: The flow for deleting a listener """ delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask( requires=constants.LISTENER)) delete_listener_flow.add(amphora_driver_tasks.ListenerDelete( requires=constants.LISTENER)) delete_listener_flow.add(network_tasks.UpdateVIPForDelete( requires=constants.LOADBALANCER)) delete_listener_flow.add(database_tasks.DeleteListenerInDB( requires=constants.LISTENER)) delete_listener_flow.add(database_tasks.DecrementListenerQuota( requires=constants.LISTENER)) delete_listener_flow.add(database_tasks.MarkLBActiveInDB( requires=constants.LOADBALANCER)) return delete_listener_flow def get_delete_listener_internal_flow(self, listener_name): """Create a flow to delete a listener and l7policies internally (will skip deletion on the amp and marking LB active) :returns: The flow for deleting a listener """ delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) # Should cascade delete all L7 policies delete_listener_flow.add(network_tasks.UpdateVIPForDelete( name='delete_update_vip_' + listener_name, requires=constants.LOADBALANCER)) delete_listener_flow.add(database_tasks.DeleteListenerInDB( name='delete_listener_in_db_' + listener_name, requires=constants.LISTENER, rebind={constants.LISTENER: listener_name})) delete_listener_flow.add(database_tasks.DecrementListenerQuota( name='decrement_listener_quota_' + listener_name, requires=constants.LISTENER, rebind={constants.LISTENER: listener_name})) return delete_listener_flow def get_update_listener_flow(self): """Create a flow to update a listener :returns: The flow for updating a listener """ update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW) update_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask( requires=[constants.LOADBALANCER, constants.LISTENERS])) update_listener_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) update_listener_flow.add(network_tasks.UpdateVIP( requires=constants.LOADBALANCER)) update_listener_flow.add(database_tasks.UpdateListenerInDB( requires=[constants.LISTENER, constants.UPDATE_DICT])) update_listener_flow.add(database_tasks. 
MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return update_listener_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/flows/load_balancer_flows.py0000664000175000017500000007476300000000000026347 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from oslo_log import log as logging from taskflow.patterns import linear_flow from taskflow.patterns import unordered_flow from octavia.common import constants from octavia.common import exceptions from octavia.common import utils from octavia.controller.worker.v1.flows import amphora_flows from octavia.controller.worker.v1.flows import listener_flows from octavia.controller.worker.v1.flows import member_flows from octavia.controller.worker.v1.flows import pool_flows from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.controller.worker.v1.tasks import compute_tasks from octavia.controller.worker.v1.tasks import database_tasks from octavia.controller.worker.v1.tasks import lifecycle_tasks from octavia.controller.worker.v1.tasks import network_tasks CONF = cfg.CONF LOG = logging.getLogger(__name__) class LoadBalancerFlows(object): def __init__(self): self.amp_flows = amphora_flows.AmphoraFlows() self.listener_flows = listener_flows.ListenerFlows() self.pool_flows = pool_flows.PoolFlows() self.member_flows = member_flows.MemberFlows() def get_create_load_balancer_flow(self, topology, listeners=None): """Creates a conditional graph flow that allocates a loadbalancer to two spare amphorae. :raises InvalidTopology: Invalid topology specified :return: The graph flow for creating a loadbalancer. """ f_name = constants.CREATE_LOADBALANCER_FLOW lb_create_flow = linear_flow.Flow(f_name) lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask( requires=constants.LOADBALANCER_ID)) # allocate VIP lb_create_flow.add(database_tasks.ReloadLoadBalancer( name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER )) lb_create_flow.add(network_tasks.AllocateVIP( requires=constants.LOADBALANCER, provides=constants.VIP)) lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation( requires=(constants.LOADBALANCER_ID, constants.VIP), provides=constants.LOADBALANCER)) lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup( requires=constants.LOADBALANCER_ID)) lb_create_flow.add(network_tasks.GetSubnetFromVIP( requires=constants.LOADBALANCER, provides=constants.SUBNET)) if topology == constants.TOPOLOGY_ACTIVE_STANDBY: lb_create_flow.add(*self._create_active_standby_topology()) elif topology == constants.TOPOLOGY_SINGLE: lb_create_flow.add(*self._create_single_topology()) else: LOG.error("Unknown topology: %s. 
Unable to build load balancer.", topology) raise exceptions.InvalidTopology(topology=topology) post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW lb_create_flow.add( self.get_post_lb_amp_association_flow( post_amp_prefix, topology, mark_active=(not listeners))) if listeners: lb_create_flow.add(*self._create_listeners_flow()) return lb_create_flow def _create_single_topology(self): sf_name = (constants.ROLE_STANDALONE + '-' + constants.AMP_PLUG_NET_SUBFLOW) amp_for_lb_net_flow = linear_flow.Flow(sf_name) amp_for_lb_flow = self.amp_flows.get_amphora_for_lb_subflow( prefix=constants.ROLE_STANDALONE, role=constants.ROLE_STANDALONE) amp_for_lb_net_flow.add(amp_for_lb_flow) amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name)) return amp_for_lb_net_flow def _create_active_standby_topology( self, lf_name=constants.CREATE_LOADBALANCER_FLOW): # When we boot up amphora for an active/standby topology, # we should leverage the Nova anti-affinity capabilities # to place the amphora on different hosts, also we need to check # if anti-affinity-flag is enabled or not: anti_affinity = CONF.nova.enable_anti_affinity flows = [] if anti_affinity: # we need to create a server group first flows.append( compute_tasks.NovaServerGroupCreate( name=lf_name + '-' + constants.CREATE_SERVER_GROUP_FLOW, requires=(constants.LOADBALANCER_ID), provides=constants.SERVER_GROUP_ID)) # update server group id in lb table flows.append( database_tasks.UpdateLBServerGroupInDB( name=lf_name + '-' + constants.UPDATE_LB_SERVERGROUPID_FLOW, requires=(constants.LOADBALANCER_ID, constants.SERVER_GROUP_ID))) f_name = constants.CREATE_LOADBALANCER_FLOW amps_flow = unordered_flow.Flow(f_name) master_sf_name = (constants.ROLE_MASTER + '-' + constants.AMP_PLUG_NET_SUBFLOW) master_amp_sf = linear_flow.Flow(master_sf_name) master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER)) master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name)) backup_sf_name = (constants.ROLE_BACKUP + '-' + constants.AMP_PLUG_NET_SUBFLOW) backup_amp_sf = linear_flow.Flow(backup_sf_name) backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)) backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name)) amps_flow.add(master_amp_sf, backup_amp_sf) return flows + [amps_flow] def _get_amp_net_subflow(self, sf_name): flows = [] flows.append(network_tasks.PlugVIPAmpphora( name=sf_name + '-' + constants.PLUG_VIP_AMPHORA, requires=(constants.LOADBALANCER, constants.AMPHORA, constants.SUBNET), provides=constants.AMP_DATA)) flows.append(network_tasks.ApplyQosAmphora( name=sf_name + '-' + constants.APPLY_QOS_AMP, requires=(constants.LOADBALANCER, constants.AMP_DATA, constants.UPDATE_DICT))) flows.append(database_tasks.UpdateAmphoraVIPData( name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA, requires=constants.AMP_DATA)) flows.append(database_tasks.ReloadAmphora( name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP, requires=constants.AMPHORA_ID, provides=constants.AMPHORA)) flows.append(database_tasks.ReloadLoadBalancer( name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) flows.append(network_tasks.GetAmphoraNetworkConfigs( name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, requires=(constants.LOADBALANCER, constants.AMPHORA), provides=constants.AMPHORA_NETWORK_CONFIG)) flows.append(amphora_driver_tasks.AmphoraPostVIPPlug( name=sf_name + '-' + 
constants.AMP_POST_VIP_PLUG, rebind={constants.AMPHORAE_NETWORK_CONFIG: constants.AMPHORA_NETWORK_CONFIG}, requires=(constants.LOADBALANCER, constants.AMPHORAE_NETWORK_CONFIG))) return flows def _create_listeners_flow(self): flows = [] flows.append( database_tasks.ReloadLoadBalancer( name=constants.RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER ) ) flows.append( network_tasks.CalculateDelta( requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), provides=constants.DELTAS ) ) flows.append( network_tasks.HandleNetworkDeltas( requires=constants.DELTAS, provides=constants.ADDED_PORTS ) ) flows.append( amphora_driver_tasks.AmphoraePostNetworkPlug( requires=(constants.LOADBALANCER, constants.ADDED_PORTS) ) ) flows.append( self.listener_flows.get_create_all_listeners_flow() ) flows.append( database_tasks.MarkLBActiveInDB( mark_subobjects=True, requires=constants.LOADBALANCER ) ) return flows def get_post_lb_amp_association_flow(self, prefix, topology, mark_active=True): """Reload the loadbalancer and create networking subflows for created/allocated amphorae. :return: Post amphorae association subflow """ sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW post_create_LB_flow = linear_flow.Flow(sf_name) post_create_LB_flow.add( database_tasks.ReloadLoadBalancer( name=sf_name + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) if topology == constants.TOPOLOGY_ACTIVE_STANDBY: post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE)) vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix) post_create_LB_flow.add(vrrp_subflow) post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) if mark_active: post_create_LB_flow.add(database_tasks.MarkLBActiveInDB( name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB, requires=constants.LOADBALANCER)) return post_create_LB_flow def _get_delete_listeners_flow(self, lb): """Sets up an internal delete flow Because task flow doesn't support loops we store each listener we want to delete in the store part and then rebind :param lb: load balancer :return: (flow, store) -- flow for the deletion and store with all the listeners stored properly """ listeners_delete_flow = unordered_flow.Flow('listener_delete_flow') store = {} for listener in lb.listeners: listener_name = 'listener_' + listener.id store[listener_name] = listener listeners_delete_flow.add( self.listener_flows.get_delete_listener_internal_flow( listener_name)) return (listeners_delete_flow, store) def get_delete_load_balancer_flow(self, lb): """Creates a flow to delete a load balancer. 
:returns: The flow for deleting a load balancer """ return self._get_delete_load_balancer_flow(lb, False) def _get_delete_pools_flow(self, lb): """Sets up an internal delete flow Because task flow doesn't support loops we store each pool we want to delete in the store part and then rebind :param lb: load balancer :return: (flow, store) -- flow for the deletion and store with all the pools stored properly """ pools_delete_flow = unordered_flow.Flow('pool_delete_flow') store = {} for pool in lb.pools: pool_name = 'pool' + pool.id store[pool_name] = pool pools_delete_flow.add( self.pool_flows.get_delete_pool_flow_internal( pool_name)) return (pools_delete_flow, store) def _get_delete_load_balancer_flow(self, lb, cascade): store = {} delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW) delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( requires=constants.LOADBALANCER)) delete_LB_flow.add(compute_tasks.NovaServerGroupDelete( requires=constants.SERVER_GROUP_ID)) delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy( requires=constants.LOADBALANCER)) if cascade: (listeners_delete, store) = self._get_delete_listeners_flow(lb) (pools_delete, pool_store) = self._get_delete_pools_flow(lb) store.update(pool_store) delete_LB_flow.add(pools_delete) delete_LB_flow.add(listeners_delete) delete_LB_flow.add(network_tasks.UnplugVIP( requires=constants.LOADBALANCER)) delete_LB_flow.add(network_tasks.DeallocateVIP( requires=constants.LOADBALANCER)) delete_LB_flow.add(compute_tasks.DeleteAmphoraeOnLoadBalancer( requires=constants.LOADBALANCER)) delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB( requires=constants.LOADBALANCER)) delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring( requires=constants.LOADBALANCER)) delete_LB_flow.add(database_tasks.MarkLBDeletedInDB( requires=constants.LOADBALANCER)) delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota( requires=constants.LOADBALANCER)) return (delete_LB_flow, store) def get_cascade_delete_load_balancer_flow(self, lb): """Creates a flow to delete a load balancer. :returns: The flow for deleting a load balancer """ return self._get_delete_load_balancer_flow(lb, True) def get_update_load_balancer_flow(self): """Creates a flow to update a load balancer. :returns: The flow for updating a load balancer """ update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW) update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( requires=constants.LOADBALANCER)) update_LB_flow.add(network_tasks.ApplyQos( requires=(constants.LOADBALANCER, constants.UPDATE_DICT))) update_LB_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) update_LB_flow.add(database_tasks.MarkLBActiveInDB( requires=constants.LOADBALANCER)) return update_LB_flow def get_failover_LB_flow(self, amps, lb): """Failover a load balancer. 1. Validate the VIP port is correct and present. 2. Build a replacement amphora. 3. Delete the failed amphora. 4. Configure the replacement amphora listeners. 5. Configure VRRP for the listeners. 6. Build the second replacement amphora. 7. Delete the second failed amphora. 8. Delete any extraneous amphora. 9. Configure the listeners on the new amphorae. 10. Configure the VRRP on the new amphorae. 11. Reload the listener configurations to pick up VRRP changes. 12. Mark the load balancer back to ACTIVE.
:returns: The flow that will provide the failover. """ # Pick one amphora to be failed over if any exist. failed_amp = None if amps: failed_amp = amps.pop() failover_LB_flow = linear_flow.Flow( constants.FAILOVER_LOADBALANCER_FLOW) # Revert LB to provisioning_status ERROR if this flow goes wrong failover_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( requires=constants.LOADBALANCER)) # Setup timeouts for our requests to the amphorae timeout_dict = { constants.CONN_MAX_RETRIES: CONF.haproxy_amphora.active_connection_max_retries, constants.CONN_RETRY_INTERVAL: CONF.haproxy_amphora.active_connection_rety_interval} if failed_amp: if failed_amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP): amp_role = 'master_or_backup' elif failed_amp.role == constants.ROLE_STANDALONE: amp_role = 'standalone' elif failed_amp.role is None: amp_role = 'spare' else: amp_role = 'undefined' LOG.info("Performing failover for amphora: %s", {"id": failed_amp.id, "load_balancer_id": lb.id, "lb_network_ip": failed_amp.lb_network_ip, "compute_id": failed_amp.compute_id, "role": amp_role}) failover_LB_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) # Check that the VIP port exists and is ok failover_LB_flow.add( network_tasks.AllocateVIPforFailover( requires=constants.LOADBALANCER, provides=constants.VIP)) # Update the database with the VIP information failover_LB_flow.add(database_tasks.UpdateVIPAfterAllocation( requires=(constants.LOADBALANCER_ID, constants.VIP), provides=constants.LOADBALANCER)) # Make sure the SG has the correct rules and re-apply to the # VIP port. It is not used on the VIP port, but will help lock # the SG as in use. failover_LB_flow.add(network_tasks.UpdateVIPSecurityGroup( requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID)) new_amp_role = constants.ROLE_STANDALONE if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: new_amp_role = constants.ROLE_BACKUP # Get a replacement amphora and plug all of the networking. # # Do this early as the compute services have been observed to be # unreliable. The community decided the chance that deleting first # would open resources for an instance is less likely than the compute # service failing to boot an instance for other reasons. 
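# As an aside before the replacement amphora is built below: the
# role-classification chain above also appears in
# get_failover_amphora_flow, and a tiny helper like this sketch would
# express the mapping once. It is hypothetical (not part of Octavia),
# and inlines string literals where the real code compares against
# octavia.common.constants values.
def _amp_role_label(role):
    """Map an amphora role to the label used in failover log messages."""
    if role in ('MASTER', 'BACKUP'):
        return 'master_or_backup'
    if role == 'STANDALONE':
        return 'standalone'
    if role is None:
        return 'spare'
    return 'undefined'


assert _amp_role_label(None) == 'spare'
assert _amp_role_label('MASTER') == 'master_or_backup'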
if failed_amp: failed_vrrp_is_ipv6 = False if failed_amp.vrrp_ip: failed_vrrp_is_ipv6 = utils.is_ipv6(failed_amp.vrrp_ip) failover_LB_flow.add( self.amp_flows.get_amphora_for_lb_failover_subflow( prefix=constants.FAILOVER_LOADBALANCER_FLOW, role=new_amp_role, failed_amp_vrrp_port_id=failed_amp.vrrp_port_id, is_vrrp_ipv6=failed_vrrp_is_ipv6)) else: failover_LB_flow.add( self.amp_flows.get_amphora_for_lb_failover_subflow( prefix=constants.FAILOVER_LOADBALANCER_FLOW, role=new_amp_role)) if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB( name=constants.MARK_AMP_BACKUP_INDB, requires=constants.AMPHORA)) # Delete the failed amp if failed_amp: failover_LB_flow.add( self.amp_flows.get_delete_amphora_flow(failed_amp)) # Update the data stored in the flow from the database failover_LB_flow.add(database_tasks.ReloadLoadBalancer( requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) # Configure the listener(s) # We will run update on this amphora again later if this is # an active/standby load balancer because we want this amp # functional as soon as possible. It must run again to update # the configurations for the new peers. failover_LB_flow.add(amphora_driver_tasks.AmpListenersUpdate( name=constants.AMP_LISTENER_UPDATE, requires=(constants.LOADBALANCER, constants.AMPHORA), inject={constants.TIMEOUT_DICT: timeout_dict})) # Bring up the new "backup" amphora VIP now to reduce the outage # on the final failover. This dropped the outage from 8-9 seconds # to less than one in my lab. # This does mean some steps have to be repeated later to reconfigure # for the second amphora as a peer. if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: failover_LB_flow.add(database_tasks.CreateVRRPGroupForLB( name=new_amp_role + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, requires=constants.LOADBALANCER_ID)) failover_LB_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( name=(new_amp_role + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID), requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), provides=constants.FIRST_AMP_NETWORK_CONFIGS)) failover_LB_flow.add( amphora_driver_tasks.AmphoraUpdateVRRPInterface( name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF, requires=constants.AMPHORA, inject={constants.TIMEOUT_DICT: timeout_dict}, provides=constants.FIRST_AMP_VRRP_INTERFACE)) failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPUpdate( name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORA), rebind={constants.AMPHORAE_NETWORK_CONFIG: constants.FIRST_AMP_NETWORK_CONFIGS, constants.AMP_VRRP_INT: constants.FIRST_AMP_VRRP_INTERFACE}, inject={constants.TIMEOUT_DICT: timeout_dict})) failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPStart( name=new_amp_role + '-' + constants.AMP_VRRP_START, requires=constants.AMPHORA, inject={constants.TIMEOUT_DICT: timeout_dict})) # Start the listener. This needs to be done here because # it will create the required haproxy check scripts for # the VRRP deployed above. # A "V" or newer amphora-agent will remove the need for this # task here. 
# TODO(johnsom) Remove this in the "X" cycle failover_LB_flow.add(amphora_driver_tasks.ListenersStart( name=new_amp_role + '-' + constants.AMP_LISTENER_START, requires=(constants.LOADBALANCER, constants.AMPHORA))) # #### Work on standby amphora if needed ##### new_amp_role = constants.ROLE_MASTER failed_amp = None if amps: failed_amp = amps.pop() if failed_amp: if failed_amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP): amp_role = 'master_or_backup' elif failed_amp.role == constants.ROLE_STANDALONE: amp_role = 'standalone' elif failed_amp.role is None: amp_role = 'spare' else: amp_role = 'undefined' LOG.info("Performing failover for amphora: %s", {"id": failed_amp.id, "load_balancer_id": lb.id, "lb_network_ip": failed_amp.lb_network_ip, "compute_id": failed_amp.compute_id, "role": amp_role}) failover_LB_flow.add( database_tasks.MarkAmphoraPendingDeleteInDB( name=(new_amp_role + '-' + constants.MARK_AMPHORA_PENDING_DELETE), requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( name=(new_amp_role + '-' + constants.MARK_AMPHORA_HEALTH_BUSY), requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) # Get a replacement amphora and plug all of the networking. # # Do this early as the compute services have been observed to be # unreliable. The community decided the chance that deleting first # would open resources for an instance is less likely than the # compute service failing to boot an instance for other reasons. failover_LB_flow.add( self.amp_flows.get_amphora_for_lb_failover_subflow( prefix=(new_amp_role + '-' + constants.FAILOVER_LOADBALANCER_FLOW), role=new_amp_role)) failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB( name=constants.MARK_AMP_MASTER_INDB, requires=constants.AMPHORA)) # Delete the failed amp if failed_amp: failover_LB_flow.add( self.amp_flows.get_delete_amphora_flow( failed_amp)) failover_LB_flow.add( database_tasks.DisableAmphoraHealthMonitoring( name=(new_amp_role + '-' + constants.DISABLE_AMP_HEALTH_MONITORING), requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) # Remove any extraneous amphora # Note: This runs in all topology situations. # It should run before the act/stdby final listener update so # that we don't bother attempting to update dead amphorae. delete_extra_amps_flow = unordered_flow.Flow( constants.DELETE_EXTRA_AMPHORAE_FLOW) for amp in amps: LOG.debug('Found extraneous amphora %s on load balancer %s. ' 'Deleting.', amp.id, lb.id) delete_extra_amps_flow.add( self.amp_flows.get_delete_amphora_flow(amp)) failover_LB_flow.add(delete_extra_amps_flow) if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: # Update the data stored in the flow from the database failover_LB_flow.add(database_tasks.ReloadLoadBalancer( name=new_amp_role + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) failover_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB, requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE)) # Listeners update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. update_amps_subflow = unordered_flow.Flow( constants.UPDATE_AMPS_SUBFLOW) # Setup parallel flows for each amp. 
We don't know the new amp # details at flow creation time, so setup a subflow for each # amp on the LB, they let the task index into a list of amps # to find the amphora it should work on. update_amps_subflow.add( amphora_driver_tasks.AmphoraIndexListenerUpdate( name=(constants.AMPHORA + '-0-' + constants.AMP_LISTENER_UPDATE), requires=(constants.LOADBALANCER, constants.AMPHORAE), inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) update_amps_subflow.add( amphora_driver_tasks.AmphoraIndexListenerUpdate( name=(constants.AMPHORA + '-1-' + constants.AMP_LISTENER_UPDATE), requires=(constants.LOADBALANCER, constants.AMPHORAE), inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) failover_LB_flow.add(update_amps_subflow) # Configure and enable keepalived in the amphora failover_LB_flow.add(self.amp_flows.get_vrrp_subflow( new_amp_role + '-' + constants.GET_VRRP_SUBFLOW, timeout_dict, create_vrrp_group=False)) # #### End of standby #### # Reload the listener. This needs to be done here because # it will create the required haproxy check scripts for # the VRRP deployed above. # A "V" or newer amphora-agent will remove the need for this # task here. # TODO(johnsom) Remove this in the "X" cycle failover_LB_flow.add( amphora_driver_tasks.AmphoraIndexListenersReload( name=(new_amp_role + '-' + constants.AMPHORA_RELOAD_LISTENER), requires=(constants.LOADBALANCER, constants.AMPHORAE), inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) # Remove any extraneous ports # Note: Nova sometimes fails to delete ports attached to an instance. # For example, if you create an LB with a listener, then # 'openstack server delete' the amphora, you will see the vrrp # port attached to that instance will remain after the instance # is deleted. # TODO(johnsom) Fix this as part of # https://storyboard.openstack.org/#!/story/2007077 # Mark LB ACTIVE failover_LB_flow.add( database_tasks.MarkLBActiveInDB(mark_subobjects=True, requires=constants.LOADBALANCER)) return failover_LB_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/flows/member_flows.py0000664000175000017500000002304200000000000025030 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
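# The failover flow above fans listener updates out with an unordered_flow,
# giving each branch a fixed AMPHORA_INDEX via 'inject' so it can pick its
# amphora out of the shared AMPHORAE list at runtime (the new amphora
# objects do not exist yet when the flow graph is built). A minimal sketch
# of that index-based fan-out, assuming taskflow is available (hypothetical
# names, not Octavia code):
def _demo_indexed_fanout():
    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import unordered_flow

    class IndexListenerUpdate(task.Task):
        def execute(self, amphorae, amphora_index):
            # Each parallel branch indexes into the shared list.
            return 'updated %s' % amphorae[amphora_index]

    fanout = unordered_flow.Flow('update-amps-subflow')
    for i in (0, 1):
        # Task names must be unique within the flow, hence the index suffix.
        fanout.add(IndexListenerUpdate(
            name='amphora-%d-listener-update' % i,
            inject={'amphora_index': i},
            provides='update-result-%d' % i))
    return engines.run(fanout, store={'amphorae': ['amp-a', 'amp-b']})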
# from taskflow.patterns import linear_flow from taskflow.patterns import unordered_flow from octavia.common import constants from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.controller.worker.v1.tasks import database_tasks from octavia.controller.worker.v1.tasks import lifecycle_tasks from octavia.controller.worker.v1.tasks import model_tasks from octavia.controller.worker.v1.tasks import network_tasks class MemberFlows(object): def get_create_member_flow(self): """Create a flow to create a member :returns: The flow for creating a member """ create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW) create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB( requires=constants.MEMBER)) create_member_flow.add(network_tasks.CalculateDelta( requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), provides=constants.DELTAS)) create_member_flow.add(network_tasks.HandleNetworkDeltas( requires=constants.DELTAS, provides=constants.ADDED_PORTS)) create_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( requires=(constants.LOADBALANCER, constants.ADDED_PORTS) )) create_member_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) create_member_flow.add(database_tasks.MarkMemberActiveInDB( requires=constants.MEMBER)) create_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) create_member_flow.add(database_tasks. MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER, constants.LISTENERS))) return create_member_flow def get_delete_member_flow(self): """Create a flow to delete a member :returns: The flow for deleting a member """ delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW) delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB( requires=constants.MEMBER)) delete_member_flow.add(model_tasks. DeleteModelObject(rebind={constants.OBJECT: constants.MEMBER})) delete_member_flow.add(database_tasks.DeleteMemberInDB( requires=constants.MEMBER)) delete_member_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) delete_member_flow.add(database_tasks.DecrementMemberQuota( requires=constants.MEMBER)) delete_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) delete_member_flow.add(database_tasks. 
MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return delete_member_flow def get_update_member_flow(self): """Create a flow to update a member :returns: The flow for updating a member """ update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW) update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB( requires=constants.MEMBER)) update_member_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) update_member_flow.add(database_tasks.UpdateMemberInDB( requires=[constants.MEMBER, constants.UPDATE_DICT])) update_member_flow.add(database_tasks.MarkMemberActiveInDB( requires=constants.MEMBER)) update_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) update_member_flow.add(database_tasks. MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return update_member_flow def get_batch_update_members_flow(self, old_members, new_members, updated_members): """Create a flow to batch update members :returns: The flow for batch updating members """ batch_update_members_flow = linear_flow.Flow( constants.BATCH_UPDATE_MEMBERS_FLOW) unordered_members_flow = unordered_flow.Flow( constants.UNORDERED_MEMBER_UPDATES_FLOW) unordered_members_active_flow = unordered_flow.Flow( constants.UNORDERED_MEMBER_ACTIVE_FLOW) # Delete old members unordered_members_flow.add( lifecycle_tasks.MembersToErrorOnRevertTask( inject={constants.MEMBERS: old_members}, name='{flow}-deleted'.format( flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) for m in old_members: unordered_members_flow.add( model_tasks.DeleteModelObject( inject={constants.OBJECT: m}, name='{flow}-{id}'.format( id=m.id, flow=constants.DELETE_MODEL_OBJECT_FLOW))) unordered_members_flow.add(database_tasks.DeleteMemberInDB( inject={constants.MEMBER: m}, name='{flow}-{id}'.format( id=m.id, flow=constants.DELETE_MEMBER_INDB))) unordered_members_flow.add(database_tasks.DecrementMemberQuota( inject={constants.MEMBER: m}, name='{flow}-{id}'.format( id=m.id, flow=constants.DECREMENT_MEMBER_QUOTA_FLOW))) # Create new members unordered_members_flow.add( lifecycle_tasks.MembersToErrorOnRevertTask( inject={constants.MEMBERS: new_members}, name='{flow}-created'.format( flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) for m in new_members: unordered_members_active_flow.add( database_tasks.MarkMemberActiveInDB( inject={constants.MEMBER: m}, name='{flow}-{id}'.format( id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB))) # Update existing members unordered_members_flow.add( lifecycle_tasks.MembersToErrorOnRevertTask( # updated_members is a list of (obj, dict), only pass `obj` inject={constants.MEMBERS: [m[0] for m in updated_members]}, name='{flow}-updated'.format( flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) for m, um in updated_members: um.pop('id', None) unordered_members_active_flow.add( database_tasks.MarkMemberActiveInDB( inject={constants.MEMBER: m}, name='{flow}-{id}'.format( id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB))) batch_update_members_flow.add(unordered_members_flow) # Done, do real updates batch_update_members_flow.add(network_tasks.CalculateDelta( requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), provides=constants.DELTAS)) batch_update_members_flow.add(network_tasks.HandleNetworkDeltas( requires=constants.DELTAS, provides=constants.ADDED_PORTS)) 
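# Every flow in this module front-loads a *ToErrorOnRevertTask. Below is an
# illustrative sketch (not Octavia code) of the taskflow revert contract
# those tasks rely on: when a later task in a linear flow raises, the engine
# walks back through the completed tasks calling revert(), which is where
# these flows flip provisioning_status to ERROR. Assumes only taskflow.
def _demo_revert_on_failure():
    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    status = {'member': 'PENDING_UPDATE'}

    class MarkPending(task.Task):
        def execute(self):
            status['member'] = 'PENDING_UPDATE'

        def revert(self, *args, **kwargs):
            # Called only when a later task in the flow fails.
            status['member'] = 'ERROR'

    class FailingDriverCall(task.Task):
        def execute(self):
            raise RuntimeError('driver failed')

    flow = linear_flow.Flow('revert-demo')
    flow.add(MarkPending(), FailingDriverCall())
    try:
        engines.run(flow)
    except RuntimeError:
        pass
    return status['member']  # 'ERROR'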
batch_update_members_flow.add( amphora_driver_tasks.AmphoraePostNetworkPlug( requires=(constants.LOADBALANCER, constants.ADDED_PORTS))) # Update the Listener (this makes the changes active on the Amp) batch_update_members_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) # Mark all the members ACTIVE here, then pool then LB/Listeners batch_update_members_flow.add(unordered_members_active_flow) batch_update_members_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) batch_update_members_flow.add( database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER, constants.LISTENERS))) return batch_update_members_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/flows/pool_flows.py0000664000175000017500000001271300000000000024535 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.controller.worker.v1.tasks import database_tasks from octavia.controller.worker.v1.tasks import lifecycle_tasks from octavia.controller.worker.v1.tasks import model_tasks class PoolFlows(object): def get_create_pool_flow(self): """Create a flow to create a pool :returns: The flow for creating a pool """ create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW) create_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( requires=[constants.POOL, constants.LISTENERS, constants.LOADBALANCER])) create_pool_flow.add(database_tasks.MarkPoolPendingCreateInDB( requires=constants.POOL)) create_pool_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) create_pool_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return create_pool_flow def get_delete_pool_flow(self): """Create a flow to delete a pool :returns: The flow for deleting a pool """ delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) delete_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( requires=[constants.POOL, constants.LISTENERS, constants.LOADBALANCER])) delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( requires=constants.POOL)) delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( requires=constants.POOL, provides=constants.POOL_CHILD_COUNT)) delete_pool_flow.add(model_tasks.DeleteModelObject( rebind={constants.OBJECT: constants.POOL})) delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) delete_pool_flow.add(database_tasks.DeletePoolInDB( requires=constants.POOL)) delete_pool_flow.add(database_tasks.DecrementPoolQuota( requires=[constants.POOL, constants.POOL_CHILD_COUNT])) 
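# The get_delete_pool_flow_internal() method just below reuses the same
# database tasks against pools stored under caller-chosen keys by remapping
# argument names with 'rebind'. A minimal sketch of rebind, assuming
# taskflow is available (hypothetical names, not Octavia code):
def _demo_rebind():
    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class DeletePool(task.Task):
        def execute(self, pool):
            return 'deleted %s' % pool

    flow = linear_flow.Flow('rebind-demo')
    # The task's 'pool' argument is fetched from the 'redirect_pool' key
    # in flow storage instead of from a key named 'pool'.
    flow.add(DeletePool(rebind={'pool': 'redirect_pool'}, provides='result'))
    return engines.run(flow, store={'redirect_pool': 'pool-1'})['result']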
delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return delete_pool_flow def get_delete_pool_flow_internal(self, name): """Create a flow to delete a pool, etc. :returns: The flow for deleting a pool """ delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) # health monitor should cascade # members should cascade delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( name='mark_pool_pending_delete_in_db_' + name, requires=constants.POOL, rebind={constants.POOL: name})) delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( name='count_pool_children_for_quota_' + name, requires=constants.POOL, provides=constants.POOL_CHILD_COUNT, rebind={constants.POOL: name})) delete_pool_flow.add(model_tasks.DeleteModelObject( name='delete_model_object_' + name, rebind={constants.OBJECT: name})) delete_pool_flow.add(database_tasks.DeletePoolInDB( name='delete_pool_in_db_' + name, requires=constants.POOL, rebind={constants.POOL: name})) delete_pool_flow.add(database_tasks.DecrementPoolQuota( name='decrement_pool_quota_' + name, requires=[constants.POOL, constants.POOL_CHILD_COUNT], rebind={constants.POOL: name})) return delete_pool_flow def get_update_pool_flow(self): """Create a flow to update a pool :returns: The flow for updating a pool """ update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW) update_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( requires=[constants.POOL, constants.LISTENERS, constants.LOADBALANCER])) update_pool_flow.add(database_tasks.MarkPoolPendingUpdateInDB( requires=constants.POOL)) update_pool_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER)) update_pool_flow.add(database_tasks.UpdatePoolInDB( requires=[constants.POOL, constants.UPDATE_DICT])) update_pool_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) return update_pool_flow ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/worker/v1/tasks/0000775000175000017500000000000000000000000021767 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/tasks/__init__.py0000664000175000017500000000107400000000000024102 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py0000664000175000017500000004577500000000000026572 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cryptography import fernet from oslo_config import cfg from oslo_log import log as logging from stevedore import driver as stevedore_driver from taskflow import task from taskflow.types import failure from octavia.amphorae.backends.agent import agent_jinja_cfg from octavia.amphorae.driver_exceptions import exceptions as driver_except from octavia.common import constants from octavia.common import utils from octavia.controller.worker import task_utils as task_utilities from octavia.db import api as db_apis from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseAmphoraTask(task.Task): """Base task to load drivers common to the tasks.""" def __init__(self, **kwargs): super(BaseAmphoraTask, self).__init__(**kwargs) self.amphora_driver = stevedore_driver.DriverManager( namespace='octavia.amphora.drivers', name=CONF.controller_worker.amphora_driver, invoke_on_load=True ).driver self.amphora_repo = repo.AmphoraRepository() self.listener_repo = repo.ListenerRepository() self.loadbalancer_repo = repo.LoadBalancerRepository() self.task_utils = task_utilities.TaskUtils() class AmpListenersUpdate(BaseAmphoraTask): """Task to update the listeners on one amphora.""" def execute(self, loadbalancer, amphora, timeout_dict=None): # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. try: # Make sure we have a fresh load balancer object loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer.id) self.amphora_driver.update_amphora_listeners( loadbalancer, amphora, timeout_dict) except Exception as e: LOG.error('Failed to update listeners on amphora %s. Skipping ' 'this amphora as it is failing to update due to: %s', amphora.id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora.id, status=constants.ERROR) class AmphoraIndexListenerUpdate(BaseAmphoraTask): """Task to update the listeners on one amphora.""" def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=None): # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. try: # Make sure we have a fresh load balancer object loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer.id) self.amphora_driver.update_amphora_listeners( loadbalancer, amphorae[amphora_index], timeout_dict) except Exception as e: amphora_id = amphorae[amphora_index].id LOG.error('Failed to update listeners on amphora %s. 
Skipping ' 'this amphora as it is failing to update due to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) class ListenersUpdate(BaseAmphoraTask): """Task to update amphora with all specified listeners' configurations.""" def execute(self, loadbalancer): """Execute updates per listener for an amphora.""" self.amphora_driver.update(loadbalancer) def revert(self, loadbalancer, *args, **kwargs): """Handle failed listeners updates.""" LOG.warning("Reverting listeners updates.") for listener in loadbalancer.listeners: self.task_utils.mark_listener_prov_status_error(listener.id) class ListenersStart(BaseAmphoraTask): """Task to start all listeners on the vip.""" def execute(self, loadbalancer, amphora=None): """Execute listener start routines for listeners on an amphora.""" if loadbalancer.listeners: self.amphora_driver.start(loadbalancer, amphora) LOG.debug("Started the listeners on the vip") def revert(self, loadbalancer, *args, **kwargs): """Handle failed listeners starts.""" LOG.warning("Reverting listeners starts.") for listener in loadbalancer.listeners: self.task_utils.mark_listener_prov_status_error(listener.id) class AmphoraIndexListenersReload(BaseAmphoraTask): """Task to reload all listeners on an amphora.""" def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=None): """Execute listener reload routines for listeners on an amphora.""" if loadbalancer.listeners: try: self.amphora_driver.reload( loadbalancer, amphorae[amphora_index], timeout_dict) except Exception as e: amphora_id = amphorae[amphora_index].id LOG.warning('Failed to reload listeners on amphora %s. ' 'Skipping this amphora as it is failing to ' 'reload due to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) class ListenerDelete(BaseAmphoraTask): """Task to delete the listener on the vip.""" def execute(self, listener): """Execute listener delete routines for an amphora.""" # TODO(rm_work): This is only relevant because of UDP listeners now. 
self.amphora_driver.delete(listener) LOG.debug("Deleted the listener on the vip") def revert(self, listener, *args, **kwargs): """Handle a failed listener delete.""" LOG.warning("Reverting listener delete.") self.task_utils.mark_listener_prov_status_error(listener.id) class AmphoraGetInfo(BaseAmphoraTask): """Task to get information on an amphora.""" def execute(self, amphora): """Execute get_info routine for an amphora.""" self.amphora_driver.get_info(amphora) class AmphoraGetDiagnostics(BaseAmphoraTask): """Task to get diagnostics on the amphora and the loadbalancers.""" def execute(self, amphora): """Execute get_diagnostic routine for an amphora.""" self.amphora_driver.get_diagnostics(amphora) class AmphoraFinalize(BaseAmphoraTask): """Task to finalize the amphora before any listeners are configured.""" def execute(self, amphora): """Execute finalize_amphora routine.""" self.amphora_driver.finalize_amphora(amphora) LOG.debug("Finalized the amphora.") def revert(self, result, amphora, *args, **kwargs): """Handle a failed amphora finalize.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting amphora finalize.") self.task_utils.mark_amphora_status_error(amphora.id) class AmphoraPostNetworkPlug(BaseAmphoraTask): """Task to notify the amphora post network plug.""" def execute(self, amphora, ports): """Execute post_network_plug routine.""" for port in ports: self.amphora_driver.post_network_plug(amphora, port) LOG.debug("post_network_plug called on compute instance " "%(compute_id)s for port %(port_id)s", {"compute_id": amphora.compute_id, "port_id": port.id}) def revert(self, result, amphora, *args, **kwargs): """Handle a failed post network plug.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting post network plug.") self.task_utils.mark_amphora_status_error(amphora.id) class AmphoraePostNetworkPlug(BaseAmphoraTask): """Task to notify the amphorae post network plug.""" def execute(self, loadbalancer, added_ports): """Execute post_network_plug routine.""" amp_post_plug = AmphoraPostNetworkPlug() # We need to make sure we have the fresh list of amphora amphorae = self.amphora_repo.get_all( db_apis.get_session(), load_balancer_id=loadbalancer.id, status=constants.AMPHORA_ALLOCATED)[0] for amphora in amphorae: if amphora.id in added_ports: amp_post_plug.execute(amphora, added_ports[amphora.id]) def revert(self, result, loadbalancer, added_ports, *args, **kwargs): """Handle a failed post network plug.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting post network plug.") amphorae = self.amphora_repo.get_all( db_apis.get_session(), load_balancer_id=loadbalancer.id, status=constants.AMPHORA_ALLOCATED)[0] for amphora in amphorae: self.task_utils.mark_amphora_status_error(amphora.id) class AmphoraPostVIPPlug(BaseAmphoraTask): """Task to notify the amphora post VIP plug.""" def execute(self, amphora, loadbalancer, amphorae_network_config): """Execute post_vip_routine.""" self.amphora_driver.post_vip_plug( amphora, loadbalancer, amphorae_network_config) LOG.debug("Notified amphora of vip plug") def revert(self, result, amphora, loadbalancer, *args, **kwargs): """Handle a failed amphora vip plug notification.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting post vip plug.") self.task_utils.mark_amphora_status_error(amphora.id) self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) class AmphoraePostVIPPlug(BaseAmphoraTask): """Task to notify the amphorae post VIP plug.""" def execute(self, 
loadbalancer, amphorae_network_config): """Execute post_vip_plug across the amphorae.""" amp_post_vip_plug = AmphoraPostVIPPlug() for amphora in loadbalancer.amphorae: amp_post_vip_plug.execute(amphora, loadbalancer, amphorae_network_config) def revert(self, result, loadbalancer, *args, **kwargs): """Handle a failed amphora vip plug notification.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting amphorae post vip plug.") self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) class AmphoraCertUpload(BaseAmphoraTask): """Upload a certificate to the amphora.""" def execute(self, amphora, server_pem): """Execute cert_update_amphora routine.""" LOG.debug("Upload cert in amphora REST driver") key = utils.get_compatible_server_certs_key_passphrase() fer = fernet.Fernet(key) self.amphora_driver.upload_cert_amp(amphora, fer.decrypt(server_pem)) class AmphoraUpdateVRRPInterface(BaseAmphoraTask): """Task to get and update the VRRP interface device name from amphora.""" def execute(self, amphora, timeout_dict=None): try: interface = self.amphora_driver.get_interface_from_ip( amphora, amphora.vrrp_ip, timeout_dict=timeout_dict) except Exception as e: # This can occur when an active/standby LB has no listener LOG.error('Failed to get amphora VRRP interface on amphora ' '%s. Skipping this amphora as it is failing due to: ' '%s', amphora.id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora.id, status=constants.ERROR) return None self.amphora_repo.update(db_apis.get_session(), amphora.id, vrrp_interface=interface) return interface class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask): """Task to get and update the VRRP interface device name from amphora.""" def execute(self, amphora_index, amphorae, timeout_dict=None): amphora_id = amphorae[amphora_index].id try: interface = self.amphora_driver.get_interface_from_ip( amphorae[amphora_index], amphorae[amphora_index].vrrp_ip, timeout_dict=timeout_dict) except Exception as e: # This can occur when an active/standby LB has no listener LOG.error('Failed to get amphora VRRP interface on amphora ' '%s. Skipping this amphora as it is failing due to: ' '%s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) return None self.amphora_repo.update(db_apis.get_session(), amphora_id, vrrp_interface=interface) return interface class AmphoraVRRPUpdate(BaseAmphoraTask): """Task to update the VRRP configuration of an amphora.""" def execute(self, loadbalancer_id, amphorae_network_config, amphora, amp_vrrp_int, timeout_dict=None): """Execute update_vrrp_conf.""" loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. amphora.vrrp_interface = amp_vrrp_int try: self.amphora_driver.update_vrrp_conf( loadbalancer, amphorae_network_config, amphora, timeout_dict) except Exception as e: LOG.error('Failed to update VRRP configuration amphora %s. 
' 'Skipping this amphora as it is failing to update due ' 'to: %s', amphora.id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora.id, status=constants.ERROR) LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora.id) class AmphoraIndexVRRPUpdate(BaseAmphoraTask): """Task to update the VRRP configuration of an amphora.""" def execute(self, loadbalancer_id, amphorae_network_config, amphora_index, amphorae, amp_vrrp_int, timeout_dict=None): """Execute update_vrrp_conf.""" loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. amphora_id = amphorae[amphora_index].id amphorae[amphora_index].vrrp_interface = amp_vrrp_int try: self.amphora_driver.update_vrrp_conf( loadbalancer, amphorae_network_config, amphorae[amphora_index], timeout_dict) except Exception as e: LOG.error('Failed to update VRRP configuration amphora %s. ' 'Skipping this amphora as it is failing to update due ' 'to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) return LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) class AmphoraVRRPStart(BaseAmphoraTask): """Task to start keepalived on an amphora. This will reload keepalived if it is already running. """ def execute(self, amphora, timeout_dict=None): self.amphora_driver.start_vrrp_service(amphora, timeout_dict) LOG.debug("Started VRRP on amphora %s.", amphora.id) class AmphoraIndexVRRPStart(BaseAmphoraTask): """Task to start keepalived on an amphora. This will reload keepalived if it is already running. """ def execute(self, amphora_index, amphorae, timeout_dict=None): amphora_id = amphorae[amphora_index].id try: self.amphora_driver.start_vrrp_service(amphorae[amphora_index], timeout_dict) except Exception as e: LOG.error('Failed to start VRRP on amphora %s. ' 'Skipping this amphora as it is failing to start due ' 'to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) return LOG.debug("Started VRRP on amphora %s.", amphorae[amphora_index].id) class AmphoraComputeConnectivityWait(BaseAmphoraTask): """Task to wait for the compute instance to be up.""" def execute(self, amphora): """Execute get_info routine for an amphora until it responds.""" try: amp_info = self.amphora_driver.get_info(amphora) LOG.debug('Successfully connected to amphora %s: %s', amphora.id, amp_info) except driver_except.TimeOutException: LOG.error("Amphora compute instance failed to become reachable. 
" "This either means the compute driver failed to fully " "boot the instance inside the timeout interval or the " "instance is not reachable via the lb-mgmt-net.") self.amphora_repo.update(db_apis.get_session(), amphora.id, status=constants.ERROR) raise class AmphoraConfigUpdate(BaseAmphoraTask): """Task to push a new amphora agent configuration to the amphroa.""" def execute(self, amphora, flavor): # Extract any flavor based settings if flavor: topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, CONF.controller_worker.loadbalancer_topology) else: topology = CONF.controller_worker.loadbalancer_topology # Build the amphora agent config agent_cfg_tmpl = agent_jinja_cfg.AgentJinjaTemplater() agent_config = agent_cfg_tmpl.build_agent_config(amphora.id, topology) # Push the new configuration to the amphroa try: self.amphora_driver.update_amphora_agent_config(amphora, agent_config) except driver_except.AmpDriverNotImplementedError: LOG.error('Amphora %s does not support agent configuration ' 'update. Please update the amphora image for this ' 'amphora. Skipping.', amphora.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/tasks/cert_task.py0000664000175000017500000000317500000000000024326 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cryptography import fernet from oslo_config import cfg from stevedore import driver as stevedore_driver from taskflow import task from octavia.common import utils CONF = cfg.CONF class BaseCertTask(task.Task): """Base task to load drivers common to the tasks.""" def __init__(self, **kwargs): super(BaseCertTask, self).__init__(**kwargs) self.cert_generator = stevedore_driver.DriverManager( namespace='octavia.cert_generator', name=CONF.certificates.cert_generator, invoke_on_load=True, ).driver class GenerateServerPEMTask(BaseCertTask): """Create the server certs for the agent comm Use the amphora_id for the CN """ def execute(self, amphora_id): cert = self.cert_generator.generate_cert_key_pair( cn=amphora_id, validity=CONF.certificates.cert_validity_time) key = utils.get_compatible_server_certs_key_passphrase() fer = fernet.Fernet(key) return fer.encrypt(cert.certificate + cert.private_key) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/tasks/compute_tasks.py0000664000175000017500000003245100000000000025227 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import time from cryptography import fernet from oslo_config import cfg from oslo_log import log as logging from stevedore import driver as stevedore_driver from taskflow import task from taskflow.types import failure import tenacity from octavia.amphorae.backends.agent import agent_jinja_cfg from octavia.common import constants from octavia.common import exceptions from octavia.common.jinja.logging import logging_jinja_cfg from octavia.common.jinja import user_data_jinja_cfg from octavia.common import utils from octavia.controller.worker import amphora_rate_limit CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseComputeTask(task.Task): """Base task to load drivers common to the tasks.""" def __init__(self, **kwargs): super(BaseComputeTask, self).__init__(**kwargs) self.compute = stevedore_driver.DriverManager( namespace='octavia.compute.drivers', name=CONF.controller_worker.compute_driver, invoke_on_load=True ).driver self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit() class ComputeCreate(BaseComputeTask): """Create the compute instance for a new amphora.""" def execute(self, amphora_id, server_group_id, config_drive_files=None, build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, ports=None, flavor=None, availability_zone=None): """Create an amphora :returns: an amphora """ ports = ports or [] network_ids = CONF.controller_worker.amp_boot_network_list[:] config_drive_files = config_drive_files or {} user_data = None LOG.debug("Compute create execute for amphora with id %s", amphora_id) user_data_config_drive = CONF.controller_worker.user_data_config_drive key_name = CONF.controller_worker.amp_ssh_key_name # Apply an Octavia flavor customizations if flavor: topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, CONF.controller_worker.loadbalancer_topology) amp_compute_flavor = flavor.get( constants.COMPUTE_FLAVOR, CONF.controller_worker.amp_flavor_id) else: topology = CONF.controller_worker.loadbalancer_topology amp_compute_flavor = CONF.controller_worker.amp_flavor_id if availability_zone: amp_availability_zone = availability_zone.get( constants.COMPUTE_ZONE) amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) if amp_network: network_ids = [amp_network] else: amp_availability_zone = None try: if CONF.haproxy_amphora.build_rate_limit != -1: self.rate_limit.add_to_build_request_queue( amphora_id, build_type_priority) agent_cfg = agent_jinja_cfg.AgentJinjaTemplater() config_drive_files['/etc/octavia/amphora-agent.conf'] = ( agent_cfg.build_agent_config(amphora_id, topology)) logging_cfg = logging_jinja_cfg.LoggingJinjaTemplater( CONF.amphora_agent.logging_template_override) config_drive_files['/etc/rsyslog.d/10-rsyslog.conf'] = ( logging_cfg.build_logging_config()) if user_data_config_drive: udtemplater = user_data_jinja_cfg.UserDataJinjaCfg() user_data = udtemplater.build_user_data_config( config_drive_files) config_drive_files = None compute_id = self.compute.build( name="amphora-" + amphora_id, amphora_flavor=amp_compute_flavor, image_id=CONF.controller_worker.amp_image_id, image_tag=CONF.controller_worker.amp_image_tag, 
image_owner=CONF.controller_worker.amp_image_owner_id, key_name=key_name, sec_groups=CONF.controller_worker.amp_secgroup_list, network_ids=network_ids, port_ids=[port.id for port in ports], config_drive_files=config_drive_files, user_data=user_data, server_group_id=server_group_id, availability_zone=amp_availability_zone) LOG.debug("Server created with id: %s for amphora id: %s", compute_id, amphora_id) return compute_id except Exception: LOG.exception("Compute create for amphora id: %s failed", amphora_id) raise def revert(self, result, amphora_id, *args, **kwargs): """This method will revert the creation of the amphora. So it will just delete it in this flow """ if isinstance(result, failure.Failure): return compute_id = result LOG.warning("Reverting compute create for amphora with id " "%(amp)s and compute id: %(comp)s", {'amp': amphora_id, 'comp': compute_id}) try: self.compute.delete(compute_id) except Exception: LOG.exception("Reverting compute create failed") class CertComputeCreate(ComputeCreate): def execute(self, amphora_id, server_pem, server_group_id, build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, ports=None, flavor=None, availability_zone=None): """Create an amphora :returns: an amphora """ # load client certificate with open(CONF.controller_worker.client_ca, 'r') as client_ca: ca = client_ca.read() key = utils.get_compatible_server_certs_key_passphrase() fer = fernet.Fernet(key) config_drive_files = { '/etc/octavia/certs/server.pem': fer.decrypt(server_pem), '/etc/octavia/certs/client_ca.pem': ca} return super(CertComputeCreate, self).execute( amphora_id, config_drive_files=config_drive_files, build_type_priority=build_type_priority, server_group_id=server_group_id, ports=ports, flavor=flavor, availability_zone=availability_zone) class DeleteAmphoraeOnLoadBalancer(BaseComputeTask): """Delete the amphorae on a load balancer. Iterate through amphorae, deleting them """ def execute(self, loadbalancer): for amp in loadbalancer.amphorae: # The compute driver will already handle NotFound try: self.compute.delete(amp.compute_id) except Exception: LOG.exception("Compute delete for amphora id: %s failed", amp.id) raise class ComputeDelete(BaseComputeTask): @tenacity.retry(retry=tenacity.retry_if_exception_type(), stop=tenacity.stop_after_attempt(CONF.compute.max_retries), wait=tenacity.wait_exponential( multiplier=CONF.compute.retry_backoff, min=CONF.compute.retry_interval, max=CONF.compute.retry_max), reraise=True) def execute(self, amphora, passive_failure=False): if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1: LOG.debug('Compute delete execute for amphora with ID %s and ' 'compute ID: %s', amphora.id, amphora.compute_id) else: LOG.warning('Retrying compute delete of %s attempt %s of %s.', amphora.compute_id, self.execute.retry.statistics[ constants.ATTEMPT_NUMBER], self.execute.retry.stop.max_attempt_number) # Let the Taskflow engine know we are working and alive # Don't use get with a default for 'attempt_number', we need to fail # if that number is missing. self.update_progress( self.execute.retry.statistics[constants.ATTEMPT_NUMBER] / self.execute.retry.stop.max_attempt_number) try: self.compute.delete(amphora.compute_id) except Exception: if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] != self.execute.retry.stop.max_attempt_number): LOG.warning('Compute delete for amphora id: %s failed. ' 'Retrying.', amphora.id) raise if passive_failure: LOG.exception('Compute delete for compute ID: %s on amphora ' 'ID: %s failed. 
This resource will be abandoned ' 'and should manually be cleaned up once the ' 'compute service is functional.', amphora.compute_id, amphora.id) else: LOG.exception('Compute delete for compute ID: %s on amphora ' 'ID: %s failed. The compute service has failed. ' 'Aborting and reverting.', amphora.compute_id, amphora.id) raise class ComputeActiveWait(BaseComputeTask): """Wait for the compute driver to mark the amphora active.""" def execute(self, compute_id, amphora_id, availability_zone): """Wait for the compute driver to mark the amphora active :raises: Generic exception if the amphora is not active :returns: An amphora object """ if availability_zone: amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) else: amp_network = None for i in range(CONF.controller_worker.amp_active_retries): amp, fault = self.compute.get_amphora(compute_id, amp_network) if amp.status == constants.ACTIVE: if CONF.haproxy_amphora.build_rate_limit != -1: self.rate_limit.remove_from_build_req_queue(amphora_id) return amp if amp.status == constants.ERROR: raise exceptions.ComputeBuildException(fault=fault) time.sleep(CONF.controller_worker.amp_active_wait_sec) raise exceptions.ComputeWaitTimeoutException(id=compute_id) class NovaServerGroupCreate(BaseComputeTask): def execute(self, loadbalancer_id): """Create a server group by nova client api :param loadbalancer_id: will be used for server group's name :param policy: will be used for server group's policy :raises: Generic exception if the server group is not created :returns: server group's id """ name = 'octavia-lb-' + loadbalancer_id server_group = self.compute.create_server_group( name, CONF.nova.anti_affinity_policy) LOG.debug("Server Group created with id: %s for load balancer id: " "%s", server_group.id, loadbalancer_id) return server_group.id def revert(self, result, *args, **kwargs): """This method will revert the creation of the server group. :param result: here it refers to server group id """ server_group_id = result LOG.warning("Reverting server group create with id:%s", server_group_id) try: self.compute.delete_server_group(server_group_id) except Exception as e: LOG.error("Failed to delete server group. Resources may " "still be in use for server group: %(sg)s due to " "error: %(except)s", {'sg': server_group_id, 'except': str(e)}) class NovaServerGroupDelete(BaseComputeTask): def execute(self, server_group_id): if server_group_id is not None: self.compute.delete_server_group(server_group_id) else: return class AttachPort(BaseComputeTask): def execute(self, amphora, port): """Attach a port to an amphora instance. :param amphora: The amphora to attach the port to. :param port: The port to attach to the amphora. :returns: None """ LOG.debug('Attaching port: %s to compute: %s', port.id, amphora.compute_id) self.compute.attach_network_or_port(amphora.compute_id, port_id=port.id) def revert(self, amphora, port, *args, **kwargs): """Revert our port attach. :param amphora: The amphora to detach the port from. :param port: The port to detach from the amphora. 
""" LOG.warning('Reverting port: %s attach to compute: %s', port.id, amphora.compute_id) try: self.compute.detach_port(amphora.compute_id, port.id) except Exception as e: LOG.error('Failed to detach port %s from compute %s for revert ' 'due to %s.', port.id, amphora.compute_id, str(e)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/tasks/database_tasks.py0000664000175000017500000030000400000000000025307 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cryptography import fernet from oslo_config import cfg from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils import sqlalchemy from sqlalchemy.orm import exc from taskflow import task from taskflow.types import failure from octavia.common import constants from octavia.common import data_models import octavia.common.tls_utils.cert_parser as cert_parser from octavia.common import utils from octavia.common import validate from octavia.controller.worker import task_utils as task_utilities from octavia.db import api as db_apis from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseDatabaseTask(task.Task): """Base task to load drivers common to the tasks.""" def __init__(self, **kwargs): self.repos = repo.Repositories() self.amphora_repo = repo.AmphoraRepository() self.health_mon_repo = repo.HealthMonitorRepository() self.listener_repo = repo.ListenerRepository() self.loadbalancer_repo = repo.LoadBalancerRepository() self.vip_repo = repo.VipRepository() self.member_repo = repo.MemberRepository() self.pool_repo = repo.PoolRepository() self.amp_health_repo = repo.AmphoraHealthRepository() self.l7policy_repo = repo.L7PolicyRepository() self.l7rule_repo = repo.L7RuleRepository() self.task_utils = task_utilities.TaskUtils() super(BaseDatabaseTask, self).__init__(**kwargs) def _delete_from_amp_health(self, amphora_id): """Delete the amphora_health record for an amphora. :param amphora_id: The amphora id to delete """ LOG.debug('Disabling health monitoring on amphora: %s', amphora_id) try: self.amp_health_repo.delete(db_apis.get_session(), amphora_id=amphora_id) except (sqlalchemy.orm.exc.NoResultFound, sqlalchemy.orm.exc.UnmappedInstanceError): LOG.debug('No existing amphora health record to delete ' 'for amphora: %s, skipping.', amphora_id) def _mark_amp_health_busy(self, amphora_id): """Mark the amphora_health record busy for an amphora. 
:param amphora_id: The amphora id to mark busy """ LOG.debug('Marking health monitoring busy on amphora: %s', amphora_id) try: self.amp_health_repo.update(db_apis.get_session(), amphora_id=amphora_id, busy=True) except (sqlalchemy.orm.exc.NoResultFound, sqlalchemy.orm.exc.UnmappedInstanceError): LOG.debug('No existing amphora health record to mark busy ' 'for amphora: %s, skipping.', amphora_id) class CreateAmphoraInDB(BaseDatabaseTask): """Task to create an initial amphora in the Database.""" def execute(self, *args, loadbalancer_id=None, **kwargs): """Creates a pending create amphora record in the database. :returns: The created amphora object """ amphora = self.amphora_repo.create(db_apis.get_session(), id=uuidutils.generate_uuid(), load_balancer_id=loadbalancer_id, status=constants.PENDING_CREATE, cert_busy=False) LOG.info("Created Amphora in DB with id %s", amphora.id) return amphora.id def revert(self, result, *args, **kwargs): """Revert by storing the amphora in error state in the DB In a future version we might change the status to DELETED if deleting the amphora was successful :param result: Id of created amphora. :returns: None """ if isinstance(result, failure.Failure): # This task's execute failed, so nothing needed to be done to # revert return # At this point the revert is being called because another task # executed after this failed so we will need to do something and # result is the amphora's id LOG.warning("Reverting create amphora in DB for amp id %s ", result) # Delete the amphora for now. May want to just update status later try: self.amphora_repo.delete(db_apis.get_session(), id=result) except Exception as e: LOG.error("Failed to delete amphora %(amp)s " "in the database due to: " "%(except)s", {'amp': result, 'except': str(e)}) class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask): """Task to mark a list of amphora deleted in the Database.""" def execute(self, loadbalancer): """Update load balancer's amphorae statuses to DELETED in the database. :param loadbalancer: The load balancer which amphorae should be marked DELETED. :returns: None """ for amp in loadbalancer.amphorae: LOG.debug("Marking amphora %s DELETED ", amp.id) self.amphora_repo.update(db_apis.get_session(), id=amp.id, status=constants.DELETED) class DeleteHealthMonitorInDB(BaseDatabaseTask): """Delete the health monitor in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, health_mon): """Delete the health monitor in DB :param health_mon: The health monitor which should be deleted :returns: None """ LOG.debug("DB delete health monitor: %s ", health_mon.id) try: self.health_mon_repo.delete(db_apis.get_session(), id=health_mon.id) except exc.NoResultFound: # ignore if the HealthMonitor was not found pass def revert(self, health_mon, *args, **kwargs): """Mark the health monitor ERROR since the mark active couldn't happen :param health_mon: The health monitor which couldn't be deleted :returns: None """ LOG.warning("Reverting mark health monitor delete in DB " "for health monitor with id %s", health_mon.id) self.health_mon_repo.update(db_apis.get_session(), id=health_mon.id, provisioning_status=constants.ERROR) class DeleteHealthMonitorInDBByPool(DeleteHealthMonitorInDB): """Delete the health monitor in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool): """Delete the health monitor in the DB. :param pool: A pool which health monitor should be deleted. 
:returns: None """ super(DeleteHealthMonitorInDBByPool, self).execute( pool.health_monitor) def revert(self, pool, *args, **kwargs): """Mark the health monitor ERROR since the mark active couldn't happen :param pool: A pool which health monitor couldn't be deleted :returns: None """ super(DeleteHealthMonitorInDBByPool, self).revert( pool.health_monitor, *args, **kwargs) class DeleteMemberInDB(BaseDatabaseTask): """Delete the member in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, member): """Delete the member in the DB :param member: The member to be deleted :returns: None """ LOG.debug("DB delete member for id: %s ", member.id) self.member_repo.delete(db_apis.get_session(), id=member.id) def revert(self, member, *args, **kwargs): """Mark the member ERROR since the delete couldn't happen :param member: Member that failed to get deleted :returns: None """ LOG.warning("Reverting delete in DB for member id %s", member.id) try: self.member_repo.update(db_apis.get_session(), member.id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update member %(mem)s " "provisioning_status to ERROR due to: %(except)s", {'mem': member.id, 'except': str(e)}) class DeleteListenerInDB(BaseDatabaseTask): """Delete the listener in the DB.""" def execute(self, listener): """Delete the listener in DB :param listener: The listener to delete :returns: None """ LOG.debug("Delete in DB for listener id: %s", listener.id) self.listener_repo.delete(db_apis.get_session(), id=listener.id) def revert(self, listener, *args, **kwargs): """Mark the listener ERROR since the listener didn't delete :param listener: Listener that failed to get deleted :returns: None """ LOG.warning("Reverting mark listener delete in DB for listener id %s", listener.id) class DeletePoolInDB(BaseDatabaseTask): """Delete the pool in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool): """Delete the pool in DB :param pool: The pool to be deleted :returns: None """ LOG.debug("Delete in DB for pool id: %s ", pool.id) self.pool_repo.delete(db_apis.get_session(), id=pool.id) def revert(self, pool, *args, **kwargs): """Mark the pool ERROR since the delete couldn't happen :param pool: Pool that failed to get deleted :returns: None """ LOG.warning("Reverting delete in DB for pool id %s", pool.id) try: self.pool_repo.update(db_apis.get_session(), pool.id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update pool %(pool)s " "provisioning_status to ERROR due to: %(except)s", {'pool': pool.id, 'except': str(e)}) class DeleteL7PolicyInDB(BaseDatabaseTask): """Delete the L7 policy in the DB. 
Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7policy): """Delete the l7policy in DB :param l7policy: The l7policy to be deleted :returns: None """ LOG.debug("Delete in DB for l7policy id: %s ", l7policy.id) self.l7policy_repo.delete(db_apis.get_session(), id=l7policy.id) def revert(self, l7policy, *args, **kwargs): """Mark the l7policy ERROR since the delete couldn't happen :param l7policy: L7 policy that failed to get deleted :returns: None """ LOG.warning("Reverting delete in DB for l7policy id %s", l7policy.id) try: self.l7policy_repo.update(db_apis.get_session(), l7policy.id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update l7policy %(l7policy)s " "provisioning_status to ERROR due to: %(except)s", {'l7policy': l7policy.id, 'except': str(e)}) class DeleteL7RuleInDB(BaseDatabaseTask): """Delete the L7 rule in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7rule): """Delete the l7rule in DB :param l7rule: The l7rule to be deleted :returns: None """ LOG.debug("Delete in DB for l7rule id: %s ", l7rule.id) self.l7rule_repo.delete(db_apis.get_session(), id=l7rule.id) def revert(self, l7rule, *args, **kwargs): """Mark the l7rule ERROR since the delete couldn't happen :param l7rule: L7 rule that failed to get deleted :returns: None """ LOG.warning("Reverting delete in DB for l7rule id %s", l7rule.id) try: self.l7rule_repo.update(db_apis.get_session(), l7rule.id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update l7rule %(l7rule)s " "provisioning_status to ERROR due to: %(except)s", {'l7rule': l7rule.id, 'except': str(e)}) class ReloadAmphora(BaseDatabaseTask): """Get an amphora object from the database.""" def execute(self, amphora_id): """Get an amphora object from the database. :param amphora_id: The amphora ID to lookup :returns: The amphora object """ LOG.debug("Get amphora from DB for amphora id: %s ", amphora_id) return self.amphora_repo.get(db_apis.get_session(), id=amphora_id) class ReloadLoadBalancer(BaseDatabaseTask): """Get a load balancer object from the database.""" def execute(self, loadbalancer_id, *args, **kwargs): """Get a load balancer object from the database. :param loadbalancer_id: The load balancer ID to lookup :returns: The load balancer object """ LOG.debug("Get load balancer from DB for load balancer id: %s ", loadbalancer_id) return self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) class UpdateVIPAfterAllocation(BaseDatabaseTask): """Update a VIP associated with a given load balancer.""" def execute(self, loadbalancer_id, vip): """Update a VIP associated with a given load balancer. :param loadbalancer_id: Id of a load balancer which VIP should be updated. :param vip: data_models.Vip object with update data. :returns: The load balancer object. """ self.repos.vip.update(db_apis.get_session(), loadbalancer_id, port_id=vip.port_id, subnet_id=vip.subnet_id, ip_address=vip.ip_address) return self.repos.load_balancer.get(db_apis.get_session(), id=loadbalancer_id) class UpdateAmphoraeVIPData(BaseDatabaseTask): """Update amphorae VIP data.""" def execute(self, amps_data): """Update amphorae VIP data. :param amps_data: Amphorae update dicts. 
        :returns: None
        """
        for amp_data in amps_data:
            self.repos.amphora.update(db_apis.get_session(), amp_data.id,
                                      vrrp_ip=amp_data.vrrp_ip,
                                      ha_ip=amp_data.ha_ip,
                                      vrrp_port_id=amp_data.vrrp_port_id,
                                      ha_port_id=amp_data.ha_port_id,
                                      vrrp_id=1)


class UpdateAmphoraVIPData(BaseDatabaseTask):
    """Update a single amphora's VIP data."""

    def execute(self, amp_data):
        """Update the amphora VIP data.

        :param amp_data: Amphora update dict.
        :returns: None
        """
        self.repos.amphora.update(db_apis.get_session(), amp_data.id,
                                  vrrp_ip=amp_data.vrrp_ip,
                                  ha_ip=amp_data.ha_ip,
                                  vrrp_port_id=amp_data.vrrp_port_id,
                                  ha_port_id=amp_data.ha_port_id,
                                  vrrp_id=1)


class UpdateAmpFailoverDetails(BaseDatabaseTask):
    """Update amphora failover details in the database."""

    def execute(self, amphora, vip, base_port):
        """Update amphora failover details in the database.

        :param amphora: The amphora to update
        :param vip: The VIP object associated with this amphora.
        :param base_port: The base port object associated with the amphora.
        :returns: None
        """
        # role and vrrp_priority will be updated later.
        self.repos.amphora.update(
            db_apis.get_session(), amphora.id,
            vrrp_ip=base_port.fixed_ips[0].ip_address,
            ha_ip=vip.ip_address,
            vrrp_port_id=base_port.id,
            ha_port_id=vip.port_id,
            vrrp_id=1)


class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
    """Associate failover amphora with loadbalancer in the database."""

    def execute(self, amphora_id, loadbalancer_id):
        """Associate failover amphora with loadbalancer in the database.

        :param amphora_id: Id of an amphora to update
        :param loadbalancer_id: Id of a load balancer to be associated with
               a given amphora.
        :returns: None
        """
        self.repos.amphora.associate(db_apis.get_session(),
                                     load_balancer_id=loadbalancer_id,
                                     amphora_id=amphora_id)

    def revert(self, amphora_id, *args, **kwargs):
        """Remove amphora-load balancer association.

        :param amphora_id: Id of an amphora that couldn't be associated
               with a load balancer.
        :returns: None
        """
        try:
            self.repos.amphora.update(db_apis.get_session(), amphora_id,
                                      loadbalancer_id=None)
        except Exception as e:
            LOG.error("Failed to update amphora %(amp)s "
                      "load balancer id to None due to: "
                      "%(except)s", {'amp': amphora_id, 'except': str(e)})
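# NOTE: Illustrative comment, not from the original module. The amp_data
# items consumed above are amphora data models carrying the VRRP fields
# (see GetAmphoraDetails further down, which builds exactly such
# objects). vrrp_id is pinned to 1 because each load balancer gets a
# single VRRP group (see CreateVRRPGroupForLB below).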
class MapLoadbalancerToAmphora(BaseDatabaseTask):
    """Maps and assigns a load balancer to an amphora in the database."""

    def execute(self, loadbalancer_id, server_group_id=None, flavor=None,
                availability_zone=None):
        """Allocates an Amphora for the load balancer in the database.

        :param loadbalancer_id: The load balancer id to map to an amphora
        :param server_group_id: The server group id of the load balancer,
               if anti-affinity is in use
        :param flavor: The flavor dict of the load balancer
        :param availability_zone: The availability zone dict of the
               load balancer
        :returns: Amphora ID if one was allocated, None if it was
                  unable to allocate an Amphora
        """

        LOG.debug("Allocating an Amphora for load balancer with id %s",
                  loadbalancer_id)

        if server_group_id is not None:
            LOG.debug("Load balancer is using anti-affinity. Skipping spares "
                      "pool allocation.")
            return None

        # Validate the flavor is spares compatible
        if not validate.is_flavor_spares_compatible(flavor):
            LOG.debug("Load balancer has a flavor that is not compatible "
                      "with using spares pool amphora. Skipping spares "
                      "pool allocation.")
            return None

        if availability_zone:
            amp_az = availability_zone.get(constants.COMPUTE_ZONE)
        else:
            amp_az = CONF.nova.availability_zone

        try:
            amp = self.amphora_repo.allocate_and_associate(
                db_apis.get_session(), loadbalancer_id, amp_az)
        except Exception as e:
            LOG.error("Failed to get a spare amphora (AZ: %(amp_az)s) for "
                      "loadbalancer %(lb_id)s due to: %(except)s",
                      {'amp_az': amp_az, 'lb_id': loadbalancer_id,
                       'except': str(e)})
            return None

        if amp is None:
            LOG.debug("No Amphora available for load balancer with id %s",
                      loadbalancer_id)
            return None

        LOG.debug("Allocated Amphora with id %(amp)s for load balancer "
                  "with id %(lb)s", {'amp': amp.id, 'lb': loadbalancer_id})

        return amp.id

    def revert(self, result, loadbalancer_id, *args, **kwargs):
        LOG.warning("Reverting Amphora allocation for the load "
                    "balancer %s in the database.", loadbalancer_id)
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)


class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask):
    """Alter the amphora role and priority in DB."""

    def _execute(self, amphora, amp_role, vrrp_priority):
        """Alter the amphora role and priority in DB.

        :param amphora: Amphora to update.
        :param amp_role: Amphora role to be set.
        :param vrrp_priority: VRRP priority to set.
        :returns: None
        """
        LOG.debug("Mark %(role)s in DB for amphora: %(amp)s",
                  {'role': amp_role, 'amp': amphora.id})
        self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                 role=amp_role,
                                 vrrp_priority=vrrp_priority)

    def _revert(self, result, amphora, *args, **kwargs):
        """Removes role and vrrp_priority association.

        :param result: Result of the association.
        :param amphora: Amphora which role/vrrp_priority association
               failed.
        :returns: None
        """

        if isinstance(result, failure.Failure):
            return

        LOG.warning("Reverting amphora role in DB for amp id %(amp)s",
                    {'amp': amphora.id})
        try:
            self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                     role=None,
                                     vrrp_priority=None)
        except Exception as e:
            LOG.error("Failed to update amphora %(amp)s "
                      "role and vrrp_priority to None due to: "
                      "%(except)s", {'amp': amphora.id, 'except': str(e)})


class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB):
    """Alter the amphora role to: MASTER."""

    def execute(self, amphora):
        """Mark amphora as MASTER in db.

        :param amphora: Amphora to update role.
        :returns: None
        """
        amp_role = constants.ROLE_MASTER
        self._execute(amphora, amp_role, constants.ROLE_MASTER_PRIORITY)

    def revert(self, result, amphora, *args, **kwargs):
        """Removes amphora role association.

        :param amphora: Amphora to update role.
        :returns: None
        """
        self._revert(result, amphora, *args, **kwargs)


class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB):
    """Alter the amphora role to: Backup."""

    def execute(self, amphora):
        """Mark amphora as BACKUP in db.

        :param amphora: Amphora to update role.
        :returns: None
        """
        amp_role = constants.ROLE_BACKUP
        self._execute(amphora, amp_role, constants.ROLE_BACKUP_PRIORITY)

    def revert(self, result, amphora, *args, **kwargs):
        """Removes amphora role association.

        :param amphora: Amphora to update role.
        :returns: None
        """
        self._revert(result, amphora, *args, **kwargs)
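# NOTE: Illustrative comment, not from the original module. In an
# active/standby pair the MASTER amphora is written with
# constants.ROLE_MASTER_PRIORITY and the BACKUP with
# constants.ROLE_BACKUP_PRIORITY; keepalived elects the master from the
# higher VRRP priority. STANDALONE amphorae (next class) carry no
# priority at all (None).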
class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB):
    """Alter the amphora role to: Standalone."""

    def execute(self, amphora):
        """Mark amphora as STANDALONE in db.

        :param amphora: Amphora to update role.
        :returns: None
        """
        amp_role = constants.ROLE_STANDALONE
        self._execute(amphora, amp_role, None)

    def revert(self, result, amphora, *args, **kwargs):
        """Removes amphora role association.

        :param amphora: Amphora to update role.
        :returns: None
        """
        self._revert(result, amphora, *args, **kwargs)


class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
    """Will mark an amphora as allocated to a load balancer in the database.

    Assume sqlalchemy made sure the DB got retried sufficiently - so just
    abort
    """

    def execute(self, amphora, loadbalancer_id):
        """Mark amphora as allocated to a load balancer in DB.

        :param amphora: Amphora to be updated.
        :param loadbalancer_id: Id of a load balancer to which an amphora
               should be allocated.
        :returns: None
        """

        LOG.info('Mark ALLOCATED in DB for amphora: %(amp)s with '
                 'compute id %(comp)s for load balancer: %(lb)s',
                 {
                     'amp': amphora.id,
                     'comp': amphora.compute_id,
                     'lb': loadbalancer_id
                 })
        self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                 status=constants.AMPHORA_ALLOCATED,
                                 compute_id=amphora.compute_id,
                                 lb_network_ip=amphora.lb_network_ip,
                                 load_balancer_id=loadbalancer_id)

    def revert(self, result, amphora, loadbalancer_id, *args, **kwargs):
        """Mark the amphora as broken and ready to be cleaned up.

        :param result: Execute task result
        :param amphora: Amphora that was updated.
        :param loadbalancer_id: Id of a load balancer to which an amphora
               failed to be allocated.
        :returns: None
        """

        if isinstance(result, failure.Failure):
            return

        LOG.warning("Reverting mark amphora allocated in DB for amp "
                    "id %(amp)s and compute id %(comp)s",
                    {'amp': amphora.id, 'comp': amphora.compute_id})
        self.task_utils.mark_amphora_status_error(amphora.id)


class MarkAmphoraBootingInDB(BaseDatabaseTask):
    """Mark the amphora as booting in the database."""

    def execute(self, amphora_id, compute_id):
        """Mark amphora booting in DB.

        :param amphora_id: Id of the amphora to update
        :param compute_id: Id of a compute on which an amphora resides
        :returns: None
        """

        LOG.debug("Mark BOOTING in DB for amphora: %(amp)s with "
                  "compute id %(id)s", {'amp': amphora_id, 'id': compute_id})
        self.amphora_repo.update(db_apis.get_session(), amphora_id,
                                 status=constants.AMPHORA_BOOTING,
                                 compute_id=compute_id)

    def revert(self, result, amphora_id, compute_id, *args, **kwargs):
        """Mark the amphora as broken and ready to be cleaned up.

        :param result: Execute task result
        :param amphora_id: Id of the amphora that failed to update
        :param compute_id: Id of a compute on which an amphora resides
        :returns: None
        """

        if isinstance(result, failure.Failure):
            return

        LOG.warning("Reverting mark amphora booting in DB for amp "
                    "id %(amp)s and compute id %(comp)s",
                    {'amp': amphora_id, 'comp': compute_id})
        try:
            self.amphora_repo.update(db_apis.get_session(), amphora_id,
                                     status=constants.ERROR,
                                     compute_id=compute_id)
        except Exception as e:
            LOG.error("Failed to update amphora %(amp)s "
                      "status to ERROR due to: "
                      "%(except)s", {'amp': amphora_id, 'except': str(e)})
class MarkAmphoraDeletedInDB(BaseDatabaseTask):
    """Mark the amphora deleted in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, amphora):
        """Mark the amphora as deleted in DB.

        :param amphora: Amphora to be updated.
        :returns: None
        """

        LOG.debug("Mark DELETED in DB for amphora: %(amp)s with "
                  "compute id %(comp)s",
                  {'amp': amphora.id, 'comp': amphora.compute_id})
        self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                 status=constants.DELETED)

    def revert(self, amphora, *args, **kwargs):
        """Mark the amphora as broken and ready to be cleaned up.

        :param amphora: Amphora that was updated.
        :returns: None
        """

        LOG.warning("Reverting mark amphora deleted in DB "
                    "for amp id %(amp)s and compute id %(comp)s",
                    {'amp': amphora.id, 'comp': amphora.compute_id})
        self.task_utils.mark_amphora_status_error(amphora.id)


class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
    """Mark the amphora pending delete in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, amphora):
        """Mark the amphora as pending delete in DB.

        :param amphora: Amphora to be updated.
        :returns: None
        """

        LOG.debug("Mark PENDING DELETE in DB for amphora: %(amp)s "
                  "with compute id %(id)s",
                  {'amp': amphora.id, 'id': amphora.compute_id})
        self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                 status=constants.PENDING_DELETE)

    def revert(self, amphora, *args, **kwargs):
        """Mark the amphora as broken and ready to be cleaned up.

        :param amphora: Amphora that was updated.
        :returns: None
        """

        LOG.warning("Reverting mark amphora pending delete in DB "
                    "for amp id %(amp)s and compute id %(comp)s",
                    {'amp': amphora.id, 'comp': amphora.compute_id})
        self.task_utils.mark_amphora_status_error(amphora.id)


class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask):
    """Mark the amphora pending update in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, amphora):
        """Mark the amphora as pending update in DB.

        :param amphora: Amphora to be updated.
        :returns: None
        """

        LOG.debug("Mark PENDING UPDATE in DB for amphora: %(amp)s "
                  "with compute id %(id)s",
                  {'amp': amphora.id, 'id': amphora.compute_id})
        self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                 status=constants.PENDING_UPDATE)

    def revert(self, amphora, *args, **kwargs):
        """Mark the amphora as broken and ready to be cleaned up.

        :param amphora: Amphora that was updated.
        :returns: None
        """

        LOG.warning("Reverting mark amphora pending update in DB "
                    "for amp id %(amp)s and compute id %(comp)s",
                    {'amp': amphora.id, 'comp': amphora.compute_id})
        self.task_utils.mark_amphora_status_error(amphora.id)


class MarkAmphoraReadyInDB(BaseDatabaseTask):
    """This task will mark an amphora as ready in the database.

    Assume sqlalchemy made sure the DB got retried sufficiently - so just
    abort
    """

    def execute(self, amphora):
        """Mark amphora as ready in DB.

        :param amphora: Amphora to be updated.
        :returns: None
        """

        LOG.info("Mark READY in DB for amphora: %(amp)s with compute "
                 "id %(comp)s",
                 {"amp": amphora.id, "comp": amphora.compute_id})
        self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                 status=constants.AMPHORA_READY,
                                 compute_id=amphora.compute_id,
                                 lb_network_ip=amphora.lb_network_ip)

    def revert(self, amphora, *args, **kwargs):
        """Mark the amphora as broken and ready to be cleaned up.

        :param amphora: Amphora that was updated.
        :returns: None
        """

        LOG.warning("Reverting mark amphora ready in DB for amp "
                    "id %(amp)s and compute id %(comp)s",
                    {'amp': amphora.id, 'comp': amphora.compute_id})
        try:
            self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                     status=constants.ERROR,
                                     compute_id=amphora.compute_id,
                                     lb_network_ip=amphora.lb_network_ip)
        except Exception as e:
            LOG.error("Failed to update amphora %(amp)s "
                      "status to ERROR due to: "
                      "%(except)s", {'amp': amphora.id, 'except': str(e)})
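# NOTE: Illustrative comment, not from the original module. The amphora
# status transitions driven by the tasks above are, in the usual order:
# BOOTING -> READY (spare) -> ALLOCATED (bound to a load balancer) ->
# PENDING_UPDATE / PENDING_DELETE -> DELETED, with ERROR as the revert
# path at every step.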
class UpdateAmphoraComputeId(BaseDatabaseTask):
    """Associate amphora with a compute in DB."""

    def execute(self, amphora_id, compute_id):
        """Associate amphora with a compute in DB.

        :param amphora_id: Id of the amphora to update
        :param compute_id: Id of a compute on which an amphora resides
        :returns: None
        """

        self.amphora_repo.update(db_apis.get_session(), amphora_id,
                                 compute_id=compute_id)


class UpdateAmphoraInfo(BaseDatabaseTask):
    """Update amphora with compute instance details."""

    def execute(self, amphora_id, compute_obj):
        """Update amphora with compute instance details.

        :param amphora_id: Id of the amphora to update
        :param compute_obj: Compute on which an amphora resides
        :returns: Updated amphora object
        """
        self.amphora_repo.update(
            db_apis.get_session(), amphora_id,
            lb_network_ip=compute_obj.lb_network_ip,
            cached_zone=compute_obj.cached_zone,
            image_id=compute_obj.image_id,
            compute_flavor=compute_obj.compute_flavor)
        return self.amphora_repo.get(db_apis.get_session(), id=amphora_id)


class UpdateAmphoraDBCertExpiration(BaseDatabaseTask):
    """Update the amphora expiration date with new cert file date."""

    def execute(self, amphora_id, server_pem):
        """Update the amphora expiration date with new cert file date.

        :param amphora_id: Id of the amphora to update
        :param server_pem: Certificate in PEM format
        :returns: None
        """

        LOG.debug("Update DB cert expiry date of amphora id: %s", amphora_id)

        key = utils.get_compatible_server_certs_key_passphrase()
        fer = fernet.Fernet(key)
        cert_expiration = cert_parser.get_cert_expiration(
            fer.decrypt(server_pem))
        LOG.debug("Certificate expiration date is %s ", cert_expiration)
        self.amphora_repo.update(db_apis.get_session(), amphora_id,
                                 cert_expiration=cert_expiration)


class UpdateAmphoraCertBusyToFalse(BaseDatabaseTask):
    """Update the amphora cert_busy flag to be false."""

    def execute(self, amphora):
        """Update the amphora cert_busy flag to be false.

        :param amphora: Amphora to be updated.
        :returns: None
        """

        LOG.debug("Update cert_busy flag of amphora id %s to False",
                  amphora.id)
        self.amphora_repo.update(db_apis.get_session(), amphora.id,
                                 cert_busy=False)
class MarkLBActiveInDB(BaseDatabaseTask):
    """Mark the load balancer active in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def __init__(self, mark_subobjects=False, **kwargs):
        super(MarkLBActiveInDB, self).__init__(**kwargs)
        self.mark_subobjects = mark_subobjects

    def execute(self, loadbalancer):
        """Mark the load balancer as active in DB.

        This also marks ACTIVE all sub-objects of the load balancer if
        self.mark_subobjects is True.

        :param loadbalancer: Load balancer object to be updated
        :returns: None
        """

        if self.mark_subobjects:
            LOG.debug("Marking all listeners of loadbalancer %s ACTIVE",
                      loadbalancer.id)
            for listener in loadbalancer.listeners:
                self._mark_listener_status(listener, constants.ACTIVE)

        LOG.info("Mark ACTIVE in DB for load balancer id: %s",
                 loadbalancer.id)
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id,
                                      provisioning_status=constants.ACTIVE)

    def _mark_listener_status(self, listener, status):
        self.listener_repo.update(db_apis.get_session(),
                                  listener.id,
                                  provisioning_status=status)
        LOG.debug("Marking all l7policies of listener %s %s",
                  listener.id, status)
        for l7policy in listener.l7policies:
            self._mark_l7policy_status(l7policy, status)

        if listener.default_pool:
            LOG.debug("Marking default pool of listener %s %s",
                      listener.id, status)
            self._mark_pool_status(listener.default_pool, status)

    def _mark_l7policy_status(self, l7policy, status):
        self.l7policy_repo.update(
            db_apis.get_session(), l7policy.id,
            provisioning_status=status)

        LOG.debug("Marking all l7rules of l7policy %s %s",
                  l7policy.id, status)
        for l7rule in l7policy.l7rules:
            self._mark_l7rule_status(l7rule, status)

        if l7policy.redirect_pool:
            LOG.debug("Marking redirect pool of l7policy %s %s",
                      l7policy.id, status)
            self._mark_pool_status(l7policy.redirect_pool, status)

    def _mark_l7rule_status(self, l7rule, status):
        self.l7rule_repo.update(
            db_apis.get_session(), l7rule.id,
            provisioning_status=status)

    def _mark_pool_status(self, pool, status):
        self.pool_repo.update(
            db_apis.get_session(), pool.id,
            provisioning_status=status)
        if pool.health_monitor:
            LOG.debug("Marking health monitor of pool %s %s", pool.id, status)
            self._mark_hm_status(pool.health_monitor, status)

        LOG.debug("Marking all members of pool %s %s", pool.id, status)
        for member in pool.members:
            self._mark_member_status(member, status)

    def _mark_hm_status(self, hm, status):
        self.health_mon_repo.update(
            db_apis.get_session(), hm.id,
            provisioning_status=status)

    def _mark_member_status(self, member, status):
        self.member_repo.update(
            db_apis.get_session(), member.id,
            provisioning_status=status)

    def revert(self, loadbalancer, *args, **kwargs):
        """Mark the load balancer as broken and ready to be cleaned up.

        This also puts all sub-objects of the load balancer to ERROR state
        if self.mark_subobjects is True

        :param loadbalancer: Load balancer object that failed to update
        :returns: None
        """

        if self.mark_subobjects:
            LOG.debug("Marking all listeners of loadbalancer %s ERROR",
                      loadbalancer.id)
            for listener in loadbalancer.listeners:
                try:
                    self._mark_listener_status(listener, constants.ERROR)
                except Exception:
                    LOG.warning("Error updating listener %s provisioning "
                                "status", listener.id)

        LOG.warning("Reverting mark load balancer active in DB "
                    "for load balancer id %s", loadbalancer.id)
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
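# NOTE: Usage sketch (illustrative only, not from the original module).
# Flows that create a full object graph in one shot pass
# mark_subobjects=True so every child reaches ACTIVE (or ERROR on
# revert) in a single task, e.g.:
#
#     flow.add(MarkLBActiveInDB(mark_subobjects=True,
#                               requires=constants.LOADBALANCER))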
class UpdateLBServerGroupInDB(BaseDatabaseTask):
    """Update the server group id info for load balancer in DB."""

    def execute(self, loadbalancer_id, server_group_id):
        """Update the server group id info for load balancer in DB.

        :param loadbalancer_id: Id of a load balancer to update
        :param server_group_id: Id of a server group to associate with
               the load balancer
        :returns: None
        """

        LOG.debug("Server Group updated with id: %s for load balancer "
                  "id: %s", server_group_id, loadbalancer_id)
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      id=loadbalancer_id,
                                      server_group_id=server_group_id)

    def revert(self, loadbalancer_id, server_group_id, *args, **kwargs):
        """Remove server group information from a load balancer in DB.

        :param loadbalancer_id: Id of a load balancer that failed to update
        :param server_group_id: Id of a server group that couldn't be
               associated with the load balancer
        :returns: None
        """
        LOG.warning('Reverting Server Group updated with id: %(s1)s for '
                    'load balancer id: %(s2)s ',
                    {'s1': server_group_id, 's2': loadbalancer_id})
        try:
            self.loadbalancer_repo.update(db_apis.get_session(),
                                          id=loadbalancer_id,
                                          server_group_id=None)
        except Exception as e:
            LOG.error("Failed to update load balancer %(lb)s "
                      "server_group_id to None due to: "
                      "%(except)s", {'lb': loadbalancer_id,
                                     'except': str(e)})


class MarkLBDeletedInDB(BaseDatabaseTask):
    """Mark the load balancer deleted in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, loadbalancer):
        """Mark the load balancer as deleted in DB.

        :param loadbalancer: Load balancer object to be updated
        :returns: None
        """

        LOG.debug("Mark DELETED in DB for load balancer id: %s",
                  loadbalancer.id)
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id,
                                      provisioning_status=constants.DELETED)

    def revert(self, loadbalancer, *args, **kwargs):
        """Mark the load balancer as broken and ready to be cleaned up.

        :param loadbalancer: Load balancer object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark load balancer deleted in DB "
                    "for load balancer id %s", loadbalancer.id)
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)


class MarkLBPendingDeleteInDB(BaseDatabaseTask):
    """Mark the load balancer pending delete in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, loadbalancer):
        """Mark the load balancer as pending delete in DB.

        :param loadbalancer: Load balancer object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING DELETE in DB for load balancer id: %s",
                  loadbalancer.id)
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id,
                                      provisioning_status=(constants.
                                                           PENDING_DELETE))

    def revert(self, loadbalancer, *args, **kwargs):
        """Mark the load balancer as broken and ready to be cleaned up.

        :param loadbalancer: Load balancer object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark load balancer pending delete in DB "
                    "for load balancer id %s", loadbalancer.id)
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
class MarkLBAndListenersActiveInDB(BaseDatabaseTask):
    """Mark the load balancer and specified listeners active in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, loadbalancer, listeners):
        """Mark the load balancer and listeners as active in DB.

        :param loadbalancer: Load balancer object to be updated
        :param listeners: Listener objects to be updated
        :returns: None
        """

        LOG.debug("Mark ACTIVE in DB for load balancer id: %s "
                  "and updating status for listener ids: %s",
                  loadbalancer.id,
                  ', '.join([listener.id for listener in listeners]))
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id,
                                      provisioning_status=constants.ACTIVE)
        for listener in listeners:
            self.listener_repo.prov_status_active_if_not_error(
                db_apis.get_session(), listener.id)

    def revert(self, loadbalancer, listeners, *args, **kwargs):
        """Mark the load balancer and listeners as broken.

        :param loadbalancer: Load balancer object that failed to update
        :param listeners: Listener objects that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark load balancer and listeners active in DB "
                    "for load balancer id %(LB)s and listener ids: %(list)s",
                    {'LB': loadbalancer.id,
                     'list': ', '.join([listener.id
                                        for listener in listeners])})
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_error(listener.id)


class MarkListenerDeletedInDB(BaseDatabaseTask):
    """Mark the listener deleted in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, listener):
        """Mark the listener as deleted in DB

        :param listener: The listener to be marked deleted
        :returns: None
        """

        LOG.debug("Mark DELETED in DB for listener id: %s ", listener.id)
        self.listener_repo.update(db_apis.get_session(), listener.id,
                                  provisioning_status=constants.DELETED)

    def revert(self, listener, *args, **kwargs):
        """Mark the listener ERROR since the delete couldn't happen

        :param listener: The listener that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting mark listener deleted in DB "
                    "for listener id %s", listener.id)
        self.task_utils.mark_listener_prov_status_error(listener.id)


class MarkListenerPendingDeleteInDB(BaseDatabaseTask):
    """Mark the listener pending delete in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, listener):
        """Mark the listener as pending delete in DB.

        :param listener: The listener to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING DELETE in DB for listener id: %s",
                  listener.id)
        self.listener_repo.update(db_apis.get_session(), listener.id,
                                  provisioning_status=constants.PENDING_DELETE)

    def revert(self, listener, *args, **kwargs):
        """Mark the listener as broken and ready to be cleaned up.

        :param listener: The listener that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting mark listener pending delete in DB "
                    "for listener id %s", listener.id)
        self.task_utils.mark_listener_prov_status_error(listener.id)
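# NOTE: Usage sketch (illustrative only, not from the original module).
# UpdateLoadbalancerInDB (next class) accepts a flat update_dict; a
# nested 'vip' key is split out and applied to the VIP row instead, e.g.:
#
#     UpdateLoadbalancerInDB().execute(
#         loadbalancer,
#         update_dict={'name': 'new-name',
#                      'vip': {'qos_policy_id': None}})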
class UpdateLoadbalancerInDB(BaseDatabaseTask):
    """Update the loadbalancer in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, loadbalancer, update_dict):
        """Update the loadbalancer in the DB

        :param loadbalancer: The load balancer to be updated
        :param update_dict: The dictionary of updates to apply
        :returns: None
        """

        LOG.debug("Update DB for loadbalancer id: %s ", loadbalancer.id)
        if update_dict.get('vip'):
            vip_dict = update_dict.pop('vip')
            self.vip_repo.update(db_apis.get_session(),
                                 loadbalancer.vip.load_balancer_id,
                                 **vip_dict)
        self.loadbalancer_repo.update(db_apis.get_session(),
                                      loadbalancer.id, **update_dict)

    def revert(self, loadbalancer, *args, **kwargs):
        """Mark the loadbalancer ERROR since the update couldn't happen

        :param loadbalancer: The load balancer that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting update loadbalancer in DB "
                    "for loadbalancer id %s", loadbalancer.id)
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)


class UpdateHealthMonInDB(BaseDatabaseTask):
    """Update the health monitor in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, health_mon, update_dict):
        """Update the health monitor in the DB

        :param health_mon: The health monitor to be updated
        :param update_dict: The dictionary of updates to apply
        :returns: None
        """

        LOG.debug("Update DB for health monitor id: %s ", health_mon.id)
        self.health_mon_repo.update(db_apis.get_session(), health_mon.id,
                                    **update_dict)

    def revert(self, health_mon, *args, **kwargs):
        """Mark the health monitor ERROR since the update couldn't happen

        :param health_mon: The health monitor that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting update health monitor in DB "
                    "for health monitor id %s", health_mon.id)
        try:
            self.health_mon_repo.update(db_apis.get_session(), health_mon.id,
                                        provisioning_status=constants.ERROR)
        except Exception as e:
            LOG.error("Failed to update health monitor %(hm)s "
                      "provisioning_status to ERROR due to: %(except)s",
                      {'hm': health_mon.id, 'except': str(e)})


class UpdateListenerInDB(BaseDatabaseTask):
    """Update the listener in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, listener, update_dict):
        """Update the listener in the DB

        :param listener: The listener to be updated
        :param update_dict: The dictionary of updates to apply
        :returns: None
        """

        LOG.debug("Update DB for listener id: %s ", listener.id)
        self.listener_repo.update(db_apis.get_session(), listener.id,
                                  **update_dict)

    def revert(self, listener, *args, **kwargs):
        """Mark the listener ERROR since the update couldn't happen

        :param listener: The listener that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting update listener in DB "
                    "for listener id %s", listener.id)
        self.task_utils.mark_listener_prov_status_error(listener.id)
class UpdateMemberInDB(BaseDatabaseTask):
    """Update the member in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, member, update_dict):
        """Update the member in the DB

        :param member: The member to be updated
        :param update_dict: The dictionary of updates to apply
        :returns: None
        """

        LOG.debug("Update DB for member id: %s ", member.id)
        self.member_repo.update(db_apis.get_session(), member.id,
                                **update_dict)

    def revert(self, member, *args, **kwargs):
        """Mark the member ERROR since the update couldn't happen

        :param member: The member that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting update member in DB "
                    "for member id %s", member.id)
        try:
            self.member_repo.update(db_apis.get_session(), member.id,
                                    provisioning_status=constants.ERROR)
        except Exception as e:
            LOG.error("Failed to update member %(member)s "
                      "provisioning_status to ERROR due to: %(except)s",
                      {'member': member.id, 'except': str(e)})


class UpdatePoolInDB(BaseDatabaseTask):
    """Update the pool in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, pool, update_dict):
        """Update the pool in the DB

        :param pool: The pool to be updated
        :param update_dict: The dictionary of updates to apply
        :returns: None
        """

        LOG.debug("Update DB for pool id: %s ", pool.id)
        self.repos.update_pool_and_sp(db_apis.get_session(), pool.id,
                                      update_dict)

    def revert(self, pool, *args, **kwargs):
        """Mark the pool ERROR since the update couldn't happen

        :param pool: The pool that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting update pool in DB for pool id %s", pool.id)
        try:
            self.repos.update_pool_and_sp(
                db_apis.get_session(), pool.id,
                dict(provisioning_status=constants.ERROR))
        except Exception as e:
            LOG.error("Failed to update pool %(pool)s provisioning_status "
                      "to ERROR due to: %(except)s",
                      {'pool': pool.id, 'except': str(e)})


class UpdateL7PolicyInDB(BaseDatabaseTask):
    """Update the L7 policy in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7policy, update_dict):
        """Update the L7 policy in the DB

        :param l7policy: The L7 policy to be updated
        :param update_dict: The dictionary of updates to apply
        :returns: None
        """

        LOG.debug("Update DB for l7policy id: %s ", l7policy.id)
        self.l7policy_repo.update(db_apis.get_session(), l7policy.id,
                                  **update_dict)

    def revert(self, l7policy, *args, **kwargs):
        """Mark the l7policy ERROR since the update couldn't happen

        :param l7policy: L7 policy that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting update l7policy in DB "
                    "for l7policy id %s", l7policy.id)
        try:
            self.l7policy_repo.update(db_apis.get_session(), l7policy.id,
                                      provisioning_status=constants.ERROR)
        except Exception as e:
            LOG.error("Failed to update l7policy %(l7p)s "
                      "provisioning_status to ERROR due to: %(except)s",
                      {'l7p': l7policy.id, 'except': str(e)})
class UpdateL7RuleInDB(BaseDatabaseTask):
    """Update the L7 rule in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7rule, update_dict):
        """Update the L7 rule in the DB

        :param l7rule: The L7 rule to be updated
        :param update_dict: The dictionary of updates to apply
        :returns: None
        """

        LOG.debug("Update DB for l7rule id: %s ", l7rule.id)
        self.l7rule_repo.update(db_apis.get_session(), l7rule.id,
                                **update_dict)

    def revert(self, l7rule, *args, **kwargs):
        """Mark the L7 rule ERROR since the update couldn't happen

        :param l7rule: L7 rule that couldn't be updated
        :returns: None
        """

        LOG.warning("Reverting update l7rule in DB "
                    "for l7rule id %s", l7rule.id)
        # L7 rules share provisioning status with their parent policy,
        # so the revert marks the owning l7policy ERROR.
        try:
            self.l7policy_repo.update(db_apis.get_session(),
                                      l7rule.l7policy.id,
                                      provisioning_status=constants.ERROR)
        except Exception as e:
            LOG.error("Failed to update l7policy %(l7p)s "
                      "provisioning_status to ERROR due to: %(except)s",
                      {'l7p': l7rule.l7policy.id, 'except': str(e)})


class GetAmphoraDetails(BaseDatabaseTask):
    """Task to retrieve amphora network details."""

    def execute(self, amphora):
        """Retrieve amphora network details.

        :param amphora: Amphora which network details are required
        :returns: data_models.Amphora object
        """
        return data_models.Amphora(id=amphora.id,
                                   vrrp_ip=amphora.vrrp_ip,
                                   ha_ip=amphora.ha_ip,
                                   vrrp_port_id=amphora.vrrp_port_id,
                                   ha_port_id=amphora.ha_port_id,
                                   role=amphora.role,
                                   vrrp_id=amphora.vrrp_id,
                                   vrrp_priority=amphora.vrrp_priority)


class GetAmphoraeFromLoadbalancer(BaseDatabaseTask):
    """Task to pull the amphorae from a loadbalancer."""

    def execute(self, loadbalancer_id):
        """Pull the amphorae from a loadbalancer.

        :param loadbalancer_id: Load balancer ID to get amphorae from
        :returns: A list of Amphora objects
        """
        amphorae = []
        loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
                                                  id=loadbalancer_id)
        for amp in loadbalancer.amphorae:
            a = self.amphora_repo.get(db_apis.get_session(), id=amp.id,
                                      show_deleted=False)
            if a is None:
                continue
            amphorae.append(a)
        return amphorae


class GetListenersFromLoadbalancer(BaseDatabaseTask):
    """Task to pull the listeners from a loadbalancer."""

    def execute(self, loadbalancer):
        """Pull the listeners from a loadbalancer.

        :param loadbalancer: Load balancer which listeners are required
        :returns: A list of Listener objects
        """
        listeners = []
        for listener in loadbalancer.listeners:
            db_listener = self.listener_repo.get(db_apis.get_session(),
                                                 id=listener.id)
            db_listener.load_balancer = loadbalancer
            listeners.append(db_listener)
        return listeners


class GetLoadBalancer(BaseDatabaseTask):
    """Get a load balancer object from the database."""

    def execute(self, loadbalancer_id, *args, **kwargs):
        """Get a load balancer object from the database.

        :param loadbalancer_id: The load balancer ID to lookup
        :returns: The load balancer object
        """

        LOG.debug("Get load balancer from DB for load balancer id: %s",
                  loadbalancer_id)
        return self.loadbalancer_repo.get(db_apis.get_session(),
                                          id=loadbalancer_id)


class GetVipFromLoadbalancer(BaseDatabaseTask):
    """Task to pull the vip from a loadbalancer."""

    def execute(self, loadbalancer):
        """Pull the vip from a loadbalancer.

        :param loadbalancer: Load balancer which VIP is required
        :returns: VIP associated with a given load balancer
        """
        return loadbalancer.vip
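# NOTE: Usage sketch (illustrative only, not from the original module).
# The Get* tasks above are typically wired into a flow through
# taskflow's provides/requires symbols, e.g.:
#
#     flow.add(GetAmphoraeFromLoadbalancer(
#         requires=constants.LOADBALANCER_ID,
#         provides=constants.AMPHORAE))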
class CreateVRRPGroupForLB(BaseDatabaseTask):
    """Create a VRRP group for a load balancer."""

    def execute(self, loadbalancer_id):
        """Create a VRRP group for a load balancer.

        :param loadbalancer_id: Load balancer ID for which a VRRP group
               should be created
        :returns: None
        """
        try:
            self.repos.vrrpgroup.create(
                db_apis.get_session(),
                load_balancer_id=loadbalancer_id,
                vrrp_group_name=str(loadbalancer_id).replace('-', ''),
                vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
                vrrp_auth_pass=uuidutils.generate_uuid().replace('-',
                                                                 '')[0:7],
                advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
        except odb_exceptions.DBDuplicateEntry:
            LOG.debug('VRRP_GROUP entry already exists for load balancer, '
                      'skipping create.')


class DisableAmphoraHealthMonitoring(BaseDatabaseTask):
    """Disable amphora health monitoring.

    This disables amphora health monitoring by removing it from
    the amphora_health table.
    """

    def execute(self, amphora):
        """Disable health monitoring for an amphora

        :param amphora: The amphora to disable health monitoring for
        :returns: None
        """
        self._delete_from_amp_health(amphora.id)


class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask):
    """Disable health monitoring on the LB amphorae.

    This disables amphora health monitoring by removing it from
    the amphora_health table for each amphora on a load balancer.
    """

    def execute(self, loadbalancer):
        """Disable health monitoring for amphora on a load balancer

        :param loadbalancer: The load balancer to disable health monitoring on
        :returns: None
        """
        for amphora in loadbalancer.amphorae:
            self._delete_from_amp_health(amphora.id)


class MarkAmphoraHealthBusy(BaseDatabaseTask):
    """Mark amphora health monitoring busy.

    This prevents amphora failover by marking the amphora busy in
    the amphora_health table.
    """

    def execute(self, amphora):
        """Mark amphora health monitoring busy

        :param amphora: The amphora to mark amphora health busy
        :returns: None
        """
        self._mark_amp_health_busy(amphora.id)


class MarkLBAmphoraeHealthBusy(BaseDatabaseTask):
    """Mark amphorae health monitoring busy for the LB.

    This prevents amphorae failover by marking each amphora of a given
    load balancer busy in the amphora_health table.
    """

    def execute(self, loadbalancer):
        """Marks amphorae health busy for each amphora on a load balancer

        :param loadbalancer: The load balancer to mark amphorae health busy
        :returns: None
        """
        for amphora in loadbalancer.amphorae:
            self._mark_amp_health_busy(amphora.id)


class MarkHealthMonitorActiveInDB(BaseDatabaseTask):
    """Mark the health monitor ACTIVE in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, health_mon):
        """Mark the health monitor ACTIVE in DB.

        :param health_mon: Health Monitor object to be updated
        :returns: None
        """

        LOG.debug("Mark ACTIVE in DB for health monitor id: %s",
                  health_mon.id)

        op_status = (constants.ONLINE if health_mon.enabled
                     else constants.OFFLINE)
        self.health_mon_repo.update(db_apis.get_session(),
                                    health_mon.id,
                                    provisioning_status=constants.ACTIVE,
                                    operating_status=op_status)

    def revert(self, health_mon, *args, **kwargs):
        """Mark the health monitor as broken

        :param health_mon: Health Monitor object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark health monitor ACTIVE in DB "
                    "for health monitor id %s", health_mon.id)
        self.task_utils.mark_health_mon_prov_status_error(health_mon.id)
class MarkHealthMonitorPendingCreateInDB(BaseDatabaseTask):
    """Mark the health monitor pending create in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, health_mon):
        """Mark the health monitor as pending create in DB.

        :param health_mon: Health Monitor object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING CREATE in DB for health monitor id: %s",
                  health_mon.id)
        self.health_mon_repo.update(db_apis.get_session(),
                                    health_mon.id,
                                    provisioning_status=(constants.
                                                         PENDING_CREATE))

    def revert(self, health_mon, *args, **kwargs):
        """Mark the health monitor as broken

        :param health_mon: Health Monitor object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark health monitor pending create in DB "
                    "for health monitor id %s", health_mon.id)
        self.task_utils.mark_health_mon_prov_status_error(health_mon.id)


class MarkHealthMonitorPendingDeleteInDB(BaseDatabaseTask):
    """Mark the health monitor pending delete in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, health_mon):
        """Mark the health monitor as pending delete in DB.

        :param health_mon: Health Monitor object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING DELETE in DB for health monitor id: %s",
                  health_mon.id)
        self.health_mon_repo.update(db_apis.get_session(),
                                    health_mon.id,
                                    provisioning_status=(constants.
                                                         PENDING_DELETE))

    def revert(self, health_mon, *args, **kwargs):
        """Mark the health monitor as broken

        :param health_mon: Health Monitor object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark health monitor pending delete in DB "
                    "for health monitor id %s", health_mon.id)
        self.task_utils.mark_health_mon_prov_status_error(health_mon.id)


class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask):
    """Mark the health monitor pending update in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, health_mon):
        """Mark the health monitor as pending update in DB.

        :param health_mon: Health Monitor object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING UPDATE in DB for health monitor id: %s",
                  health_mon.id)
        self.health_mon_repo.update(db_apis.get_session(),
                                    health_mon.id,
                                    provisioning_status=(constants.
                                                         PENDING_UPDATE))

    def revert(self, health_mon, *args, **kwargs):
        """Mark the health monitor as broken

        :param health_mon: Health Monitor object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark health monitor pending update in DB "
                    "for health monitor id %s", health_mon.id)
        self.task_utils.mark_health_mon_prov_status_error(health_mon.id)


class MarkL7PolicyActiveInDB(BaseDatabaseTask):
    """Mark the l7policy ACTIVE in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7policy):
        """Mark the l7policy ACTIVE in DB.

        :param l7policy: L7Policy object to be updated
        :returns: None
        """

        LOG.debug("Mark ACTIVE in DB for l7policy id: %s", l7policy.id)
        op_status = (constants.ONLINE if l7policy.enabled
                     else constants.OFFLINE)
        self.l7policy_repo.update(db_apis.get_session(),
                                  l7policy.id,
                                  provisioning_status=constants.ACTIVE,
                                  operating_status=op_status)

    def revert(self, l7policy, *args, **kwargs):
        """Mark the l7policy as broken

        :param l7policy: L7Policy object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark l7policy ACTIVE in DB "
                    "for l7policy id %s", l7policy.id)
        self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
class MarkL7PolicyPendingCreateInDB(BaseDatabaseTask):
    """Mark the l7policy pending create in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7policy):
        """Mark the l7policy as pending create in DB.

        :param l7policy: L7Policy object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING CREATE in DB for l7policy id: %s",
                  l7policy.id)
        self.l7policy_repo.update(db_apis.get_session(),
                                  l7policy.id,
                                  provisioning_status=(constants.
                                                       PENDING_CREATE))

    def revert(self, l7policy, *args, **kwargs):
        """Mark the l7policy as broken

        :param l7policy: L7Policy object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark l7policy pending create in DB "
                    "for l7policy id %s", l7policy.id)
        self.task_utils.mark_l7policy_prov_status_error(l7policy.id)


class MarkL7PolicyPendingDeleteInDB(BaseDatabaseTask):
    """Mark the l7policy pending delete in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7policy):
        """Mark the l7policy as pending delete in DB.

        :param l7policy: L7Policy object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING DELETE in DB for l7policy id: %s",
                  l7policy.id)
        self.l7policy_repo.update(db_apis.get_session(),
                                  l7policy.id,
                                  provisioning_status=(constants.
                                                       PENDING_DELETE))

    def revert(self, l7policy, *args, **kwargs):
        """Mark the l7policy as broken

        :param l7policy: L7Policy object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark l7policy pending delete in DB "
                    "for l7policy id %s", l7policy.id)
        self.task_utils.mark_l7policy_prov_status_error(l7policy.id)


class MarkL7PolicyPendingUpdateInDB(BaseDatabaseTask):
    """Mark the l7policy pending update in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7policy):
        """Mark the l7policy as pending update in DB.

        :param l7policy: L7Policy object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING UPDATE in DB for l7policy id: %s",
                  l7policy.id)
        self.l7policy_repo.update(db_apis.get_session(),
                                  l7policy.id,
                                  provisioning_status=(constants.
                                                       PENDING_UPDATE))

    def revert(self, l7policy, *args, **kwargs):
        """Mark the l7policy as broken

        :param l7policy: L7Policy object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark l7policy pending update in DB "
                    "for l7policy id %s", l7policy.id)
        self.task_utils.mark_l7policy_prov_status_error(l7policy.id)


class MarkL7RuleActiveInDB(BaseDatabaseTask):
    """Mark the l7rule ACTIVE in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7rule):
        """Mark the l7rule ACTIVE in DB.

        :param l7rule: L7Rule object to be updated
        :returns: None
        """

        LOG.debug("Mark ACTIVE in DB for l7rule id: %s", l7rule.id)
        op_status = constants.ONLINE if l7rule.enabled else constants.OFFLINE
        self.l7rule_repo.update(db_apis.get_session(),
                                l7rule.id,
                                provisioning_status=constants.ACTIVE,
                                operating_status=op_status)

    def revert(self, l7rule, *args, **kwargs):
        """Mark the l7rule as broken

        :param l7rule: L7Rule object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark l7rule ACTIVE in DB "
                    "for l7rule id %s", l7rule.id)
        self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
class MarkL7RulePendingCreateInDB(BaseDatabaseTask):
    """Mark the l7rule pending create in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7rule):
        """Mark the l7rule as pending create in DB.

        :param l7rule: L7Rule object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING CREATE in DB for l7rule id: %s", l7rule.id)
        self.l7rule_repo.update(db_apis.get_session(),
                                l7rule.id,
                                provisioning_status=constants.PENDING_CREATE)

    def revert(self, l7rule, *args, **kwargs):
        """Mark the l7rule as broken

        :param l7rule: L7Rule object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark l7rule pending create in DB "
                    "for l7rule id %s", l7rule.id)
        self.task_utils.mark_l7rule_prov_status_error(l7rule.id)


class MarkL7RulePendingDeleteInDB(BaseDatabaseTask):
    """Mark the l7rule pending delete in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7rule):
        """Mark the l7rule as pending delete in DB.

        :param l7rule: L7Rule object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING DELETE in DB for l7rule id: %s", l7rule.id)
        self.l7rule_repo.update(db_apis.get_session(),
                                l7rule.id,
                                provisioning_status=constants.PENDING_DELETE)

    def revert(self, l7rule, *args, **kwargs):
        """Mark the l7rule as broken

        :param l7rule: L7Rule object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark l7rule pending delete in DB "
                    "for l7rule id %s", l7rule.id)
        self.task_utils.mark_l7rule_prov_status_error(l7rule.id)


class MarkL7RulePendingUpdateInDB(BaseDatabaseTask):
    """Mark the l7rule pending update in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, l7rule):
        """Mark the l7rule as pending update in DB.

        :param l7rule: L7Rule object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING UPDATE in DB for l7rule id: %s", l7rule.id)
        self.l7rule_repo.update(db_apis.get_session(),
                                l7rule.id,
                                provisioning_status=constants.PENDING_UPDATE)

    def revert(self, l7rule, *args, **kwargs):
        """Mark the l7rule as broken

        :param l7rule: L7Rule object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark l7rule pending update in DB "
                    "for l7rule id %s", l7rule.id)
        self.task_utils.mark_l7rule_prov_status_error(l7rule.id)


class MarkMemberActiveInDB(BaseDatabaseTask):
    """Mark the member ACTIVE in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, member):
        """Mark the member ACTIVE in DB.

        :param member: Member object to be updated
        :returns: None
        """

        LOG.debug("Mark ACTIVE in DB for member id: %s", member.id)
        self.member_repo.update(db_apis.get_session(),
                                member.id,
                                provisioning_status=constants.ACTIVE)

    def revert(self, member, *args, **kwargs):
        """Mark the member as broken

        :param member: Member object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark member ACTIVE in DB "
                    "for member id %s", member.id)
        self.task_utils.mark_member_prov_status_error(member.id)
class MarkMemberPendingCreateInDB(BaseDatabaseTask):
    """Mark the member pending create in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, member):
        """Mark the member as pending create in DB.

        :param member: Member object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING CREATE in DB for member id: %s", member.id)
        self.member_repo.update(db_apis.get_session(),
                                member.id,
                                provisioning_status=constants.PENDING_CREATE)

    def revert(self, member, *args, **kwargs):
        """Mark the member as broken

        :param member: Member object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark member pending create in DB "
                    "for member id %s", member.id)
        self.task_utils.mark_member_prov_status_error(member.id)


class MarkMemberPendingDeleteInDB(BaseDatabaseTask):
    """Mark the member pending delete in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, member):
        """Mark the member as pending delete in DB.

        :param member: Member object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING DELETE in DB for member id: %s", member.id)
        self.member_repo.update(db_apis.get_session(),
                                member.id,
                                provisioning_status=constants.PENDING_DELETE)

    def revert(self, member, *args, **kwargs):
        """Mark the member as broken

        :param member: Member object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark member pending delete in DB "
                    "for member id %s", member.id)
        self.task_utils.mark_member_prov_status_error(member.id)


class MarkMemberPendingUpdateInDB(BaseDatabaseTask):
    """Mark the member pending update in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, member):
        """Mark the member as pending update in DB.

        :param member: Member object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING UPDATE in DB for member id: %s", member.id)
        self.member_repo.update(db_apis.get_session(),
                                member.id,
                                provisioning_status=constants.PENDING_UPDATE)

    def revert(self, member, *args, **kwargs):
        """Mark the member as broken

        :param member: Member object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark member pending update in DB "
                    "for member id %s", member.id)
        self.task_utils.mark_member_prov_status_error(member.id)


class MarkPoolActiveInDB(BaseDatabaseTask):
    """Mark the pool ACTIVE in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, pool):
        """Mark the pool ACTIVE in DB.

        :param pool: Pool object to be updated
        :returns: None
        """

        LOG.debug("Mark ACTIVE in DB for pool id: %s", pool.id)
        self.pool_repo.update(db_apis.get_session(),
                              pool.id,
                              provisioning_status=constants.ACTIVE)

    def revert(self, pool, *args, **kwargs):
        """Mark the pool as broken

        :param pool: Pool object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark pool ACTIVE in DB for pool id %s",
                    pool.id)
        self.task_utils.mark_pool_prov_status_error(pool.id)


class MarkPoolPendingCreateInDB(BaseDatabaseTask):
    """Mark the pool pending create in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, pool):
        """Mark the pool as pending create in DB.

        :param pool: Pool object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING CREATE in DB for pool id: %s", pool.id)
        self.pool_repo.update(db_apis.get_session(),
                              pool.id,
                              provisioning_status=constants.PENDING_CREATE)

    def revert(self, pool, *args, **kwargs):
        """Mark the pool as broken

        :param pool: Pool object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark pool pending create in DB "
                    "for pool id %s", pool.id)
        self.task_utils.mark_pool_prov_status_error(pool.id)
class MarkPoolPendingDeleteInDB(BaseDatabaseTask):
    """Mark the pool pending delete in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, pool):
        """Mark the pool as pending delete in DB.

        :param pool: Pool object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING DELETE in DB for pool id: %s", pool.id)
        self.pool_repo.update(db_apis.get_session(),
                              pool.id,
                              provisioning_status=constants.PENDING_DELETE)

    def revert(self, pool, *args, **kwargs):
        """Mark the pool as broken

        :param pool: Pool object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark pool pending delete in DB "
                    "for pool id %s", pool.id)
        self.task_utils.mark_pool_prov_status_error(pool.id)


class MarkPoolPendingUpdateInDB(BaseDatabaseTask):
    """Mark the pool pending update in the DB.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, pool):
        """Mark the pool as pending update in DB.

        :param pool: Pool object to be updated
        :returns: None
        """

        LOG.debug("Mark PENDING UPDATE in DB for pool id: %s", pool.id)
        self.pool_repo.update(db_apis.get_session(),
                              pool.id,
                              provisioning_status=constants.PENDING_UPDATE)

    def revert(self, pool, *args, **kwargs):
        """Mark the pool as broken

        :param pool: Pool object that failed to update
        :returns: None
        """

        LOG.warning("Reverting mark pool pending update in DB "
                    "for pool id %s", pool.id)
        self.task_utils.mark_pool_prov_status_error(pool.id)


class DecrementHealthMonitorQuota(BaseDatabaseTask):
    """Decrements the health monitor quota for a project.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, health_mon):
        """Decrements the health monitor quota.

        :param health_mon: The health monitor to decrement the quota on.
        :returns: None
        """

        LOG.debug("Decrementing health monitor quota for "
                  "project: %s ", health_mon.project_id)

        lock_session = db_apis.get_session(autocommit=False)
        try:
            self.repos.decrement_quota(lock_session,
                                       data_models.HealthMonitor,
                                       health_mon.project_id)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error('Failed to decrement health monitor quota for '
                          'project: %(proj)s the project may have excess '
                          'quota in use.', {'proj': health_mon.project_id})
                lock_session.rollback()

    def revert(self, health_mon, result, *args, **kwargs):
        """Re-apply the quota

        :param health_mon: The health monitor to decrement the quota on.
        :returns: None
        """

        LOG.warning('Reverting decrement quota for health monitor on project'
                    ' %(proj)s Project quota counts may be incorrect.',
                    {'proj': health_mon.project_id})

        # Increment the quota back if this task wasn't the failure
        if not isinstance(result, failure.Failure):

            try:
                session = db_apis.get_session()
                lock_session = db_apis.get_session(autocommit=False)
                try:
                    self.repos.check_quota_met(session,
                                               lock_session,
                                               data_models.HealthMonitor,
                                               health_mon.project_id)
                    lock_session.commit()
                except Exception:
                    lock_session.rollback()
            except Exception:
                # Don't fail the revert flow
                pass
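# NOTE: Illustrative comment, not from the original module. All of the
# Decrement*Quota tasks follow the same pattern: open a non-autocommit
# "lock" session, decrement the in-use counter, and commit; on revert,
# re-increment by calling check_quota_met (which counts the object back
# into use) and swallow any error so the revert chain keeps running.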
class DecrementListenerQuota(BaseDatabaseTask):
    """Decrements the listener quota for a project.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, listener):
        """Decrements the listener quota.

        :param listener: The listener to decrement the quota on.
        :returns: None
        """

        LOG.debug("Decrementing listener quota for "
                  "project: %s ", listener.project_id)

        lock_session = db_apis.get_session(autocommit=False)
        try:
            self.repos.decrement_quota(lock_session,
                                       data_models.Listener,
                                       listener.project_id)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error('Failed to decrement listener quota for project: '
                          '%(proj)s the project may have excess quota in '
                          'use.', {'proj': listener.project_id})
                lock_session.rollback()

    def revert(self, listener, result, *args, **kwargs):
        """Re-apply the quota

        :param listener: The listener to decrement the quota on.
        :returns: None
        """

        LOG.warning('Reverting decrement quota for listener on project '
                    '%(proj)s Project quota counts may be incorrect.',
                    {'proj': listener.project_id})

        # Increment the quota back if this task wasn't the failure
        if not isinstance(result, failure.Failure):

            try:
                session = db_apis.get_session()
                lock_session = db_apis.get_session(autocommit=False)
                try:
                    self.repos.check_quota_met(session,
                                               lock_session,
                                               data_models.Listener,
                                               listener.project_id)
                    lock_session.commit()
                except Exception:
                    lock_session.rollback()
            except Exception:
                # Don't fail the revert flow
                pass


class DecrementLoadBalancerQuota(BaseDatabaseTask):
    """Decrements the load balancer quota for a project.

    Since sqlalchemy will likely retry by itself always revert if it fails
    """

    def execute(self, loadbalancer):
        """Decrements the load balancer quota.

        :param loadbalancer: The load balancer to decrement the quota on.
        :returns: None
        """

        LOG.debug("Decrementing load balancer quota for "
                  "project: %s ", loadbalancer.project_id)

        lock_session = db_apis.get_session(autocommit=False)
        try:
            self.repos.decrement_quota(lock_session,
                                       data_models.LoadBalancer,
                                       loadbalancer.project_id)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error('Failed to decrement load balancer quota for '
                          'project: %(proj)s the project may have excess '
                          'quota in use.', {'proj': loadbalancer.project_id})
                lock_session.rollback()

    def revert(self, loadbalancer, result, *args, **kwargs):
        """Re-apply the quota

        :param loadbalancer: The load balancer to decrement the quota on.
        :returns: None
        """

        LOG.warning('Reverting decrement quota for load balancer on project '
                    '%(proj)s Project quota counts may be incorrect.',
                    {'proj': loadbalancer.project_id})

        # Increment the quota back if this task wasn't the failure
        if not isinstance(result, failure.Failure):

            try:
                session = db_apis.get_session()
                lock_session = db_apis.get_session(autocommit=False)
                try:
                    self.repos.check_quota_met(session,
                                               lock_session,
                                               data_models.LoadBalancer,
                                               loadbalancer.project_id)
                    lock_session.commit()
                except Exception:
                    lock_session.rollback()
            except Exception:
                # Don't fail the revert flow
                pass
:returns: None """ LOG.debug("Decrementing member quota for " "project: %s ", member.project_id) lock_session = db_apis.get_session(autocommit=False) try: self.repos.decrement_quota(lock_session, data_models.Member, member.project_id) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to decrement member quota for project: ' '%(proj)s the project may have excess quota in use.', {'proj': member.project_id}) lock_session.rollback() def revert(self, member, result, *args, **kwargs): """Re-apply the quota :param member: The member to decrement the quota on. :returns: None """ LOG.warning('Reverting decrement quota for member on project %(proj)s ' 'Project quota counts may be incorrect.', {'proj': member.project_id}) # Increment the quota back if this task wasn't the failure if not isinstance(result, failure.Failure): try: session = db_apis.get_session() lock_session = db_apis.get_session(autocommit=False) try: self.repos.check_quota_met(session, lock_session, data_models.Member, member.project_id) lock_session.commit() except Exception: lock_session.rollback() except Exception: # Don't fail the revert flow pass class DecrementPoolQuota(BaseDatabaseTask): """Decrements the pool quota for a project. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool, pool_child_count): """Decrements the pool quota. :param pool: The pool to decrement the quota on :returns: None """ LOG.debug("Decrementing pool quota for " "project: %s ", pool.project_id) lock_session = db_apis.get_session(autocommit=False) try: self.repos.decrement_quota(lock_session, data_models.Pool, pool.project_id) # Pools cascade delete members and health monitors # update the quota for those items as well. 
            if pool_child_count['HM'] > 0:
                self.repos.decrement_quota(lock_session,
                                           data_models.HealthMonitor,
                                           pool.project_id)
            if pool_child_count['member'] > 0:
                self.repos.decrement_quota(
                    lock_session, data_models.Member,
                    pool.project_id, quantity=pool_child_count['member'])
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error('Failed to decrement pool quota for project: '
                          '%(proj)s the project may have excess quota in '
                          'use.', {'proj': pool.project_id})
                lock_session.rollback()

    def revert(self, pool, pool_child_count, result, *args, **kwargs):
        """Re-apply the quota

        :param pool: The pool to re-apply the quota on
        :returns: None
        """

        LOG.warning('Reverting decrement quota for pool on project %(proj)s '
                    'Project quota counts may be incorrect.',
                    {'proj': pool.project_id})

        # Increment the quota back if this task wasn't the failure
        if not isinstance(result, failure.Failure):

            # These are all independent to maximize the correction
            # in case other quota actions have occurred
            try:
                session = db_apis.get_session()
                lock_session = db_apis.get_session(autocommit=False)
                try:
                    self.repos.check_quota_met(session,
                                               lock_session,
                                               data_models.Pool,
                                               pool.project_id)
                    lock_session.commit()
                except Exception:
                    lock_session.rollback()

                # Attempt to increment back the health monitor quota
                if pool_child_count['HM'] > 0:
                    lock_session = db_apis.get_session(autocommit=False)
                    try:
                        self.repos.check_quota_met(session,
                                                   lock_session,
                                                   data_models.HealthMonitor,
                                                   pool.project_id)
                        lock_session.commit()
                    except Exception:
                        lock_session.rollback()

                # Attempt to increment back the member quota
                # These are separate calls to maximize the correction
                # should other factors have increased the in use quota
                # before this point in the revert flow
                for i in range(pool_child_count['member']):
                    lock_session = db_apis.get_session(autocommit=False)
                    try:
                        self.repos.check_quota_met(session,
                                                   lock_session,
                                                   data_models.Member,
                                                   pool.project_id)
                        lock_session.commit()
                    except Exception:
                        lock_session.rollback()
            except Exception:
                # Don't fail the revert flow
                pass


class CountPoolChildrenForQuota(BaseDatabaseTask):
    """Counts the pool child resources for quota management.

    Since the children of pools are cleaned up by the sqlalchemy
    cascade delete settings, we need to collect the quota counts for
    the child objects early.
    """

    def execute(self, pool):
        """Count the pool child resources for quota management

        :param pool: The pool to count children on
        :returns: Dict of child counts keyed 'HM' and 'member'
        """
        LOG.debug("Counting pool children for project: %s", pool.project_id)

        health_mon_count = 1 if pool.health_monitor else 0
        member_count = len(pool.members)

        return {'HM': health_mon_count, 'member': member_count}
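# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the Octavia source. Every
# Decrement*Quota task above follows the same transactional pattern: open a
# dedicated non-autocommit session, decrement, commit, and roll back while
# re-raising on any error; revert() then re-increments via check_quota_met()
# unless this task itself produced the failure. Reduced to a standalone
# analogue (the `repos` and `session_factory` names are hypothetical
# stand-ins for the repository layer):
#
#     def decrement_quota_safely(repos, session_factory, model, project_id):
#         lock_session = session_factory(autocommit=False)
#         try:
#             repos.decrement_quota(lock_session, model, project_id)
#             lock_session.commit()
#         except Exception:
#             lock_session.rollback()
#             raise  # let taskflow trigger the revert chain
# ---------------------------------------------------------------------------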
class UpdatePoolMembersOperatingStatusInDB(BaseDatabaseTask):
    """Updates the members of a pool operating status.

    Since sqlalchemy will likely retry by itself, always revert if it fails
    """

    def execute(self, pool, operating_status):
        """Update the members of a pool operating status in DB.

        :param pool: Pool object to be updated
        :param operating_status: Operating status to set
        :returns: None
        """
        LOG.debug("Updating member operating status to %(status)s in DB for "
                  "pool id: %(pool)s", {'status': operating_status,
                                        'pool': pool.id})
        self.member_repo.update_pool_members(
            db_apis.get_session(), pool.id,
            operating_status=operating_status)

octavia-6.2.2/octavia/controller/worker/v1/tasks/lifecycle_tasks.py
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from taskflow import task

from octavia.controller.worker import task_utils as task_utilities


class BaseLifecycleTask(task.Task):
    """Base task to instantiate common classes."""

    def __init__(self, **kwargs):
        self.task_utils = task_utilities.TaskUtils()
        super(BaseLifecycleTask, self).__init__(**kwargs)


class AmphoraIDToErrorOnRevertTask(BaseLifecycleTask):
    """Task to checkpoint Amphora lifecycle milestones."""

    def execute(self, amphora_id):
        pass

    def revert(self, amphora_id, *args, **kwargs):
        self.task_utils.mark_amphora_status_error(amphora_id)


class AmphoraToErrorOnRevertTask(AmphoraIDToErrorOnRevertTask):
    """Task to checkpoint Amphora lifecycle milestones."""

    def execute(self, amphora):
        pass

    def revert(self, amphora, *args, **kwargs):
        super(AmphoraToErrorOnRevertTask, self).revert(amphora.id)


class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a health monitor to ERROR on revert."""

    def execute(self, health_mon, listeners, loadbalancer):
        pass

    def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
        self.task_utils.mark_pool_prov_status_active(health_mon.pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)


class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set an l7policy to ERROR on revert."""

    def execute(self, l7policy, listeners, loadbalancer):
        pass

    def revert(self, l7policy, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)


class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set an l7rule to ERROR on revert."""

    def execute(self, l7rule, listeners, loadbalancer):
        pass

    def revert(self, l7rule, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
        self.task_utils.mark_l7policy_prov_status_active(l7rule.l7policy_id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)
class ListenerToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a listener to ERROR on revert."""

    def execute(self, listener):
        pass

    def revert(self, listener, *args, **kwargs):
        self.task_utils.mark_listener_prov_status_error(listener.id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            listener.load_balancer.id)


class ListenersToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set listeners to ERROR on revert."""

    def execute(self, listeners, loadbalancer):
        pass

    def revert(self, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_error(listener.id)


class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set the load balancer to ERROR on revert."""

    def execute(self, loadbalancer_id):
        pass

    def revert(self, loadbalancer_id, *args, **kwargs):
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)


class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask):
    """Task to set the load balancer to ERROR on revert."""

    def execute(self, loadbalancer):
        pass

    def revert(self, loadbalancer, *args, **kwargs):
        super(LoadBalancerToErrorOnRevertTask, self).revert(loadbalancer.id)


class MemberToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a member to ERROR on revert."""

    def execute(self, member, listeners, loadbalancer, pool):
        pass

    def revert(self, member, listeners, loadbalancer, pool, *args, **kwargs):
        self.task_utils.mark_member_prov_status_error(member.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)
        self.task_utils.mark_pool_prov_status_active(pool.id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)


class MembersToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set members to ERROR on revert."""

    def execute(self, members, listeners, loadbalancer, pool):
        pass

    def revert(self, members, listeners, loadbalancer, pool, *args, **kwargs):
        for m in members:
            self.task_utils.mark_member_prov_status_error(m.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)
        self.task_utils.mark_pool_prov_status_active(pool.id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)


class PoolToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a pool to ERROR on revert."""

    def execute(self, pool, listeners, loadbalancer):
        pass

    def revert(self, pool, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_pool_prov_status_error(pool.id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)
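# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the Octavia source. All of
# the *ToErrorOnRevertTask classes above are intentionally no-ops on
# execute(); placed at the head of a flow, their revert() marks the resource
# ERROR (and parent resources ACTIVE) if any later task fails. Hypothetical
# wiring with taskflow (flow name and requires list are examples only):
#
#     from taskflow.patterns import linear_flow
#
#     flow = linear_flow.Flow('example-update-listener-flow')
#     flow.add(ListenerToErrorOnRevertTask(requires=['listener']))
#     # ... append the tasks that do the real work; a failure in any of
#     # them reverts back through this task and sets the listener to ERROR.
# ---------------------------------------------------------------------------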
octavia-6.2.2/octavia/controller/worker/v1/tasks/model_tasks.py
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow import task


class DeleteModelObject(task.Task):
    """Task to delete an object in a model."""

    def execute(self, object):

        object.delete()


class UpdateAttributes(task.Task):
    """Task to update an object for changes."""

    def execute(self, object, update_dict):
        """Update an object and its associated resources.

        Note: This relies on the data_model update() methods to handle
        complex objects with nested objects (LoadBalancer.vip,
        Pool.session_persistence, etc.)

        :param object: The object to be updated.
        :param update_dict: The updates dictionary.
        :returns: None
        """
        object.update(update_dict)
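# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the Octavia source. These
# two tasks mutate in-memory data model objects only; persisting the change
# is handled by separate database tasks. Assumed usage inside a flow:
#
#     UpdateAttributes().execute(load_balancer, {'name': 'new-name'})
#     DeleteModelObject().execute(stale_member)  # calls stale_member.delete()
# ---------------------------------------------------------------------------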
octavia-6.2.2/octavia/controller/worker/v1/tasks/network_tasks.py
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import time

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from taskflow import task
from taskflow.types import failure
import tenacity

from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils
from octavia.db import api as db_apis
from octavia.db import repositories
from octavia.network import base
from octavia.network import data_models as n_data_models

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class BaseNetworkTask(task.Task):
    """Base task to load drivers common to the tasks."""

    def __init__(self, **kwargs):
        super(BaseNetworkTask, self).__init__(**kwargs)
        self._network_driver = None
        self.task_utils = task_utils.TaskUtils()
        self.lb_repo = repositories.LoadBalancerRepository()

    @property
    def network_driver(self):
        if self._network_driver is None:
            self._network_driver = utils.get_network_driver()
        return self._network_driver


class CalculateAmphoraDelta(BaseNetworkTask):

    default_provides = constants.DELTA

    def execute(self, loadbalancer, amphora, availability_zone,
                vrrp_port=None):
        LOG.debug("Calculating network delta for amphora id: %s", amphora.id)

        if vrrp_port is None:
            vrrp_port = self.network_driver.get_port(amphora.vrrp_port_id)

        if (availability_zone and
                availability_zone.get(constants.MANAGEMENT_NETWORK)):
            management_nets = [availability_zone.get(
                constants.MANAGEMENT_NETWORK)]
        else:
            management_nets = CONF.controller_worker.amp_boot_network_list

        desired_network_ids = {vrrp_port.network_id}.union(management_nets)
        for pool in loadbalancer.pools:
            member_networks = [
                self.network_driver.get_subnet(member.subnet_id).network_id
                for member in pool.members
                if member.subnet_id
            ]
            desired_network_ids.update(member_networks)

        nics = self.network_driver.get_plugged_networks(amphora.compute_id)
        # assume we don't have two nics in the same network
        actual_network_nics = dict((nic.network_id, nic) for nic in nics)

        del_ids = set(actual_network_nics) - desired_network_ids
        delete_nics = list(
            actual_network_nics[net_id] for net_id in del_ids)

        add_ids = desired_network_ids - set(actual_network_nics)
        add_nics = list(n_data_models.Interface(
            network_id=net_id) for net_id in add_ids)
        delta = n_data_models.Delta(
            amphora_id=amphora.id, compute_id=amphora.compute_id,
            add_nics=add_nics, delete_nics=delete_nics)
        return delta


class CalculateDelta(BaseNetworkTask):
    """Task to calculate the delta between the NICs on the amphora
    and the ones we need. Returns a dict of deltas for plumbing them.
    """

    default_provides = constants.DELTAS

    def execute(self, loadbalancer, availability_zone):
        """Compute which NICs need to be plugged

        for the amphora to become operational.

        :param loadbalancer: the loadbalancer to calculate deltas for all
                             amphorae
        :param availability_zone: availability zone metadata dict
        :returns: dict of octavia.network.data_models.Delta keyed off amphora
                  id
        """

        calculate_amp = CalculateAmphoraDelta()
        deltas = {}
        for amphora in filter(
                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
                loadbalancer.amphorae):

            delta = calculate_amp.execute(loadbalancer, amphora,
                                          availability_zone)
            deltas[amphora.id] = delta
        return deltas


class GetPlumbedNetworks(BaseNetworkTask):
    """Task to figure out the NICS on an amphora.

    This will likely move into the amphora driver
    :returns: Array of networks
    """

    default_provides = constants.NICS

    def execute(self, amphora):
        """Get plumbed networks for the amphora."""

        LOG.debug("Getting plumbed networks for amphora id: %s", amphora.id)
        return self.network_driver.get_plugged_networks(amphora.compute_id)


class PlugNetworks(BaseNetworkTask):
    """Task to plug the networks.

    This uses the delta to add all missing networks/nics
    """

    def execute(self, amphora, delta):
        """Update the amphora networks for the delta."""

        LOG.debug("Plug or unplug networks for amphora id: %s", amphora.id)

        if not delta:
            LOG.debug("No network deltas for amphora id: %s", amphora.id)
            return

        # add nics
        for nic in delta.add_nics:
            self.network_driver.plug_network(amphora.compute_id,
                                             nic.network_id)

    def revert(self, amphora, delta, *args, **kwargs):
        """Handle a failed network plug by removing all nics added."""

        LOG.warning("Unable to plug networks for amp id %s", amphora.id)
        if not delta:
            return

        for nic in delta.add_nics:
            try:
                self.network_driver.unplug_network(amphora.compute_id,
                                                   nic.network_id)
            except base.NetworkNotFound:
                pass


class UnPlugNetworks(BaseNetworkTask):
    """Task to unplug the networks

    Loop over all nics and unplug them
    based on delta
    """

    def execute(self, amphora, delta):
        """Unplug the networks."""

        LOG.debug("Unplug network for amphora")
        if not delta:
            LOG.debug("No network deltas for amphora id: %s", amphora.id)
            return

        for nic in delta.delete_nics:
            try:
                self.network_driver.unplug_network(amphora.compute_id,
                                                   nic.network_id)
            except base.NetworkNotFound:
                LOG.debug("Network %s not found", nic.network_id)
            except Exception:
                LOG.exception("Unable to unplug network")
                # TODO(xgerman) follow up if that makes sense
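# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the Octavia source. The
# plug/unplug tasks above and below all consume the Delta objects produced by
# CalculateAmphoraDelta: one list of NICs to add and one to delete per
# amphora. Constructed by hand it would look like this (IDs are
# placeholders):
#
#     delta = n_data_models.Delta(
#         amphora_id='amp-uuid', compute_id='server-uuid',
#         add_nics=[n_data_models.Interface(network_id='net-uuid')],
#         delete_nics=[])
# ---------------------------------------------------------------------------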
class GetMemberPorts(BaseNetworkTask):

    def execute(self, loadbalancer, amphora):
        vip_port = self.network_driver.get_port(loadbalancer.vip.port_id)
        member_ports = []
        interfaces = self.network_driver.get_plugged_networks(
            amphora.compute_id)
        for interface in interfaces:
            port = self.network_driver.get_port(interface.port_id)
            if vip_port.network_id == port.network_id:
                continue
            port.network = self.network_driver.get_network(port.network_id)
            for fixed_ip in port.fixed_ips:
                if amphora.lb_network_ip == fixed_ip.ip_address:
                    break
                fixed_ip.subnet = self.network_driver.get_subnet(
                    fixed_ip.subnet_id)
            # Only add the port to the list if the IP wasn't the mgmt IP
            else:
                member_ports.append(port)
        return member_ports


class HandleNetworkDelta(BaseNetworkTask):
    """Task to plug and unplug networks

    Plug or unplug networks based on delta
    """

    def execute(self, amphora, delta):
        """Handle network plugging based off deltas."""
        added_ports = {}
        added_ports[amphora.id] = []
        for nic in delta.add_nics:
            interface = self.network_driver.plug_network(delta.compute_id,
                                                         nic.network_id)
            port = self.network_driver.get_port(interface.port_id)
            port.network = self.network_driver.get_network(port.network_id)
            for fixed_ip in port.fixed_ips:
                fixed_ip.subnet = self.network_driver.get_subnet(
                    fixed_ip.subnet_id)
            added_ports[amphora.id].append(port)
        for nic in delta.delete_nics:
            try:
                self.network_driver.unplug_network(delta.compute_id,
                                                   nic.network_id)
            except base.NetworkNotFound:
                LOG.debug("Network %s not found", nic.network_id)
            except Exception:
                LOG.exception("Unable to unplug network")
        return added_ports

    def revert(self, result, amphora, delta, *args, **kwargs):
        """Handle a network plug or unplug failures."""

        if isinstance(result, failure.Failure):
            return

        if not delta:
            return

        LOG.warning("Unable to plug networks for amp id %s",
                    delta.amphora_id)

        for nic in delta.add_nics:
            try:
                self.network_driver.unplug_network(delta.compute_id,
                                                   nic.network_id)
            except Exception:
                pass


class HandleNetworkDeltas(BaseNetworkTask):
    """Task to plug and unplug networks

    Loop through the deltas and plug or unplug
    networks based on delta
    """

    def execute(self, deltas):
        """Handle network plugging based off deltas."""
        added_ports = {}
        for amp_id, delta in deltas.items():
            added_ports[amp_id] = []
            for nic in delta.add_nics:
                interface = self.network_driver.plug_network(
                    delta.compute_id, nic.network_id)
                port = self.network_driver.get_port(interface.port_id)
                port.network = self.network_driver.get_network(
                    port.network_id)
                for fixed_ip in port.fixed_ips:
                    fixed_ip.subnet = self.network_driver.get_subnet(
                        fixed_ip.subnet_id)
                added_ports[amp_id].append(port)
            for nic in delta.delete_nics:
                try:
                    self.network_driver.unplug_network(delta.compute_id,
                                                       nic.network_id)
                except base.NetworkNotFound:
                    LOG.debug("Network %s not found", nic.network_id)
                except Exception:
                    LOG.exception("Unable to unplug network")
        return added_ports

    def revert(self, result, deltas, *args, **kwargs):
        """Handle a network plug or unplug failures."""

        if isinstance(result, failure.Failure):
            return
        for amp_id, delta in deltas.items():
            LOG.warning("Unable to plug networks for amp id %s",
                        delta.amphora_id)
            if not delta:
                return

            for nic in delta.add_nics:
                try:
                    self.network_driver.unplug_network(delta.compute_id,
                                                       nic.network_id)
                except base.NetworkNotFound:
                    pass


class PlugVIP(BaseNetworkTask):
    """Task to plumb a VIP."""

    def execute(self, loadbalancer):
        """Plumb a vip to an amphora."""

        LOG.debug("Plumbing VIP for loadbalancer id: %s", loadbalancer.id)

        amps_data = self.network_driver.plug_vip(loadbalancer,
                                                 loadbalancer.vip)
        return amps_data

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to plumb a vip."""

        if isinstance(result, failure.Failure):
            return
        LOG.warning("Unable to plug VIP for loadbalancer id %s",
                    loadbalancer.id)

        try:
            # Make sure we have the current port IDs for cleanup
            for amp_data in result:
                for amphora in filter(
                        # pylint: disable=cell-var-from-loop
                        lambda amp: amp.id == amp_data.id,
                        loadbalancer.amphorae):
                    amphora.vrrp_port_id = amp_data.vrrp_port_id
                    amphora.ha_port_id = amp_data.ha_port_id

            self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
        except Exception as e:
            LOG.error("Failed to unplug VIP. 
Resources may still " "be in use from vip: %(vip)s due to error: %(except)s", {'vip': loadbalancer.vip.ip_address, 'except': str(e)}) class UpdateVIPSecurityGroup(BaseNetworkTask): """Task to setup SG for LB.""" def execute(self, loadbalancer_id): """Task to setup SG for LB. Task is idempotent and safe to retry. """ LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer_id) loadbalancer = self.lb_repo.get(db_apis.get_session(), id=loadbalancer_id) return self.network_driver.update_vip_sg(loadbalancer, loadbalancer.vip) class GetSubnetFromVIP(BaseNetworkTask): """Task to plumb a VIP.""" def execute(self, loadbalancer): """Plumb a vip to an amphora.""" LOG.debug("Getting subnet for LB: %s", loadbalancer.id) return self.network_driver.get_subnet(loadbalancer.vip.subnet_id) class PlugVIPAmpphora(BaseNetworkTask): """Task to plumb a VIP.""" def execute(self, loadbalancer, amphora, subnet): """Plumb a vip to an amphora.""" LOG.debug("Plumbing VIP for amphora id: %s", amphora.id) amp_data = self.network_driver.plug_aap_port( loadbalancer, loadbalancer.vip, amphora, subnet) return amp_data def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs): """Handle a failure to plumb a vip.""" if isinstance(result, failure.Failure): return LOG.warning("Unable to plug VIP for amphora id %s " "load balancer id %s", amphora.id, loadbalancer.id) try: amphora.vrrp_port_id = result.vrrp_port_id amphora.ha_port_id = result.ha_port_id self.network_driver.unplug_aap_port(loadbalancer.vip, amphora, subnet) except Exception as e: LOG.error('Failed to unplug AAP port. Resources may still be in ' 'use for VIP: %s due to error: %s', loadbalancer.vip, str(e)) class UnplugVIP(BaseNetworkTask): """Task to unplug the vip.""" def execute(self, loadbalancer): """Unplug the vip.""" LOG.debug("Unplug vip on amphora") try: self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip) except Exception: LOG.exception("Unable to unplug vip from load balancer %s", loadbalancer.id) class AllocateVIP(BaseNetworkTask): """Task to allocate a VIP.""" def execute(self, loadbalancer): """Allocate a vip to the loadbalancer.""" LOG.debug("Allocate_vip port_id %s, subnet_id %s," "ip_address %s", loadbalancer.vip.port_id, loadbalancer.vip.subnet_id, loadbalancer.vip.ip_address) return self.network_driver.allocate_vip(loadbalancer) def revert(self, result, loadbalancer, *args, **kwargs): """Handle a failure to allocate vip.""" if isinstance(result, failure.Failure): LOG.exception("Unable to allocate VIP") return vip = result LOG.warning("Deallocating vip %s", vip.ip_address) try: self.network_driver.deallocate_vip(vip) except Exception as e: LOG.error("Failed to deallocate VIP. Resources may still " "be in use from vip: %(vip)s due to error: %(except)s", {'vip': vip.ip_address, 'except': str(e)}) class AllocateVIPforFailover(AllocateVIP): """Task to allocate/validate the VIP for a failover flow.""" def revert(self, result, loadbalancer, *args, **kwargs): """Handle a failure to allocate vip.""" if isinstance(result, failure.Failure): LOG.exception("Unable to allocate VIP") return vip = result LOG.info("Failover revert is not deallocating vip %s because this is " "a failover.", vip.ip_address) class DeallocateVIP(BaseNetworkTask): """Task to deallocate a VIP.""" def execute(self, loadbalancer): """Deallocate a VIP.""" LOG.debug("Deallocating a VIP %s", loadbalancer.vip.ip_address) # NOTE(blogan): this is kind of ugly but sufficient for now. Drivers # will need access to the load balancer that the vip is/was attached # to. 
However the data model serialization for the vip does not give a # backref to the loadbalancer if accessed through the loadbalancer. vip = loadbalancer.vip vip.load_balancer = loadbalancer self.network_driver.deallocate_vip(vip) class UpdateVIP(BaseNetworkTask): """Task to update a VIP.""" def execute(self, loadbalancer): LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id) self.network_driver.update_vip(loadbalancer) class UpdateVIPForDelete(BaseNetworkTask): """Task to update a VIP for listener delete flows.""" def execute(self, loadbalancer): LOG.debug("Updating VIP for listener delete on load_balancer %s.", loadbalancer.id) self.network_driver.update_vip(loadbalancer, for_delete=True) class GetAmphoraNetworkConfigs(BaseNetworkTask): """Task to retrieve amphora network details.""" def execute(self, loadbalancer, amphora=None): LOG.debug("Retrieving vip network details.") return self.network_driver.get_network_configs(loadbalancer, amphora=amphora) class GetAmphoraNetworkConfigsByID(BaseNetworkTask): """Task to retrieve amphora network details.""" def execute(self, loadbalancer_id, amphora_id=None): LOG.debug("Retrieving vip network details.") amp_repo = repositories.AmphoraRepository() loadbalancer = self.lb_repo.get(db_apis.get_session(), id=loadbalancer_id) amphora = amp_repo.get(db_apis.get_session(), id=amphora_id) return self.network_driver.get_network_configs(loadbalancer, amphora=amphora) class GetAmphoraeNetworkConfigs(BaseNetworkTask): """Task to retrieve amphorae network details.""" def execute(self, loadbalancer_id): LOG.debug("Retrieving vip network details.") loadbalancer = self.lb_repo.get(db_apis.get_session(), id=loadbalancer_id) return self.network_driver.get_network_configs(loadbalancer) class FailoverPreparationForAmphora(BaseNetworkTask): """Task to prepare an amphora for failover.""" def execute(self, amphora): LOG.debug("Prepare amphora %s for failover.", amphora.id) self.network_driver.failover_preparation(amphora) class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask): """Task retrieving all the port ids on an amphora, except lb network.""" def execute(self, amphora): LOG.debug("Retrieve all but the lb network port id on amphora %s.", amphora.id) interfaces = self.network_driver.get_plugged_networks( compute_id=amphora.compute_id) ports = [] for interface_ in interfaces: if interface_.port_id not in ports: port = self.network_driver.get_port(port_id=interface_.port_id) ips = port.fixed_ips lb_network = False for ip in ips: if ip.ip_address == amphora.lb_network_ip: lb_network = True if not lb_network: ports.append(port) return ports class PlugPorts(BaseNetworkTask): """Task to plug neutron ports into a compute instance.""" def execute(self, amphora, ports): for port in ports: LOG.debug('Plugging port ID: %(port_id)s into compute instance: ' '%(compute_id)s.', {'port_id': port.id, 'compute_id': amphora.compute_id}) self.network_driver.plug_port(amphora, port) class ApplyQos(BaseNetworkTask): """Apply Quality of Services to the VIP""" def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id, is_revert=False, request_qos_id=None): """Call network driver to apply QoS Policy on the vrrp ports.""" if not amps_data: amps_data = loadbalancer.amphorae apply_qos = ApplyQosAmphora() for amp_data in amps_data: apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id) def execute(self, loadbalancer, amps_data=None, update_dict=None): """Apply qos policy on the vrrp ports which are related with vip.""" qos_policy_id = 
loadbalancer.vip.qos_policy_id if not qos_policy_id and ( not update_dict or ( 'vip' not in update_dict or 'qos_policy_id' not in update_dict['vip'])): return self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id) def revert(self, result, loadbalancer, amps_data=None, update_dict=None, *args, **kwargs): """Handle a failure to apply QoS to VIP""" request_qos_id = loadbalancer.vip.qos_policy_id orig_lb = self.task_utils.get_current_loadbalancer_from_db( loadbalancer.id) orig_qos_id = orig_lb.vip.qos_policy_id if request_qos_id != orig_qos_id: self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id, is_revert=True, request_qos_id=request_qos_id) class ApplyQosAmphora(BaseNetworkTask): """Apply Quality of Services to the VIP""" def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id, is_revert=False, request_qos_id=None): """Call network driver to apply QoS Policy on the vrrp ports.""" try: self.network_driver.apply_qos_on_port(qos_policy_id, amp_data.vrrp_port_id) except Exception: if not is_revert: raise LOG.warning('Failed to undo qos policy %(qos_id)s ' 'on vrrp port: %(port)s from ' 'amphorae: %(amp)s', {'qos_id': request_qos_id, 'port': amp_data.vrrp_port_id, 'amp': [amp.id for amp in amp_data]}) def execute(self, loadbalancer, amp_data=None, update_dict=None): """Apply qos policy on the vrrp ports which are related with vip.""" qos_policy_id = loadbalancer.vip.qos_policy_id if not qos_policy_id and ( update_dict and ( 'vip' not in update_dict or 'qos_policy_id' not in update_dict['vip'])): return self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id) def revert(self, result, loadbalancer, amp_data=None, update_dict=None, *args, **kwargs): """Handle a failure to apply QoS to VIP""" try: request_qos_id = loadbalancer.vip.qos_policy_id orig_lb = self.task_utils.get_current_loadbalancer_from_db( loadbalancer.id) orig_qos_id = orig_lb.vip.qos_policy_id if request_qos_id != orig_qos_id: self._apply_qos_on_vrrp_port(loadbalancer, amp_data, orig_qos_id, is_revert=True, request_qos_id=request_qos_id) except Exception as e: LOG.error('Failed to remove QoS policy: %s from port: %s due ' 'to error: %s', orig_qos_id, amp_data.vrrp_port_id, str(e)) class DeletePort(BaseNetworkTask): """Task to delete a network port.""" @tenacity.retry(retry=tenacity.retry_if_exception_type(), stop=tenacity.stop_after_attempt( CONF.networking.max_retries), wait=tenacity.wait_exponential( multiplier=CONF.networking.retry_backoff, min=CONF.networking.retry_interval, max=CONF.networking.retry_max), reraise=True) def execute(self, port_id, passive_failure=False): """Delete the network port.""" if port_id is None: return if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1: LOG.debug("Deleting network port %s", port_id) else: LOG.warning('Retrying network port %s delete attempt %s of %s.', port_id, self.execute.retry.statistics[ constants.ATTEMPT_NUMBER], self.execute.retry.stop.max_attempt_number) # Let the Taskflow engine know we are working and alive # Don't use get with a default for 'attempt_number', we need to fail # if that number is missing. self.update_progress( self.execute.retry.statistics[constants.ATTEMPT_NUMBER] / self.execute.retry.stop.max_attempt_number) try: self.network_driver.delete_port(port_id) except Exception: if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] != self.execute.retry.stop.max_attempt_number): LOG.warning('Network port delete for port id: %s failed. 
' 'Retrying.', port_id) raise if passive_failure: LOG.exception('Network port delete for port ID: %s failed. ' 'This resource will be abandoned and should ' 'manually be cleaned up once the ' 'network service is functional.', port_id) # Let's at least attempt to disable it so if the instance # comes back from the dead it doesn't conflict with anything. try: self.network_driver.admin_down_port(port_id) LOG.info('Successfully disabled (admin down) network port ' '%s that failed to delete.', port_id) except Exception: LOG.warning('Attempt to disable (admin down) network port ' '%s failed. The network service has failed. ' 'Continuing.', port_id) else: LOG.exception('Network port delete for port ID: %s failed. ' 'The network service has failed. ' 'Aborting and reverting.', port_id) raise class CreateVIPBasePort(BaseNetworkTask): """Task to create the VIP base port for an amphora.""" @tenacity.retry(retry=tenacity.retry_if_exception_type(), stop=tenacity.stop_after_attempt( CONF.networking.max_retries), wait=tenacity.wait_exponential( multiplier=CONF.networking.retry_backoff, min=CONF.networking.retry_interval, max=CONF.networking.retry_max), reraise=True) def execute(self, vip, vip_sg_id, amphora_id): port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id fixed_ips = [{constants.SUBNET_ID: vip.subnet_id}] sg_id = [] if vip_sg_id: sg_id = [vip_sg_id] port = self.network_driver.create_port( vip.network_id, name=port_name, fixed_ips=fixed_ips, secondary_ips=[vip.ip_address], security_group_ids=sg_id, qos_policy_id=vip.qos_policy_id) LOG.info('Created port %s with ID %s for amphora %s', port_name, port.id, amphora_id) return port def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs): if isinstance(result, failure.Failure): return try: port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id for port in result: self.network_driver.delete_port(port.id) LOG.info('Deleted port %s with ID %s for amphora %s due to a ' 'revert.', port_name, port.id, amphora_id) except Exception as e: LOG.error('Failed to delete port %s. Resources may still be in ' 'use for a port intended for amphora %s due to error ' '%s. Search for a port named %s', result, amphora_id, str(e), port_name) class AdminDownPort(BaseNetworkTask): def execute(self, port_id): try: self.network_driver.set_port_admin_state_up(port_id, False) except base.PortNotFound: return for i in range(CONF.networking.max_retries): port = self.network_driver.get_port(port_id) if port.status == constants.DOWN: LOG.debug('Disabled port: %s', port_id) return LOG.debug('Port %s is %s instead of DOWN, waiting.', port_id, port.status) time.sleep(CONF.networking.retry_interval) LOG.error('Port %s failed to go DOWN. Port status is still %s. 
' 'Ignoring and continuing.', port_id, port.status) def revert(self, result, port_id, *args, **kwargs): if isinstance(result, failure.Failure): return try: self.network_driver.set_port_admin_state_up(port_id, True) except Exception as e: LOG.error('Failed to bring port %s admin up on revert due to: %s.', port_id, str(e)) class GetVIPSecurityGroupID(BaseNetworkTask): def execute(self, loadbalancer_id): sg_name = utils.get_vip_security_group_name(loadbalancer_id) try: security_group = self.network_driver.get_security_group(sg_name) if security_group: return security_group.id except base.SecurityGroupNotFound: with excutils.save_and_reraise_exception() as ctxt: if self.network_driver.sec_grp_enabled: LOG.error('VIP security group %s was not found.', sg_name) else: ctxt.reraise = False return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v1/tasks/retry_tasks.py0000664000175000017500000000650100000000000024715 0ustar00zuulzuul00000000000000# Copyright 2019 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from taskflow import retry LOG = logging.getLogger(__name__) class SleepingRetryTimesController(retry.Times): """A retry controller to attempt subflow retries a number of times. This retry controller overrides the Times on_failure to inject a sleep interval between retries. It also adds a log message when all of the retries are exhausted. :param attempts: number of attempts to retry the associated subflow before giving up :type attempts: int :param name: Meaningful name for this atom, should be something that is distinguishable and understandable for notification, debugging, storing and any other similar purposes. :param provides: A set, string or list of items that this will be providing (or could provide) to others, used to correlate and associate the thing/s this atom produces, if it produces anything at all. :param requires: A set or list of required inputs for this atom's ``execute`` method. :param rebind: A dict of key/value pairs used to define argument name conversions for inputs to this atom's ``execute`` method. :param revert_all: when provided this will cause the full flow to revert when the number of attempts that have been tried has been reached (when false, it will only locally revert the associated subflow) :type revert_all: bool :param interval: Interval, in seconds, between retry attempts. :type interval: int """ def __init__(self, attempts=1, name=None, provides=None, requires=None, auto_extract=True, rebind=None, revert_all=False, interval=1): super(SleepingRetryTimesController, self).__init__( attempts, name, provides, requires, auto_extract, rebind, revert_all) self._interval = interval def on_failure(self, history, *args, **kwargs): if len(history) < self._attempts: LOG.warning('%s attempt %s of %s failed. 
Sleeping %s seconds and ' 'retrying.', self.name[self.name.startswith('retry-') and len('retry-'):], len(history), self._attempts, self._interval) time.sleep(self._interval) return retry.RETRY return self._revert_action def revert(self, history, *args, **kwargs): LOG.error('%s retries with interval %s seconds have failed for %s. ' 'Giving up.', len(history), self._interval, self.name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3902166 octavia-6.2.2/octavia/controller/worker/v2/0000775000175000017500000000000000000000000020643 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/__init__.py0000664000175000017500000000107400000000000022756 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/controller_worker.py0000664000175000017500000014152100000000000024775 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
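# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the Octavia source. The
# ControllerWorker methods below use tenacity decorators to wait out the
# window between the API committing a row and the worker reading it. The
# pattern, reduced to a standalone example (retry numbers are placeholders):
#
#     import tenacity
#
#     @tenacity.retry(
#         retry=tenacity.retry_if_exception_type(LookupError),
#         wait=tenacity.wait_incrementing(start=1, increment=2, max=10),
#         stop=tenacity.stop_after_attempt(5),
#         reraise=True)
#     def get_or_wait(repo, session, obj_id):
#         return repo.get(session, id=obj_id)
# ---------------------------------------------------------------------------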
# from octavia_lib.common import constants as lib_consts from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from sqlalchemy.orm import exc as db_exceptions from stevedore import driver as stevedore_driver import tenacity from octavia.api.drivers import utils as provider_utils from octavia.common import base_taskflow from octavia.common import constants from octavia.common import exceptions from octavia.common import utils from octavia.controller.worker.v2.flows import flow_utils from octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver from octavia.db import api as db_apis from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) def _is_provisioning_status_pending_update(lb_obj): return not lb_obj.provisioning_status == constants.PENDING_UPDATE class ControllerWorker(object): def __init__(self): self._amphora_repo = repo.AmphoraRepository() self._amphora_health_repo = repo.AmphoraHealthRepository() self._health_mon_repo = repo.HealthMonitorRepository() self._lb_repo = repo.LoadBalancerRepository() self._listener_repo = repo.ListenerRepository() self._member_repo = repo.MemberRepository() self._pool_repo = repo.PoolRepository() self._l7policy_repo = repo.L7PolicyRepository() self._l7rule_repo = repo.L7RuleRepository() self._flavor_repo = repo.FlavorRepository() self._az_repo = repo.AvailabilityZoneRepository() persistence = tsk_driver.MysqlPersistenceDriver() self.jobboard_driver = stevedore_driver.DriverManager( namespace='octavia.worker.jobboard_driver', name=CONF.task_flow.jobboard_backend_driver, invoke_args=(persistence,), invoke_on_load=True).driver @tenacity.retry( retry=( tenacity.retry_if_result(_is_provisioning_status_pending_update) | tenacity.retry_if_exception_type()), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def _get_db_obj_until_pending_update(self, repo, id): return repo.get(db_apis.get_session(), id=id) @property def services_controller(self): return base_taskflow.TaskFlowServiceController(self.jobboard_driver) def create_amphora(self, availability_zone=None): """Creates an Amphora. This is used to create spare amphora. :returns: uuid """ try: store = {constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_SPARES_POOL_PRIORITY, constants.FLAVOR: None, constants.SERVER_GROUP_ID: None, constants.AVAILABILITY_ZONE: None} if availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), availability_zone)) job_id = self.services_controller.run_poster( flow_utils.get_create_amphora_flow, store=store, wait=True) return job_id except Exception as e: LOG.error('Failed to create an amphora due to: %s', str(e)) def delete_amphora(self, amphora_id): """Deletes an existing Amphora. 
:param amphora_id: ID of the amphora to delete :returns: None :raises AmphoraNotFound: The referenced Amphora was not found """ amphora = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) store = {constants.AMPHORA: amphora.to_dict()} self.services_controller.run_poster( flow_utils.get_delete_amphora_flow, store=store) @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_health_monitor(self, health_monitor): """Creates a health monitor. :param health_monitor: Provider health monitor dict :returns: None :raises NoResultFound: Unable to find the object """ db_health_monitor = self._health_mon_repo.get( db_apis.get_session(), id=health_monitor[constants.HEALTHMONITOR_ID]) if not db_health_monitor: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'healthmonitor', health_monitor[constants.HEALTHMONITOR_ID]) raise db_exceptions.NoResultFound pool = db_health_monitor.pool pool.health_monitor = db_health_monitor load_balancer = pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( pool.listeners)) store = {constants.HEALTH_MON: health_monitor, constants.POOL_ID: pool.id, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id, constants.LOADBALANCER: provider_lb} self.services_controller.run_poster( flow_utils.get_create_health_monitor_flow, store=store) def delete_health_monitor(self, health_monitor): """Deletes a health monitor. :param health_monitor: Provider health monitor dict :returns: None :raises HMNotFound: The referenced health monitor was not found """ db_health_monitor = self._health_mon_repo.get( db_apis.get_session(), id=health_monitor[constants.HEALTHMONITOR_ID]) pool = db_health_monitor.pool load_balancer = pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( pool.listeners)) store = {constants.HEALTH_MON: health_monitor, constants.POOL_ID: pool.id, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id, constants.LOADBALANCER: provider_lb, constants.PROJECT_ID: load_balancer.project_id} self.services_controller.run_poster( flow_utils.get_delete_health_monitor_flow, store=store) def update_health_monitor(self, original_health_monitor, health_monitor_updates): """Updates a health monitor. :param original_health_monitor: Provider health monitor dict :param health_monitor_updates: Dict containing updated health monitor :returns: None :raises HMNotFound: The referenced health monitor was not found """ try: db_health_monitor = self._get_db_obj_until_pending_update( self._health_mon_repo, original_health_monitor[constants.HEALTHMONITOR_ID]) except tenacity.RetryError as e: LOG.warning('Health monitor did not go into %s in 60 seconds. ' 'This either due to an in-progress Octavia upgrade ' 'or an overloaded and failing database. 
Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) db_health_monitor = e.last_attempt.result() pool = db_health_monitor.pool listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( pool.listeners)) load_balancer = pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() store = {constants.HEALTH_MON: original_health_monitor, constants.POOL_ID: pool.id, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id, constants.LOADBALANCER: provider_lb, constants.UPDATE_DICT: health_monitor_updates} self.services_controller.run_poster( flow_utils.get_update_health_monitor_flow, store=store) @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_listener(self, listener): """Creates a listener. :param listener: A listener provider dictionary. :returns: None :raises NoResultFound: Unable to find the object """ db_listener = self._listener_repo.get( db_apis.get_session(), id=listener[constants.LISTENER_ID]) if not db_listener: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'listener', listener[constants.LISTENER_ID]) raise db_exceptions.NoResultFound load_balancer = db_listener.load_balancer listeners = load_balancer.listeners dict_listeners = [] for li in listeners: dict_listeners.append( provider_utils.db_listener_to_provider_listener(li).to_dict()) provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() store = {constants.LISTENERS: dict_listeners, constants.LOADBALANCER: provider_lb, constants.LOADBALANCER_ID: load_balancer.id} self.services_controller.run_poster( flow_utils.get_create_listener_flow, store=store) def delete_listener(self, listener): """Deletes a listener. :param listener: A listener provider dictionary to delete :returns: None :raises ListenerNotFound: The referenced listener was not found """ store = {constants.LISTENER: listener, constants.LOADBALANCER_ID: listener[constants.LOADBALANCER_ID], constants.PROJECT_ID: listener[constants.PROJECT_ID]} self.services_controller.run_poster( flow_utils.get_delete_listener_flow, store=store) def update_listener(self, listener, listener_updates): """Updates a listener. 
:param listener: A listener provider dictionary to update :param listener_updates: Dict containing updated listener attributes :returns: None :raises ListenerNotFound: The referenced listener was not found """ db_lb = self._lb_repo.get(db_apis.get_session(), id=listener[constants.LOADBALANCER_ID]) store = {constants.LISTENER: listener, constants.UPDATE_DICT: listener_updates, constants.LOADBALANCER_ID: db_lb.id, constants.LISTENERS: [listener]} self.services_controller.run_poster( flow_utils.get_update_listener_flow, store=store) @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_load_balancer(self, loadbalancer, flavor=None, availability_zone=None): """Creates a load balancer by allocating Amphorae. First tries to allocate an existing Amphora in READY state. If none are available it will attempt to build one specifically for this load balancer. :param loadbalancer: The dict of load balancer to create :returns: None :raises NoResultFound: Unable to find the object """ lb = self._lb_repo.get(db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) if not lb: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'load_balancer', loadbalancer[constants.LOADBALANCER_ID]) raise db_exceptions.NoResultFound store = {lib_consts.LOADBALANCER_ID: loadbalancer[lib_consts.LOADBALANCER_ID], constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, lib_consts.FLAVOR: flavor, lib_consts.AVAILABILITY_ZONE: availability_zone} topology = lb.topology if (not CONF.nova.enable_anti_affinity or topology == constants.TOPOLOGY_SINGLE): store[constants.SERVER_GROUP_ID] = None listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( lb.listeners) ) store[constants.UPDATE_DICT] = { constants.TOPOLOGY: topology } self.services_controller.run_poster( flow_utils.get_create_load_balancer_flow, topology, listeners=listeners_dicts, store=store) def delete_load_balancer(self, load_balancer, cascade=False): """Deletes a load balancer by de-allocating Amphorae. :param load_balancer: Dict of the load balancer to delete :returns: None :raises LBNotFound: The referenced load balancer was not found """ loadbalancer_id = load_balancer[constants.LOADBALANCER_ID] db_lb = self._lb_repo.get(db_apis.get_session(), id=loadbalancer_id) store = {constants.LOADBALANCER: load_balancer, constants.LOADBALANCER_ID: loadbalancer_id, constants.SERVER_GROUP_ID: db_lb.server_group_id, constants.PROJECT_ID: db_lb.project_id} if cascade: listeners = flow_utils.get_listeners_on_lb(db_lb) pools = flow_utils.get_pools_on_lb(db_lb) self.services_controller.run_poster( flow_utils.get_cascade_delete_load_balancer_flow, load_balancer, listeners, pools, store=store) else: self.services_controller.run_poster( flow_utils.get_delete_load_balancer_flow, load_balancer, store=store) def update_load_balancer(self, original_load_balancer, load_balancer_updates): """Updates a load balancer. 
:param original_load_balancer: Dict of the load balancer to update :param load_balancer_updates: Dict containing updated load balancer :returns: None :raises LBNotFound: The referenced load balancer was not found """ store = {constants.LOADBALANCER: original_load_balancer, constants.LOADBALANCER_ID: original_load_balancer[constants.LOADBALANCER_ID], constants.UPDATE_DICT: load_balancer_updates} self.services_controller.run_poster( flow_utils.get_update_load_balancer_flow, store=store) @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_member(self, member): """Creates a pool member. :param member: A member provider dictionary to create :returns: None :raises NoSuitablePool: Unable to find the node pool """ db_member = self._member_repo.get(db_apis.get_session(), id=member[constants.MEMBER_ID]) if not db_member: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'l7member', member[constants.MEMBER_ID]) raise db_exceptions.NoResultFound pool = db_member.pool load_balancer = pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( pool.listeners)) store = { constants.MEMBER: member, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id, constants.LOADBALANCER: provider_lb, constants.POOL_ID: pool.id} if load_balancer.availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), load_balancer.availability_zone)) else: store[constants.AVAILABILITY_ZONE] = {} self.services_controller.run_poster( flow_utils.get_create_member_flow, store=store) def delete_member(self, member): """Deletes a pool member. 
:param member: A member provider dictionary to delete :returns: None :raises MemberNotFound: The referenced member was not found """ pool = self._pool_repo.get(db_apis.get_session(), id=member[constants.POOL_ID]) load_balancer = pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( pool.listeners)) store = { constants.MEMBER: member, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id, constants.LOADBALANCER: provider_lb, constants.POOL_ID: pool.id, constants.PROJECT_ID: load_balancer.project_id} if load_balancer.availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), load_balancer.availability_zone)) else: store[constants.AVAILABILITY_ZONE] = {} self.services_controller.run_poster( flow_utils.get_delete_member_flow, store=store) def batch_update_members(self, old_members, new_members, updated_members): updated_members = [ (provider_utils.db_member_to_provider_member( self._member_repo.get(db_apis.get_session(), id=m.get(constants.ID))).to_dict(), m) for m in updated_members] provider_old_members = [ provider_utils.db_member_to_provider_member( self._member_repo.get(db_apis.get_session(), id=m.get(constants.ID))).to_dict() for m in old_members] if old_members: pool = self._pool_repo.get(db_apis.get_session(), id=old_members[0][constants.POOL_ID]) elif new_members: pool = self._pool_repo.get(db_apis.get_session(), id=new_members[0][constants.POOL_ID]) else: pool = self._pool_repo.get( db_apis.get_session(), id=updated_members[0][0][constants.POOL_ID]) load_balancer = pool.load_balancer listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( pool.listeners)) provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() store = { constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id, constants.LOADBALANCER: provider_lb, constants.POOL_ID: pool.id, constants.PROJECT_ID: load_balancer.project_id} if load_balancer.availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), load_balancer.availability_zone)) else: store[constants.AVAILABILITY_ZONE] = {} self.services_controller.run_poster( flow_utils.get_batch_update_members_flow, provider_old_members, new_members, updated_members, store=store) def update_member(self, member, member_updates): """Updates a pool member. 
:param member: A member provider dictionary to update :param member_updates: Dict containing updated member attributes :returns: None :raises MemberNotFound: The referenced member was not found """ # TODO(ataraday) revisit this when other flows use dicts pool = self._pool_repo.get(db_apis.get_session(), id=member[constants.POOL_ID]) load_balancer = pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( pool.listeners)) store = { constants.MEMBER: member, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id, constants.LOADBALANCER: provider_lb, constants.POOL_ID: pool.id, constants.UPDATE_DICT: member_updates} if load_balancer.availability_zone: store[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), load_balancer.availability_zone)) else: store[constants.AVAILABILITY_ZONE] = {} self.services_controller.run_poster( flow_utils.get_update_member_flow, store=store) @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_pool(self, pool): """Creates a node pool. :param pool: Provider pool dict to create :returns: None :raises NoResultFound: Unable to find the object """ # TODO(ataraday) It seems we need to get db pool here anyway to get # proper listeners db_pool = self._pool_repo.get(db_apis.get_session(), id=pool[constants.POOL_ID]) if not db_pool: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'pool', pool[constants.POOL_ID]) raise db_exceptions.NoResultFound load_balancer = db_pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( db_pool.listeners)) store = {constants.POOL_ID: pool[constants.POOL_ID], constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id, constants.LOADBALANCER: provider_lb} self.services_controller.run_poster( flow_utils.get_create_pool_flow, store=store) def delete_pool(self, pool): """Deletes a node pool. :param pool: Provider pool dict to delete :returns: None :raises PoolNotFound: The referenced pool was not found """ db_pool = self._pool_repo.get(db_apis.get_session(), id=pool[constants.POOL_ID]) listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( db_pool.listeners)) load_balancer = db_pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() store = {constants.POOL_ID: pool[constants.POOL_ID], constants.LISTENERS: listeners_dicts, constants.LOADBALANCER: provider_lb, constants.LOADBALANCER_ID: load_balancer.id, constants.PROJECT_ID: db_pool.project_id} self.services_controller.run_poster( flow_utils.get_delete_pool_flow, store=store) def update_pool(self, origin_pool, pool_updates): """Updates a node pool.
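For reference, an editor's sketch of the ``tenacity.RetryError``
recovery used in the body of this method (the predicate, timings and
attempt count are illustrative assumptions)::

    import tenacity

    @tenacity.retry(
        retry=tenacity.retry_if_result(lambda obj: obj is None),
        wait=tenacity.wait_fixed(1),
        stop=tenacity.stop_after_attempt(5))
    def wait_for_object(fetch):
        return fetch()

    try:
        obj = wait_for_object(lambda: None)  # never becomes ready
    except tenacity.RetryError as e:
        # Give up waiting, but keep whatever the final attempt
        # returned, exactly as the code below does.
        obj = e.last_attempt.result()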
:param origin_pool: Provider pool dict to update :param pool_updates: Dict containing updated pool attributes :returns: None :raises PoolNotFound: The referenced pool was not found """ try: db_pool = self._get_db_obj_until_pending_update( self._pool_repo, origin_pool[constants.POOL_ID]) except tenacity.RetryError as e: LOG.warning('Pool did not go into %s in 60 seconds. ' 'This is either due to an in-progress Octavia upgrade ' 'or an overloaded and failing database. Assuming ' 'an upgrade is in progress and continuing.', constants.PENDING_UPDATE) db_pool = e.last_attempt.result() load_balancer = db_pool.load_balancer provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( load_balancer).to_dict() listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( db_pool.listeners)) store = {constants.POOL_ID: db_pool.id, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER: provider_lb, constants.LOADBALANCER_ID: load_balancer.id, constants.UPDATE_DICT: pool_updates} self.services_controller.run_poster( flow_utils.get_update_pool_flow, store=store) @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_l7policy(self, l7policy): """Creates an L7 Policy. :param l7policy: Provider dict of the l7policy to create :returns: None :raises NoResultFound: Unable to find the object """ db_l7policy = self._l7policy_repo.get( db_apis.get_session(), id=l7policy[constants.L7POLICY_ID]) if not db_l7policy: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'l7policy', l7policy[constants.L7POLICY_ID]) raise db_exceptions.NoResultFound db_listener = db_l7policy.listener listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( [db_listener])) store = {constants.L7POLICY: l7policy, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: db_listener.load_balancer.id } self.services_controller.run_poster( flow_utils.get_create_l7policy_flow, store=store) def delete_l7policy(self, l7policy): """Deletes an L7 policy. :param l7policy: Provider dict of the l7policy to delete :returns: None :raises L7PolicyNotFound: The referenced l7policy was not found """ db_listener = self._listener_repo.get( db_apis.get_session(), id=l7policy[constants.LISTENER_ID]) listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( [db_listener])) store = {constants.L7POLICY: l7policy, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: db_listener.load_balancer.id } self.services_controller.run_poster( flow_utils.get_delete_l7policy_flow, store=store) def update_l7policy(self, original_l7policy, l7policy_updates): """Updates an L7 policy.
:param original_l7policy: Provider dict of the l7policy to update :param l7policy_updates: Dict containing updated l7policy attributes :returns: None :raises L7PolicyNotFound: The referenced l7policy was not found """ db_listener = self._listener_repo.get( db_apis.get_session(), id=original_l7policy[constants.LISTENER_ID]) listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( [db_listener])) store = {constants.L7POLICY: original_l7policy, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: db_listener.load_balancer.id, constants.UPDATE_DICT: l7policy_updates} self.services_controller.run_poster( flow_utils.get_update_l7policy_flow, store=store) @tenacity.retry( retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), wait=tenacity.wait_incrementing( CONF.haproxy_amphora.api_db_commit_retry_initial_delay, CONF.haproxy_amphora.api_db_commit_retry_backoff, CONF.haproxy_amphora.api_db_commit_retry_max), stop=tenacity.stop_after_attempt( CONF.haproxy_amphora.api_db_commit_retry_attempts)) def create_l7rule(self, l7rule): """Creates an L7 Rule. :param l7rule: Provider dict of the l7rule to create :returns: None :raises NoResultFound: Unable to find the object """ db_l7rule = self._l7rule_repo.get(db_apis.get_session(), id=l7rule[constants.L7RULE_ID]) if not db_l7rule: LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' '60 seconds.', 'l7rule', l7rule[constants.L7RULE_ID]) raise db_exceptions.NoResultFound db_l7policy = db_l7rule.l7policy load_balancer = db_l7policy.listener.load_balancer listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( [db_l7policy.listener])) l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy( db_l7policy) store = {constants.L7RULE: l7rule, constants.L7POLICY: l7policy_dict.to_dict(), constants.L7POLICY_ID: db_l7policy.id, constants.LISTENERS: listeners_dicts, constants.LOADBALANCER_ID: load_balancer.id } self.services_controller.run_poster( flow_utils.get_create_l7rule_flow, store=store) def delete_l7rule(self, l7rule): """Deletes an L7 rule. :param l7rule: Provider dict of the l7rule to delete :returns: None :raises L7RuleNotFound: The referenced l7rule was not found """ db_l7policy = self._l7policy_repo.get(db_apis.get_session(), id=l7rule[constants.L7POLICY_ID]) l7policy = provider_utils.db_l7policy_to_provider_l7policy(db_l7policy) load_balancer = db_l7policy.listener.load_balancer listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( [db_l7policy.listener])) store = {constants.L7RULE: l7rule, constants.L7POLICY: l7policy.to_dict(), constants.LISTENERS: listeners_dicts, constants.L7POLICY_ID: db_l7policy.id, constants.LOADBALANCER_ID: load_balancer.id } self.services_controller.run_poster( flow_utils.get_delete_l7rule_flow, store=store) def update_l7rule(self, original_l7rule, l7rule_updates): """Updates an L7 rule.
:param original_l7rule: Provider dict of the l7rule to update :param l7rule_updates: Dict containing updated l7rule attributes :returns: None :raises L7RuleNotFound: The referenced l7rule was not found """ db_l7policy = self._l7policy_repo.get( db_apis.get_session(), id=original_l7rule[constants.L7POLICY_ID]) load_balancer = db_l7policy.listener.load_balancer listeners_dicts = ( provider_utils.db_listeners_to_provider_dicts_list_of_dicts( [db_l7policy.listener])) l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy( db_l7policy) store = {constants.L7RULE: original_l7rule, constants.L7POLICY: l7policy_dict.to_dict(), constants.LISTENERS: listeners_dicts, constants.L7POLICY_ID: db_l7policy.id, constants.LOADBALANCER_ID: load_balancer.id, constants.UPDATE_DICT: l7rule_updates} self.services_controller.run_poster( flow_utils.get_update_l7rule_flow, store=store) def failover_amphora(self, amphora_id): """Perform failover operations for an amphora. Note: This expects the load balancer to already be in provisioning_status=PENDING_UPDATE state. :param amphora_id: ID for amphora to failover :returns: None :raises octavia.common.exceptions.NotFound: The referenced amphora was not found """ amphora = None try: amphora = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) if amphora is None: LOG.error('Amphora failover for amphora %s failed because ' 'there is no record of this amphora in the ' 'database. Check that the [house_keeping] ' 'amphora_expiry_age configuration setting is not ' 'too short. Skipping failover.', amphora_id) raise exceptions.NotFound(resource=constants.AMPHORA, id=amphora_id) if amphora.status == constants.DELETED: LOG.warning('Amphora %s is marked DELETED in the database but ' 'was submitted for failover. Deleting it from the ' 'amphora health table to exclude it from health ' 'checks and skipping the failover.', amphora.id) self._amphora_health_repo.delete(db_apis.get_session(), amphora_id=amphora.id) return loadbalancer = None if amphora.load_balancer_id: loadbalancer = self._lb_repo.get(db_apis.get_session(), id=amphora.load_balancer_id) lb_amp_count = None if loadbalancer: if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: lb_amp_count = 2 elif loadbalancer.topology == constants.TOPOLOGY_SINGLE: lb_amp_count = 1 az_metadata = {} flavor_dict = {} lb_id = None vip_dict = {} server_group_id = None if loadbalancer: lb_id = loadbalancer.id # Even if the LB doesn't have a flavor, create one and # pass through the topology. if loadbalancer.flavor_id: flavor_dict = self._flavor_repo.get_flavor_metadata_dict( db_apis.get_session(), loadbalancer.flavor_id) flavor_dict[constants.LOADBALANCER_TOPOLOGY] = ( loadbalancer.topology) else: flavor_dict = {constants.LOADBALANCER_TOPOLOGY: loadbalancer.topology} if loadbalancer.availability_zone: az_metadata = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), loadbalancer.availability_zone)) vip_dict = loadbalancer.vip.to_dict() server_group_id = loadbalancer.server_group_id provider_lb_dict = (provider_utils.
db_loadbalancer_to_provider_loadbalancer)( loadbalancer).to_dict() if loadbalancer else loadbalancer stored_params = {constants.AVAILABILITY_ZONE: az_metadata, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, constants.FLAVOR: flavor_dict, constants.LOADBALANCER: provider_lb_dict, constants.SERVER_GROUP_ID: server_group_id, constants.LOADBALANCER_ID: lb_id, constants.VIP: vip_dict, constants.AMPHORA_ID: amphora_id} self.services_controller.run_poster( flow_utils.get_failover_amphora_flow, amphora.to_dict(), lb_amp_count, store=stored_params, wait=True) LOG.info("Successfully completed the failover for an amphora: %s", {"id": amphora_id, "load_balancer_id": lb_id, "lb_network_ip": amphora.lb_network_ip, "compute_id": amphora.compute_id, "role": amphora.role}) except Exception as e: with excutils.save_and_reraise_exception(reraise=False): LOG.exception("Amphora %s failover exception: %s", amphora_id, str(e)) self._amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) if amphora and amphora.load_balancer_id: self._lb_repo.update( db_apis.get_session(), amphora.load_balancer_id, provisioning_status=constants.ERROR) @staticmethod def _get_amphorae_for_failover(load_balancer): """Returns an ordered list of amphora to failover. :param load_balancer: The load balancer being failed over. :returns: An ordered list of amphora to failover, first amp to failover is last in the list :raises octavia.common.exceptions.InvalidTopology: LB has an unknown topology. """ if load_balancer.topology == constants.TOPOLOGY_SINGLE: # In SINGLE topology, amp failover order does not matter return [a.to_dict() for a in load_balancer.amphorae if a.status != constants.DELETED] if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: # In Active/Standby we should preference the standby amp # for failover first in case the Active is still able to pass # traffic. # Note: The active amp can switch at any time and in less than a # second, so this is "best effort". amphora_driver = utils.get_amphora_driver() timeout_dict = { constants.CONN_MAX_RETRIES: CONF.haproxy_amphora.failover_connection_max_retries, constants.CONN_RETRY_INTERVAL: CONF.haproxy_amphora.failover_connection_retry_interval} amps = [] selected_amp = None for amp in load_balancer.amphorae: if amp.status == constants.DELETED: continue if selected_amp is None: try: if amphora_driver.get_interface_from_ip( amp, load_balancer.vip.ip_address, timeout_dict): # This is a potential ACTIVE, add it to the list amps.append(amp.to_dict()) else: # This one doesn't have the VIP IP, so start # failovers here. selected_amp = amp LOG.debug("Selected amphora %s as the initial " "failover amphora.", amp.id) except Exception: # This amphora is broken, so start failovers here. selected_amp = amp else: # We have already found a STANDBY, so add the rest to the # list without querying them. amps.append(amp.to_dict()) # Put the selected amphora at the end of the list so it is # first to failover. if selected_amp: amps.append(selected_amp.to_dict()) return amps LOG.error('Unknown load balancer topology found: %s, aborting ' 'failover.', load_balancer.topology) raise exceptions.InvalidTopology(topology=load_balancer.topology) def failover_loadbalancer(self, load_balancer_id): """Perform failover operations for a load balancer. Note: This expects the load balancer to already be in provisioning_status=PENDING_UPDATE state. 
:param load_balancer_id: ID for load balancer to failover :returns: None :raises octavia.common.exceptions.NotFound: The load balancer was not found. """ try: lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id) if lb is None: raise exceptions.NotFound(resource=constants.LOADBALANCER, id=load_balancer_id) # Get the ordered list of amphorae to failover for this LB. amps = self._get_amphorae_for_failover(lb) if lb.topology == constants.TOPOLOGY_SINGLE: if len(amps) != 1: LOG.warning('%d amphorae found on load balancer %s where ' 'one should exist. Repairing.', len(amps), load_balancer_id) elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: if len(amps) != 2: LOG.warning('%d amphorae found on load balancer %s where ' 'two should exist. Repairing.', len(amps), load_balancer_id) else: LOG.error('Unknown load balancer topology found: %s, aborting ' 'failover!', lb.topology) raise exceptions.InvalidTopology(topology=lb.topology) # We must provide a topology in the flavor definition # here for the amphora to be created with the correct # configuration. if lb.flavor_id: flavor = self._flavor_repo.get_flavor_metadata_dict( db_apis.get_session(), lb.flavor_id) flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology else: flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology} provider_lb_dict = ( provider_utils.db_loadbalancer_to_provider_loadbalancer( lb).to_dict() if lb else lb) provider_lb_dict[constants.FLAVOR] = flavor stored_params = {constants.LOADBALANCER: provider_lb_dict, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, constants.SERVER_GROUP_ID: lb.server_group_id, constants.LOADBALANCER_ID: lb.id, constants.FLAVOR: flavor} if lb.availability_zone: stored_params[constants.AVAILABILITY_ZONE] = ( self._az_repo.get_availability_zone_metadata_dict( db_apis.get_session(), lb.availability_zone)) else: stored_params[constants.AVAILABILITY_ZONE] = {} self.services_controller.run_poster( flow_utils.get_failover_LB_flow, amps, provider_lb_dict, store=stored_params, wait=True) LOG.info('Failover of load balancer %s completed successfully.', lb.id) except Exception as e: with excutils.save_and_reraise_exception(reraise=False): LOG.exception("LB %(lbid)s failover exception: %(exc)s", {'lbid': load_balancer_id, 'exc': str(e)}) self._lb_repo.update( db_apis.get_session(), load_balancer_id, provisioning_status=constants.ERROR) def amphora_cert_rotation(self, amphora_id): """Perform cert rotation for an amphora. :param amphora_id: ID for amphora to rotate :returns: None :raises AmphoraNotFound: The referenced amphora was not found """ amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) LOG.info("Start amphora cert rotation, amphora's id is: %s", amphora_id) store = {constants.AMPHORA: amp.to_dict(), constants.AMPHORA_ID: amphora_id} self.services_controller.run_poster( flow_utils.cert_rotate_amphora_flow, store=store) LOG.info("Finished amphora cert rotation, amphora's id was: %s", amphora_id) def update_amphora_agent_config(self, amphora_id): """Update the amphora agent configuration. Note: This will update the amphora agent configuration file and update the running configuration for mutable configuration items. :param amphora_id: ID of the amphora to update.
:returns: None """ LOG.info("Start amphora agent configuration update, amphora's id " "is: %s", amphora_id) amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(), amphora_id) flavor = {} if lb.flavor_id: flavor = self._flavor_repo.get_flavor_metadata_dict( db_apis.get_session(), lb.flavor_id) store = {constants.AMPHORA: amp.to_dict(), constants.FLAVOR: flavor} self.services_controller.run_poster( flow_utils.update_amphora_config_flow, store=store) LOG.info("Finished amphora agent configuration update, amphora's id " "was: %s", amphora_id) octavia-6.2.2/octavia/controller/worker/v2/flows/__init__.py # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. octavia-6.2.2/octavia/controller/worker/v2/flows/amphora_flows.py # Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from oslo_log import log as logging from taskflow.patterns import graph_flow from taskflow.patterns import linear_flow from taskflow.patterns import unordered_flow from octavia.common import constants from octavia.common import utils from octavia.controller.worker.v2.tasks import amphora_driver_tasks from octavia.controller.worker.v2.tasks import cert_task from octavia.controller.worker.v2.tasks import compute_tasks from octavia.controller.worker.v2.tasks import database_tasks from octavia.controller.worker.v2.tasks import lifecycle_tasks from octavia.controller.worker.v2.tasks import network_tasks from octavia.controller.worker.v2.tasks import retry_tasks CONF = cfg.CONF LOG = logging.getLogger(__name__) class AmphoraFlows(object): def get_create_amphora_flow(self): """Creates a flow to create an amphora.
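For reference, an editor's minimal TaskFlow sketch of the
linear-flow pattern this method builds (the task names and values
here are illustrative, not Octavia tasks)::

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class Provision(task.Task):
        # Whatever execute() returns is stored under this key.
        default_provides = 'server_id'

        def execute(self):
            return 'srv-1'

    class Configure(task.Task):
        def execute(self, server_id):
            # 'server_id' is wired automatically from Provision.
            print('configuring %s' % server_id)

    flow = linear_flow.Flow('create-thing')
    flow.add(Provision(), Configure())
    engines.run(flow)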
:returns: The flow for creating the amphora """ create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW) create_amphora_flow.add(database_tasks.CreateAmphoraInDB( provides=constants.AMPHORA_ID)) create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask( requires=constants.AMPHORA_ID)) create_amphora_flow.add(cert_task.GenerateServerPEMTask( provides=constants.SERVER_PEM)) create_amphora_flow.add( database_tasks.UpdateAmphoraDBCertExpiration( requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) create_amphora_flow.add(compute_tasks.CertComputeCreate( requires=(constants.AMPHORA_ID, constants.SERVER_PEM, constants.SERVER_GROUP_ID, constants.BUILD_TYPE_PRIORITY, constants.FLAVOR), provides=constants.COMPUTE_ID)) create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB( requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) create_amphora_flow.add(compute_tasks.ComputeActiveWait( requires=(constants.COMPUTE_ID, constants.AMPHORA_ID), provides=constants.COMPUTE_OBJ)) create_amphora_flow.add(database_tasks.UpdateAmphoraInfo( requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), provides=constants.AMPHORA)) retry_subflow = linear_flow.Flow( constants.CREATE_AMPHORA_RETRY_SUBFLOW, retry=amphora_driver_tasks.AmpRetry()) retry_subflow.add( amphora_driver_tasks.AmphoraComputeConnectivityWait( requires=constants.AMPHORA, inject={'raise_retry_exception': True})) create_amphora_flow.add(retry_subflow) create_amphora_flow.add(database_tasks.ReloadAmphora( requires=constants.AMPHORA, provides=constants.AMPHORA)) create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize( requires=constants.AMPHORA)) create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB( requires=constants.AMPHORA)) return create_amphora_flow def _get_post_map_lb_subflow(self, prefix, role): """Set amphora type after mapped to lb.""" sf_name = prefix + '-' + constants.POST_MAP_AMP_TO_LB_SUBFLOW post_map_amp_to_lb = linear_flow.Flow( sf_name) post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate( name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK, requires=(constants.AMPHORA, constants.FLAVOR))) if role == constants.ROLE_MASTER: post_map_amp_to_lb.add(database_tasks.MarkAmphoraMasterInDB( name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_BACKUP: post_map_amp_to_lb.add(database_tasks.MarkAmphoraBackupInDB( name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_STANDALONE: post_map_amp_to_lb.add(database_tasks.MarkAmphoraStandAloneInDB( name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, requires=constants.AMPHORA)) return post_map_amp_to_lb def _get_create_amp_for_lb_subflow(self, prefix, role, is_spare=False): """Create a new amphora for lb.""" sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW create_amp_for_lb_subflow = linear_flow.Flow(sf_name) create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB( name=sf_name + '-' + constants.CREATE_AMPHORA_INDB, requires=constants.LOADBALANCER_ID, provides=constants.AMPHORA_ID)) create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask( name=sf_name + '-' + constants.GENERATE_SERVER_PEM, provides=constants.SERVER_PEM)) create_amp_for_lb_subflow.add( database_tasks.UpdateAmphoraDBCertExpiration( name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION, requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( name=sf_name + '-' + 
constants.CERT_COMPUTE_CREATE, requires=(constants.AMPHORA_ID, constants.SERVER_PEM, constants.BUILD_TYPE_PRIORITY, constants.SERVER_GROUP_ID, constants.FLAVOR, constants.AVAILABILITY_ZONE), provides=constants.COMPUTE_ID)) create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId( name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID, requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB( name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB, requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) create_amp_for_lb_subflow.add(compute_tasks.ComputeActiveWait( name=sf_name + '-' + constants.COMPUTE_WAIT, requires=(constants.COMPUTE_ID, constants.AMPHORA_ID, constants.AVAILABILITY_ZONE), provides=constants.COMPUTE_OBJ)) create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo( name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO, requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), provides=constants.AMPHORA)) create_amp_for_lb_subflow.add(self._retry_flow(sf_name)) create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize( name=sf_name + '-' + constants.AMPHORA_FINALIZE, requires=constants.AMPHORA)) if is_spare: create_amp_for_lb_subflow.add( database_tasks.MarkAmphoraReadyInDB( name=sf_name + '-' + constants.MARK_AMPHORA_READY_INDB, requires=constants.AMPHORA)) else: create_amp_for_lb_subflow.add( database_tasks.MarkAmphoraAllocatedInDB( name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB, requires=(constants.AMPHORA, constants.LOADBALANCER_ID))) if role == constants.ROLE_MASTER: create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB( name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_BACKUP: create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB( name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_STANDALONE: create_amp_for_lb_subflow.add( database_tasks.MarkAmphoraStandAloneInDB( name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, requires=constants.AMPHORA)) return create_amp_for_lb_subflow def _allocate_amp_to_lb_decider(self, history): """decides if the lb shall be mapped to a spare amphora :return: True if a spare amphora exists in DB """ return list(history.values())[0] is not None def _create_new_amp_for_lb_decider(self, history): """decides if a new amphora must be created for the lb :return: True if there is no spare amphora """ values = history.values() return not values or list(values)[0] is None def _retry_flow(self, sf_name): retry_task = sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT retry_subflow = linear_flow.Flow( sf_name + '-' + constants.CREATE_AMPHORA_RETRY_SUBFLOW, retry=amphora_driver_tasks.AmpRetry()) retry_subflow.add( amphora_driver_tasks.AmphoraComputeConnectivityWait( name=retry_task, requires=constants.AMPHORA, inject={'raise_retry_exception': True})) return retry_subflow def _finalize_flow(self, sf_name, role): sf_name = sf_name + constants.FINALIZE_AMPHORA_FLOW create_amp_for_lb_subflow = linear_flow.Flow(sf_name) create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize( name=sf_name + '-' + constants.AMPHORA_FINALIZE, requires=constants.AMPHORA)) create_amp_for_lb_subflow.add( database_tasks.MarkAmphoraAllocatedInDB( name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB, requires=(constants.AMPHORA, constants.LOADBALANCER_ID))) 
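# Reload the amphora from the database below so the tasks that follow
# operate on the record as it looks after the allocation above.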
create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora( name=sf_name + '-' + constants.RELOAD_AMPHORA, requires=constants.AMPHORA, provides=constants.AMPHORA)) if role == constants.ROLE_MASTER: create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB( name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_BACKUP: create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB( name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, requires=constants.AMPHORA)) elif role == constants.ROLE_STANDALONE: create_amp_for_lb_subflow.add( database_tasks.MarkAmphoraStandAloneInDB( name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, requires=constants.AMPHORA)) return create_amp_for_lb_subflow def get_amphora_for_lb_subflow( self, prefix, role=constants.ROLE_STANDALONE, is_spare=False): """Tries to allocate a spare amphora to a load balancer. If none exists, creates a new amphora. """ sf_name = prefix + '-' + constants.GET_AMPHORA_FOR_LB_SUBFLOW # Don't replace a spare with another spare, just build a fresh one. if is_spare: get_spare_amp_flow = linear_flow.Flow(sf_name) get_spare_amp_flow.add(self._get_create_amp_for_lb_subflow( prefix, role, is_spare=is_spare)) return get_spare_amp_flow # We need a graph flow here for a conditional flow amp_for_lb_flow = graph_flow.Flow(sf_name) # Setup the task that maps an amphora to a load balancer allocate_and_associate_amp = database_tasks.MapLoadbalancerToAmphora( name=sf_name + '-' + constants.MAP_LOADBALANCER_TO_AMPHORA, requires=(constants.LOADBALANCER_ID, constants.FLAVOR, constants.AVAILABILITY_ZONE), provides=constants.AMPHORA) # Define a subflow for if we successfully map an amphora map_lb_to_amp = self._get_post_map_lb_subflow(prefix, role) # Define a subflow for if we can't map an amphora create_amp = self._get_create_amp_for_lb_subflow(prefix, role) # TODO(ataraday): Have to split create flow due to lack of # functionality in taskflow: related https://bugs.launchpad.net/taskflow/+bug/1480907 retry_flow = self._retry_flow(sf_name) finalize_flow = self._finalize_flow(sf_name, role) # Add them to the graph flow amp_for_lb_flow.add(allocate_and_associate_amp, map_lb_to_amp, create_amp, retry_flow, finalize_flow, resolve_requires=False) # Setup the decider for the path if we can map an amphora amp_for_lb_flow.link(allocate_and_associate_amp, map_lb_to_amp, decider=self._allocate_amp_to_lb_decider, decider_depth='flow') # Setup the decider for the path if we can't map an amphora amp_for_lb_flow.link(allocate_and_associate_amp, create_amp, decider=self._create_new_amp_for_lb_decider, decider_depth='flow') # TODO(ataraday): setup separate deciders as we need retry flow # properly ignored amp_for_lb_flow.link(create_amp, retry_flow, decider=self._create_new_amp_for_lb_decider, decider_depth='flow') amp_for_lb_flow.link(retry_flow, finalize_flow, decider=self._create_new_amp_for_lb_decider, decider_depth='flow') return amp_for_lb_flow def get_delete_amphora_flow( self, amphora, retry_attempts=CONF.controller_worker.amphora_delete_retries, retry_interval=( CONF.controller_worker.amphora_delete_retry_interval)): """Creates a subflow to delete an amphora and its port. This flow is idempotent and safe to retry. :param amphora: An amphora dict object. :param retry_attempts: The number of times the flow is retried. :param retry_interval: The time to wait, in seconds, between retries. :returns: The subflow for deleting the amphora. :raises AmphoraNotFound: The referenced Amphora was not found.
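For reference, an editor's sketch of a retrying linear flow in the
spirit of the retry controller attached below (``PokeCompute``, the
stock ``retry.Times`` controller and the attempt count are
illustrative assumptions; Octavia attaches its own sleeping retry
controller instead)::

    from taskflow import engines
    from taskflow import retry
    from taskflow import task
    from taskflow.patterns import linear_flow

    class PokeCompute(task.Task):
        def execute(self):
            print('delete attempt')

    # If PokeCompute fails, the retry controller re-runs the flow
    # body up to the configured number of attempts.
    flow = linear_flow.Flow('delete-with-retry',
                            retry=retry.Times(attempts=3))
    flow.add(PokeCompute())
    engines.run(flow)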
""" amphora_id = amphora[constants.ID] delete_amphora_flow = linear_flow.Flow( name=constants.DELETE_AMPHORA_FLOW + '-' + amphora_id, retry=retry_tasks.SleepingRetryTimesController( name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' + amphora_id, attempts=retry_attempts, interval=retry_interval)) delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora_id, inject={constants.AMPHORA: amphora})) delete_amphora_flow.add( database_tasks.MarkAmphoraPendingDeleteInDB( name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora_id, inject={constants.AMPHORA: amphora})) delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy( name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora_id, inject={constants.AMPHORA: amphora})) delete_amphora_flow.add(compute_tasks.ComputeDelete( name=constants.DELETE_AMPHORA + '-' + amphora_id, inject={constants.AMPHORA: amphora, constants.PASSIVE_FAILURE: True})) delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring( name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora_id, inject={constants.AMPHORA: amphora})) delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB( name=constants.MARK_AMPHORA_DELETED + '-' + amphora_id, inject={constants.AMPHORA: amphora})) if amphora.get(constants.VRRP_PORT_ID): delete_amphora_flow.add(network_tasks.DeletePort( name=(constants.DELETE_PORT + '-' + str(amphora_id) + '-' + str(amphora[constants.VRRP_PORT_ID])), inject={constants.PORT_ID: amphora[constants.VRRP_PORT_ID], constants.PASSIVE_FAILURE: True})) # TODO(johnsom) What about cleaning up any member ports? # maybe we should get the list of attached ports prior to delete # and call delete on them here. Fix this as part of # https://storyboard.openstack.org/#!/story/2007077 return delete_amphora_flow def get_vrrp_subflow(self, prefix, timeout_dict=None, create_vrrp_group=True): sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW vrrp_subflow = linear_flow.Flow(sf_name) # Optimization for failover flow. No reason to call this # when configuring the secondary amphora. if create_vrrp_group: vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB( name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, requires=constants.LOADBALANCER_ID)) vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE_NETWORK_CONFIG)) # VRRP update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. 
update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow') # We have three tasks to run in order, per amphora amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow') amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF, requires=constants.AMPHORAE, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict}, provides=constants.AMP_VRRP_INT)) amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, constants.AMP_VRRP_INT), inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( name=sf_name + '-0-' + constants.AMP_VRRP_START, requires=constants.AMPHORAE, inject={constants.AMPHORA_INDEX: 0, constants.TIMEOUT_DICT: timeout_dict})) amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow') amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF, requires=constants.AMPHORAE, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict}, provides=constants.AMP_VRRP_INT)) amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, constants.AMP_VRRP_INT), inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( name=sf_name + '-1-' + constants.AMP_VRRP_START, requires=constants.AMPHORAE, inject={constants.AMPHORA_INDEX: 1, constants.TIMEOUT_DICT: timeout_dict})) update_amps_subflow.add(amp_0_subflow) update_amps_subflow.add(amp_1_subflow) vrrp_subflow.add(update_amps_subflow) return vrrp_subflow def cert_rotate_amphora_flow(self): """Implement rotation for amphora's cert. 1. Create a new certificate 2. Upload the cert to amphora 3. update the newly created certificate info to amphora 4. update the cert_busy flag to be false after rotation :returns: The flow for updating an amphora """ rotated_amphora_flow = linear_flow.Flow( constants.CERT_ROTATE_AMPHORA_FLOW) rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( requires=constants.AMPHORA)) # create a new certificate, the returned value is the newly created # certificate rotated_amphora_flow.add(cert_task.GenerateServerPEMTask( provides=constants.SERVER_PEM)) # update it in amphora task rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload( requires=(constants.AMPHORA, constants.SERVER_PEM))) # update the newly created certificate info to amphora rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration( requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) # update the cert_busy flag to be false after rotation rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse( requires=constants.AMPHORA_ID)) return rotated_amphora_flow def update_amphora_config_flow(self): """Creates a flow to update the amphora agent configuration. 
:returns: The flow for updating an amphora """ update_amphora_flow = linear_flow.Flow( constants.UPDATE_AMPHORA_CONFIG_FLOW) update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( requires=constants.AMPHORA)) update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate( requires=(constants.AMPHORA, constants.FLAVOR))) return update_amphora_flow def get_amphora_for_lb_failover_subflow( self, prefix, role=constants.ROLE_STANDALONE, failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False, is_spare=False): """Creates a new amphora that will be used in a failover flow. :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer :provides: amphora_id, amphora :param prefix: The flow name prefix to use on the flow and tasks. :param role: The role this amphora will have in the topology. :param failed_amp_vrrp_port_id: The base port ID of the failed amp. :param is_vrrp_ipv6: True if the base port IP is IPv6. :param is_spare: True if we are getting a spare amphora. :return: A Taskflow sub-flow that will create the amphora. """ sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW amp_for_failover_flow = linear_flow.Flow(sf_name) # Try to allocate or boot an amphora instance (unconfigured) amp_for_failover_flow.add(self.get_amphora_for_lb_subflow( prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW, role=role, is_spare=is_spare)) # If we are getting a spare amphora, this is all we need to do. if is_spare: return amp_for_failover_flow # Create the VIP base (aka VRRP) port for the amphora. amp_for_failover_flow.add(network_tasks.CreateVIPBasePort( name=prefix + '-' + constants.CREATE_VIP_BASE_PORT, requires=(constants.VIP, constants.VIP_SG_ID, constants.AMPHORA_ID), provides=constants.BASE_PORT)) # Attach the VIP base (aka VRRP) port to the amphora. amp_for_failover_flow.add(compute_tasks.AttachPort( name=prefix + '-' + constants.ATTACH_PORT, requires=(constants.AMPHORA, constants.PORT), rebind={constants.PORT: constants.BASE_PORT})) # Update the amphora database record with the VIP base port info. amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails( name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS, requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT))) # Update the amphora networking for the plugged VIP port amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID, requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), provides=constants.AMPHORAE_NETWORK_CONFIG)) # Disable the base (vrrp) port on the failed amphora # This prevents a DAD failure when bringing up the new amphora. # Keepalived will handle this for act/stdby.
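        # (DAD here is IPv6 Duplicate Address Detection: if the failed
        # amphora's base port were left up and still answering for the
        # address, the replacement amphora's address could be flagged
        # as a duplicate and fail to come up.)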
if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and is_vrrp_ipv6): amp_for_failover_flow.add(network_tasks.AdminDownPort( name=prefix + '-' + constants.ADMIN_DOWN_PORT, inject={constants.PORT_ID: failed_amp_vrrp_port_id})) amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug( name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG, requires=(constants.AMPHORA, constants.LOADBALANCER, constants.AMPHORAE_NETWORK_CONFIG))) # Plug member ports amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta( name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA, requires=(constants.LOADBALANCER, constants.AMPHORA, constants.AVAILABILITY_ZONE, constants.VRRP_PORT), rebind={constants.VRRP_PORT: constants.BASE_PORT}, provides=constants.DELTA)) amp_for_failover_flow.add(network_tasks.HandleNetworkDelta( name=prefix + '-' + constants.HANDLE_NETWORK_DELTA, requires=(constants.AMPHORA, constants.DELTA), provides=constants.ADDED_PORTS)) amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG, requires=(constants.LOADBALANCER, constants.ADDED_PORTS))) return amp_for_failover_flow def get_failover_amphora_flow(self, failed_amphora, lb_amp_count): """Get a Taskflow flow to failover an amphora. 1. Build a replacement amphora. 2. Delete the old amphora. 3. Update the amphorae listener configurations. 4. Update the VRRP configurations if needed. :param failed_amphora: The amphora dict to failover. :param lb_amp_count: The number of amphora on this load balancer. :returns: The flow that will provide the failover. """ failover_amp_flow = linear_flow.Flow( constants.FAILOVER_AMPHORA_FLOW) # Revert amphora to status ERROR if this flow goes wrong failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amphora})) if failed_amphora[constants.ROLE] in (constants.ROLE_MASTER, constants.ROLE_BACKUP): amp_role = 'master_or_backup' elif failed_amphora[constants.ROLE] == constants.ROLE_STANDALONE: amp_role = 'standalone' elif failed_amphora[constants.ROLE] is None: amp_role = 'spare' else: amp_role = 'undefined' LOG.info("Performing failover for amphora: %s", {"id": failed_amphora[constants.ID], "load_balancer_id": failed_amphora.get( constants.LOAD_BALANCER_ID), "lb_network_ip": failed_amphora.get(constants.LB_NETWORK_IP), "compute_id": failed_amphora.get(constants.COMPUTE_ID), "role": amp_role}) failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amphora})) failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amphora})) failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID( requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID)) is_spare = True is_vrrp_ipv6 = False if failed_amphora.get(constants.LOAD_BALANCER_ID): is_spare = False if failed_amphora.get(constants.VRRP_IP): is_vrrp_ipv6 = utils.is_ipv6(failed_amphora[constants.VRRP_IP]) # Get a replacement amphora and plug all of the networking. # # Do this early as the compute services have been observed to be # unreliable. The community decided the chance that deleting first # would open resources for an instance is less likely than the # compute service failing to boot an instance for other reasons. # TODO(johnsom) Move this back out to run for spares after # delete amphora API is available. 
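        # For reference, a stdlib-only equivalent of the utils.is_ipv6()
        # check used above (editor's sketch; octavia.common.utils has
        # its own implementation):
        #
        #     import ipaddress
        #
        #     def is_ipv6(ip_text):
        #         return ipaddress.ip_address(ip_text).version == 6
        #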
failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow( prefix=constants.FAILOVER_LOADBALANCER_FLOW, role=failed_amphora[constants.ROLE], failed_amp_vrrp_port_id=failed_amphora.get( constants.VRRP_PORT_ID), is_vrrp_ipv6=is_vrrp_ipv6, is_spare=is_spare)) failover_amp_flow.add( self.get_delete_amphora_flow( failed_amphora, retry_attempts=CONF.controller_worker.amphora_delete_retries, retry_interval=( CONF.controller_worker.amphora_delete_retry_interval))) failover_amp_flow.add( database_tasks.DisableAmphoraHealthMonitoring( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amphora})) if not failed_amphora.get(constants.LOAD_BALANCER_ID): # This is an unallocated amphora (spares pool), we are done. return failover_amp_flow failover_amp_flow.add(database_tasks.GetLoadBalancer( requires=constants.LOADBALANCER_ID, inject={constants.LOADBALANCER_ID: failed_amphora[constants.LOAD_BALANCER_ID]}, provides=constants.LOADBALANCER)) failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( name=constants.GET_AMPHORAE_FROM_LB, requires=constants.LOADBALANCER_ID, inject={constants.LOADBALANCER_ID: failed_amphora[constants.LOAD_BALANCER_ID]}, provides=constants.AMPHORAE)) # Setup timeouts for our requests to the amphorae timeout_dict = { constants.CONN_MAX_RETRIES: CONF.haproxy_amphora.active_connection_max_retries, constants.CONN_RETRY_INTERVAL: CONF.haproxy_amphora.active_connection_rety_interval} # Listeners update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. update_amps_subflow = unordered_flow.Flow( constants.UPDATE_AMPS_SUBFLOW) for amp_index in range(0, lb_amp_count): update_amps_subflow.add( amphora_driver_tasks.AmphoraIndexListenerUpdate( name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE, requires=(constants.LOADBALANCER, constants.AMPHORAE), inject={constants.AMPHORA_INDEX: amp_index, constants.TIMEOUT_DICT: timeout_dict})) failover_amp_flow.add(update_amps_subflow) # Configure and enable keepalived in the amphora if lb_amp_count == 2: failover_amp_flow.add( self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW, timeout_dict, create_vrrp_group=False)) # Reload the listener. This needs to be done here because # it will create the required haproxy check scripts for # the VRRP deployed above. # A "U" or newer amphora-agent will remove the need for this # task here. # TODO(johnsom) Remove this in the "W" cycle reload_listener_subflow = unordered_flow.Flow( constants.AMPHORA_LISTENER_RELOAD_SUBFLOW) for amp_index in range(0, lb_amp_count): reload_listener_subflow.add( amphora_driver_tasks.AmphoraIndexListenersReload( name=(str(amp_index) + '-' + constants.AMPHORA_RELOAD_LISTENER), requires=(constants.LOADBALANCER, constants.AMPHORAE), inject={constants.AMPHORA_INDEX: amp_index, constants.TIMEOUT_DICT: timeout_dict})) failover_amp_flow.add(reload_listener_subflow) # Remove any extraneous ports # Note: Nova sometimes fails to delete ports attached to an instance. # For example, if you create an LB with a listener, then # 'openstack server delete' the amphora, you will see the vrrp # port attached to that instance will remain after the instance # is deleted. 
# TODO(johnsom) Fix this as part of # https://storyboard.openstack.org/#!/story/2007077 # Mark LB ACTIVE failover_amp_flow.add( database_tasks.MarkLBActiveInDB(mark_subobjects=True, requires=constants.LOADBALANCER)) return failover_amp_flow octavia-6.2.2/octavia/controller/worker/v2/flows/flow_utils.py # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia.api.drivers import utils as provider_utils from octavia.controller.worker.v2.flows import amphora_flows from octavia.controller.worker.v2.flows import health_monitor_flows from octavia.controller.worker.v2.flows import l7policy_flows from octavia.controller.worker.v2.flows import l7rule_flows from octavia.controller.worker.v2.flows import listener_flows from octavia.controller.worker.v2.flows import load_balancer_flows from octavia.controller.worker.v2.flows import member_flows from octavia.controller.worker.v2.flows import pool_flows LB_FLOWS = load_balancer_flows.LoadBalancerFlows() AMP_FLOWS = amphora_flows.AmphoraFlows() HM_FLOWS = health_monitor_flows.HealthMonitorFlows() L7_POLICY_FLOWS = l7policy_flows.L7PolicyFlows() L7_RULES_FLOWS = l7rule_flows.L7RuleFlows() LISTENER_FLOWS = listener_flows.ListenerFlows() M_FLOWS = member_flows.MemberFlows() P_FLOWS = pool_flows.PoolFlows() def get_create_load_balancer_flow(topology, listeners=None): return LB_FLOWS.get_create_load_balancer_flow(topology, listeners=listeners) def get_delete_load_balancer_flow(lb): return LB_FLOWS.get_delete_load_balancer_flow(lb) def get_listeners_on_lb(db_lb): """Get a list of the listeners on a load balancer. :param db_lb: A load balancer database model object. :returns: A list of provider dict format listeners. """ listener_dicts = [] for listener in db_lb.listeners: prov_listener = provider_utils.db_listener_to_provider_listener( listener) listener_dicts.append(prov_listener.to_dict()) return listener_dicts def get_pools_on_lb(db_lb): """Get a list of the pools on a load balancer. :param db_lb: A load balancer database model object. :returns: A list of provider dict format pools.
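Example (editor's sketch; ``db_lb`` is assumed to be a load balancer
model fetched through the repository layer)::

    for pool_dict in get_pools_on_lb(db_lb):
        print(sorted(pool_dict.keys()))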
""" pool_dicts = [] for pool in db_lb.pools: prov_pool = provider_utils.db_pool_to_provider_pool(pool) pool_dicts.append(prov_pool.to_dict()) return pool_dicts def get_cascade_delete_load_balancer_flow(lb, listeners=(), pools=()): return LB_FLOWS.get_cascade_delete_load_balancer_flow(lb, listeners, pools) def get_update_load_balancer_flow(): return LB_FLOWS.get_update_load_balancer_flow() def get_create_amphora_flow(): return AMP_FLOWS.get_create_amphora_flow() def get_delete_amphora_flow(amphora, retry_attempts=None, retry_interval=None): return AMP_FLOWS.get_delete_amphora_flow(amphora, retry_attempts, retry_interval) def get_failover_LB_flow(amps, lb): return LB_FLOWS.get_failover_LB_flow(amps, lb) def get_failover_amphora_flow(amphora_dict, lb_amp_count): return AMP_FLOWS.get_failover_amphora_flow(amphora_dict, lb_amp_count) def cert_rotate_amphora_flow(): return AMP_FLOWS.cert_rotate_amphora_flow() def update_amphora_config_flow(): return AMP_FLOWS.update_amphora_config_flow() def get_create_health_monitor_flow(): return HM_FLOWS.get_create_health_monitor_flow() def get_delete_health_monitor_flow(): return HM_FLOWS.get_delete_health_monitor_flow() def get_update_health_monitor_flow(): return HM_FLOWS.get_update_health_monitor_flow() def get_create_l7policy_flow(): return L7_POLICY_FLOWS.get_create_l7policy_flow() def get_delete_l7policy_flow(): return L7_POLICY_FLOWS.get_delete_l7policy_flow() def get_update_l7policy_flow(): return L7_POLICY_FLOWS.get_update_l7policy_flow() def get_create_l7rule_flow(): return L7_RULES_FLOWS.get_create_l7rule_flow() def get_delete_l7rule_flow(): return L7_RULES_FLOWS.get_delete_l7rule_flow() def get_update_l7rule_flow(): return L7_RULES_FLOWS.get_update_l7rule_flow() def get_create_listener_flow(): return LISTENER_FLOWS.get_create_listener_flow() def get_create_all_listeners_flow(): return LISTENER_FLOWS.get_create_all_listeners_flow() def get_delete_listener_flow(): return LISTENER_FLOWS.get_delete_listener_flow() def get_update_listener_flow(): return LISTENER_FLOWS.get_update_listener_flow() def get_create_member_flow(): return M_FLOWS.get_create_member_flow() def get_delete_member_flow(): return M_FLOWS.get_delete_member_flow() def get_update_member_flow(): return M_FLOWS.get_update_member_flow() def get_batch_update_members_flow(old_members, new_members, updated_members): return M_FLOWS.get_batch_update_members_flow(old_members, new_members, updated_members) def get_create_pool_flow(): return P_FLOWS.get_create_pool_flow() def get_delete_pool_flow(): return P_FLOWS.get_delete_pool_flow() def get_update_pool_flow(): return P_FLOWS.get_update_pool_flow() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/flows/health_monitor_flows.py0000664000175000017500000001103700000000000026577 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v2.tasks import amphora_driver_tasks from octavia.controller.worker.v2.tasks import database_tasks from octavia.controller.worker.v2.tasks import lifecycle_tasks class HealthMonitorFlows(object): def get_create_health_monitor_flow(self): """Create a flow to create a health monitor :returns: The flow for creating a health monitor """ create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW) create_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( requires=[constants.HEALTH_MON, constants.LISTENERS, constants.LOADBALANCER])) create_hm_flow.add(database_tasks.MarkHealthMonitorPendingCreateInDB( requires=constants.HEALTH_MON)) create_hm_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) create_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( requires=constants.HEALTH_MON)) create_hm_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) create_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return create_hm_flow def get_delete_health_monitor_flow(self): """Create a flow to delete a health monitor :returns: The flow for deleting a health monitor """ delete_hm_flow = linear_flow.Flow(constants.DELETE_HEALTH_MONITOR_FLOW) delete_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( requires=[constants.HEALTH_MON, constants.LISTENERS, constants.LOADBALANCER])) delete_hm_flow.add(database_tasks.MarkHealthMonitorPendingDeleteInDB( requires=constants.HEALTH_MON)) delete_hm_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB( requires=constants.HEALTH_MON)) delete_hm_flow.add(database_tasks.DecrementHealthMonitorQuota( requires=constants.PROJECT_ID)) delete_hm_flow.add( database_tasks.UpdatePoolMembersOperatingStatusInDB( requires=constants.POOL_ID, inject={constants.OPERATING_STATUS: constants.NO_MONITOR})) delete_hm_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return delete_hm_flow def get_update_health_monitor_flow(self): """Create a flow to update a health monitor :returns: The flow for updating a health monitor """ update_hm_flow = linear_flow.Flow(constants.UPDATE_HEALTH_MONITOR_FLOW) update_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( requires=[constants.HEALTH_MON, constants.LISTENERS, constants.LOADBALANCER])) update_hm_flow.add(database_tasks.MarkHealthMonitorPendingUpdateInDB( requires=constants.HEALTH_MON)) update_hm_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) update_hm_flow.add(database_tasks.UpdateHealthMonInDB( requires=[constants.HEALTH_MON, constants.UPDATE_DICT])) update_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( requires=constants.HEALTH_MON)) update_hm_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) update_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return update_hm_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/flows/l7policy_flows.py0000664000175000017500000000763300000000000025334 
0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v2.tasks import amphora_driver_tasks from octavia.controller.worker.v2.tasks import database_tasks from octavia.controller.worker.v2.tasks import lifecycle_tasks class L7PolicyFlows(object): def get_create_l7policy_flow(self): """Create a flow to create an L7 policy :returns: The flow for creating an L7 policy """ create_l7policy_flow = linear_flow.Flow(constants.CREATE_L7POLICY_FLOW) create_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( requires=[constants.L7POLICY, constants.LISTENERS, constants.LOADBALANCER_ID])) create_l7policy_flow.add(database_tasks.MarkL7PolicyPendingCreateInDB( requires=constants.L7POLICY)) create_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) create_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) create_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return create_l7policy_flow def get_delete_l7policy_flow(self): """Create a flow to delete an L7 policy :returns: The flow for deleting an L7 policy """ delete_l7policy_flow = linear_flow.Flow(constants.DELETE_L7POLICY_FLOW) delete_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( requires=[constants.L7POLICY, constants.LISTENERS, constants.LOADBALANCER_ID])) delete_l7policy_flow.add(database_tasks.MarkL7PolicyPendingDeleteInDB( requires=constants.L7POLICY)) delete_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) delete_l7policy_flow.add(database_tasks.DeleteL7PolicyInDB( requires=constants.L7POLICY)) delete_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return delete_l7policy_flow def get_update_l7policy_flow(self): """Create a flow to update an L7 policy :returns: The flow for updating an L7 policy """ update_l7policy_flow = linear_flow.Flow(constants.UPDATE_L7POLICY_FLOW) update_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( requires=[constants.L7POLICY, constants.LISTENERS, constants.LOADBALANCER_ID])) update_l7policy_flow.add(database_tasks.MarkL7PolicyPendingUpdateInDB( requires=constants.L7POLICY)) update_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) update_l7policy_flow.add(database_tasks.UpdateL7PolicyInDB( requires=[constants.L7POLICY, constants.UPDATE_DICT])) update_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) update_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return update_l7policy_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 
octavia-6.2.2/octavia/controller/worker/v2/flows/l7rule_flows.py0000664000175000017500000001040400000000000024772 0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v2.tasks import amphora_driver_tasks from octavia.controller.worker.v2.tasks import database_tasks from octavia.controller.worker.v2.tasks import lifecycle_tasks class L7RuleFlows(object): def get_create_l7rule_flow(self): """Create a flow to create an L7 rule :returns: The flow for creating an L7 rule """ create_l7rule_flow = linear_flow.Flow(constants.CREATE_L7RULE_FLOW) create_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( requires=[constants.L7RULE, constants.L7POLICY_ID, constants.LISTENERS, constants.LOADBALANCER_ID])) create_l7rule_flow.add(database_tasks.MarkL7RulePendingCreateInDB( requires=constants.L7RULE)) create_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) create_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( requires=constants.L7RULE)) create_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) create_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return create_l7rule_flow def get_delete_l7rule_flow(self): """Create a flow to delete an L7 rule :returns: The flow for deleting an L7 rule """ delete_l7rule_flow = linear_flow.Flow(constants.DELETE_L7RULE_FLOW) delete_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( requires=[constants.L7RULE, constants.L7POLICY_ID, constants.LISTENERS, constants.LOADBALANCER_ID])) delete_l7rule_flow.add(database_tasks.MarkL7RulePendingDeleteInDB( requires=constants.L7RULE)) delete_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) delete_l7rule_flow.add(database_tasks.DeleteL7RuleInDB( requires=constants.L7RULE)) delete_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) delete_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return delete_l7rule_flow def get_update_l7rule_flow(self): """Create a flow to update an L7 rule :returns: The flow for updating an L7 rule """ update_l7rule_flow = linear_flow.Flow(constants.UPDATE_L7RULE_FLOW) update_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( requires=[constants.L7RULE, constants.L7POLICY_ID, constants.LISTENERS, constants.LOADBALANCER_ID])) update_l7rule_flow.add(database_tasks.MarkL7RulePendingUpdateInDB( requires=constants.L7RULE)) update_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) update_l7rule_flow.add(database_tasks.UpdateL7RuleInDB( requires=[constants.L7RULE, constants.UPDATE_DICT])) update_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( requires=constants.L7RULE)) 
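        # NOTE: (illustrative summary, hedged) each rule flow in this class
        # finishes by walking the status chain back to ACTIVE, conceptually
        #
        #     l7rule -> parent l7policy -> listeners -> load balancer
        #
        # which is why the parent policy is explicitly re-marked ACTIVE
        # below before the listeners and load balancer are.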
update_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( requires=constants.L7POLICY)) update_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return update_l7rule_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/flows/listener_flows.py0000664000175000017500000001320400000000000025406 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v2.tasks import amphora_driver_tasks from octavia.controller.worker.v2.tasks import database_tasks from octavia.controller.worker.v2.tasks import lifecycle_tasks from octavia.controller.worker.v2.tasks import network_tasks class ListenerFlows(object): def get_create_listener_flow(self): """Create a flow to create a listener :returns: The flow for creating a listener """ create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW) create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask( requires=constants.LISTENERS)) create_listener_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) create_listener_flow.add(network_tasks.UpdateVIP( requires=constants.LISTENERS)) create_listener_flow.add(database_tasks. 
MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return create_listener_flow def get_create_all_listeners_flow(self): """Create a flow to create all listeners :returns: The flow for creating all listeners """ create_all_listeners_flow = linear_flow.Flow( constants.CREATE_LISTENERS_FLOW) create_all_listeners_flow.add( database_tasks.GetListenersFromLoadbalancer( requires=constants.LOADBALANCER, provides=constants.LISTENERS)) create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer( requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) create_all_listeners_flow.add(network_tasks.UpdateVIP( requires=constants.LISTENERS)) return create_all_listeners_flow def get_delete_listener_flow(self): """Create a flow to delete a listener :returns: The flow for deleting a listener """ delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask( requires=constants.LISTENER)) delete_listener_flow.add(amphora_driver_tasks.ListenerDelete( requires=constants.LISTENER)) delete_listener_flow.add(network_tasks.UpdateVIPForDelete( requires=constants.LOADBALANCER_ID)) delete_listener_flow.add(database_tasks.DeleteListenerInDB( requires=constants.LISTENER)) delete_listener_flow.add(database_tasks.DecrementListenerQuota( requires=constants.PROJECT_ID)) delete_listener_flow.add(database_tasks.MarkLBActiveInDBByListener( requires=constants.LISTENER)) return delete_listener_flow def get_delete_listener_internal_flow(self, listener): """Create a flow to delete a listener and l7policies internally (will skip deletion on the amp and marking LB active) :returns: The flow for deleting a listener """ listener_id = listener[constants.LISTENER_ID] delete_listener_flow = linear_flow.Flow( constants.DELETE_LISTENER_FLOW + '-' + listener_id) # Should cascade delete all L7 policies delete_listener_flow.add(network_tasks.UpdateVIPForDelete( name='delete_update_vip_' + listener_id, requires=constants.LOADBALANCER_ID)) delete_listener_flow.add(database_tasks.DeleteListenerInDB( name='delete_listener_in_db_' + listener_id, requires=constants.LISTENER, inject={constants.LISTENER: listener})) delete_listener_flow.add(database_tasks.DecrementListenerQuota( name='decrement_listener_quota_' + listener_id, requires=constants.PROJECT_ID)) return delete_listener_flow def get_update_listener_flow(self): """Create a flow to update a listener :returns: The flow for updating a listener """ update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW) update_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask( requires=constants.LISTENER)) update_listener_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) update_listener_flow.add(network_tasks.UpdateVIP( requires=constants.LISTENERS)) update_listener_flow.add(database_tasks.UpdateListenerInDB( requires=[constants.LISTENER, constants.UPDATE_DICT])) update_listener_flow.add(database_tasks. 
MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return update_listener_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/flows/load_balancer_flows.py0000664000175000017500000007454700000000000026350 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from oslo_log import log as logging from taskflow.patterns import linear_flow from taskflow.patterns import unordered_flow from octavia.common import constants from octavia.common import exceptions from octavia.common import utils from octavia.controller.worker.v2.flows import amphora_flows from octavia.controller.worker.v2.flows import listener_flows from octavia.controller.worker.v2.flows import member_flows from octavia.controller.worker.v2.flows import pool_flows from octavia.controller.worker.v2.tasks import amphora_driver_tasks from octavia.controller.worker.v2.tasks import compute_tasks from octavia.controller.worker.v2.tasks import database_tasks from octavia.controller.worker.v2.tasks import lifecycle_tasks from octavia.controller.worker.v2.tasks import network_tasks from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) class LoadBalancerFlows(object): def __init__(self): self.amp_flows = amphora_flows.AmphoraFlows() self.listener_flows = listener_flows.ListenerFlows() self.pool_flows = pool_flows.PoolFlows() self.member_flows = member_flows.MemberFlows() self.lb_repo = repo.LoadBalancerRepository() def get_create_load_balancer_flow(self, topology, listeners=None): """Creates a conditional graph flow that allocates a loadbalancer to two spare amphorae. :raises InvalidTopology: Invalid topology specified :return: The graph flow for creating a loadbalancer. 
""" f_name = constants.CREATE_LOADBALANCER_FLOW lb_create_flow = linear_flow.Flow(f_name) lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask( requires=constants.LOADBALANCER_ID)) # allocate VIP lb_create_flow.add(database_tasks.ReloadLoadBalancer( name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER )) lb_create_flow.add(network_tasks.AllocateVIP( requires=constants.LOADBALANCER, provides=constants.VIP)) lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation( requires=(constants.LOADBALANCER_ID, constants.VIP), provides=constants.LOADBALANCER)) lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup( requires=constants.LOADBALANCER_ID)) lb_create_flow.add(network_tasks.GetSubnetFromVIP( requires=constants.LOADBALANCER, provides=constants.SUBNET)) if topology == constants.TOPOLOGY_ACTIVE_STANDBY: lb_create_flow.add(*self._create_active_standby_topology()) elif topology == constants.TOPOLOGY_SINGLE: lb_create_flow.add(*self._create_single_topology()) else: LOG.error("Unknown topology: %s. Unable to build load balancer.", topology) raise exceptions.InvalidTopology(topology=topology) post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW lb_create_flow.add( self.get_post_lb_amp_association_flow( post_amp_prefix, topology, mark_active=(not listeners))) if listeners: lb_create_flow.add(*self._create_listeners_flow()) return lb_create_flow def _create_single_topology(self): sf_name = (constants.ROLE_STANDALONE + '-' + constants.AMP_PLUG_NET_SUBFLOW) amp_for_lb_net_flow = linear_flow.Flow(sf_name) amp_for_lb_flow = self.amp_flows.get_amphora_for_lb_subflow( prefix=constants.ROLE_STANDALONE, role=constants.ROLE_STANDALONE) amp_for_lb_net_flow.add(amp_for_lb_flow) amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name)) return amp_for_lb_net_flow def _create_active_standby_topology( self, lf_name=constants.CREATE_LOADBALANCER_FLOW): # When we boot up amphora for an active/standby topology, # we should leverage the Nova anti-affinity capabilities # to place the amphora on different hosts, also we need to check # if anti-affinity-flag is enabled or not: anti_affinity = CONF.nova.enable_anti_affinity flows = [] if anti_affinity: # we need to create a server group first flows.append( compute_tasks.NovaServerGroupCreate( name=lf_name + '-' + constants.CREATE_SERVER_GROUP_FLOW, requires=(constants.LOADBALANCER_ID), provides=constants.SERVER_GROUP_ID)) # update server group id in lb table flows.append( database_tasks.UpdateLBServerGroupInDB( name=lf_name + '-' + constants.UPDATE_LB_SERVERGROUPID_FLOW, requires=(constants.LOADBALANCER_ID, constants.SERVER_GROUP_ID))) f_name = constants.CREATE_LOADBALANCER_FLOW amps_flow = unordered_flow.Flow(f_name) master_sf_name = (constants.ROLE_MASTER + '-' + constants.AMP_PLUG_NET_SUBFLOW) master_amp_sf = linear_flow.Flow(master_sf_name) master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER)) master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name)) backup_sf_name = (constants.ROLE_BACKUP + '-' + constants.AMP_PLUG_NET_SUBFLOW) backup_amp_sf = linear_flow.Flow(backup_sf_name) backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)) backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name)) amps_flow.add(master_amp_sf, backup_amp_sf) return flows + [amps_flow] def _get_amp_net_subflow(self, sf_name): flows = [] 
flows.append(network_tasks.PlugVIPAmphora( name=sf_name + '-' + constants.PLUG_VIP_AMPHORA, requires=(constants.LOADBALANCER, constants.AMPHORA, constants.SUBNET), provides=constants.AMP_DATA)) flows.append(network_tasks.ApplyQosAmphora( name=sf_name + '-' + constants.APPLY_QOS_AMP, requires=(constants.LOADBALANCER, constants.AMP_DATA, constants.UPDATE_DICT))) flows.append(database_tasks.UpdateAmphoraVIPData( name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA, requires=constants.AMP_DATA)) flows.append(network_tasks.GetAmphoraNetworkConfigs( name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, requires=(constants.LOADBALANCER, constants.AMPHORA), provides=constants.AMPHORA_NETWORK_CONFIG)) flows.append(amphora_driver_tasks.AmphoraPostVIPPlug( name=sf_name + '-' + constants.AMP_POST_VIP_PLUG, rebind={constants.AMPHORAE_NETWORK_CONFIG: constants.AMPHORA_NETWORK_CONFIG}, requires=(constants.LOADBALANCER, constants.AMPHORAE_NETWORK_CONFIG))) return flows def _create_listeners_flow(self): flows = [] flows.append( database_tasks.ReloadLoadBalancer( name=constants.RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER ) ) flows.append( network_tasks.CalculateDelta( requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), provides=constants.DELTAS ) ) flows.append( network_tasks.HandleNetworkDeltas( requires=constants.DELTAS, provides=constants.ADDED_PORTS ) ) flows.append( amphora_driver_tasks.AmphoraePostNetworkPlug( requires=(constants.LOADBALANCER, constants.ADDED_PORTS) ) ) flows.append( self.listener_flows.get_create_all_listeners_flow() ) flows.append( database_tasks.MarkLBActiveInDB( mark_subobjects=True, requires=constants.LOADBALANCER ) ) return flows def get_post_lb_amp_association_flow(self, prefix, topology, mark_active=True): """Reload the loadbalancer and create networking subflows for created/allocated amphorae. :return: Post amphorae association subflow """ sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW post_create_LB_flow = linear_flow.Flow(sf_name) post_create_LB_flow.add( database_tasks.ReloadLoadBalancer( name=sf_name + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) if topology == constants.TOPOLOGY_ACTIVE_STANDBY: post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE)) vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix) post_create_LB_flow.add(vrrp_subflow) post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) if mark_active: post_create_LB_flow.add(database_tasks.MarkLBActiveInDB( name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB, requires=constants.LOADBALANCER)) return post_create_LB_flow def _get_delete_listeners_flow(self, listeners): """Sets up an internal delete flow :param listeners: A list of listener dicts :return: The flow for the deletion """ listeners_delete_flow = unordered_flow.Flow('listeners_delete_flow') for listener in listeners: listeners_delete_flow.add( self.listener_flows.get_delete_listener_internal_flow( listener)) return listeners_delete_flow def get_delete_load_balancer_flow(self, lb): """Creates a flow to delete a load balancer. 
        :returns: The flow for deleting a load balancer
        """
        return self._get_delete_load_balancer_flow(lb, False)

    def _get_delete_pools_flow(self, pools):
        """Sets up an internal flow to delete the pools of a load balancer

        Because taskflow does not support loops, we add one internal
        pool-delete subflow per pool and inject the pool ID into each.

        :param pools: A list of pool dicts to delete
        :return: The unordered flow for deleting the pools
        """
        pools_delete_flow = unordered_flow.Flow('pool_delete_flow')
        for pool in pools:
            pools_delete_flow.add(
                self.pool_flows.get_delete_pool_flow_internal(
                    pool[constants.POOL_ID]))
        return pools_delete_flow

    def _get_delete_load_balancer_flow(self, lb, cascade,
                                       listeners=(), pools=()):
        delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW)
        delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(compute_tasks.NovaServerGroupDelete(
            requires=constants.SERVER_GROUP_ID))
        delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy(
            requires=constants.LOADBALANCER))
        if cascade:
            listeners_delete = self._get_delete_listeners_flow(listeners)
            pools_delete = self._get_delete_pools_flow(pools)
            delete_LB_flow.add(pools_delete)
            delete_LB_flow.add(listeners_delete)
        delete_LB_flow.add(network_tasks.UnplugVIP(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(network_tasks.DeallocateVIP(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(compute_tasks.DeleteAmphoraeOnLoadBalancer(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
            requires=constants.PROJECT_ID))
        return delete_LB_flow

    def get_cascade_delete_load_balancer_flow(self, lb, listeners, pools):
        """Creates a flow to cascade-delete a load balancer.

        :returns: The flow for deleting a load balancer and all of its
                  listeners and pools
        """
        return self._get_delete_load_balancer_flow(lb, True,
                                                   listeners=listeners,
                                                   pools=pools)

    def get_update_load_balancer_flow(self):
        """Creates a flow to update a load balancer.

        :returns: The flow for updating a load balancer
        """
        update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW)
        update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
            requires=constants.LOADBALANCER))
        update_LB_flow.add(network_tasks.ApplyQos(
            requires=(constants.LOADBALANCER, constants.UPDATE_DICT)))
        update_LB_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=constants.LOADBALANCER_ID))
        update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
            requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
        update_LB_flow.add(database_tasks.MarkLBActiveInDB(
            requires=constants.LOADBALANCER))
        return update_LB_flow

    def get_failover_LB_flow(self, amps, lb):
        """Failover a load balancer.

        1. Validate the VIP port is correct and present.
        2. Build a replacement amphora.
        3. Delete the failed amphora.
        4. Configure the replacement amphora listeners.
        5. Configure VRRP for the listeners.
        6. Build the second replacement amphora.
        7. Delete the second failed amphora.
        8. Delete any extraneous amphora.
        9. Configure the listeners on the new amphorae.
        10. Configure the VRRP on the new amphorae.
        11. Reload the listener configurations to pick up VRRP changes.
        12. Mark the load balancer back to ACTIVE.

        :returns: The flow that will provide the failover.
""" lb_topology = lb[constants.FLAVOR][constants.LOADBALANCER_TOPOLOGY] # Pick one amphora to be failed over if any exist. failed_amp = None if amps: failed_amp = amps.pop() failover_LB_flow = linear_flow.Flow( constants.FAILOVER_LOADBALANCER_FLOW) # Revert LB to provisioning_status ERROR if this flow goes wrong failover_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( requires=constants.LOADBALANCER)) # Setup timeouts for our requests to the amphorae timeout_dict = { constants.CONN_MAX_RETRIES: CONF.haproxy_amphora.active_connection_max_retries, constants.CONN_RETRY_INTERVAL: CONF.haproxy_amphora.active_connection_rety_interval} if failed_amp: failed_amp_role = failed_amp.get(constants.ROLE) if failed_amp_role in (constants.ROLE_MASTER, constants.ROLE_BACKUP): amp_role = 'master_or_backup' elif failed_amp_role == constants.ROLE_STANDALONE: amp_role = 'standalone' elif failed_amp_role is None: amp_role = 'spare' else: amp_role = 'undefined' LOG.info("Performing failover for amphora: %s", {"id": failed_amp.get(constants.ID), "load_balancer_id": lb.get(constants.ID), "lb_network_ip": failed_amp.get(constants.LB_NETWORK_IP), "compute_id": failed_amp.get(constants.COMPUTE_ID), "role": amp_role}) failover_LB_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) # Check that the VIP port exists and is ok failover_LB_flow.add( network_tasks.AllocateVIPforFailover( requires=constants.LOADBALANCER, provides=constants.VIP)) # Update the database with the VIP information failover_LB_flow.add(database_tasks.UpdateVIPAfterAllocation( requires=(constants.LOADBALANCER_ID, constants.VIP), provides=constants.LOADBALANCER)) # Make sure the SG has the correct rules and re-apply to the # VIP port. It is not used on the VIP port, but will help lock # the SG as in use. failover_LB_flow.add(network_tasks.UpdateVIPSecurityGroup( requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID)) new_amp_role = constants.ROLE_STANDALONE if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY: new_amp_role = constants.ROLE_BACKUP # Get a replacement amphora and plug all of the networking. # # Do this early as the compute services have been observed to be # unreliable. The community decided the chance that deleting first # would open resources for an instance is less likely than the compute # service failing to boot an instance for other reasons. 
if failed_amp: failed_vrrp_is_ipv6 = False if failed_amp.get(constants.VRRP_IP): failed_vrrp_is_ipv6 = utils.is_ipv6( failed_amp[constants.VRRP_IP]) failover_LB_flow.add( self.amp_flows.get_amphora_for_lb_failover_subflow( prefix=constants.FAILOVER_LOADBALANCER_FLOW, role=new_amp_role, failed_amp_vrrp_port_id=failed_amp.get( constants.VRRP_PORT_ID), is_vrrp_ipv6=failed_vrrp_is_ipv6)) else: failover_LB_flow.add( self.amp_flows.get_amphora_for_lb_failover_subflow( prefix=constants.FAILOVER_LOADBALANCER_FLOW, role=new_amp_role)) if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY: failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB( name=constants.MARK_AMP_BACKUP_INDB, requires=constants.AMPHORA)) # Delete the failed amp if failed_amp: failover_LB_flow.add( self.amp_flows.get_delete_amphora_flow(failed_amp)) # Update the data stored in the flow from the database failover_LB_flow.add(database_tasks.ReloadLoadBalancer( requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) # Configure the listener(s) # We will run update on this amphora again later if this is # an active/standby load balancer because we want this amp # functional as soon as possible. It must run again to update # the configurations for the new peers. failover_LB_flow.add(amphora_driver_tasks.AmpListenersUpdate( name=constants.AMP_LISTENER_UPDATE, requires=(constants.LOADBALANCER, constants.AMPHORA), inject={constants.TIMEOUT_DICT: timeout_dict})) # Bring up the new "backup" amphora VIP now to reduce the outage # on the final failover. This dropped the outage from 8-9 seconds # to less than one in my lab. # This does mean some steps have to be repeated later to reconfigure # for the second amphora as a peer. if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY: failover_LB_flow.add(database_tasks.CreateVRRPGroupForLB( name=new_amp_role + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, requires=constants.LOADBALANCER_ID)) failover_LB_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( name=(new_amp_role + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID), requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), provides=constants.FIRST_AMP_NETWORK_CONFIGS)) failover_LB_flow.add( amphora_driver_tasks.AmphoraUpdateVRRPInterface( name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF, requires=constants.AMPHORA, inject={constants.TIMEOUT_DICT: timeout_dict}, provides=constants.FIRST_AMP_VRRP_INTERFACE)) failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPUpdate( name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE, requires=(constants.LOADBALANCER_ID, constants.AMPHORA), rebind={constants.AMPHORAE_NETWORK_CONFIG: constants.FIRST_AMP_NETWORK_CONFIGS, constants.AMP_VRRP_INT: constants.FIRST_AMP_VRRP_INTERFACE}, inject={constants.TIMEOUT_DICT: timeout_dict})) failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPStart( name=new_amp_role + '-' + constants.AMP_VRRP_START, requires=constants.AMPHORA, inject={constants.TIMEOUT_DICT: timeout_dict})) # Start the listener. This needs to be done here because # it will create the required haproxy check scripts for # the VRRP deployed above. # A "V" or newer amphora-agent will remove the need for this # task here. 
# TODO(johnsom) Remove this in the "X" cycle failover_LB_flow.add(amphora_driver_tasks.ListenersStart( name=new_amp_role + '-' + constants.AMP_LISTENER_START, requires=(constants.LOADBALANCER, constants.AMPHORA))) # #### Work on standby amphora if needed ##### new_amp_role = constants.ROLE_MASTER failed_amp = None if amps: failed_amp = amps.pop() if failed_amp: failed_amp_role = failed_amp.get(constants.ROLE) if failed_amp_role in (constants.ROLE_MASTER, constants.ROLE_BACKUP): amp_role = 'master_or_backup' elif failed_amp_role == constants.ROLE_STANDALONE: amp_role = 'standalone' elif failed_amp_role is None: amp_role = 'spare' else: amp_role = 'undefined' LOG.info("Performing failover for amphora: %s", {"id": failed_amp.get(constants.ID), "load_balancer_id": lb.get(constants.ID), "lb_network_ip": failed_amp.get( constants.LB_NETWORK_IP), "compute_id": failed_amp.get(constants.COMPUTE_ID), "role": amp_role}) failover_LB_flow.add( database_tasks.MarkAmphoraPendingDeleteInDB( name=(new_amp_role + '-' + constants.MARK_AMPHORA_PENDING_DELETE), requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( name=(new_amp_role + '-' + constants.MARK_AMPHORA_HEALTH_BUSY), requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) # Get a replacement amphora and plug all of the networking. # # Do this early as the compute services have been observed to be # unreliable. The community decided the chance that deleting first # would open resources for an instance is less likely than the # compute service failing to boot an instance for other reasons. failover_LB_flow.add( self.amp_flows.get_amphora_for_lb_failover_subflow( prefix=(new_amp_role + '-' + constants.FAILOVER_LOADBALANCER_FLOW), role=new_amp_role)) failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB( name=constants.MARK_AMP_MASTER_INDB, requires=constants.AMPHORA)) # Delete the failed amp if failed_amp: failover_LB_flow.add( self.amp_flows.get_delete_amphora_flow( failed_amp)) failover_LB_flow.add( database_tasks.DisableAmphoraHealthMonitoring( name=(new_amp_role + '-' + constants.DISABLE_AMP_HEALTH_MONITORING), requires=constants.AMPHORA, inject={constants.AMPHORA: failed_amp})) # Remove any extraneous amphora # Note: This runs in all topology situations. # It should run before the act/stdby final listener update so # that we don't bother attempting to update dead amphorae. delete_extra_amps_flow = unordered_flow.Flow( constants.DELETE_EXTRA_AMPHORAE_FLOW) for amp in amps: LOG.debug('Found extraneous amphora %s on load balancer %s. ' 'Deleting.', amp.get(constants.ID), lb.get(id)) delete_extra_amps_flow.add( self.amp_flows.get_delete_amphora_flow(amp)) failover_LB_flow.add(delete_extra_amps_flow) if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY: # Update the data stored in the flow from the database failover_LB_flow.add(database_tasks.ReloadLoadBalancer( name=new_amp_role + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, requires=constants.LOADBALANCER_ID, provides=constants.LOADBALANCER)) failover_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB, requires=constants.LOADBALANCER_ID, provides=constants.AMPHORAE)) # Listeners update needs to be run on all amphora to update # their peer configurations. So parallelize this with an # unordered subflow. update_amps_subflow = unordered_flow.Flow( constants.UPDATE_AMPS_SUBFLOW) # Setup parallel flows for each amp. 
            # Configure and enable keepalived in the amphora
            failover_LB_flow.add(self.amp_flows.get_vrrp_subflow(
                new_amp_role + '-' + constants.GET_VRRP_SUBFLOW,
                timeout_dict, create_vrrp_group=False))

            # #### End of standby ####

            # Reload the listener. This needs to be done here because
            # it will create the required haproxy check scripts for
            # the VRRP deployed above.
            # A "V" or newer amphora-agent will remove the need for this
            # task here.
            # TODO(johnsom) Remove this in the "X" cycle
            failover_LB_flow.add(
                amphora_driver_tasks.AmphoraIndexListenersReload(
                    name=(new_amp_role + '-' +
                          constants.AMPHORA_RELOAD_LISTENER),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={constants.AMPHORA_INDEX: 1,
                            constants.TIMEOUT_DICT: timeout_dict}))

        # Remove any extraneous ports
        # Note: Nova sometimes fails to delete ports attached to an instance.
        #       For example, if you create an LB with a listener, then
        #       'openstack server delete' the amphora, you will see that the
        #       vrrp port attached to that instance remains after the
        #       instance is deleted.
        # TODO(johnsom) Fix this as part of
        #               https://storyboard.openstack.org/#!/story/2007077

        # Mark LB ACTIVE
        failover_LB_flow.add(
            database_tasks.MarkLBActiveInDB(mark_subobjects=True,
                                            requires=constants.LOADBALANCER))

        return failover_LB_flow


octavia-6.2.2/octavia/controller/worker/v2/flows/member_flows.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
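
# NOTE: A rough usage sketch (illustrative only; the member and update dicts
# are assumptions): the API layer splits a batch request into members to
# delete, members to create, and (member, update_dict) pairs to update, then
# builds the flow roughly as
#
#     flow = MemberFlows().get_batch_update_members_flow(
#         old_members=[m1], new_members=[m2],
#         updated_members=[(m3, m3_update_dict)])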
# from taskflow.patterns import linear_flow from taskflow.patterns import unordered_flow from octavia.common import constants from octavia.controller.worker.v2.tasks import amphora_driver_tasks from octavia.controller.worker.v2.tasks import database_tasks from octavia.controller.worker.v2.tasks import lifecycle_tasks from octavia.controller.worker.v2.tasks import network_tasks class MemberFlows(object): def get_create_member_flow(self): """Create a flow to create a member :returns: The flow for creating a member """ create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW) create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL_ID])) create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB( requires=constants.MEMBER)) create_member_flow.add(network_tasks.CalculateDelta( requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), provides=constants.DELTAS)) create_member_flow.add(network_tasks.HandleNetworkDeltas( requires=constants.DELTAS, provides=constants.ADDED_PORTS)) create_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( requires=(constants.LOADBALANCER, constants.ADDED_PORTS) )) create_member_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) create_member_flow.add(database_tasks.MarkMemberActiveInDB( requires=constants.MEMBER)) create_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) create_member_flow.add(database_tasks. MarkLBAndListenersActiveInDB( requires=(constants.LISTENERS, constants.LOADBALANCER_ID))) return create_member_flow def get_delete_member_flow(self): """Create a flow to delete a member :returns: The flow for deleting a member """ delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW) delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL_ID])) delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB( requires=constants.MEMBER)) delete_member_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) delete_member_flow.add(database_tasks.DeleteMemberInDB( requires=constants.MEMBER)) delete_member_flow.add(database_tasks.DecrementMemberQuota( requires=constants.PROJECT_ID)) delete_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) delete_member_flow.add(database_tasks. MarkLBAndListenersActiveInDB( requires=(constants.LISTENERS, constants.LOADBALANCER_ID))) return delete_member_flow def get_update_member_flow(self): """Create a flow to update a member :returns: The flow for updating a member """ update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW) update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL_ID])) update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB( requires=constants.MEMBER)) update_member_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) update_member_flow.add(database_tasks.UpdateMemberInDB( requires=[constants.MEMBER, constants.UPDATE_DICT])) update_member_flow.add(database_tasks.MarkMemberActiveInDB( requires=constants.MEMBER)) update_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) update_member_flow.add(database_tasks. 
MarkLBAndListenersActiveInDB( requires=(constants.LISTENERS, constants.LOADBALANCER_ID))) return update_member_flow def get_batch_update_members_flow(self, old_members, new_members, updated_members): """Create a flow to batch update members :returns: The flow for batch updating members """ batch_update_members_flow = linear_flow.Flow( constants.BATCH_UPDATE_MEMBERS_FLOW) unordered_members_flow = unordered_flow.Flow( constants.UNORDERED_MEMBER_UPDATES_FLOW) unordered_members_active_flow = unordered_flow.Flow( constants.UNORDERED_MEMBER_ACTIVE_FLOW) # Delete old members unordered_members_flow.add( lifecycle_tasks.MembersToErrorOnRevertTask( inject={constants.MEMBERS: old_members}, name='{flow}-deleted'.format( flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) for m in old_members: unordered_members_flow.add(database_tasks.DeleteMemberInDB( inject={constants.MEMBER: m}, name='{flow}-{id}'.format( id=m[constants.MEMBER_ID], flow=constants.DELETE_MEMBER_INDB))) unordered_members_flow.add(database_tasks.DecrementMemberQuota( requires=constants.PROJECT_ID, name='{flow}-{id}'.format( id=m[constants.MEMBER_ID], flow=constants.DECREMENT_MEMBER_QUOTA_FLOW))) # Create new members unordered_members_flow.add( lifecycle_tasks.MembersToErrorOnRevertTask( inject={constants.MEMBERS: new_members}, name='{flow}-created'.format( flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) for m in new_members: unordered_members_active_flow.add( database_tasks.MarkMemberActiveInDB( inject={constants.MEMBER: m}, name='{flow}-{id}'.format( id=m[constants.MEMBER_ID], flow=constants.MARK_MEMBER_ACTIVE_INDB))) # Update existing members unordered_members_flow.add( lifecycle_tasks.MembersToErrorOnRevertTask( # updated_members is a list of (obj, dict), only pass `obj` inject={constants.MEMBERS: [m[0] for m in updated_members]}, name='{flow}-updated'.format( flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) for m, um in updated_members: um.pop(constants.ID, None) unordered_members_active_flow.add( database_tasks.MarkMemberActiveInDB( inject={constants.MEMBER: m}, name='{flow}-{id}'.format( id=m[constants.MEMBER_ID], flow=constants.MARK_MEMBER_ACTIVE_INDB))) batch_update_members_flow.add(unordered_members_flow) # Done, do real updates batch_update_members_flow.add(network_tasks.CalculateDelta( requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), provides=constants.DELTAS)) batch_update_members_flow.add(network_tasks.HandleNetworkDeltas( requires=constants.DELTAS, provides=constants.ADDED_PORTS)) batch_update_members_flow.add( amphora_driver_tasks.AmphoraePostNetworkPlug( requires=(constants.LOADBALANCER, constants.ADDED_PORTS))) # Update the Listener (this makes the changes active on the Amp) batch_update_members_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) # Mark all the members ACTIVE here, then pool then LB/Listeners batch_update_members_flow.add(unordered_members_active_flow) batch_update_members_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) batch_update_members_flow.add( database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LISTENERS, constants.LOADBALANCER_ID))) return batch_update_members_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/flows/pool_flows.py0000664000175000017500000001237400000000000024541 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.v2.tasks import amphora_driver_tasks from octavia.controller.worker.v2.tasks import database_tasks from octavia.controller.worker.v2.tasks import lifecycle_tasks class PoolFlows(object): def get_create_pool_flow(self): """Create a flow to create a pool :returns: The flow for creating a pool """ create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW) create_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( requires=[constants.POOL_ID, constants.LISTENERS, constants.LOADBALANCER])) create_pool_flow.add(database_tasks.MarkPoolPendingCreateInDB( requires=constants.POOL_ID)) create_pool_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) create_pool_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return create_pool_flow def get_delete_pool_flow(self): """Create a flow to delete a pool :returns: The flow for deleting a pool """ delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) delete_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( requires=[constants.POOL_ID, constants.LISTENERS, constants.LOADBALANCER])) delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( requires=constants.POOL_ID)) delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( requires=constants.POOL_ID, provides=constants.POOL_CHILD_COUNT)) delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) delete_pool_flow.add(database_tasks.DeletePoolInDB( requires=constants.POOL_ID)) delete_pool_flow.add(database_tasks.DecrementPoolQuota( requires=[constants.PROJECT_ID, constants.POOL_CHILD_COUNT])) delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return delete_pool_flow def get_delete_pool_flow_internal(self, pool_id): """Create a flow to delete a pool, etc. 
:returns: The flow for deleting a pool """ delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW + '-' + pool_id) # health monitor should cascade # members should cascade delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( name='mark_pool_pending_delete_in_db_' + pool_id, requires=constants.POOL_ID, inject={constants.POOL_ID: pool_id})) delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( name='count_pool_children_for_quota_' + pool_id, requires=constants.POOL_ID, provides=constants.POOL_CHILD_COUNT, inject={constants.POOL_ID: pool_id})) delete_pool_flow.add(database_tasks.DeletePoolInDB( name='delete_pool_in_db_' + pool_id, requires=constants.POOL_ID, inject={constants.POOL_ID: pool_id})) delete_pool_flow.add(database_tasks.DecrementPoolQuota( name='decrement_pool_quota_' + pool_id, requires=[constants.PROJECT_ID, constants.POOL_CHILD_COUNT])) return delete_pool_flow def get_update_pool_flow(self): """Create a flow to update a pool :returns: The flow for updating a pool """ update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW) update_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( requires=[constants.POOL_ID, constants.LISTENERS, constants.LOADBALANCER])) update_pool_flow.add(database_tasks.MarkPoolPendingUpdateInDB( requires=constants.POOL_ID)) update_pool_flow.add(amphora_driver_tasks.ListenersUpdate( requires=constants.LOADBALANCER_ID)) update_pool_flow.add(database_tasks.UpdatePoolInDB( requires=[constants.POOL_ID, constants.UPDATE_DICT])) update_pool_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL_ID)) update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) return update_pool_flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/taskflow_jobboard_driver.py0000664000175000017500000000732100000000000026267 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import contextlib from oslo_config import cfg from oslo_log import log from taskflow.jobs import backends as job_backends from taskflow.persistence import backends as persistence_backends LOG = log.getLogger(__name__) CONF = cfg.CONF class JobboardTaskFlowDriver(object, metaclass=abc.ABCMeta): @abc.abstractmethod def job_board(self, persistence): """Setting up jobboard backend based on configuration setting. :param persistence: taskflow persistence backend instance :return: taskflow jobboard backend instance """ class MysqlPersistenceDriver(object): def __init__(self): self.persistence_conf = { 'connection': CONF.task_flow.persistence_connection, 'max_pool_size': CONF.database.max_pool_size, 'max_overflow': CONF.database.max_overflow, 'pool_timeout': CONF.database.pool_timeout, } def initialize(self): # Run migrations once on service start. 
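        # NOTE: (hedged illustration, connection URL made up)
        # persistence_conf from __init__ resembles
        #
        #     {'connection': 'mysql+pymysql://octavia:pw@127.0.0.1/taskflow',
        #      'max_pool_size': 10, 'max_overflow': 20, 'pool_timeout': 10}
        #
        # fetch() maps it to taskflow's SQLAlchemy persistence backend, and
        # upgrade() applies the taskflow schema migrations once at startup.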
backend = persistence_backends.fetch(self.persistence_conf) with contextlib.closing(backend): with contextlib.closing(backend.get_connection()) as connection: connection.upgrade() @contextlib.contextmanager def get_persistence(self): # Rewrite taskflow get backend, so it won't run migrations on each call backend = persistence_backends.fetch(self.persistence_conf) with contextlib.closing(backend): with contextlib.closing(backend.get_connection()) as conn: conn.validate() yield backend class ZookeeperTaskFlowDriver(JobboardTaskFlowDriver): def __init__(self, persistence_driver): self.persistence_driver = persistence_driver def job_board(self, persistence): job_backends_hosts = ','.join( ['%s:%s' % (host, CONF.task_flow.jobboard_backend_port) for host in CONF.task_flow.jobboard_backend_hosts]) jobboard_backend_conf = { 'board': 'zookeeper', 'hosts': job_backends_hosts, 'path': '/' + CONF.task_flow.jobboard_backend_namespace, } jobboard_backend_conf.update( CONF.task_flow.jobboard_zookeeper_ssl_options) return job_backends.backend(CONF.task_flow.jobboard_name, jobboard_backend_conf, persistence=persistence) class RedisTaskFlowDriver(JobboardTaskFlowDriver): def __init__(self, persistence_driver): self.persistence_driver = persistence_driver def job_board(self, persistence): jobboard_backend_conf = { 'board': 'redis', 'host': CONF.task_flow.jobboard_backend_hosts[0], 'port': CONF.task_flow.jobboard_backend_port, 'password': CONF.task_flow.jobboard_backend_password, 'namespace': CONF.task_flow.jobboard_backend_namespace, } jobboard_backend_conf.update( CONF.task_flow.jobboard_redis_backend_ssl_options) return job_backends.backend( CONF.task_flow.jobboard_backend_namespace, jobboard_backend_conf, persistence=persistence) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3942165 octavia-6.2.2/octavia/controller/worker/v2/tasks/0000775000175000017500000000000000000000000021770 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/tasks/__init__.py0000664000175000017500000000107400000000000024103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py0000664000175000017500000006370200000000000026561 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import copy from cryptography import fernet from oslo_config import cfg from oslo_log import log as logging from stevedore import driver as stevedore_driver from taskflow import retry from taskflow import task from taskflow.types import failure from octavia.amphorae.backends.agent import agent_jinja_cfg from octavia.amphorae.driver_exceptions import exceptions as driver_except from octavia.common import constants from octavia.common import utils from octavia.controller.worker import task_utils as task_utilities from octavia.db import api as db_apis from octavia.db import repositories as repo from octavia.network import data_models CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseAmphoraTask(task.Task): """Base task to load drivers common to the tasks.""" def __init__(self, **kwargs): super(BaseAmphoraTask, self).__init__(**kwargs) self.amphora_driver = stevedore_driver.DriverManager( namespace='octavia.amphora.drivers', name=CONF.controller_worker.amphora_driver, invoke_on_load=True ).driver self.amphora_repo = repo.AmphoraRepository() self.listener_repo = repo.ListenerRepository() self.loadbalancer_repo = repo.LoadBalancerRepository() self.task_utils = task_utilities.TaskUtils() class AmpRetry(retry.Times): def on_failure(self, history, *args, **kwargs): last_errors = history[-1][1] max_retry_attempt = CONF.haproxy_amphora.connection_max_retries for task_name, ex_info in last_errors.items(): if len(history) <= max_retry_attempt: # When taskflow persistance is enabled and flow/task state is # saved in the backend. If flow(task) is restored(restart of # worker,etc) we are getting ex_info as None - we need to RETRY # task to check its real state. if ex_info is None or ex_info._exc_info is None: return retry.RETRY excp = ex_info._exc_info[1] if isinstance(excp, driver_except.AmpConnectionRetry): return retry.RETRY return retry.REVERT_ALL class AmpListenersUpdate(BaseAmphoraTask): """Task to update the listeners on one amphora.""" def execute(self, loadbalancer, amphora, timeout_dict=None): # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora[constants.ID]) try: db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) self.amphora_driver.update_amphora_listeners( db_lb, db_amp, timeout_dict) except Exception as e: LOG.error('Failed to update listeners on amphora %s. Skipping ' 'this amphora as it is failing to update due to: %s', db_amp.id, str(e)) self.amphora_repo.update(db_apis.get_session(), db_amp.id, status=constants.ERROR) class AmphoraIndexListenerUpdate(BaseAmphoraTask): """Task to update the listeners on one amphora.""" def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=()): # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. 
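        # NOTE: (hedged aside) this "index" task variant exists for failover
        # flows where amphora details are unknown at flow-build time; the
        # flow injects only an integer, e.g.
        #
        #     inject={constants.AMPHORA_INDEX: 0}
        #
        # and the task dereferences amphorae[amphora_index] at run time.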
try: # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get( db_apis.get_session(), id=amphorae[amphora_index][constants.ID]) db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) self.amphora_driver.update_amphora_listeners( db_lb, db_amp, timeout_dict) except Exception as e: amphora_id = amphorae[amphora_index].get(constants.ID) LOG.error('Failed to update listeners on amphora %s. Skipping ' 'this amphora as it is failing to update due to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) class ListenersUpdate(BaseAmphoraTask): """Task to update amphora with all specified listeners' configurations.""" def execute(self, loadbalancer_id): """Execute updates per listener for an amphora.""" loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) if loadbalancer: self.amphora_driver.update(loadbalancer) else: LOG.error('Load balancer %s for listeners update not found. ' 'Skipping update.', loadbalancer_id) def revert(self, loadbalancer_id, *args, **kwargs): """Handle failed listeners updates.""" LOG.warning("Reverting listeners updates.") loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) for listener in loadbalancer.listeners: self.task_utils.mark_listener_prov_status_error( listener.id) class ListenersStart(BaseAmphoraTask): """Task to start all listeners on the vip.""" def execute(self, loadbalancer, amphora=None): """Execute listener start routines for listeners on an amphora.""" db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) if db_lb.listeners: if amphora is not None: db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora[constants.ID]) else: db_amp = amphora self.amphora_driver.start(db_lb, db_amp) LOG.debug("Started the listeners on the vip") def revert(self, loadbalancer, *args, **kwargs): """Handle failed listeners starts.""" LOG.warning("Reverting listeners starts.") db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for listener in db_lb.listeners: self.task_utils.mark_listener_prov_status_error(listener.id) class AmphoraIndexListenersReload(BaseAmphoraTask): """Task to reload all listeners on an amphora.""" def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=None): """Execute listener reload routines for listeners on an amphora.""" if amphorae is None: return # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get( db_apis.get_session(), id=amphorae[amphora_index][constants.ID]) db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) if db_lb.listeners: try: self.amphora_driver.reload(db_lb, db_amp, timeout_dict) except Exception as e: amphora_id = amphorae[amphora_index][constants.ID] LOG.warning('Failed to reload listeners on amphora %s. 
' 'Skipping this amphora as it is failing to ' 'reload due to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) class ListenerDelete(BaseAmphoraTask): """Task to delete the listener on the vip.""" def execute(self, listener): """Execute listener delete routines for an amphora.""" db_listener = self.listener_repo.get( db_apis.get_session(), id=listener[constants.LISTENER_ID]) self.amphora_driver.delete(db_listener) LOG.debug("Deleted the listener on the vip") def revert(self, listener, *args, **kwargs): """Handle a failed listener delete.""" LOG.warning("Reverting listener delete.") self.task_utils.mark_listener_prov_status_error( listener[constants.LISTENER_ID]) class AmphoraGetInfo(BaseAmphoraTask): """Task to get information on an amphora.""" def execute(self, amphora): """Execute get_info routine for an amphora.""" db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora[constants.ID]) self.amphora_driver.get_info(db_amp) class AmphoraGetDiagnostics(BaseAmphoraTask): """Task to get diagnostics on the amphora and the loadbalancers.""" def execute(self, amphora): """Execute get_diagnostic routine for an amphora.""" self.amphora_driver.get_diagnostics(amphora) class AmphoraFinalize(BaseAmphoraTask): """Task to finalize the amphora before any listeners are configured.""" def execute(self, amphora): """Execute finalize_amphora routine.""" db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora.get(constants.ID)) self.amphora_driver.finalize_amphora(db_amp) LOG.debug("Finalized the amphora.") def revert(self, result, amphora, *args, **kwargs): """Handle a failed amphora finalize.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting amphora finalize.") self.task_utils.mark_amphora_status_error( amphora.get(constants.ID)) class AmphoraPostNetworkPlug(BaseAmphoraTask): """Task to notify the amphora post network plug.""" def execute(self, amphora, ports): """Execute post_network_plug routine.""" db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora[constants.ID]) for port in ports: net = data_models.Network(**port.pop(constants.NETWORK)) ips = port.pop(constants.FIXED_IPS) fixed_ips = [] for ip in ips: subnet_arg = ip.pop(constants.SUBNET) host_routes = subnet_arg.get('host_routes') if host_routes: subnet_arg['host_routes'] = [ data_models.HostRoute(**hr) for hr in host_routes ] fixed_ips.append(data_models.FixedIP( subnet=data_models.Subnet(**subnet_arg), **ip)) self.amphora_driver.post_network_plug( db_amp, data_models.Port(network=net, fixed_ips=fixed_ips, **port)) LOG.debug("post_network_plug called on compute instance " "%(compute_id)s for port %(port_id)s", {"compute_id": amphora[constants.COMPUTE_ID], "port_id": port[constants.ID]}) def revert(self, result, amphora, *args, **kwargs): """Handle a failed post network plug.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting post network plug.") self.task_utils.mark_amphora_status_error(amphora.get(constants.ID)) class AmphoraePostNetworkPlug(BaseAmphoraTask): """Task to notify the amphorae post network plug.""" def execute(self, loadbalancer, added_ports): """Execute post_network_plug routine.""" amp_post_plug = AmphoraPostNetworkPlug() db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for amphora in db_lb.amphorae: if amphora.id in added_ports: amp_post_plug.execute(amphora.to_dict(), added_ports[amphora.id]) def revert(self, result, loadbalancer, added_ports, 
*args, **kwargs): """Handle a failed post network plug.""" if isinstance(result, failure.Failure): return db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) LOG.warning("Reverting post network plug.") for amphora in filter( lambda amp: amp.status == constants.AMPHORA_ALLOCATED, db_lb.amphorae): self.task_utils.mark_amphora_status_error(amphora.id) class AmphoraPostVIPPlug(BaseAmphoraTask): """Task to notify the amphora post VIP plug.""" def execute(self, amphora, loadbalancer, amphorae_network_config): """Execute post_vip_routine.""" db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora.get(constants.ID)) db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) vrrp_port = data_models.Port( **amphorae_network_config[ amphora.get(constants.ID)][constants.VRRP_PORT]) # Required for noop-case vip_arg = copy.deepcopy( amphorae_network_config[ amphora.get(constants.ID)][constants.VIP_SUBNET]) if vip_arg: host_routes = vip_arg.get('host_routes') if host_routes: vip_arg['host_routes'] = [ data_models.HostRoute(**hr) for hr in host_routes ] vip_subnet = data_models.Subnet(**vip_arg) else: vip_subnet = data_models.Subnet() self.amphora_driver.post_vip_plug( db_amp, db_lb, amphorae_network_config, vrrp_port=vrrp_port, vip_subnet=vip_subnet) LOG.debug("Notified amphora of vip plug") def revert(self, result, amphora, loadbalancer, *args, **kwargs): """Handle a failed amphora vip plug notification.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting post vip plug.") self.task_utils.mark_amphora_status_error(amphora.get(constants.ID)) self.task_utils.mark_loadbalancer_prov_status_error( loadbalancer[constants.LOADBALANCER_ID]) class AmphoraePostVIPPlug(BaseAmphoraTask): """Task to notify the amphorae post VIP plug.""" def execute(self, loadbalancer, amphorae_network_config): """Execute post_vip_plug across the amphorae.""" amp_post_vip_plug = AmphoraPostVIPPlug() db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for amphora in db_lb.amphorae: amp_post_vip_plug.execute(amphora.to_dict(), loadbalancer, amphorae_network_config) def revert(self, result, loadbalancer, *args, **kwargs): """Handle a failed amphora vip plug notification.""" if isinstance(result, failure.Failure): return LOG.warning("Reverting amphorae post vip plug.") self.task_utils.mark_loadbalancer_prov_status_error( loadbalancer[constants.LOADBALANCER_ID]) class AmphoraCertUpload(BaseAmphoraTask): """Upload a certificate to the amphora.""" def execute(self, amphora, server_pem): """Execute cert_update_amphora routine.""" LOG.debug("Upload cert in amphora REST driver") key = utils.get_compatible_server_certs_key_passphrase() fer = fernet.Fernet(key) db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora.get(constants.ID)) self.amphora_driver.upload_cert_amp( db_amp, fer.decrypt(server_pem.encode('utf-8'))) # TODO(johnsom) REMOVE ME! 
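# NOTE(editor): Illustrative sketch only, not part of the original file.
# AmphoraCertUpload above decrypts the Fernet-wrapped PEM that
# GenerateServerPEMTask produced before pushing it to the amphora. The
# round trip it relies on looks like the following, with a locally
# generated key standing in for the one Octavia derives via
# utils.get_compatible_server_certs_key_passphrase():
#
#     from cryptography import fernet
#
#     key = fernet.Fernet.generate_key()
#     fer = fernet.Fernet(key)
#     pem = b'-----BEGIN CERTIFICATE-----...'
#     stored = fer.encrypt(pem).decode('utf-8')          # str for DB storage
#     assert fer.decrypt(stored.encode('utf-8')) == pem  # bytes again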
class AmphoraUpdateVRRPInterface(BaseAmphoraTask): """Task to get and update the VRRP interface device name from amphora.""" def execute(self, amphora, timeout_dict=None): try: # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora[constants.ID]) interface = self.amphora_driver.get_interface_from_ip( db_amp, db_amp.vrrp_ip, timeout_dict=timeout_dict) except Exception as e: # This can occur when an active/standby LB has no listener LOG.error('Failed to get amphora VRRP interface on amphora ' '%s. Skipping this amphora as it is failing due to: ' '%s', amphora.get(constants.ID), str(e)) self.amphora_repo.update(db_apis.get_session(), amphora.get(constants.ID), status=constants.ERROR) return None self.amphora_repo.update(db_apis.get_session(), amphora[constants.ID], vrrp_interface=interface) return interface class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask): """Task to get and update the VRRP interface device name from amphora.""" def execute(self, amphora_index, amphorae, timeout_dict=None): amphora_id = amphorae[amphora_index][constants.ID] try: # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora_id) interface = self.amphora_driver.get_interface_from_ip( db_amp, db_amp.vrrp_ip, timeout_dict=timeout_dict) except Exception as e: # This can occur when an active/standby LB has no listener LOG.error('Failed to get amphora VRRP interface on amphora ' '%s. Skipping this amphora as it is failing due to: ' '%s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) return None self.amphora_repo.update(db_apis.get_session(), amphora_id, vrrp_interface=interface) return interface class AmphoraVRRPUpdate(BaseAmphoraTask): """Task to update the VRRP configuration of an amphora.""" def execute(self, loadbalancer_id, amphorae_network_config, amphora, amp_vrrp_int, timeout_dict=None): """Execute update_vrrp_conf.""" # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. amphora_id = amphora[constants.ID] try: # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora_id) loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) db_amp.vrrp_interface = amp_vrrp_int self.amphora_driver.update_vrrp_conf( loadbalancer, amphorae_network_config, db_amp, timeout_dict) except Exception as e: LOG.error('Failed to update VRRP configuration amphora %s. ' 'Skipping this amphora as it is failing to update due ' 'to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) class AmphoraIndexVRRPUpdate(BaseAmphoraTask): """Task to update the VRRP configuration of an amphora.""" def execute(self, loadbalancer_id, amphorae_network_config, amphora_index, amphorae, amp_vrrp_int, timeout_dict=None): """Execute update_vrrp_conf.""" # Note, we don't want this to cause a revert as it may be used # in a failover flow with both amps failing. Skip it and let # health manager fix it. 
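# NOTE(editor): Commented-out, purely illustrative sketch (not original
# code) of the fail-soft pattern the VRRP tasks in this module share: log
# the error, mark the amphora ERROR so the health manager reconciles it
# later, and return instead of raising so the failover flow keeps going.
# The helper name is hypothetical:
#
#     def _fail_soft(self, amphora_id, exc):
#         LOG.error('Amphora %s failed: %s. Deferring to the health '
#                   'manager.', amphora_id, str(exc))
#         self.amphora_repo.update(db_apis.get_session(), amphora_id,
#                                  status=constants.ERROR)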
amphora_id = amphorae[amphora_index][constants.ID] try: # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora_id) loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) db_amp.vrrp_interface = amp_vrrp_int self.amphora_driver.update_vrrp_conf( loadbalancer, amphorae_network_config, db_amp, timeout_dict) except Exception as e: LOG.error('Failed to update VRRP configuration amphora %s. ' 'Skipping this amphora as it is failing to update due ' 'to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) return LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) class AmphoraVRRPStart(BaseAmphoraTask): """Task to start keepalived on an amphora. This will reload keepalived if it is already running. """ def execute(self, amphora, timeout_dict=None): # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups db_amp = self.amphora_repo.get( db_apis.get_session(), id=amphora[constants.ID]) self.amphora_driver.start_vrrp_service(db_amp, timeout_dict) LOG.debug("Started VRRP on amphora %s.", amphora[constants.ID]) class AmphoraIndexVRRPStart(BaseAmphoraTask): """Task to start keepalived on an amphora. This will reload keepalived if it is already running. """ def execute(self, amphora_index, amphorae, timeout_dict=None): # TODO(johnsom) Optimize this to use the dicts and not need the # DB lookups amphora_id = amphorae[amphora_index][constants.ID] db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora_id) try: self.amphora_driver.start_vrrp_service(db_amp, timeout_dict) except Exception as e: LOG.error('Failed to start VRRP on amphora %s. ' 'Skipping this amphora as it is failing to start due ' 'to: %s', amphora_id, str(e)) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR) return LOG.debug("Started VRRP on amphora %s.", amphorae[amphora_index][constants.ID]) class AmphoraComputeConnectivityWait(BaseAmphoraTask): """Task to wait for the compute instance to be up.""" def execute(self, amphora, raise_retry_exception=False): """Execute get_info routine for an amphora until it responds.""" try: db_amphora = self.amphora_repo.get( db_apis.get_session(), id=amphora.get(constants.ID)) amp_info = self.amphora_driver.get_info( db_amphora, raise_retry_exception=raise_retry_exception) LOG.debug('Successfully connected to amphora %s: %s', amphora.get(constants.ID), amp_info) except driver_except.TimeOutException: LOG.error("Amphora compute instance failed to become reachable.
" "This either means the compute driver failed to fully " "boot the instance inside the timeout interval or the " "instance is not reachable via the lb-mgmt-net.") self.amphora_repo.update(db_apis.get_session(), amphora.get(constants.ID), status=constants.ERROR) raise class AmphoraConfigUpdate(BaseAmphoraTask): """Task to push a new amphora agent configuration to the amphroa.""" def execute(self, amphora, flavor): # Extract any flavor based settings if flavor: topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, CONF.controller_worker.loadbalancer_topology) else: topology = CONF.controller_worker.loadbalancer_topology # Build the amphora agent config agent_cfg_tmpl = agent_jinja_cfg.AgentJinjaTemplater() agent_config = agent_cfg_tmpl.build_agent_config( amphora.get(constants.ID), topology) db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora[constants.ID]) # Push the new configuration to the amphroa try: self.amphora_driver.update_amphora_agent_config(db_amp, agent_config) except driver_except.AmpDriverNotImplementedError: LOG.error('Amphora {} does not support agent configuration ' 'update. Please update the amphora image for this ' 'amphora. Skipping.'. format(amphora.get(constants.ID))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/tasks/cert_task.py0000664000175000017500000000335200000000000024324 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cryptography import fernet from oslo_config import cfg from stevedore import driver as stevedore_driver from taskflow import task from octavia.common import utils CONF = cfg.CONF class BaseCertTask(task.Task): """Base task to load drivers common to the tasks.""" def __init__(self, **kwargs): super(BaseCertTask, self).__init__(**kwargs) self.cert_generator = stevedore_driver.DriverManager( namespace='octavia.cert_generator', name=CONF.certificates.cert_generator, invoke_on_load=True, ).driver class GenerateServerPEMTask(BaseCertTask): """Create the server certs for the agent comm Use the amphora_id for the CN """ def execute(self, amphora_id): cert = self.cert_generator.generate_cert_key_pair( cn=amphora_id, validity=CONF.certificates.cert_validity_time) key = utils.get_compatible_server_certs_key_passphrase() fer = fernet.Fernet(key) # storing in db requires conversion bytes to string # (required for python3) return fer.encrypt(cert.certificate + cert.private_key).decode('utf-8') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/tasks/compute_tasks.py0000664000175000017500000003417400000000000025234 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import time from cryptography import fernet from oslo_config import cfg from oslo_log import log as logging from stevedore import driver as stevedore_driver from taskflow import task from taskflow.types import failure import tenacity from octavia.amphorae.backends.agent import agent_jinja_cfg from octavia.common import constants from octavia.common import exceptions from octavia.common.jinja.logging import logging_jinja_cfg from octavia.common.jinja import user_data_jinja_cfg from octavia.common import utils from octavia.controller.worker import amphora_rate_limit from octavia.db import api as db_apis from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseComputeTask(task.Task): """Base task to load drivers common to the tasks.""" def __init__(self, **kwargs): super(BaseComputeTask, self).__init__(**kwargs) self.compute = stevedore_driver.DriverManager( namespace='octavia.compute.drivers', name=CONF.controller_worker.compute_driver, invoke_on_load=True ).driver self.loadbalancer_repo = repo.LoadBalancerRepository() self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit() class ComputeCreate(BaseComputeTask): """Create the compute instance for a new amphora.""" def execute(self, amphora_id, server_group_id, config_drive_files=None, build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, ports=None, flavor=None, availability_zone=None): """Create an amphora :param availability_zone: availability zone metadata dictionary :returns: the id of the new compute instance """ ports = ports or [] network_ids = CONF.controller_worker.amp_boot_network_list[:] config_drive_files = config_drive_files or {} user_data = None LOG.debug("Compute create execute for amphora with id %s", amphora_id) user_data_config_drive = CONF.controller_worker.user_data_config_drive key_name = CONF.controller_worker.amp_ssh_key_name # Apply any Octavia flavor customizations if flavor: topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, CONF.controller_worker.loadbalancer_topology) amp_compute_flavor = flavor.get( constants.COMPUTE_FLAVOR, CONF.controller_worker.amp_flavor_id) else: topology = CONF.controller_worker.loadbalancer_topology amp_compute_flavor = CONF.controller_worker.amp_flavor_id if availability_zone: amp_availability_zone = availability_zone.get( constants.COMPUTE_ZONE) amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) if amp_network: network_ids = [amp_network] else: amp_availability_zone = None try: if CONF.haproxy_amphora.build_rate_limit != -1: self.rate_limit.add_to_build_request_queue( amphora_id, build_type_priority) agent_cfg = agent_jinja_cfg.AgentJinjaTemplater() config_drive_files['/etc/octavia/amphora-agent.conf'] = ( agent_cfg.build_agent_config(amphora_id, topology)) logging_cfg = logging_jinja_cfg.LoggingJinjaTemplater( CONF.amphora_agent.logging_template_override) config_drive_files['/etc/rsyslog.d/10-rsyslog.conf'] = ( logging_cfg.build_logging_config()) if user_data_config_drive: udtemplater = user_data_jinja_cfg.UserDataJinjaCfg() user_data = udtemplater.build_user_data_config( config_drive_files) config_drive_files = None compute_id = 
self.compute.build( name="amphora-" + amphora_id, amphora_flavor=amp_compute_flavor, image_id=CONF.controller_worker.amp_image_id, image_tag=CONF.controller_worker.amp_image_tag, image_owner=CONF.controller_worker.amp_image_owner_id, key_name=key_name, sec_groups=CONF.controller_worker.amp_secgroup_list, network_ids=network_ids, port_ids=[port.id for port in ports], config_drive_files=config_drive_files, user_data=user_data, server_group_id=server_group_id, availability_zone=amp_availability_zone) LOG.debug("Server created with id: %s for amphora id: %s", compute_id, amphora_id) return compute_id except Exception: LOG.exception("Compute create for amphora id: %s failed", amphora_id) raise def revert(self, result, amphora_id, *args, **kwargs): """This method will revert the creation of the amphora, so it will just delete it in this flow. """ if isinstance(result, failure.Failure): return compute_id = result LOG.warning("Reverting compute create for amphora with id " "%(amp)s and compute id: %(comp)s", {'amp': amphora_id, 'comp': compute_id}) try: self.compute.delete(compute_id) except Exception: LOG.exception("Reverting compute create failed") class CertComputeCreate(ComputeCreate): def execute(self, amphora_id, server_pem, server_group_id, build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, ports=None, flavor=None, availability_zone=None): """Create an amphora :param availability_zone: availability zone metadata dictionary :returns: the id of the new compute instance """ # load client certificate with open(CONF.controller_worker.client_ca, 'r') as client_ca: ca = client_ca.read() key = utils.get_compatible_server_certs_key_passphrase() fer = fernet.Fernet(key) config_drive_files = { '/etc/octavia/certs/server.pem': fer.decrypt( server_pem.encode("utf-8")), '/etc/octavia/certs/client_ca.pem': ca} return super(CertComputeCreate, self).execute( amphora_id, config_drive_files=config_drive_files, build_type_priority=build_type_priority, server_group_id=server_group_id, ports=ports, flavor=flavor, availability_zone=availability_zone) class DeleteAmphoraeOnLoadBalancer(BaseComputeTask): """Delete the amphorae on a load balancer. Iterate through amphorae, deleting them """ def execute(self, loadbalancer): db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for amp in db_lb.amphorae: # The compute driver will already handle NotFound try: self.compute.delete(amp.compute_id) except Exception: LOG.exception("Compute delete for amphora id: %s failed", amp.id) raise class ComputeDelete(BaseComputeTask): @tenacity.retry(retry=tenacity.retry_if_exception_type(), stop=tenacity.stop_after_attempt(CONF.compute.max_retries), wait=tenacity.wait_exponential( multiplier=CONF.compute.retry_backoff, min=CONF.compute.retry_interval, max=CONF.compute.retry_max), reraise=True) def execute(self, amphora, passive_failure=False): amphora_id = amphora.get(constants.ID) compute_id = amphora[constants.COMPUTE_ID] if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1: LOG.debug('Compute delete execute for amphora with ID %s and ' 'compute ID: %s', amphora_id, compute_id) else: LOG.warning('Retrying compute delete of %s attempt %s of %s.', compute_id, self.execute.retry.statistics[ constants.ATTEMPT_NUMBER], self.execute.retry.stop.max_attempt_number) # Let the Taskflow engine know we are working and alive # Don't use get with a default for 'attempt_number', we need to fail # if that number is missing.
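# NOTE(editor): Commented-out, illustrative sketch (not original code) of
# the tenacity pattern decorating ComputeDelete.execute above: retry on
# any exception with exponential backoff, and reraise=True so the final
# failure reaches taskflow rather than a tenacity.RetryError. The
# standalone helper below is hypothetical:
#
#     import tenacity
#
#     @tenacity.retry(retry=tenacity.retry_if_exception_type(),
#                     stop=tenacity.stop_after_attempt(3),
#                     wait=tenacity.wait_exponential(multiplier=1, min=1,
#                                                    max=10),
#                     reraise=True)
#     def _delete_compute(compute_driver, compute_id):
#         compute_driver.delete(compute_id)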
self.update_progress( self.execute.retry.statistics[constants.ATTEMPT_NUMBER] / self.execute.retry.stop.max_attempt_number) try: self.compute.delete(compute_id) except Exception: if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] != self.execute.retry.stop.max_attempt_number): LOG.warning('Compute delete for amphora id: %s failed. ' 'Retrying.', amphora_id) raise if passive_failure: LOG.exception('Compute delete for compute ID: %s on amphora ' 'ID: %s failed. This resource will be abandoned ' 'and should manually be cleaned up once the ' 'compute service is functional.', compute_id, amphora_id) else: LOG.exception('Compute delete for compute ID: %s on amphora ' 'ID: %s failed. The compute service has failed. ' 'Aborting and reverting.', compute_id, amphora_id) raise class ComputeActiveWait(BaseComputeTask): """Wait for the compute driver to mark the amphora active.""" def execute(self, compute_id, amphora_id, availability_zone): """Wait for the compute driver to mark the amphora active :param compute_id: virtual machine UUID :param amphora_id: id of the amphora object :param availability_zone: availability zone metadata dictionary :raises: Generic exception if the amphora is not active :returns: An amphora object """ if availability_zone: amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) else: amp_network = None for i in range(CONF.controller_worker.amp_active_retries): amp, fault = self.compute.get_amphora(compute_id, amp_network) if amp.status == constants.ACTIVE: if CONF.haproxy_amphora.build_rate_limit != -1: self.rate_limit.remove_from_build_req_queue(amphora_id) return amp.to_dict() if amp.status == constants.ERROR: raise exceptions.ComputeBuildException(fault=fault) time.sleep(CONF.controller_worker.amp_active_wait_sec) raise exceptions.ComputeWaitTimeoutException(id=compute_id) class NovaServerGroupCreate(BaseComputeTask): def execute(self, loadbalancer_id): """Create a server group via the nova client API :param loadbalancer_id: will be used for the server group's name :raises: Generic exception if the server group is not created :returns: server group's id """ name = 'octavia-lb-' + loadbalancer_id server_group = self.compute.create_server_group( name, CONF.nova.anti_affinity_policy) LOG.debug("Server Group created with id: %s for load balancer id: " "%s", server_group.id, loadbalancer_id) return server_group.id def revert(self, result, *args, **kwargs): """This method will revert the creation of the server group. :param result: here it refers to the server group id """ server_group_id = result LOG.warning("Reverting server group create with id:%s", server_group_id) try: self.compute.delete_server_group(server_group_id) except Exception as e: LOG.error("Failed to delete server group. Resources may " "still be in use for server group: %(sg)s due to " "error: %(except)s", {'sg': server_group_id, 'except': str(e)}) class NovaServerGroupDelete(BaseComputeTask): def execute(self, server_group_id): if server_group_id is not None: self.compute.delete_server_group(server_group_id) else: return class AttachPort(BaseComputeTask): def execute(self, amphora, port): """Attach a port to an amphora instance. :param amphora: The amphora to attach the port to. :param port: The port to attach to the amphora.
:returns: None """ LOG.debug('Attaching port: %s to compute: %s', port[constants.ID], amphora[constants.COMPUTE_ID]) self.compute.attach_network_or_port(amphora[constants.COMPUTE_ID], port_id=port[constants.ID]) def revert(self, amphora, port, *args, **kwargs): """Revert our port attach. :param amphora: The amphora to detach the port from. :param port: The port to attach to the amphora. """ LOG.warning('Reverting port: %s attach to compute: %s', port[constants.ID], amphora[constants.COMPUTE_ID]) try: self.compute.detach_port(amphora[constants.COMPUTE_ID], port[constants.ID]) except Exception as e: LOG.error('Failed to detach port %s from compute %s for revert ' 'due to %s.', port[constants.ID], amphora[constants.COMPUTE_ID], str(e)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/controller/worker/v2/tasks/database_tasks.py0000664000175000017500000032150700000000000025323 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cryptography import fernet from oslo_config import cfg from oslo_db import exception as odb_exceptions from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils import sqlalchemy from sqlalchemy.orm import exc from taskflow import task from taskflow.types import failure from octavia.api.drivers import utils as provider_utils from octavia.common import constants from octavia.common import data_models import octavia.common.tls_utils.cert_parser as cert_parser from octavia.common import utils from octavia.common import validate from octavia.controller.worker import task_utils as task_utilities from octavia.db import api as db_apis from octavia.db import repositories as repo CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseDatabaseTask(task.Task): """Base task to load drivers common to the tasks.""" def __init__(self, **kwargs): self.repos = repo.Repositories() self.amphora_repo = repo.AmphoraRepository() self.health_mon_repo = repo.HealthMonitorRepository() self.listener_repo = repo.ListenerRepository() self.loadbalancer_repo = repo.LoadBalancerRepository() self.vip_repo = repo.VipRepository() self.member_repo = repo.MemberRepository() self.pool_repo = repo.PoolRepository() self.amp_health_repo = repo.AmphoraHealthRepository() self.l7policy_repo = repo.L7PolicyRepository() self.l7rule_repo = repo.L7RuleRepository() self.task_utils = task_utilities.TaskUtils() super(BaseDatabaseTask, self).__init__(**kwargs) def _delete_from_amp_health(self, amphora_id): """Delete the amphora_health record for an amphora. 
:param amphora_id: The amphora id to delete """ LOG.debug('Disabling health monitoring on amphora: %s', amphora_id) try: self.amp_health_repo.delete(db_apis.get_session(), amphora_id=amphora_id) except (sqlalchemy.orm.exc.NoResultFound, sqlalchemy.orm.exc.UnmappedInstanceError): LOG.debug('No existing amphora health record to delete ' 'for amphora: %s, skipping.', amphora_id) def _mark_amp_health_busy(self, amphora_id): """Mark the amphora_health record busy for an amphora. :param amphora_id: The amphora id to mark busy """ LOG.debug('Marking health monitoring busy on amphora: %s', amphora_id) try: self.amp_health_repo.update(db_apis.get_session(), amphora_id=amphora_id, busy=True) except (sqlalchemy.orm.exc.NoResultFound, sqlalchemy.orm.exc.UnmappedInstanceError): LOG.debug('No existing amphora health record to mark busy ' 'for amphora: %s, skipping.', amphora_id) class CreateAmphoraInDB(BaseDatabaseTask): """Task to create an initial amphora in the Database.""" def execute(self, *args, loadbalancer_id=None, **kwargs): """Creates a pending create amphora record in the database. :returns: The id of the created amphora """ amphora = self.amphora_repo.create(db_apis.get_session(), id=uuidutils.generate_uuid(), load_balancer_id=loadbalancer_id, status=constants.PENDING_CREATE, cert_busy=False) LOG.info("Created Amphora in DB with id %s", amphora.id) return amphora.id def revert(self, result, *args, **kwargs): """Revert by storing the amphora in error state in the DB In a future version we might change the status to DELETED if deleting the amphora was successful :param result: Id of created amphora. :returns: None """ if isinstance(result, failure.Failure): # This task's execute failed, so nothing needed to be done to # revert return # At this point the revert is being called because another task # executed after this failed so we will need to do something and # result is the amphora's id LOG.warning("Reverting create amphora in DB for amp id %s ", result) # Delete the amphora for now. May want to just update status later try: self.amphora_repo.delete(db_apis.get_session(), id=result) except Exception as e: LOG.error("Failed to delete amphora %(amp)s " "in the database due to: " "%(except)s", {'amp': result, 'except': str(e)}) class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask): """Task to mark a list of amphorae deleted in the Database.""" def execute(self, loadbalancer): """Update load balancer's amphorae statuses to DELETED in the database. :param loadbalancer: The load balancer which amphorae should be marked DELETED. :returns: None """ db_lb = self.repos.load_balancer.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for amp in db_lb.amphorae: LOG.debug("Marking amphora %s DELETED ", amp.id) self.amphora_repo.update(db_apis.get_session(), id=amp.id, status=constants.DELETED) class DeleteHealthMonitorInDB(BaseDatabaseTask): """Delete the health monitor in the DB.
Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, health_mon): """Delete the health monitor in DB :param health_mon: The health monitor which should be deleted :returns: None """ LOG.debug("DB delete health monitor: %s ", health_mon[constants.HEALTHMONITOR_ID]) try: self.health_mon_repo.delete( db_apis.get_session(), id=health_mon[constants.HEALTHMONITOR_ID]) except exc.NoResultFound: # ignore if the HealthMonitor was not found pass def revert(self, health_mon, *args, **kwargs): """Mark the health monitor ERROR since the mark active couldn't happen :param health_mon: The health monitor which couldn't be deleted :returns: None """ LOG.warning("Reverting mark health monitor delete in DB " "for health monitor with id %s", health_mon[constants.HEALTHMONITOR_ID]) self.health_mon_repo.update(db_apis.get_session(), id=health_mon[constants.HEALTHMONITOR_ID], provisioning_status=constants.ERROR) class DeleteHealthMonitorInDBByPool(DeleteHealthMonitorInDB): """Delete the health monitor in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool_id): """Delete the health monitor in the DB. :param pool_id: ID of pool which health monitor should be deleted. :returns: None """ db_pool = self.pool_repo.get(db_apis.get_session(), id=pool_id) provider_hm = provider_utils.db_HM_to_provider_HM( db_pool.health_monitor).to_dict() super(DeleteHealthMonitorInDBByPool, self).execute( provider_hm) def revert(self, pool_id, *args, **kwargs): """Mark the health monitor ERROR since the mark active couldn't happen :param pool_id: ID of pool which health monitor couldn't be deleted :returns: None """ db_pool = self.pool_repo.get(db_apis.get_session(), id=pool_id) provider_hm = provider_utils.db_HM_to_provider_HM( db_pool.health_monitor).to_dict() super(DeleteHealthMonitorInDBByPool, self).revert( provider_hm, *args, **kwargs) class DeleteMemberInDB(BaseDatabaseTask): """Delete the member in the DB. 
Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, member): """Delete the member in the DB :param member: The member to be deleted :returns: None """ LOG.debug("DB delete member for id: %s ", member[constants.MEMBER_ID]) self.member_repo.delete(db_apis.get_session(), id=member[constants.MEMBER_ID]) def revert(self, member, *args, **kwargs): """Mark the member ERROR since the delete couldn't happen :param member: Member that failed to get deleted :returns: None """ LOG.warning("Reverting delete in DB for member id %s", member[constants.MEMBER_ID]) try: self.member_repo.update(db_apis.get_session(), member[constants.MEMBER_ID], provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update member %(mem)s " "provisioning_status to ERROR due to: %(except)s", {'mem': member[constants.MEMBER_ID], 'except': str(e)}) class DeleteListenerInDB(BaseDatabaseTask): """Delete the listener in the DB.""" def execute(self, listener): """Delete the listener in DB :param listener: The listener to delete :returns: None """ LOG.debug("Delete in DB for listener id: %s", listener[constants.LISTENER_ID]) self.listener_repo.delete(db_apis.get_session(), id=listener[constants.LISTENER_ID]) def revert(self, listener, *args, **kwargs): """Mark the listener ERROR since the listener didn't delete :param listener: Listener that failed to get deleted :returns: None """ # TODO(johnsom) Fix this, it doesn't revert anything LOG.warning("Reverting mark listener delete in DB for listener id %s", listener[constants.LISTENER_ID]) class DeletePoolInDB(BaseDatabaseTask): """Delete the pool in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool_id): """Delete the pool in DB :param pool_id: The pool_id to be deleted :returns: None """ LOG.debug("Delete in DB for pool id: %s ", pool_id) self.pool_repo.delete(db_apis.get_session(), id=pool_id) def revert(self, pool_id, *args, **kwargs): """Mark the pool ERROR since the delete couldn't happen :param pool_id: pool_id that failed to get deleted :returns: None """ LOG.warning("Reverting delete in DB for pool id %s", pool_id) try: self.pool_repo.update(db_apis.get_session(), pool_id, provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update pool %(pool)s " "provisioning_status to ERROR due to: %(except)s", {'pool': pool_id, 'except': str(e)}) class DeleteL7PolicyInDB(BaseDatabaseTask): """Delete the L7 policy in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7policy): """Delete the l7policy in DB :param l7policy: The l7policy to be deleted :returns: None """ LOG.debug("Delete in DB for l7policy id: %s ", l7policy[constants.L7POLICY_ID]) self.l7policy_repo.delete(db_apis.get_session(), id=l7policy[constants.L7POLICY_ID]) def revert(self, l7policy, *args, **kwargs): """Mark the l7policy ERROR since the delete couldn't happen :param l7policy: L7 policy that failed to get deleted :returns: None """ LOG.warning("Reverting delete in DB for l7policy id %s", l7policy[constants.L7POLICY_ID]) try: self.l7policy_repo.update(db_apis.get_session(), l7policy[constants.L7POLICY_ID], provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update l7policy %(l7policy)s " "provisioning_status to ERROR due to: %(except)s", {'l7policy': l7policy[constants.L7POLICY_ID], 'except': str(e)}) class DeleteL7RuleInDB(BaseDatabaseTask): """Delete the L7 rule in the DB. 
Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7rule): """Delete the l7rule in DB :param l7rule: The l7rule to be deleted :returns: None """ LOG.debug("Delete in DB for l7rule id: %s", l7rule[constants.L7RULE_ID]) self.l7rule_repo.delete(db_apis.get_session(), id=l7rule[constants.L7RULE_ID]) def revert(self, l7rule, *args, **kwargs): """Mark the l7rule ERROR since the delete couldn't happen :param l7rule: L7 rule that failed to get deleted :returns: None """ LOG.warning("Reverting delete in DB for l7rule id %s", l7rule[constants.L7RULE_ID]) try: self.l7rule_repo.update(db_apis.get_session(), l7rule[constants.L7RULE_ID], provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update l7rule %(l7rule)s " "provisioning_status to ERROR due to: %(except)s", {'l7rule': l7rule[constants.L7RULE_ID], 'except': str(e)}) class ReloadAmphora(BaseDatabaseTask): """Get an amphora object from the database.""" def execute(self, amphora): """Get an amphora object from the database. :param amphora: The amphora dict containing the ID to look up :returns: The amphora object """ LOG.debug("Get amphora from DB for amphora id: %s ", amphora[constants.ID]) return self.amphora_repo.get(db_apis.get_session(), id=amphora[constants.ID]).to_dict() class ReloadLoadBalancer(BaseDatabaseTask): """Get a load balancer object from the database.""" def execute(self, loadbalancer_id, *args, **kwargs): """Get a load balancer object from the database. :param loadbalancer_id: The load balancer ID to lookup :returns: The load balancer object """ LOG.debug("Get load balancer from DB for load balancer id: %s ", loadbalancer_id) db_lb = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) lb_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer( db_lb) return lb_dict.to_dict() class UpdateVIPAfterAllocation(BaseDatabaseTask): """Update a VIP associated with a given load balancer.""" def execute(self, loadbalancer_id, vip): """Update a VIP associated with a given load balancer. :param loadbalancer_id: Id of a load balancer which VIP should be updated. :param vip: data_models.Vip object with update data. :returns: The load balancer object. """ self.repos.vip.update(db_apis.get_session(), loadbalancer_id, port_id=vip[constants.PORT_ID], subnet_id=vip[constants.SUBNET_ID], ip_address=vip[constants.IP_ADDRESS]) db_lb = self.repos.load_balancer.get(db_apis.get_session(), id=loadbalancer_id) return provider_utils.db_loadbalancer_to_provider_loadbalancer( db_lb).to_dict() class UpdateAmphoraeVIPData(BaseDatabaseTask): """Update amphorae VIP data.""" def execute(self, amps_data): """Update amphorae VIP data. :param amps_data: Amphorae update dicts. :returns: None """ for amp_data in amps_data: self.repos.amphora.update( db_apis.get_session(), amp_data.get(constants.ID), vrrp_ip=amp_data[constants.VRRP_IP], ha_ip=amp_data[constants.HA_IP], vrrp_port_id=amp_data[constants.VRRP_PORT_ID], ha_port_id=amp_data[constants.HA_PORT_ID], vrrp_id=1) class UpdateAmphoraVIPData(BaseDatabaseTask): """Update a single amphora's VIP data.""" def execute(self, amp_data): """Update a single amphora's VIP data. :param amp_data: Amphora update dict.
:returns: None """ self.repos.amphora.update( db_apis.get_session(), amp_data.get(constants.ID), vrrp_ip=amp_data[constants.VRRP_IP], ha_ip=amp_data[constants.HA_IP], vrrp_port_id=amp_data[constants.VRRP_PORT_ID], ha_port_id=amp_data[constants.HA_PORT_ID], vrrp_id=1) class UpdateAmpFailoverDetails(BaseDatabaseTask): """Update amphora failover details in the database.""" def execute(self, amphora, vip, base_port): """Update amphora failover details in the database. :param amphora: The amphora to update :param vip: The VIP object associated with this amphora. :param base_port: The base port object associated with the amphora. :returns: None """ # role and vrrp_priority will be updated later. self.repos.amphora.update( db_apis.get_session(), amphora.get(constants.ID), # TODO(johnsom) We should do a better job getting the fixed_ip # as this could be a problem with dual stack. # Fix this during the multi-vip patch. vrrp_ip=base_port[constants.FIXED_IPS][0][constants.IP_ADDRESS], ha_ip=vip[constants.IP_ADDRESS], vrrp_port_id=base_port[constants.ID], ha_port_id=vip[constants.PORT_ID], vrrp_id=1) class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask): """Associate failover amphora with loadbalancer in the database.""" def execute(self, amphora_id, loadbalancer_id): """Associate failover amphora with loadbalancer in the database. :param amphora_id: Id of an amphora to update :param loadbalancer_id: Id of a load balancer to be associated with a given amphora. :returns: None """ self.repos.amphora.associate(db_apis.get_session(), load_balancer_id=loadbalancer_id, amphora_id=amphora_id) def revert(self, amphora_id, *args, **kwargs): """Remove amphora-load balancer association. :param amphora_id: Id of an amphora that couldn't be associated with a load balancer. :returns: None """ try: self.repos.amphora.update(db_apis.get_session(), amphora_id, loadbalancer_id=None) except Exception as e: LOG.error("Failed to update amphora %(amp)s " "load balancer id to None due to: " "%(except)s", {'amp': amphora_id, 'except': str(e)}) class MapLoadbalancerToAmphora(BaseDatabaseTask): """Maps and assigns a load balancer to an amphora in the database.""" def execute(self, loadbalancer_id, server_group_id=None, flavor=None, availability_zone=None): """Allocates an Amphora for the load balancer in the database. :param loadbalancer_id: The load balancer id to map to an amphora :returns: Amphora ID if one was allocated, None if it was unable to allocate an Amphora """ LOG.debug("Allocating an Amphora for load balancer with id %s", loadbalancer_id) if server_group_id is not None: LOG.debug("Load balancer is using anti-affinity. Skipping spares " "pool allocation.") return None # Validate the flavor is spares compatible if not validate.is_flavor_spares_compatible(flavor): LOG.debug("Load balancer has a flavor that is not compatible with " "using spares pool amphora. 
Skipping spares pool " "allocation.") return None if availability_zone: amp_az = availability_zone.get(constants.COMPUTE_ZONE) else: amp_az = CONF.nova.availability_zone amp = self.amphora_repo.allocate_and_associate( db_apis.get_session(), loadbalancer_id, amp_az) if amp is None: LOG.debug("No Amphora available for load balancer with id %s", loadbalancer_id) return None LOG.debug("Allocated Amphora with id %(amp)s for load balancer " "with id %(lb)s", {'amp': amp.id, 'lb': loadbalancer_id}) # TODO(ataraday): return AMP here so the refactored split of create amp # for loadbalancer flow can be executed properly return amp.to_dict() def revert(self, result, loadbalancer_id, *args, **kwargs): LOG.warning("Reverting Amphora allocation for the load " "balancer %s in the database.", loadbalancer_id) self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id) class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask): """Alter the amphora role and priority in DB.""" def _execute(self, amphora_id, amp_role, vrrp_priority): """Alter the amphora role and priority in DB. :param amphora_id: Amphora ID to update. :param amp_role: Amphora role to be set. :param vrrp_priority: VRRP priority to set. :returns: None """ LOG.debug("Mark %(role)s in DB for amphora: %(amp)s", {constants.ROLE: amp_role, 'amp': amphora_id}) self.amphora_repo.update(db_apis.get_session(), amphora_id, role=amp_role, vrrp_priority=vrrp_priority) def _revert(self, result, amphora_id, *args, **kwargs): """Removes role and vrrp_priority association. :param result: Result of the association. :param amphora_id: Amphora ID which role/vrrp_priority association failed. :returns: None """ if isinstance(result, failure.Failure): return LOG.warning("Reverting amphora role in DB for amp id %(amp)s", {'amp': amphora_id}) try: self.amphora_repo.update(db_apis.get_session(), amphora_id, role=None, vrrp_priority=None) except Exception as e: LOG.error("Failed to update amphora %(amp)s " "role and vrrp_priority to None due to: " "%(except)s", {'amp': amphora_id, 'except': str(e)}) class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB): """Alter the amphora role to: MASTER.""" def execute(self, amphora): """Mark amphora as MASTER in db. :param amphora: Amphora to update role. :returns: None """ amp_role = constants.ROLE_MASTER self._execute(amphora[constants.ID], amp_role, constants.ROLE_MASTER_PRIORITY) def revert(self, result, amphora, *args, **kwargs): """Removes amphora role association. :param amphora: Amphora to update role. :returns: None """ self._revert(result, amphora[constants.ID], *args, **kwargs) class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB): """Alter the amphora role to: Backup.""" def execute(self, amphora): """Mark amphora as BACKUP in db. :param amphora: Amphora to update role. :returns: None """ amp_role = constants.ROLE_BACKUP self._execute(amphora[constants.ID], amp_role, constants.ROLE_BACKUP_PRIORITY) def revert(self, result, amphora, *args, **kwargs): """Removes amphora role association. :param amphora: Amphora to update role. :returns: None """ self._revert(result, amphora[constants.ID], *args, **kwargs) class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB): """Alter the amphora role to: Standalone.""" def execute(self, amphora): """Mark amphora as STANDALONE in db. :param amphora: Amphora to update role.
:returns: None """ amp_role = constants.ROLE_STANDALONE self._execute(amphora[constants.ID], amp_role, None) def revert(self, result, amphora, *args, **kwargs): """Removes amphora role association. :param amphora: Amphora to update role. :returns: None """ self._revert(result, amphora[constants.ID], *args, **kwargs) class MarkAmphoraAllocatedInDB(BaseDatabaseTask): """Will mark an amphora as allocated to a load balancer in the database. Assume sqlalchemy made sure the DB got retried sufficiently - so just abort """ def execute(self, amphora, loadbalancer_id): """Mark amphora as allocated to a load balancer in DB. :param amphora: Amphora to be updated. :param loadbalancer_id: Id of a load balancer to which an amphora should be allocated. :returns: None """ LOG.info('Mark ALLOCATED in DB for amphora: %(amp)s with ' 'compute id %(comp)s for load balancer: %(lb)s', { 'amp': amphora.get(constants.ID), 'comp': amphora[constants.COMPUTE_ID], 'lb': loadbalancer_id }) self.amphora_repo.update( db_apis.get_session(), amphora.get(constants.ID), status=constants.AMPHORA_ALLOCATED, compute_id=amphora[constants.COMPUTE_ID], lb_network_ip=amphora[constants.LB_NETWORK_IP], load_balancer_id=loadbalancer_id) def revert(self, result, amphora, loadbalancer_id, *args, **kwargs): """Mark the amphora as broken and ready to be cleaned up. :param result: Execute task result :param amphora: Amphora that was updated. :param loadbalancer_id: Id of a load balancer to which an amphora failed to be allocated. :returns: None """ if isinstance(result, failure.Failure): return LOG.warning("Reverting mark amphora ready in DB for amp " "id %(amp)s and compute id %(comp)s", {'amp': amphora.get(constants.ID), 'comp': amphora[constants.COMPUTE_ID]}) self.task_utils.mark_amphora_status_error( amphora.get(constants.ID)) class MarkAmphoraBootingInDB(BaseDatabaseTask): """Mark the amphora as booting in the database.""" def execute(self, amphora_id, compute_id): """Mark amphora booting in DB. :param amphora_id: Id of the amphora to update :param compute_id: Id of a compute on which an amphora resides :returns: None """ LOG.debug("Mark BOOTING in DB for amphora: %(amp)s with " "compute id %(id)s", {'amp': amphora_id, constants.ID: compute_id}) self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.AMPHORA_BOOTING, compute_id=compute_id) def revert(self, result, amphora_id, compute_id, *args, **kwargs): """Mark the amphora as broken and ready to be cleaned up. :param result: Execute task result :param amphora_id: Id of the amphora that failed to update :param compute_id: Id of a compute on which an amphora resides :returns: None """ if isinstance(result, failure.Failure): return LOG.warning("Reverting mark amphora booting in DB for amp " "id %(amp)s and compute id %(comp)s", {'amp': amphora_id, 'comp': compute_id}) try: self.amphora_repo.update(db_apis.get_session(), amphora_id, status=constants.ERROR, compute_id=compute_id) except Exception as e: LOG.error("Failed to update amphora %(amp)s " "status to ERROR due to: " "%(except)s", {'amp': amphora_id, 'except': str(e)}) class MarkAmphoraDeletedInDB(BaseDatabaseTask): """Mark the amphora deleted in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, amphora): """Mark the amphora as deleted in DB. :param amphora: Amphora to be updated. 
:returns: None """ LOG.debug("Mark DELETED in DB for amphora: %(amp)s with " "compute id %(comp)s", {'amp': amphora[constants.ID], 'comp': amphora[constants.COMPUTE_ID]}) self.amphora_repo.update(db_apis.get_session(), amphora[constants.ID], status=constants.DELETED) def revert(self, amphora, *args, **kwargs): """Mark the amphora as broken and ready to be cleaned up. :param amphora: Amphora that was updated. :returns: None """ LOG.warning("Reverting mark amphora deleted in DB " "for amp id %(amp)s and compute id %(comp)s", {'amp': amphora[constants.ID], 'comp': amphora[constants.COMPUTE_ID]}) self.task_utils.mark_amphora_status_error(amphora[constants.ID]) class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask): """Mark the amphora pending delete in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, amphora): """Mark the amphora as pending delete in DB. :param amphora: Amphora to be updated. :returns: None """ LOG.debug("Mark PENDING DELETE in DB for amphora: %(amp)s " "with compute id %(id)s", {'amp': amphora[constants.ID], 'id': amphora[constants.COMPUTE_ID]}) self.amphora_repo.update(db_apis.get_session(), amphora[constants.ID], status=constants.PENDING_DELETE) def revert(self, amphora, *args, **kwargs): """Mark the amphora as broken and ready to be cleaned up. :param amphora: Amphora that was updated. :returns: None """ LOG.warning("Reverting mark amphora pending delete in DB " "for amp id %(amp)s and compute id %(comp)s", {'amp': amphora[constants.ID], 'comp': amphora[constants.COMPUTE_ID]}) self.task_utils.mark_amphora_status_error(amphora[constants.ID]) class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask): """Mark the amphora pending update in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, amphora): """Mark the amphora as pending update in DB. :param amphora: Amphora to be updated. :returns: None """ LOG.debug("Mark PENDING UPDATE in DB for amphora: %(amp)s " "with compute id %(id)s", {'amp': amphora.get(constants.ID), 'id': amphora[constants.COMPUTE_ID]}) self.amphora_repo.update(db_apis.get_session(), amphora.get(constants.ID), status=constants.PENDING_UPDATE) def revert(self, amphora, *args, **kwargs): """Mark the amphora as broken and ready to be cleaned up. :param amphora: Amphora that was updated. :returns: None """ LOG.warning("Reverting mark amphora pending update in DB " "for amp id %(amp)s and compute id %(comp)s", {'amp': amphora.get(constants.ID), 'comp': amphora[constants.COMPUTE_ID]}) self.task_utils.mark_amphora_status_error(amphora.get(constants.ID)) class MarkAmphoraReadyInDB(BaseDatabaseTask): """This task will mark an amphora as ready in the database. Assume sqlalchemy made sure the DB got retried sufficiently - so just abort """ def execute(self, amphora): """Mark amphora as ready in DB. :param amphora: Amphora to be updated. :returns: None """ LOG.info("Mark READY in DB for amphora: %(amp)s with compute " "id %(comp)s", {"amp": amphora.get(constants.ID), "comp": amphora[constants.COMPUTE_ID]}) self.amphora_repo.update( db_apis.get_session(), amphora.get(constants.ID), status=constants.AMPHORA_READY, compute_id=amphora[constants.COMPUTE_ID], lb_network_ip=amphora[constants.LB_NETWORK_IP]) def revert(self, amphora, *args, **kwargs): """Mark the amphora as broken and ready to be cleaned up. :param amphora: Amphora that was updated. 
:returns: None """ LOG.warning("Reverting mark amphora ready in DB for amp " "id %(amp)s and compute id %(comp)s", {'amp': amphora.get(constants.ID), 'comp': amphora[constants.COMPUTE_ID]}) try: self.amphora_repo.update( db_apis.get_session(), amphora.get(constants.ID), status=constants.ERROR, compute_id=amphora[constants.COMPUTE_ID], lb_network_ip=amphora[constants.LB_NETWORK_IP]) except Exception as e: LOG.error("Failed to update amphora %(amp)s " "status to ERROR due to: " "%(except)s", {'amp': amphora.get(constants.ID), 'except': str(e)}) class UpdateAmphoraComputeId(BaseDatabaseTask): """Associate amphora with a compute in DB.""" def execute(self, amphora_id, compute_id): """Associate amphora with a compute in DB. :param amphora_id: Id of the amphora to update :param compute_id: Id of a compute on which an amphora resides :returns: None """ self.amphora_repo.update(db_apis.get_session(), amphora_id, compute_id=compute_id) class UpdateAmphoraInfo(BaseDatabaseTask): """Update amphora with compute instance details.""" def execute(self, amphora_id, compute_obj): """Update amphora with compute instance details. :param amphora_id: Id of the amphora to update :param compute_obj: Compute on which an amphora resides :returns: Updated amphora object """ self.amphora_repo.update( db_apis.get_session(), amphora_id, lb_network_ip=compute_obj[constants.LB_NETWORK_IP], cached_zone=compute_obj[constants.CACHED_ZONE], image_id=compute_obj[constants.IMAGE_ID], compute_flavor=compute_obj[constants.COMPUTE_FLAVOR]) return self.amphora_repo.get(db_apis.get_session(), id=amphora_id).to_dict() class UpdateAmphoraDBCertExpiration(BaseDatabaseTask): """Update the amphora expiration date with new cert file date.""" def execute(self, amphora_id, server_pem): """Update the amphora expiration date with new cert file date. :param amphora_id: Id of the amphora to update :param server_pem: Certificate in PEM format :returns: None """ LOG.debug("Update DB cert expiry date of amphora id: %s", amphora_id) key = utils.get_compatible_server_certs_key_passphrase() fer = fernet.Fernet(key) cert_expiration = cert_parser.get_cert_expiration( fer.decrypt(server_pem.encode("utf-8"))) LOG.debug("Certificate expiration date is %s ", cert_expiration) self.amphora_repo.update(db_apis.get_session(), amphora_id, cert_expiration=cert_expiration) class UpdateAmphoraCertBusyToFalse(BaseDatabaseTask): """Update the amphora cert_busy flag to be false.""" def execute(self, amphora_id): """Update the amphora cert_busy flag to be false. :param amphora_id: Id of the amphora to update :returns: None """ LOG.debug("Update cert_busy flag of amphora id %s to False", amphora_id) self.amphora_repo.update(db_apis.get_session(), amphora_id, cert_busy=False) class MarkLBActiveInDB(BaseDatabaseTask): """Mark the load balancer active in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def __init__(self, mark_subobjects=False, **kwargs): super(MarkLBActiveInDB, self).__init__(**kwargs) self.mark_subobjects = mark_subobjects def execute(self, loadbalancer): """Mark the load balancer as active in DB. This also marks ACTIVE all sub-objects of the load balancer if self.mark_subobjects is True.
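As a rough sketch of how this task is consumed (illustrative only; the real wiring lives in octavia/controller/worker/v2/flows, and the flow name below is made up), a create flow adds it as a late step so that any later failure reverts the status::

    from taskflow.patterns import linear_flow

    from octavia.common import constants

    # hypothetical flow; a real flow carries many more tasks
    finalize = linear_flow.Flow('example-finalize-lb-flow')
    finalize.add(MarkLBActiveInDB(mark_subobjects=True,
                                  requires=constants.LOADBALANCER))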
:param loadbalancer: Load balancer object to be updated :returns: None """ if self.mark_subobjects: LOG.debug("Marking all listeners of loadbalancer %s ACTIVE", loadbalancer[constants.LOADBALANCER_ID]) db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for listener in db_lb.listeners: self._mark_listener_status(listener, constants.ACTIVE) LOG.info("Mark ACTIVE in DB for load balancer id: %s", loadbalancer[constants.LOADBALANCER_ID]) self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer[constants.LOADBALANCER_ID], provisioning_status=constants.ACTIVE) def _mark_listener_status(self, listener, status): self.listener_repo.update(db_apis.get_session(), listener.id, provisioning_status=status) LOG.debug("Marking all l7policies of listener %s %s", listener.id, status) for l7policy in listener.l7policies: self._mark_l7policy_status(l7policy, status) if listener.default_pool: LOG.debug("Marking default pool of listener %s %s", listener.id, status) self._mark_pool_status(listener.default_pool, status) def _mark_l7policy_status(self, l7policy, status): self.l7policy_repo.update( db_apis.get_session(), l7policy.id, provisioning_status=status) LOG.debug("Marking all l7rules of l7policy %s %s", l7policy.id, status) for l7rule in l7policy.l7rules: self._mark_l7rule_status(l7rule, status) if l7policy.redirect_pool: LOG.debug("Marking redirect pool of l7policy %s %s", l7policy.id, status) self._mark_pool_status(l7policy.redirect_pool, status) def _mark_l7rule_status(self, l7rule, status): self.l7rule_repo.update( db_apis.get_session(), l7rule.id, provisioning_status=status) def _mark_pool_status(self, pool, status): self.pool_repo.update( db_apis.get_session(), pool.id, provisioning_status=status) if pool.health_monitor: LOG.debug("Marking health monitor of pool %s %s", pool.id, status) self._mark_hm_status(pool.health_monitor, status) LOG.debug("Marking all members of pool %s %s", pool.id, status) for member in pool.members: self._mark_member_status(member, status) def _mark_hm_status(self, hm, status): self.health_mon_repo.update( db_apis.get_session(), hm.id, provisioning_status=status) def _mark_member_status(self, member, status): self.member_repo.update( db_apis.get_session(), member.id, provisioning_status=status) def revert(self, loadbalancer, *args, **kwargs): """Mark the load balancer as broken and ready to be cleaned up. This also puts all sub-objects of the load balancer to ERROR state if self.mark_subobjects is True. :param loadbalancer: Load balancer object that failed to update :returns: None """ if self.mark_subobjects: LOG.debug("Marking all listeners of loadbalancer %s ERROR", loadbalancer[constants.LOADBALANCER_ID]) db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for listener in db_lb.listeners: try: self._mark_listener_status(listener, constants.ERROR) except Exception: LOG.warning("Error updating listener %s provisioning " "status", listener.id) LOG.warning("Reverting mark load balancer active in DB " "for load balancer id %s", loadbalancer[constants.LOADBALANCER_ID]) self.task_utils.mark_loadbalancer_prov_status_error( loadbalancer[constants.LOADBALANCER_ID]) class MarkLBActiveInDBByListener(BaseDatabaseTask): """Mark the load balancer active in the DB using a listener dict. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, listener): """Mark the load balancer as active in DB.
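Unlike MarkLBActiveInDB above, this variant needs only a listener dictionary (which carries the parent loadbalancer_id) and updates nothing but the load balancer's provisioning status, which makes it suitable for listener-scoped flows that do not have the full load balancer object in their storage.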
:param listener: Listener dictionary :returns: None """ LOG.info("Mark ACTIVE in DB for load balancer id: %s", listener[constants.LOADBALANCER_ID]) self.loadbalancer_repo.update(db_apis.get_session(), listener[constants.LOADBALANCER_ID], provisioning_status=constants.ACTIVE) def revert(self, listener, *args, **kwargs): """Mark the load balancer as broken and ready to be cleaned up. :param listener: Listener dictionary :returns: None """ LOG.warning("Reverting mark load balancer active in DB " "for load balancer id %s", listener[constants.LOADBALANCER_ID]) self.task_utils.mark_loadbalancer_prov_status_error( listener[constants.LOADBALANCER_ID]) class UpdateLBServerGroupInDB(BaseDatabaseTask): """Update the server group id info for load balancer in DB.""" def execute(self, loadbalancer_id, server_group_id): """Update the server group id info for load balancer in DB. :param loadbalancer_id: Id of a load balancer to update :param server_group_id: Id of a server group to associate with the load balancer :returns: None """ LOG.debug("Server Group updated with id: %s for load balancer id: %s", server_group_id, loadbalancer_id) self.loadbalancer_repo.update(db_apis.get_session(), id=loadbalancer_id, server_group_id=server_group_id) def revert(self, loadbalancer_id, server_group_id, *args, **kwargs): """Remove server group information from a load balancer in DB. :param loadbalancer_id: Id of a load balancer that failed to update :param server_group_id: Id of a server group that couldn't be associated with the load balancer :returns: None """ LOG.warning('Reverting Server Group updated with id: %(s1)s for ' 'load balancer id: %(s2)s ', {'s1': server_group_id, 's2': loadbalancer_id}) try: self.loadbalancer_repo.update(db_apis.get_session(), id=loadbalancer_id, server_group_id=None) except Exception as e: LOG.error("Failed to update load balancer %(lb)s " "server_group_id to None due to: " "%(except)s", {'lb': loadbalancer_id, 'except': str(e)}) class MarkLBDeletedInDB(BaseDatabaseTask): """Mark the load balancer deleted in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, loadbalancer): """Mark the load balancer as deleted in DB. :param loadbalancer: Load balancer object to be updated :returns: None """ LOG.debug("Mark DELETED in DB for load balancer id: %s", loadbalancer[constants.LOADBALANCER_ID]) self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer[constants.LOADBALANCER_ID], provisioning_status=constants.DELETED) def revert(self, loadbalancer, *args, **kwargs): """Mark the load balancer as broken and ready to be cleaned up. :param loadbalancer: Load balancer object that failed to update :returns: None """ LOG.warning("Reverting mark load balancer deleted in DB " "for load balancer id %s", loadbalancer[constants.LOADBALANCER_ID]) self.task_utils.mark_loadbalancer_prov_status_error( loadbalancer[constants.LOADBALANCER_ID]) class MarkLBPendingDeleteInDB(BaseDatabaseTask): """Mark the load balancer pending delete in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, loadbalancer): """Mark the load balancer as pending delete in DB.
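While the load balancer is in PENDING_DELETE the API rejects further mutating operations on it, so this status effectively acts as a lock held for the duration of the delete flow.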
:param loadbalancer: Load balancer object to be updated :returns: None """ LOG.debug("Mark PENDING DELETE in DB for load balancer id: %s", loadbalancer[constants.LOADBALANCER_ID]) self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer[constants.LOADBALANCER_ID], provisioning_status=(constants. PENDING_DELETE)) def revert(self, loadbalancer, *args, **kwargs): """Mark the load balancer as broken and ready to be cleaned up. :param loadbalancer: Load balancer object that failed to update :returns: None """ LOG.warning("Reverting mark load balancer pending delete in DB " "for load balancer id %s", loadbalancer[constants.LOADBALANCER_ID]) self.task_utils.mark_loadbalancer_prov_status_error( loadbalancer[constants.LOADBALANCER_ID]) class MarkLBAndListenersActiveInDB(BaseDatabaseTask): """Mark the load balancer and specified listeners active in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, loadbalancer_id, listeners): """Mark the load balancer and listeners as active in DB. :param loadbalancer_id: The load balancer ID to be updated :param listeners: Listener objects to be updated :returns: None """ lb_id = None if loadbalancer_id: lb_id = loadbalancer_id elif listeners: lb_id = listeners[0][constants.LOADBALANCER_ID] if lb_id: LOG.debug("Mark ACTIVE in DB for load balancer id: %s " "and updating status for listener ids: %s", lb_id, ', '.join([listener[constants.LISTENER_ID] for listener in listeners])) self.loadbalancer_repo.update(db_apis.get_session(), lb_id, provisioning_status=constants.ACTIVE) for listener in listeners: self.listener_repo.prov_status_active_if_not_error( db_apis.get_session(), listener[constants.LISTENER_ID]) def revert(self, loadbalancer_id, listeners, *args, **kwargs): """Mark the load balancer and listeners as broken. :param loadbalancer_id: The load balancer ID to be updated :param listeners: Listener objects that failed to update :returns: None """ lb_id = None if loadbalancer_id: lb_id = loadbalancer_id elif listeners: lb_id = listeners[0][constants.LOADBALANCER_ID] if lb_id: lists = ', '.join([listener[constants.LISTENER_ID] for listener in listeners]) LOG.warning("Reverting mark load balancer and listeners active in " "DB for load balancer id %(LB)s and listener ids: " "%(list)s", {'LB': lb_id, 'list': lists}) self.task_utils.mark_loadbalancer_prov_status_error(lb_id) for listener in listeners: self.task_utils.mark_listener_prov_status_error( listener[constants.LISTENER_ID]) class MarkListenerDeletedInDB(BaseDatabaseTask): """Mark the listener deleted in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, listener): """Mark the listener as deleted in DB :param listener: The listener to be marked deleted :returns: None """ LOG.debug("Mark DELETED in DB for listener id: %s ", listener.id) self.listener_repo.update(db_apis.get_session(), listener.id, provisioning_status=constants.DELETED) def revert(self, listener, *args, **kwargs): """Mark the listener ERROR since the delete couldn't happen :param listener: The listener that couldn't be updated :returns: None """ LOG.warning("Reverting mark listener deleted in DB " "for listener id %s", listener.id) self.task_utils.mark_listener_prov_status_error(listener.id) class MarkListenerPendingDeleteInDB(BaseDatabaseTask): """Mark the listener pending delete in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, listener): """Mark the listener as pending delete in DB. 
:param listener: The listener to be updated :returns: None """ LOG.debug("Mark PENDING DELETE in DB for listener id: %s", listener.id) self.listener_repo.update(db_apis.get_session(), listener.id, provisioning_status=constants.PENDING_DELETE) def revert(self, listener, *args, **kwargs): """Mark the listener as broken and ready to be cleaned up. :param listener: The listener that couldn't be updated :returns: None """ LOG.warning("Reverting mark listener pending delete in DB " "for listener id %s", listener.id) self.task_utils.mark_listener_prov_status_error(listener.id) class UpdateLoadbalancerInDB(BaseDatabaseTask): """Update the loadbalancer in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, loadbalancer, update_dict): """Update the loadbalancer in the DB :param loadbalancer: The load balancer to be updated :param update_dict: The dictionary of updates to apply :returns: None """ LOG.debug("Update DB for loadbalancer id: %s ", loadbalancer[constants.LOADBALANCER_ID]) if update_dict.get('vip'): vip_dict = update_dict.pop('vip') self.vip_repo.update(db_apis.get_session(), loadbalancer[constants.LOADBALANCER_ID], **vip_dict) self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer[constants.LOADBALANCER_ID], **update_dict) def revert(self, loadbalancer, *args, **kwargs): """Mark the loadbalancer ERROR since the update couldn't happen :param loadbalancer: The load balancer that couldn't be updated :returns: None """ LOG.warning("Reverting update loadbalancer in DB " "for loadbalancer id %s", loadbalancer[constants.LOADBALANCER_ID]) self.task_utils.mark_loadbalancer_prov_status_error( loadbalancer[constants.LOADBALANCER_ID]) class UpdateHealthMonInDB(BaseDatabaseTask): """Update the health monitor in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, health_mon, update_dict): """Update the health monitor in the DB :param health_mon: The health monitor to be updated :param update_dict: The dictionary of updates to apply :returns: None """ LOG.debug("Update DB for health monitor id: %s ", health_mon[constants.HEALTHMONITOR_ID]) self.health_mon_repo.update(db_apis.get_session(), health_mon[constants.HEALTHMONITOR_ID], **update_dict) def revert(self, health_mon, *args, **kwargs): """Mark the health monitor ERROR since the update couldn't happen :param health_mon: The health monitor that couldn't be updated :returns: None """ LOG.warning("Reverting update health monitor in DB " "for health monitor id %s", health_mon[constants.HEALTHMONITOR_ID]) try: self.health_mon_repo.update( db_apis.get_session(), health_mon[constants.HEALTHMONITOR_ID], provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update health monitor %(hm)s " "provisioning_status to ERROR due to: %(except)s", {'hm': health_mon[constants.HEALTHMONITOR_ID], 'except': str(e)}) class UpdateListenerInDB(BaseDatabaseTask): """Update the listener in the DB. 
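The update_dict carries only the columns being changed. A minimal, illustrative call (the task is normally driven by a flow, and the values here are made up)::

    UpdateListenerInDB().execute(
        listener={constants.LISTENER_ID: listener_id},
        update_dict={'name': 'api-listener', 'connection_limit': 5000})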
Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, listener, update_dict): """Update the listener in the DB :param listener: The listener to be updated :param update_dict: The dictionary of updates to apply :returns: None """ LOG.debug("Update DB for listener id: %s ", listener[constants.LISTENER_ID]) self.listener_repo.update(db_apis.get_session(), listener[constants.LISTENER_ID], **update_dict) def revert(self, listener, *args, **kwargs): """Mark the listener ERROR since the update couldn't happen :param listener: The listener that couldn't be updated :returns: None """ LOG.warning("Reverting update listener in DB " "for listener id %s", listener[constants.LISTENER_ID]) self.task_utils.mark_listener_prov_status_error( listener[constants.LISTENER_ID]) class UpdateMemberInDB(BaseDatabaseTask): """Update the member in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, member, update_dict): """Update the member in the DB :param member: The member to be updated :param update_dict: The dictionary of updates to apply :returns: None """ LOG.debug("Update DB for member id: %s ", member[constants.MEMBER_ID]) self.member_repo.update(db_apis.get_session(), member[constants.MEMBER_ID], **update_dict) def revert(self, member, *args, **kwargs): """Mark the member ERROR since the update couldn't happen :param member: The member that couldn't be updated :returns: None """ LOG.warning("Reverting update member in DB " "for member id %s", member[constants.MEMBER_ID]) try: self.member_repo.update(db_apis.get_session(), member[constants.MEMBER_ID], provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update member %(member)s provisioning_status " "to ERROR due to: %(except)s", {'member': member[constants.MEMBER_ID], 'except': str(e)}) class UpdatePoolInDB(BaseDatabaseTask): """Update the pool in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool_id, update_dict): """Update the pool in the DB :param pool_id: The pool_id to be updated :param update_dict: The dictionary of updates to apply :returns: None """ LOG.debug("Update DB for pool id: %s ", pool_id) self.repos.update_pool_and_sp(db_apis.get_session(), pool_id, update_dict) def revert(self, pool_id, *args, **kwargs): """Mark the pool ERROR since the update couldn't happen :param pool_id: The pool_id that couldn't be updated :returns: None """ LOG.warning("Reverting update pool in DB for pool id %s", pool_id) try: self.repos.update_pool_and_sp( db_apis.get_session(), pool_id, dict(provisioning_status=constants.ERROR)) except Exception as e: LOG.error("Failed to update pool %(pool)s provisioning_status to " "ERROR due to: %(except)s", {'pool': pool_id, 'except': str(e)}) class UpdateL7PolicyInDB(BaseDatabaseTask): """Update the L7 policy in the DB. 
Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7policy, update_dict): """Update the L7 policy in the DB :param l7policy: The L7 policy to be updated :param update_dict: The dictionary of updates to apply :returns: None """ LOG.debug("Update DB for l7policy id: %s", l7policy[constants.L7POLICY_ID]) self.l7policy_repo.update(db_apis.get_session(), l7policy[constants.L7POLICY_ID], **update_dict) def revert(self, l7policy, *args, **kwargs): """Mark the l7policy ERROR since the update couldn't happen :param l7policy: L7 policy that couldn't be updated :returns: None """ LOG.warning("Reverting update l7policy in DB " "for l7policy id %s", l7policy[constants.L7POLICY_ID]) try: self.l7policy_repo.update(db_apis.get_session(), l7policy[constants.L7POLICY_ID], provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update l7policy %(l7p)s provisioning_status " "to ERROR due to: %(except)s", {'l7p': l7policy[constants.L7POLICY_ID], 'except': str(e)}) class UpdateL7RuleInDB(BaseDatabaseTask): """Update the L7 rule in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7rule, update_dict): """Update the L7 rule in the DB :param l7rule: The L7 rule to be updated :param update_dict: The dictionary of updates to apply :returns: None """ LOG.debug("Update DB for l7rule id: %s", l7rule[constants.L7RULE_ID]) self.l7rule_repo.update(db_apis.get_session(), l7rule[constants.L7RULE_ID], **update_dict) def revert(self, l7rule, *args, **kwargs): """Mark the parent l7policy ERROR since the update couldn't happen :param l7rule: L7 rule that couldn't be updated :returns: None """ LOG.warning("Reverting update l7rule in DB " "for l7rule id %s", l7rule[constants.L7RULE_ID]) try: self.l7policy_repo.update(db_apis.get_session(), l7rule[constants.L7POLICY_ID], provisioning_status=constants.ERROR) except Exception as e: LOG.error("Failed to update the parent l7policy %(l7p)s " "provisioning_status to ERROR due to: %(except)s", {'l7p': l7rule[constants.L7POLICY_ID], 'except': str(e)}) class GetAmphoraDetails(BaseDatabaseTask): """Task to retrieve amphora network details.""" def execute(self, amphora): """Retrieve amphora network details. :param amphora: Amphora whose network details are required :returns: Amphora data dict """ db_amp = self.amphora_repo.get(db_apis.get_session(), id=amphora.get(constants.ID)) amphora.update({ constants.VRRP_IP: db_amp.vrrp_ip, constants.HA_IP: db_amp.ha_ip, constants.HA_PORT_ID: db_amp.ha_port_id, constants.ROLE: db_amp.role, constants.VRRP_ID: db_amp.vrrp_id, constants.VRRP_PRIORITY: db_amp.vrrp_priority }) return amphora class GetAmphoraeFromLoadbalancer(BaseDatabaseTask): """Task to pull the amphorae from a loadbalancer.""" def execute(self, loadbalancer_id): """Pull the amphorae from a loadbalancer. :param loadbalancer_id: Load balancer ID to get amphorae from :returns: A list of amphora dictionaries """ amphorae = [] db_lb = self.repos.load_balancer.get(db_apis.get_session(), id=loadbalancer_id) for amp in db_lb.amphorae: a = self.amphora_repo.get(db_apis.get_session(), id=amp.id, show_deleted=False) if a is None: continue amphorae.append(a.to_dict()) return amphorae class GetListenersFromLoadbalancer(BaseDatabaseTask): """Task to pull the listeners from a loadbalancer.""" def execute(self, loadbalancer): """Pull the listeners from a loadbalancer.
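Each listener is re-read from the database and converted with provider_utils.db_listener_to_provider_listener, so the result is a list of provider-format dictionaries rather than ORM objects.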
:param loadbalancer: Load balancer whose listeners are required :returns: A list of provider-format listener dictionaries """ listeners = [] db_lb = self.repos.load_balancer.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for listener in db_lb.listeners: db_l = self.listener_repo.get(db_apis.get_session(), id=listener.id) prov_listener = provider_utils.db_listener_to_provider_listener( db_l) listeners.append(prov_listener.to_dict()) return listeners class GetVipFromLoadbalancer(BaseDatabaseTask): """Task to pull the vip from a loadbalancer.""" def execute(self, loadbalancer): """Pull the vip from a loadbalancer. :param loadbalancer: Load balancer whose VIP is required :returns: VIP associated with a given load balancer """ db_lb = self.repos.load_balancer.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) return db_lb.vip.to_dict(recurse=True) class GetLoadBalancer(BaseDatabaseTask): """Get a load balancer object from the database.""" def execute(self, loadbalancer_id, *args, **kwargs): """Get a load balancer object from the database. :param loadbalancer_id: The load balancer ID to lookup :returns: The load balancer object """ LOG.debug("Get load balancer from DB for load balancer id: %s", loadbalancer_id) db_lb = self.loadbalancer_repo.get(db_apis.get_session(), id=loadbalancer_id) provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( db_lb) return provider_lb.to_dict() class CreateVRRPGroupForLB(BaseDatabaseTask): """Create a VRRP group for a load balancer.""" def execute(self, loadbalancer_id): """Create a VRRP group for a load balancer. :param loadbalancer_id: Load balancer ID for which a VRRP group should be created """ try: self.repos.vrrpgroup.create( db_apis.get_session(), load_balancer_id=loadbalancer_id, vrrp_group_name=str(loadbalancer_id).replace('-', ''), vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7], advert_int=CONF.keepalived_vrrp.vrrp_advert_int) except odb_exceptions.DBDuplicateEntry: LOG.debug('VRRP_GROUP entry already exists for load balancer, ' 'skipping create.') class DisableAmphoraHealthMonitoring(BaseDatabaseTask): """Disable amphora health monitoring. This disables amphora health monitoring by removing it from the amphora_health table. """ def execute(self, amphora): """Disable health monitoring for an amphora :param amphora: The amphora to disable health monitoring for :returns: None """ self._delete_from_amp_health(amphora[constants.ID]) class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask): """Disable health monitoring on the LB amphorae. This disables amphora health monitoring by removing it from the amphora_health table for each amphora on a load balancer. """ def execute(self, loadbalancer): """Disable health monitoring for amphora on a load balancer :param loadbalancer: The load balancer to disable health monitoring on :returns: None """ db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for amphora in db_lb.amphorae: self._delete_from_amp_health(amphora.id) class MarkAmphoraHealthBusy(BaseDatabaseTask): """Mark amphora health monitoring busy. This prevents amphora failover by marking the amphora busy in the amphora_health table.
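Flows that take an amphora out of service on purpose (for example while rebuilding it) typically run this task first so the health manager does not mistake the planned outage for a failure and trigger an unwanted failover.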
""" def execute(self, amphora): """Mark amphora health monitoring busy :param amphora: The amphora to mark amphora health busy :returns: None """ self._mark_amp_health_busy(amphora[constants.ID]) class MarkLBAmphoraeHealthBusy(BaseDatabaseTask): """Mark amphorae health monitoring busy for the LB. This prevents amphorae failover by marking each amphora of a given load balancer busy in the amphora_health table. """ def execute(self, loadbalancer): """Marks amphorae health busy for each amphora on a load balancer :param loadbalancer: The load balancer to mark amphorae health busy :returns: None """ db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for amphora in db_lb.amphorae: self._mark_amp_health_busy(amphora.id) class MarkHealthMonitorActiveInDB(BaseDatabaseTask): """Mark the health monitor ACTIVE in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, health_mon): """Mark the health monitor ACTIVE in DB. :param health_mon: Health Monitor object to be updated :returns: None """ LOG.debug("Mark ACTIVE in DB for health monitor id: %s", health_mon[constants.HEALTHMONITOR_ID]) db_health_mon = self.health_mon_repo.get( db_apis.get_session(), id=health_mon[constants.HEALTHMONITOR_ID]) op_status = (constants.ONLINE if db_health_mon.enabled else constants.OFFLINE) self.health_mon_repo.update(db_apis.get_session(), health_mon[constants.HEALTHMONITOR_ID], provisioning_status=constants.ACTIVE, operating_status=op_status) def revert(self, health_mon, *args, **kwargs): """Mark the health monitor as broken :param health_mon: Health Monitor object that failed to update :returns: None """ LOG.warning("Reverting mark health montor ACTIVE in DB " "for health monitor id %s", health_mon[constants.HEALTHMONITOR_ID]) self.task_utils.mark_health_mon_prov_status_error( health_mon[constants.HEALTHMONITOR_ID]) class MarkHealthMonitorPendingCreateInDB(BaseDatabaseTask): """Mark the health monitor pending create in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, health_mon): """Mark the health monitor as pending create in DB. :param health_mon: Health Monitor object to be updated :returns: None """ LOG.debug("Mark PENDING CREATE in DB for health monitor id: %s", health_mon[constants.HEALTHMONITOR_ID]) self.health_mon_repo.update(db_apis.get_session(), health_mon[constants.HEALTHMONITOR_ID], provisioning_status=(constants. PENDING_CREATE)) def revert(self, health_mon, *args, **kwargs): """Mark the health monitor as broken :param health_mon: Health Monitor object that failed to update :returns: None """ LOG.warning("Reverting mark health monitor pending create in DB " "for health monitor id %s", health_mon[constants.HEALTHMONITOR_ID]) self.task_utils.mark_health_mon_prov_status_error( health_mon[constants.HEALTHMONITOR_ID]) class MarkHealthMonitorPendingDeleteInDB(BaseDatabaseTask): """Mark the health monitor pending delete in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, health_mon): """Mark the health monitor as pending delete in DB. :param health_mon: Health Monitor object to be updated :returns: None """ LOG.debug("Mark PENDING DELETE in DB for health monitor id: %s", health_mon[constants.HEALTHMONITOR_ID]) self.health_mon_repo.update(db_apis.get_session(), health_mon[constants.HEALTHMONITOR_ID], provisioning_status=(constants. 
PENDING_DELETE)) def revert(self, health_mon, *args, **kwargs): """Mark the health monitor as broken :param health_mon: Health Monitor object that failed to update :returns: None """ LOG.warning("Reverting mark health monitor pending delete in DB " "for health monitor id %s", health_mon[constants.HEALTHMONITOR_ID]) self.task_utils.mark_health_mon_prov_status_error( health_mon[constants.HEALTHMONITOR_ID]) class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask): """Mark the health monitor pending update in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, health_mon): """Mark the health monitor as pending update in DB. :param health_mon: Health Monitor object to be updated :returns: None """ LOG.debug("Mark PENDING UPDATE in DB for health monitor id: %s", health_mon[constants.HEALTHMONITOR_ID]) self.health_mon_repo.update(db_apis.get_session(), health_mon[constants.HEALTHMONITOR_ID], provisioning_status=(constants. PENDING_UPDATE)) def revert(self, health_mon, *args, **kwargs): """Mark the health monitor as broken :param health_mon: Health Monitor object that failed to update :returns: None """ LOG.warning("Reverting mark health monitor pending update in DB " "for health monitor id %s", health_mon[constants.HEALTHMONITOR_ID]) self.task_utils.mark_health_mon_prov_status_error( health_mon[constants.HEALTHMONITOR_ID]) class MarkL7PolicyActiveInDB(BaseDatabaseTask): """Mark the l7policy ACTIVE in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7policy): """Mark the l7policy ACTIVE in DB. :param l7policy: L7Policy object to be updated :returns: None """ LOG.debug("Mark ACTIVE in DB for l7policy id: %s", l7policy[constants.L7POLICY_ID]) db_l7policy = self.l7policy_repo.get( db_apis.get_session(), id=l7policy[constants.L7POLICY_ID]) op_status = (constants.ONLINE if db_l7policy.enabled else constants.OFFLINE) self.l7policy_repo.update(db_apis.get_session(), l7policy[constants.L7POLICY_ID], provisioning_status=constants.ACTIVE, operating_status=op_status) def revert(self, l7policy, *args, **kwargs): """Mark the l7policy as broken :param l7policy: L7Policy object that failed to update :returns: None """ LOG.warning("Reverting mark l7policy ACTIVE in DB " "for l7policy id %s", l7policy[constants.L7POLICY_ID]) self.task_utils.mark_l7policy_prov_status_error( l7policy[constants.L7POLICY_ID]) class MarkL7PolicyPendingCreateInDB(BaseDatabaseTask): """Mark the l7policy pending create in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7policy): """Mark the l7policy as pending create in DB. :param l7policy: L7Policy object to be updated :returns: None """ LOG.debug("Mark PENDING CREATE in DB for l7policy id: %s", l7policy[constants.L7POLICY_ID]) self.l7policy_repo.update(db_apis.get_session(), l7policy[constants.L7POLICY_ID], provisioning_status=constants.PENDING_CREATE) def revert(self, l7policy, *args, **kwargs): """Mark the l7policy as broken :param l7policy: L7Policy object that failed to update :returns: None """ LOG.warning("Reverting mark l7policy pending create in DB " "for l7policy id %s", l7policy[constants.L7POLICY_ID]) self.task_utils.mark_l7policy_prov_status_error( l7policy[constants.L7POLICY_ID]) class MarkL7PolicyPendingDeleteInDB(BaseDatabaseTask): """Mark the l7policy pending delete in the DB. 
Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7policy): """Mark the l7policy as pending delete in DB. :param l7policy: L7Policy object to be updated :returns: None """ LOG.debug("Mark PENDING DELETE in DB for l7policy id: %s", l7policy[constants.L7POLICY_ID]) self.l7policy_repo.update(db_apis.get_session(), l7policy[constants.L7POLICY_ID], provisioning_status=constants.PENDING_DELETE) def revert(self, l7policy, *args, **kwargs): """Mark the l7policy as broken :param l7policy: L7Policy object that failed to update :returns: None """ LOG.warning("Reverting mark l7policy pending delete in DB " "for l7policy id %s", l7policy[constants.L7POLICY_ID]) self.task_utils.mark_l7policy_prov_status_error( l7policy[constants.L7POLICY_ID]) class MarkL7PolicyPendingUpdateInDB(BaseDatabaseTask): """Mark the l7policy pending update in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7policy): """Mark the l7policy as pending update in DB. :param l7policy: L7Policy object to be updated :returns: None """ LOG.debug("Mark PENDING UPDATE in DB for l7policy id: %s", l7policy[constants.L7POLICY_ID]) self.l7policy_repo.update(db_apis.get_session(), l7policy[constants.L7POLICY_ID], provisioning_status=(constants. PENDING_UPDATE)) def revert(self, l7policy, *args, **kwargs): """Mark the l7policy as broken :param l7policy: L7Policy object that failed to update :returns: None """ LOG.warning("Reverting mark l7policy pending update in DB " "for l7policy id %s", l7policy[constants.L7POLICY_ID]) self.task_utils.mark_l7policy_prov_status_error( l7policy[constants.L7POLICY_ID]) class MarkL7RuleActiveInDB(BaseDatabaseTask): """Mark the l7rule ACTIVE in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7rule): """Mark the l7rule ACTIVE in DB. :param l7rule: L7Rule object to be updated :returns: None """ LOG.debug("Mark ACTIVE in DB for l7rule id: %s", l7rule[constants.L7RULE_ID]) db_rule = self.l7rule_repo.get(db_apis.get_session(), id=l7rule[constants.L7RULE_ID]) op_status = (constants.ONLINE if db_rule.enabled else constants.OFFLINE) self.l7rule_repo.update(db_apis.get_session(), l7rule[constants.L7RULE_ID], provisioning_status=constants.ACTIVE, operating_status=op_status) def revert(self, l7rule, *args, **kwargs): """Mark the l7rule as broken :param l7rule: L7Rule object that failed to update :returns: None """ LOG.warning("Reverting mark l7rule ACTIVE in DB " "for l7rule id %s", l7rule[constants.L7RULE_ID]) self.task_utils.mark_l7rule_prov_status_error( l7rule[constants.L7RULE_ID]) class MarkL7RulePendingCreateInDB(BaseDatabaseTask): """Mark the l7rule pending create in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7rule): """Mark the l7rule as pending create in DB. 
:param l7rule: L7Rule object to be updated :returns: None """ LOG.debug("Mark PENDING CREATE in DB for l7rule id: %s", l7rule[constants.L7RULE_ID]) self.l7rule_repo.update(db_apis.get_session(), l7rule[constants.L7RULE_ID], provisioning_status=constants.PENDING_CREATE) def revert(self, l7rule, *args, **kwargs): """Mark the l7rule as broken :param l7rule: L7Rule object that failed to update :returns: None """ LOG.warning("Reverting mark l7rule pending create in DB " "for l7rule id %s", l7rule[constants.L7RULE_ID]) self.task_utils.mark_l7rule_prov_status_error( l7rule[constants.L7RULE_ID]) class MarkL7RulePendingDeleteInDB(BaseDatabaseTask): """Mark the l7rule pending delete in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7rule): """Mark the l7rule as pending delete in DB. :param l7rule: L7Rule object to be updated :returns: None """ LOG.debug("Mark PENDING DELETE in DB for l7rule id: %s", l7rule[constants.L7RULE_ID]) self.l7rule_repo.update(db_apis.get_session(), l7rule[constants.L7RULE_ID], provisioning_status=constants.PENDING_DELETE) def revert(self, l7rule, *args, **kwargs): """Mark the l7rule as broken :param l7rule: L7Rule object that failed to update :returns: None """ LOG.warning("Reverting mark l7rule pending delete in DB " "for l7rule id %s", l7rule[constants.L7RULE_ID]) self.task_utils.mark_l7rule_prov_status_error( l7rule[constants.L7RULE_ID]) class MarkL7RulePendingUpdateInDB(BaseDatabaseTask): """Mark the l7rule pending update in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, l7rule): """Mark the l7rule as pending update in DB. :param l7rule: L7Rule object to be updated :returns: None """ LOG.debug("Mark PENDING UPDATE in DB for l7rule id: %s", l7rule[constants.L7RULE_ID]) self.l7rule_repo.update(db_apis.get_session(), l7rule[constants.L7RULE_ID], provisioning_status=constants.PENDING_UPDATE) def revert(self, l7rule, *args, **kwargs): """Mark the l7rule as broken :param l7rule: L7Rule object that failed to update :returns: None """ LOG.warning("Reverting mark l7rule pending update in DB " "for l7rule id %s", l7rule[constants.L7RULE_ID]) self.task_utils.mark_l7rule_prov_status_error( l7rule[constants.L7RULE_ID]) class MarkMemberActiveInDB(BaseDatabaseTask): """Mark the member ACTIVE in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, member): """Mark the member ACTIVE in DB. :param member: Member object to be updated :returns: None """ LOG.debug("Mark ACTIVE in DB for member id: %s", member[constants.MEMBER_ID]) self.member_repo.update(db_apis.get_session(), member[constants.MEMBER_ID], provisioning_status=constants.ACTIVE) def revert(self, member, *args, **kwargs): """Mark the member as broken :param member: Member object that failed to update :returns: None """ LOG.warning("Reverting mark member ACTIVE in DB " "for member id %s", member[constants.MEMBER_ID]) self.task_utils.mark_member_prov_status_error( member[constants.MEMBER_ID]) class MarkMemberPendingCreateInDB(BaseDatabaseTask): """Mark the member pending create in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, member): """Mark the member as pending create in DB. 
:param member: Member object to be updated :returns: None """ LOG.debug("Mark PENDING CREATE in DB for member id: %s", member[constants.MEMBER_ID]) self.member_repo.update(db_apis.get_session(), member[constants.MEMBER_ID], provisioning_status=constants.PENDING_CREATE) def revert(self, member, *args, **kwargs): """Mark the member as broken :param member: Member object that failed to update :returns: None """ LOG.warning("Reverting mark member pending create in DB " "for member id %s", member[constants.MEMBER_ID]) self.task_utils.mark_member_prov_status_error( member[constants.MEMBER_ID]) class MarkMemberPendingDeleteInDB(BaseDatabaseTask): """Mark the member pending delete in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, member): """Mark the member as pending delete in DB. :param member: Member object to be updated :returns: None """ LOG.debug("Mark PENDING DELETE in DB for member id: %s", member[constants.MEMBER_ID]) self.member_repo.update(db_apis.get_session(), member[constants.MEMBER_ID], provisioning_status=constants.PENDING_DELETE) def revert(self, member, *args, **kwargs): """Mark the member as broken :param member: Member object that failed to update :returns: None """ LOG.warning("Reverting mark member pending delete in DB " "for member id %s", member[constants.MEMBER_ID]) self.task_utils.mark_member_prov_status_error( member[constants.MEMBER_ID]) class MarkMemberPendingUpdateInDB(BaseDatabaseTask): """Mark the member pending update in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, member): """Mark the member as pending update in DB. :param member: Member object to be updated :returns: None """ LOG.debug("Mark PENDING UPDATE in DB for member id: %s", member[constants.MEMBER_ID]) self.member_repo.update(db_apis.get_session(), member[constants.MEMBER_ID], provisioning_status=constants.PENDING_UPDATE) def revert(self, member, *args, **kwargs): """Mark the member as broken :param member: Member object that failed to update :returns: None """ LOG.warning("Reverting mark member pending update in DB " "for member id %s", member[constants.MEMBER_ID]) self.task_utils.mark_member_prov_status_error( member[constants.MEMBER_ID]) class MarkPoolActiveInDB(BaseDatabaseTask): """Mark the pool ACTIVE in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool_id): """Mark the pool ACTIVE in DB. :param pool_id: pool_id to be updated :returns: None """ LOG.debug("Mark ACTIVE in DB for pool id: %s", pool_id) self.pool_repo.update(db_apis.get_session(), pool_id, provisioning_status=constants.ACTIVE) def revert(self, pool_id, *args, **kwargs): """Mark the pool as broken :param pool_id: pool_id that failed to update :returns: None """ LOG.warning("Reverting mark pool ACTIVE in DB for pool id %s", pool_id) self.task_utils.mark_pool_prov_status_error(pool_id) class MarkPoolPendingCreateInDB(BaseDatabaseTask): """Mark the pool pending create in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool_id): """Mark the pool as pending create in DB. 
:param pool_id: pool_id of pool object to be updated :returns: None """ LOG.debug("Mark PENDING CREATE in DB for pool id: %s", pool_id) self.pool_repo.update(db_apis.get_session(), pool_id, provisioning_status=constants.PENDING_CREATE) def revert(self, pool_id, *args, **kwargs): """Mark the pool as broken :param pool_id: pool_id of pool object that failed to update :returns: None """ LOG.warning("Reverting mark pool pending create in DB " "for pool id %s", pool_id) self.task_utils.mark_pool_prov_status_error(pool_id) class MarkPoolPendingDeleteInDB(BaseDatabaseTask): """Mark the pool pending delete in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool_id): """Mark the pool as pending delete in DB. :param pool_id: pool_id of pool object to be updated :returns: None """ LOG.debug("Mark PENDING DELETE in DB for pool id: %s", pool_id) self.pool_repo.update(db_apis.get_session(), pool_id, provisioning_status=constants.PENDING_DELETE) def revert(self, pool_id, *args, **kwargs): """Mark the pool as broken :param pool_id: pool_id of pool object that failed to update :returns: None """ LOG.warning("Reverting mark pool pending delete in DB " "for pool id %s", pool_id) self.task_utils.mark_pool_prov_status_error(pool_id) class MarkPoolPendingUpdateInDB(BaseDatabaseTask): """Mark the pool pending update in the DB. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool_id): """Mark the pool as pending update in DB. :param pool_id: pool_id of pool object to be updated :returns: None """ LOG.debug("Mark PENDING UPDATE in DB for pool id: %s", pool_id) self.pool_repo.update(db_apis.get_session(), pool_id, provisioning_status=constants.PENDING_UPDATE) def revert(self, pool_id, *args, **kwargs): """Mark the pool as broken :param pool_id: pool_id of pool object that failed to update :returns: None """ LOG.warning("Reverting mark pool pending update in DB " "for pool id %s", pool_id) self.task_utils.mark_pool_prov_status_error(pool_id) class DecrementHealthMonitorQuota(BaseDatabaseTask): """Decrements the health monitor quota for a project. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, project_id): """Decrements the health monitor quota. :param project_id: The project_id to decrement the quota on. :returns: None """ LOG.debug("Decrementing health monitor quota for " "project: %s ", project_id) lock_session = db_apis.get_session(autocommit=False) try: self.repos.decrement_quota(lock_session, data_models.HealthMonitor, project_id) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to decrement health monitor quota for ' 'project: %(proj)s the project may have excess ' 'quota in use.', {'proj': project_id}) lock_session.rollback() def revert(self, project_id, result, *args, **kwargs): """Re-apply the quota :param project_id: The project_id to decrement the quota on. 
:returns: None """ LOG.warning('Reverting decrement quota for health monitor on project' ' %(proj)s Project quota counts may be incorrect.', {'proj': project_id}) # Increment the quota back if this task wasn't the failure if not isinstance(result, failure.Failure): try: session = db_apis.get_session() lock_session = db_apis.get_session(autocommit=False) try: self.repos.check_quota_met(session, lock_session, data_models.HealthMonitor, project_id) lock_session.commit() except Exception: lock_session.rollback() except Exception: # Don't fail the revert flow pass class DecrementListenerQuota(BaseDatabaseTask): """Decrements the listener quota for a project. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, project_id): """Decrements the listener quota. :param project_id: The project_id to decrement the quota on. :returns: None """ LOG.debug("Decrementing listener quota for " "project: %s ", project_id) lock_session = db_apis.get_session(autocommit=False) try: self.repos.decrement_quota(lock_session, data_models.Listener, project_id) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to decrement listener quota for project: ' '%(proj)s the project may have excess quota in use.', {'proj': project_id}) lock_session.rollback() def revert(self, project_id, result, *args, **kwargs): """Re-apply the quota :param project_id: The project_id to decrement the quota on. :returns: None """ LOG.warning('Reverting decrement quota for listener on project ' '%(proj)s Project quota counts may be incorrect.', {'proj': project_id}) # Increment the quota back if this task wasn't the failure if not isinstance(result, failure.Failure): try: session = db_apis.get_session() lock_session = db_apis.get_session(autocommit=False) try: self.repos.check_quota_met(session, lock_session, data_models.Listener, project_id) lock_session.commit() except Exception: lock_session.rollback() except Exception: # Don't fail the revert flow pass class DecrementLoadBalancerQuota(BaseDatabaseTask): """Decrements the load balancer quota for a project. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, project_id): """Decrements the load balancer quota. :param project_id: Project id where quota should be reduced :returns: None """ LOG.debug("Decrementing load balancer quota for " "project: %s ", project_id) lock_session = db_apis.get_session(autocommit=False) try: self.repos.decrement_quota(lock_session, data_models.LoadBalancer, project_id) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to decrement load balancer quota for ' 'project: %(proj)s the project may have excess ' 'quota in use.', {'proj': project_id}) lock_session.rollback() def revert(self, project_id, result, *args, **kwargs): """Re-apply the quota :param project_id: The project id to decrement the quota on. 
:returns: None """ LOG.warning('Reverting decrement quota for load balancer on project ' '%(proj)s Project quota counts may be incorrect.', {'proj': project_id}) # Increment the quota back if this task wasn't the failure if not isinstance(result, failure.Failure): try: session = db_apis.get_session() lock_session = db_apis.get_session(autocommit=False) try: self.repos.check_quota_met(session, lock_session, data_models.LoadBalancer, project_id) lock_session.commit() except Exception: lock_session.rollback() except Exception: # Don't fail the revert flow pass class DecrementMemberQuota(BaseDatabaseTask): """Decrements the member quota for a project. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, project_id): """Decrements the member quota. :param project_id: The project_id to decrement the quota on. :returns: None """ LOG.debug("Decrementing member quota for " "project: %s ", project_id) lock_session = db_apis.get_session(autocommit=False) try: self.repos.decrement_quota(lock_session, data_models.Member, project_id) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to decrement member quota for project: ' '%(proj)s the project may have excess quota in use.', {'proj': project_id}) lock_session.rollback() def revert(self, project_id, result, *args, **kwargs): """Re-apply the quota :param project_id: The project_id to decrement the quota on. :returns: None """ LOG.warning('Reverting decrement quota for member on project %(proj)s ' 'Project quota counts may be incorrect.', {'proj': project_id}) # Increment the quota back if this task wasn't the failure if not isinstance(result, failure.Failure): try: session = db_apis.get_session() lock_session = db_apis.get_session(autocommit=False) try: self.repos.check_quota_met(session, lock_session, data_models.Member, project_id) lock_session.commit() except Exception: lock_session.rollback() except Exception: # Don't fail the revert flow pass class DecrementPoolQuota(BaseDatabaseTask): """Decrements the pool quota for a project. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, project_id, pool_child_count): """Decrements the pool quota. :param project_id: The project_id to decrement the quota on. :param pool_child_count: Dict of 'HM' and 'member' child counts collected before the pool was deleted :returns: None """ LOG.debug("Decrementing pool quota for " "project: %s ", project_id) lock_session = db_apis.get_session(autocommit=False) try: self.repos.decrement_quota(lock_session, data_models.Pool, project_id) # Pools cascade delete members and health monitors # update the quota for those items as well.
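# The counts come from CountPoolChildrenForQuota (defined later in
# this module), which must run before the pool row is deleted because
# the cascade would otherwise remove the rows we need to count.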
if pool_child_count['HM'] > 0: self.repos.decrement_quota(lock_session, data_models.HealthMonitor, project_id) if pool_child_count['member'] > 0: self.repos.decrement_quota( lock_session, data_models.Member, project_id, quantity=pool_child_count['member']) lock_session.commit() except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to decrement pool quota for project: ' '%(proj)s the project may have excess quota in use.', {'proj': project_id}) lock_session.rollback() def revert(self, project_id, pool_child_count, result, *args, **kwargs): """Re-apply the quota :param project_id: The project_id to decrement the quota on :param pool_child_count: Dict of 'HM' and 'member' child counts for the pool :returns: None """ LOG.warning('Reverting decrement quota for pool on project %(proj)s ' 'Project quota counts may be incorrect.', {'proj': project_id}) # Increment the quota back if this task wasn't the failure if not isinstance(result, failure.Failure): # These are all independent to maximize the correction # in case other quota actions have occurred try: session = db_apis.get_session() lock_session = db_apis.get_session(autocommit=False) try: self.repos.check_quota_met(session, lock_session, data_models.Pool, project_id) lock_session.commit() except Exception: lock_session.rollback() # Attempt to increment back the health monitor quota if pool_child_count['HM'] > 0: lock_session = db_apis.get_session(autocommit=False) try: self.repos.check_quota_met(session, lock_session, data_models.HealthMonitor, project_id) lock_session.commit() except Exception: lock_session.rollback() # Attempt to increment back the member quota # These are separate calls to maximize the correction # should other factors have increased the in use quota # before this point in the revert flow for i in range(pool_child_count['member']): lock_session = db_apis.get_session(autocommit=False) try: self.repos.check_quota_met(session, lock_session, data_models.Member, project_id) lock_session.commit() except Exception: lock_session.rollback() except Exception: # Don't fail the revert flow pass class CountPoolChildrenForQuota(BaseDatabaseTask): """Counts the pool child resources for quota management. Since the children of pools are cleaned up by the sqlalchemy cascade delete settings, we need to collect the quota counts for the child objects early. """ def execute(self, pool_id): """Count the pool child resources for quota management :param pool_id: pool_id of pool object to count children on :returns: Dict with the 'HM' and 'member' child counts for the pool """ session = db_apis.get_session() hm_count, member_count = ( self.pool_repo.get_children_count(session, pool_id)) return {'HM': hm_count, 'member': member_count} class UpdatePoolMembersOperatingStatusInDB(BaseDatabaseTask): """Updates the operating status of the members of a pool. Since sqlalchemy will likely retry by itself always revert if it fails """ def execute(self, pool_id, operating_status): """Update the operating status of a pool's members in DB.
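Every member of the pool is updated through a single repository call rather than one call per member, for example to flip them all to OFFLINE when the pool is administratively disabled.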
:param pool_id: pool_id of pool object to be updated :param operating_status: Operating status to set :returns: None """ LOG.debug("Updating member operating status to %(status)s in DB for " "pool id: %(pool)s", {'status': operating_status, 'pool': pool_id}) self.member_repo.update_pool_members(db_apis.get_session(), pool_id, operating_status=operating_status) octavia-6.2.2/octavia/controller/worker/v2/tasks/lifecycle_tasks.py # Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from taskflow import task from octavia.common import constants from octavia.controller.worker import task_utils as task_utilities class BaseLifecycleTask(task.Task): """Base task to instantiate common classes.""" def __init__(self, **kwargs): self.task_utils = task_utilities.TaskUtils() super(BaseLifecycleTask, self).__init__(**kwargs) class AmphoraIDToErrorOnRevertTask(BaseLifecycleTask): """Task to set an amphora to ERROR on revert.""" def execute(self, amphora_id): pass def revert(self, amphora_id, *args, **kwargs): self.task_utils.mark_amphora_status_error(amphora_id) class AmphoraToErrorOnRevertTask(AmphoraIDToErrorOnRevertTask): """Task to set an amphora to ERROR on revert.""" def execute(self, amphora): pass def revert(self, amphora, *args, **kwargs): super(AmphoraToErrorOnRevertTask, self).revert( amphora.get(constants.ID)) class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask): """Task to set a health monitor to ERROR on revert.""" def execute(self, health_mon, listeners, loadbalancer): pass def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs): self.task_utils.mark_health_mon_prov_status_error( health_mon[constants.HEALTHMONITOR_ID]) self.task_utils.mark_pool_prov_status_active( health_mon[constants.POOL_ID]) self.task_utils.mark_loadbalancer_prov_status_active( loadbalancer[constants.LOADBALANCER_ID]) for listener in listeners: self.task_utils.mark_listener_prov_status_active( listener[constants.LISTENER_ID]) class L7PolicyToErrorOnRevertTask(BaseLifecycleTask): """Task to set an l7policy to ERROR on revert.""" def execute(self, l7policy, listeners, loadbalancer_id): pass def revert(self, l7policy, listeners, loadbalancer_id, *args, **kwargs): self.task_utils.mark_l7policy_prov_status_error( l7policy[constants.L7POLICY_ID]) self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer_id) for listener in listeners: self.task_utils.mark_listener_prov_status_active( listener[constants.LISTENER_ID]) class L7RuleToErrorOnRevertTask(BaseLifecycleTask): """Task to set an l7rule to ERROR on revert.""" def execute(self, l7rule, l7policy_id, listeners, loadbalancer_id): pass def revert(self, l7rule, l7policy_id, listeners, loadbalancer_id, *args, **kwargs): self.task_utils.mark_l7rule_prov_status_error( l7rule[constants.L7RULE_ID]) self.task_utils.mark_l7policy_prov_status_active(l7policy_id)
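# Note the unwind order: the failed rule was marked ERROR above, its
# parent policy was returned to ACTIVE, and the load balancer and
# listeners below are unlocked last so the API objects become usable
# again.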
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer_id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(
                listener[constants.LISTENER_ID])


class ListenerToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a listener to ERROR on revert."""

    def execute(self, listener):
        pass

    def revert(self, listener, *args, **kwargs):
        self.task_utils.mark_listener_prov_status_error(
            listener[constants.LISTENER_ID])
        self.task_utils.mark_loadbalancer_prov_status_active(
            listener[constants.LOADBALANCER_ID])


class ListenersToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set listeners to ERROR on revert."""

    def execute(self, listeners):
        pass

    def revert(self, listeners, *args, **kwargs):
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_error(
                listener[constants.LISTENER_ID])
        self.task_utils.mark_loadbalancer_prov_status_active(
            listeners[0][constants.LOADBALANCER_ID])


class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set the load balancer to ERROR on revert."""

    def execute(self, loadbalancer_id):
        pass

    def revert(self, loadbalancer_id, *args, **kwargs):
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)


class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask):
    """Task to set the load balancer to ERROR on revert."""

    def execute(self, loadbalancer):
        pass

    def revert(self, loadbalancer, *args, **kwargs):
        super(LoadBalancerToErrorOnRevertTask, self).revert(
            loadbalancer[constants.LOADBALANCER_ID])


class MemberToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a member to ERROR on revert."""

    def execute(self, member, listeners, loadbalancer, pool_id):
        pass

    def revert(self, member, listeners, loadbalancer, pool_id, *args,
               **kwargs):
        self.task_utils.mark_member_prov_status_error(
            member[constants.MEMBER_ID])
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(
                listener[constants.LISTENER_ID])
        self.task_utils.mark_pool_prov_status_active(pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer[constants.LOADBALANCER_ID])


class MembersToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set members to ERROR on revert."""

    def execute(self, members, listeners, loadbalancer, pool_id):
        pass

    def revert(self, members, listeners, loadbalancer, pool_id, *args,
               **kwargs):
        for m in members:
            self.task_utils.mark_member_prov_status_error(
                m[constants.MEMBER_ID])
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(
                listener[constants.LISTENER_ID])
        self.task_utils.mark_pool_prov_status_active(pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer[constants.LOADBALANCER_ID])


class PoolToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a pool to ERROR on revert."""

    def execute(self, pool_id, listeners, loadbalancer):
        pass

    def revert(self, pool_id, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_pool_prov_status_error(pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer[constants.LOADBALANCER_ID])
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(
                listener[constants.LISTENER_ID])
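
# A standalone, hypothetical sketch (names invented) of the taskflow
# execute/revert contract the lifecycle tasks above depend on: when a later
# task in the flow raises, taskflow calls revert() on every task that
# already ran, which is where these tasks flip provisioning status to ERROR.
def _example_revert_contract():
    from taskflow import engines
    from taskflow import task as tf_task
    from taskflow.patterns import linear_flow

    class MarkErrorOnRevert(tf_task.Task):
        def execute(self, resource_id):
            pass  # nothing to do on the way forward

        def revert(self, resource_id, *args, **kwargs):
            print('marking %s ERROR' % resource_id)

    class FailingTask(tf_task.Task):
        def execute(self):
            raise RuntimeError('provisioning failed')

    flow = linear_flow.Flow('example-lifecycle').add(
        MarkErrorOnRevert(), FailingTask())
    try:
        engines.run(flow, store={'resource_id': 'lb-1'})
    except RuntimeError:
        pass  # MarkErrorOnRevert.revert() has run at this point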

==== octavia-6.2.2/octavia/controller/worker/v2/tasks/network_tasks.py ====

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from taskflow import task
from taskflow.types import failure
import tenacity

from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils
from octavia.controller.worker import task_utils
from octavia.db import api as db_apis
from octavia.db import repositories as repo
from octavia.network import base
from octavia.network import data_models as n_data_models

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class BaseNetworkTask(task.Task):
    """Base task to load drivers common to the tasks."""

    def __init__(self, **kwargs):
        super(BaseNetworkTask, self).__init__(**kwargs)
        self._network_driver = None
        self.task_utils = task_utils.TaskUtils()
        self.loadbalancer_repo = repo.LoadBalancerRepository()
        self.amphora_repo = repo.AmphoraRepository()

    @property
    def network_driver(self):
        if self._network_driver is None:
            self._network_driver = utils.get_network_driver()
        return self._network_driver


class CalculateAmphoraDelta(BaseNetworkTask):

    default_provides = constants.DELTA

    def execute(self, loadbalancer, amphora, availability_zone,
                vrrp_port=None):
        LOG.debug("Calculating network delta for amphora id: %s",
                  amphora.get(constants.ID))

        if vrrp_port is None:
            vrrp_port = self.network_driver.get_port(
                amphora[constants.VRRP_PORT_ID])
            vrrp_port_network_id = vrrp_port.network_id
        else:
            vrrp_port_network_id = vrrp_port[constants.NETWORK_ID]

        # Figure out what networks we want
        # seed with lb network(s)
        if (availability_zone and
                availability_zone.get(constants.MANAGEMENT_NETWORK)):
            management_nets = [
                availability_zone.get(constants.MANAGEMENT_NETWORK)]
        else:
            management_nets = CONF.controller_worker.amp_boot_network_list

        desired_network_ids = {vrrp_port_network_id}.union(management_nets)

        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=loadbalancer[constants.LOADBALANCER_ID])
        for pool in db_lb.pools:
            member_networks = [
                self.network_driver.get_subnet(member.subnet_id).network_id
                for member in pool.members
                if member.subnet_id
            ]
            desired_network_ids.update(member_networks)

        nics = self.network_driver.get_plugged_networks(
            amphora[constants.COMPUTE_ID])
        # assume we don't have two nics in the same network
        actual_network_nics = dict((nic.network_id, nic) for nic in nics)

        del_ids = set(actual_network_nics) - desired_network_ids
        delete_nics = list(
            n_data_models.Interface(network_id=net_id)
            for net_id in del_ids)

        add_ids = desired_network_ids - set(actual_network_nics)
        add_nics = list(n_data_models.Interface(
            network_id=net_id) for net_id in add_ids)
        delta = n_data_models.Delta(
            amphora_id=amphora[constants.ID],
            compute_id=amphora[constants.COMPUTE_ID],
            add_nics=add_nics, delete_nics=delete_nics)
        return delta.to_dict(recurse=True)
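
# A minimal sketch of the set arithmetic CalculateAmphoraDelta performs
# above: desired networks come from the VRRP port, the management
# network(s) and the member subnets, and NICs are added or deleted based on
# plain set differences. The network ids are illustrative.
def _example_nic_delta(desired_network_ids, plugged_network_ids):
    add_ids = set(desired_network_ids) - set(plugged_network_ids)
    del_ids = set(plugged_network_ids) - set(desired_network_ids)
    return add_ids, del_ids

# _example_nic_delta({'net-vip', 'net-mgmt', 'net-member'},
#                    {'net-mgmt', 'net-old'})
# -> ({'net-vip', 'net-member'}, {'net-old'})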
""" default_provides = constants.DELTAS def execute(self, loadbalancer, availability_zone): """Compute which NICs need to be plugged for the amphora to become operational. :param loadbalancer: the loadbalancer to calculate deltas for all amphorae :param availability_zone: availability zone metadata dict :returns: dict of octavia.network.data_models.Delta keyed off amphora id """ calculate_amp = CalculateAmphoraDelta() deltas = {} db_lb = self.loadbalancer_repo.get( db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) for amphora in filter( lambda amp: amp.status == constants.AMPHORA_ALLOCATED, db_lb.amphorae): delta = calculate_amp.execute(loadbalancer, amphora.to_dict(), availability_zone) deltas[amphora.id] = delta return deltas class GetPlumbedNetworks(BaseNetworkTask): """Task to figure out the NICS on an amphora. This will likely move into the amphora driver :returns: Array of networks """ default_provides = constants.NICS def execute(self, amphora): """Get plumbed networks for the amphora.""" LOG.debug("Getting plumbed networks for amphora id: %s", amphora[constants.ID]) return self.network_driver.get_plugged_networks( amphora[constants.COMPUTE_ID]) class PlugNetworks(BaseNetworkTask): """Task to plug the networks. This uses the delta to add all missing networks/nics """ def execute(self, amphora, delta): """Update the amphora networks for the delta.""" LOG.debug("Plug or unplug networks for amphora id: %s", amphora[constants.ID]) if not delta: LOG.debug("No network deltas for amphora id: %s", amphora[constants.ID]) return # add nics for nic in delta[constants.ADD_NICS]: self.network_driver.plug_network(amphora[constants.COMPUTE_ID], nic[constants.NETWORK_ID]) def revert(self, amphora, delta, *args, **kwargs): """Handle a failed network plug by removing all nics added.""" LOG.warning("Unable to plug networks for amp id %s", amphora[constants.ID]) if not delta: return for nic in delta[constants.ADD_NICS]: try: self.network_driver.unplug_network( amphora[constants.COMPUTE_ID], nic[constants.NETWORK_ID]) except base.NetworkNotFound: pass class UnPlugNetworks(BaseNetworkTask): """Task to unplug the networks Loop over all nics and unplug them based on delta """ def execute(self, amphora, delta): """Unplug the networks.""" LOG.debug("Unplug network for amphora") if not delta: LOG.debug("No network deltas for amphora id: %s", amphora[constants.ID]) return for nic in delta[constants.DELETE_NICS]: try: self.network_driver.unplug_network( amphora[constants.COMPUTE_ID], nic[constants.NETWORK_ID]) except base.NetworkNotFound: LOG.debug("Network %d not found", nic[constants.NETWORK_ID]) except Exception: LOG.exception("Unable to unplug network") # TODO(xgerman) follow up if that makes sense class GetMemberPorts(BaseNetworkTask): def execute(self, loadbalancer, amphora): vip_port = self.network_driver.get_port(loadbalancer['vip_port_id']) member_ports = [] interfaces = self.network_driver.get_plugged_networks( amphora[constants.COMPUTE_ID]) for interface in interfaces: port = self.network_driver.get_port(interface.port_id) if vip_port.network_id == port.network_id: continue port.network = self.network_driver.get_network(port.network_id) for fixed_ip in port.fixed_ips: if amphora['lb_network_ip'] == fixed_ip.ip_address: break fixed_ip.subnet = self.network_driver.get_subnet( fixed_ip.subnet_id) # Only add the port to the list if the IP wasn't the mgmt IP else: member_ports.append(port) return member_ports class HandleNetworkDelta(BaseNetworkTask): """Task to plug and unplug networks 

class HandleNetworkDelta(BaseNetworkTask):
    """Task to plug and unplug networks

    Plug or unplug networks based on delta
    """

    def execute(self, amphora, delta):
        """Handle network plugging based off deltas."""
        added_ports = {}
        added_ports[amphora[constants.ID]] = []
        for nic in delta[constants.ADD_NICS]:
            interface = self.network_driver.plug_network(
                delta[constants.COMPUTE_ID], nic[constants.NETWORK_ID])
            port = self.network_driver.get_port(interface.port_id)
            port.network = self.network_driver.get_network(port.network_id)
            for fixed_ip in port.fixed_ips:
                fixed_ip.subnet = self.network_driver.get_subnet(
                    fixed_ip.subnet_id)
            added_ports[amphora[constants.ID]].append(port.to_dict(
                recurse=True))
        for nic in delta[constants.DELETE_NICS]:
            try:
                self.network_driver.unplug_network(
                    delta[constants.COMPUTE_ID], nic[constants.NETWORK_ID])
            except base.NetworkNotFound:
                LOG.debug("Network %s not found ", nic[constants.NETWORK_ID])
            except Exception:
                LOG.exception("Unable to unplug network")
        return added_ports

    def revert(self, result, amphora, delta, *args, **kwargs):
        """Handle a network plug or unplug failures."""

        if isinstance(result, failure.Failure):
            return

        if not delta:
            return

        LOG.warning("Unable to plug networks for amp id %s",
                    delta['amphora_id'])

        for nic in delta[constants.ADD_NICS]:
            try:
                self.network_driver.unplug_network(delta[constants.COMPUTE_ID],
                                                   nic[constants.NETWORK_ID])
            except Exception:
                pass


class HandleNetworkDeltas(BaseNetworkTask):
    """Task to plug and unplug networks

    Loop through the deltas and plug or unplug
    networks based on delta
    """

    def execute(self, deltas):
        """Handle network plugging based off deltas."""
        added_ports = {}
        for amp_id, delta in deltas.items():
            added_ports[amp_id] = []
            for nic in delta[constants.ADD_NICS]:
                interface = self.network_driver.plug_network(
                    delta[constants.COMPUTE_ID], nic[constants.NETWORK_ID])
                port = self.network_driver.get_port(interface.port_id)
                port.network = self.network_driver.get_network(
                    port.network_id)
                for fixed_ip in port.fixed_ips:
                    fixed_ip.subnet = self.network_driver.get_subnet(
                        fixed_ip.subnet_id)
                added_ports[amp_id].append(port.to_dict(recurse=True))
            for nic in delta[constants.DELETE_NICS]:
                try:
                    self.network_driver.unplug_network(
                        delta[constants.COMPUTE_ID],
                        nic[constants.NETWORK_ID])
                except base.NetworkNotFound:
                    LOG.debug("Network %s not found ",
                              nic[constants.NETWORK_ID])
                except Exception:
                    LOG.exception("Unable to unplug network")
        return added_ports

    def revert(self, result, deltas, *args, **kwargs):
        """Handle a network plug or unplug failures."""
        if isinstance(result, failure.Failure):
            return
        for amp_id, delta in deltas.items():
            LOG.warning("Unable to plug networks for amp id %s",
                        delta[constants.AMPHORA_ID])
            if not delta:
                return
            for nic in delta[constants.ADD_NICS]:
                try:
                    self.network_driver.unplug_network(
                        delta[constants.COMPUTE_ID],
                        nic[constants.NETWORK_ID])
                except base.NetworkNotFound:
                    pass
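
# A sketch of the added_ports structure HandleNetworkDeltas returns:
# amphora id mapped to the freshly plugged ports, serialized with their
# network and subnet details filled in. Field values here are illustrative.
_EXAMPLE_ADDED_PORTS = {
    'amphora-id-1': [
        {'id': 'port-id-1',
         'network_id': 'network-id-1',
         'fixed_ips': [{'ip_address': '10.0.0.5',
                        'subnet_id': 'subnet-id-1'}]},
    ],
}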

class PlugVIP(BaseNetworkTask):
    """Task to plumb a VIP."""

    def execute(self, loadbalancer):
        """Plumb a vip to an amphora."""
        LOG.debug("Plumbing VIP for loadbalancer id: %s",
                  loadbalancer[constants.LOADBALANCER_ID])
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=loadbalancer[constants.LOADBALANCER_ID])
        amps_data = self.network_driver.plug_vip(db_lb, db_lb.vip)
        return [amp.to_dict() for amp in amps_data]

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to plumb a vip."""
        if isinstance(result, failure.Failure):
            return
        LOG.warning("Unable to plug VIP for loadbalancer id %s",
                    loadbalancer[constants.LOADBALANCER_ID])

        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=loadbalancer[constants.LOADBALANCER_ID])
        try:
            # Make sure we have the current port IDs for cleanup
            for amp_data in result:
                for amphora in filter(
                        # pylint: disable=cell-var-from-loop
                        lambda amp: amp.id == amp_data['id'],
                        db_lb.amphorae):
                    amphora.vrrp_port_id = amp_data['vrrp_port_id']
                    amphora.ha_port_id = amp_data['ha_port_id']

            self.network_driver.unplug_vip(db_lb, db_lb.vip)
        except Exception as e:
            LOG.error("Failed to unplug VIP. Resources may still "
                      "be in use from vip: %(vip)s due to error: %(except)s",
                      {'vip': loadbalancer['vip_address'],
                       'except': str(e)})


class UpdateVIPSecurityGroup(BaseNetworkTask):
    """Task to setup SG for LB."""

    def execute(self, loadbalancer_id):
        """Task to setup SG for LB."""
        LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer_id)
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(), id=loadbalancer_id)

        return self.network_driver.update_vip_sg(db_lb, db_lb.vip)


class GetSubnetFromVIP(BaseNetworkTask):
    """Task to get the subnet for a VIP."""

    def execute(self, loadbalancer):
        """Get the subnet for the load balancer's VIP."""
        LOG.debug("Getting subnet for LB: %s",
                  loadbalancer[constants.LOADBALANCER_ID])

        return self.network_driver.get_subnet(
            loadbalancer['vip_subnet_id']).to_dict()

class PlugVIPAmphora(BaseNetworkTask):
    """Task to plumb a VIP."""

    def execute(self, loadbalancer, amphora, subnet):
        """Plumb a vip to an amphora."""
        LOG.debug("Plumbing VIP for amphora id: %s",
                  amphora.get(constants.ID))
        db_amp = self.amphora_repo.get(db_apis.get_session(),
                                       id=amphora.get(constants.ID))
        db_subnet = self.network_driver.get_subnet(subnet[constants.ID])
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=loadbalancer[constants.LOADBALANCER_ID])
        amp_data = self.network_driver.plug_aap_port(
            db_lb, db_lb.vip, db_amp, db_subnet)
        return amp_data.to_dict()

    def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
        """Handle a failure to plumb a vip."""
        if isinstance(result, failure.Failure):
            return
        LOG.warning("Unable to plug VIP for amphora id %s "
                    "load balancer id %s",
                    amphora.get(constants.ID),
                    loadbalancer[constants.LOADBALANCER_ID])

        try:
            db_amp = self.amphora_repo.get(db_apis.get_session(),
                                           id=amphora.get(constants.ID))
            db_amp.vrrp_port_id = result[constants.VRRP_PORT_ID]
            db_amp.ha_port_id = result[constants.HA_PORT_ID]
            db_subnet = self.network_driver.get_subnet(subnet[constants.ID])
            db_lb = self.loadbalancer_repo.get(
                db_apis.get_session(),
                id=loadbalancer[constants.LOADBALANCER_ID])

            self.network_driver.unplug_aap_port(db_lb.vip, db_amp, db_subnet)
        except Exception as e:
            LOG.error('Failed to unplug AAP port. Resources may still be in '
                      'use for VIP: %s due to error: %s', db_lb.vip, str(e))


class UnplugVIP(BaseNetworkTask):
    """Task to unplug the vip."""

    def execute(self, loadbalancer):
        """Unplug the vip."""
        LOG.debug("Unplug vip on amphora")
        try:
            db_lb = self.loadbalancer_repo.get(
                db_apis.get_session(),
                id=loadbalancer[constants.LOADBALANCER_ID])
            self.network_driver.unplug_vip(db_lb, db_lb.vip)
        except Exception:
            LOG.exception("Unable to unplug vip from load balancer %s",
                          loadbalancer[constants.LOADBALANCER_ID])


class AllocateVIP(BaseNetworkTask):
    """Task to allocate a VIP."""

    def execute(self, loadbalancer):
        """Allocate a vip to the loadbalancer."""
        LOG.debug("Allocate_vip port_id %s, subnet_id %s, "
                  "ip_address %s",
                  loadbalancer[constants.VIP_PORT_ID],
                  loadbalancer[constants.VIP_SUBNET_ID],
                  loadbalancer[constants.VIP_ADDRESS])
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=loadbalancer[constants.LOADBALANCER_ID])
        vip = self.network_driver.allocate_vip(db_lb)
        return vip.to_dict()

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to allocate vip."""
        if isinstance(result, failure.Failure):
            LOG.exception("Unable to allocate VIP")
            return
        vip = data_models.Vip(**result)
        LOG.warning("Deallocating vip %s", vip.ip_address)
        try:
            self.network_driver.deallocate_vip(vip)
        except Exception as e:
            LOG.error("Failed to deallocate VIP. Resources may still "
                      "be in use from vip: %(vip)s due to error: %(except)s",
                      {'vip': vip.ip_address, 'except': str(e)})


class AllocateVIPforFailover(AllocateVIP):
    """Task to allocate/validate the VIP for a failover flow."""

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to allocate vip."""
        if isinstance(result, failure.Failure):
            LOG.exception("Unable to allocate VIP")
            return
        vip = data_models.Vip(**result)
        LOG.info("Failover revert is not deallocating vip %s because this "
                 "is a failover.", vip.ip_address)


class DeallocateVIP(BaseNetworkTask):
    """Task to deallocate a VIP."""

    def execute(self, loadbalancer):
        """Deallocate a VIP."""
        LOG.debug("Deallocating a VIP %s",
                  loadbalancer[constants.VIP_ADDRESS])

        # NOTE(blogan): this is kind of ugly but sufficient for now. Drivers
        # will need access to the load balancer that the vip is/was attached
        # to. However the data model serialization for the vip does not give
        # a backref to the loadbalancer if accessed through the loadbalancer.
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=loadbalancer[constants.LOADBALANCER_ID])
        vip = db_lb.vip
        vip.load_balancer = db_lb
        self.network_driver.deallocate_vip(vip)


class UpdateVIP(BaseNetworkTask):
    """Task to update a VIP."""

    def execute(self, listeners):
        loadbalancer = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=listeners[0][constants.LOADBALANCER_ID])

        LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id)

        self.network_driver.update_vip(loadbalancer)


class UpdateVIPForDelete(BaseNetworkTask):
    """Task to update a VIP for listener delete flows."""

    def execute(self, loadbalancer_id):
        loadbalancer = self.loadbalancer_repo.get(
            db_apis.get_session(), id=loadbalancer_id)

        LOG.debug("Updating VIP for listener delete on load_balancer %s.",
                  loadbalancer.id)

        self.network_driver.update_vip(loadbalancer, for_delete=True)


class GetAmphoraNetworkConfigs(BaseNetworkTask):
    """Task to retrieve amphora network details."""

    def execute(self, loadbalancer, amphora=None):
        LOG.debug("Retrieving vip network details.")
        db_amp = self.amphora_repo.get(db_apis.get_session(),
                                       id=amphora.get(constants.ID))
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=loadbalancer[constants.LOADBALANCER_ID])
        db_configs = self.network_driver.get_network_configs(
            db_lb, amphora=db_amp)
        provider_dict = {}
        for amp_id, amp_conf in db_configs.items():
            provider_dict[amp_id] = amp_conf.to_dict(recurse=True)
        return provider_dict


class GetAmphoraNetworkConfigsByID(BaseNetworkTask):
    """Task to retrieve amphora network details."""

    def execute(self, loadbalancer_id, amphora_id=None):
        LOG.debug("Retrieving vip network details.")
        loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
                                                  id=loadbalancer_id)
        amphora = self.amphora_repo.get(db_apis.get_session(),
                                        id=amphora_id)
        db_configs = self.network_driver.get_network_configs(
            loadbalancer, amphora=amphora)
        provider_dict = {}
        for amp_id, amp_conf in db_configs.items():
            provider_dict[amp_id] = amp_conf.to_dict(recurse=True)
        return provider_dict


class GetAmphoraeNetworkConfigs(BaseNetworkTask):
    """Task to retrieve amphorae network details."""

    def execute(self, loadbalancer_id):
        LOG.debug("Retrieving vip network details.")
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(), id=loadbalancer_id)
        db_configs = self.network_driver.get_network_configs(db_lb)
        provider_dict = {}
        for amp_id, amp_conf in db_configs.items():
            provider_dict[amp_id] = amp_conf.to_dict(recurse=True)
        return provider_dict


class FailoverPreparationForAmphora(BaseNetworkTask):
    """Task to prepare an amphora for failover."""

    def execute(self, amphora):
        db_amp = self.amphora_repo.get(db_apis.get_session(),
                                       id=amphora[constants.ID])
        LOG.debug("Prepare amphora %s for failover.",
                  amphora[constants.ID])

        self.network_driver.failover_preparation(db_amp)


class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask):
    """Task retrieving all the port ids on an amphora, except lb network."""

    def execute(self, amphora):
        LOG.debug("Retrieve all but the lb network port id on amphora %s.",
                  amphora[constants.ID])

        interfaces = self.network_driver.get_plugged_networks(
            compute_id=amphora[constants.COMPUTE_ID])

        ports = []
        for interface_ in interfaces:
            if interface_.port_id not in ports:
                port = self.network_driver.get_port(
                    port_id=interface_.port_id)
                ips = port.fixed_ips
                lb_network = False
                for ip in ips:
                    if ip.ip_address == amphora[constants.LB_NETWORK_IP]:
                        lb_network = True
                if not lb_network:
                    ports.append(port)

        return ports

class PlugPorts(BaseNetworkTask):
    """Task to plug neutron ports into a compute instance."""

    def execute(self, amphora, ports):
        db_amp = self.amphora_repo.get(db_apis.get_session(),
                                       id=amphora[constants.ID])
        for port in ports:
            LOG.debug('Plugging port ID: %(port_id)s into compute instance: '
                      '%(compute_id)s.',
                      {constants.PORT_ID: port.id,
                       constants.COMPUTE_ID: amphora[constants.COMPUTE_ID]})
            self.network_driver.plug_port(db_amp, port)


class ApplyQos(BaseNetworkTask):
    """Apply Quality of Services to the VIP"""

    def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data,
                                 qos_policy_id, is_revert=False,
                                 request_qos_id=None):
        """Call network driver to apply QoS Policy on the vrrp ports."""
        if not amps_data:
            db_lb = self.loadbalancer_repo.get(
                db_apis.get_session(),
                id=loadbalancer[constants.LOADBALANCER_ID])
            amps_data = db_lb.amphorae

        apply_qos = ApplyQosAmphora()
        for amp_data in amps_data:
            apply_qos._apply_qos_on_vrrp_port(loadbalancer,
                                              amp_data.to_dict(),
                                              qos_policy_id)

    def execute(self, loadbalancer, amps_data=None, update_dict=None):
        """Apply qos policy on the vrrp ports which are related with vip."""
        db_lb = self.loadbalancer_repo.get(
            db_apis.get_session(),
            id=loadbalancer[constants.LOADBALANCER_ID])

        qos_policy_id = db_lb.vip.qos_policy_id
        if not qos_policy_id and (
            not update_dict or (
                'vip' not in update_dict or
                'qos_policy_id' not in update_dict[constants.VIP])):
            return
        if update_dict and update_dict.get(constants.VIP):
            vip_dict = update_dict[constants.VIP]
            if vip_dict.get(constants.QOS_POLICY_ID):
                qos_policy_id = vip_dict[constants.QOS_POLICY_ID]

        self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id)

    def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
               *args, **kwargs):
        """Handle a failure to apply QoS to VIP"""
        request_qos_id = loadbalancer['vip_qos_policy_id']
        orig_lb = self.task_utils.get_current_loadbalancer_from_db(
            loadbalancer[constants.LOADBALANCER_ID])
        orig_qos_id = orig_lb.vip.qos_policy_id
        if request_qos_id != orig_qos_id:
            self._apply_qos_on_vrrp_ports(loadbalancer, amps_data,
                                          orig_qos_id, is_revert=True,
                                          request_qos_id=request_qos_id)

class ApplyQosAmphora(BaseNetworkTask):
    """Apply Quality of Services to the VIP"""

    def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id,
                                is_revert=False, request_qos_id=None):
        """Call network driver to apply QoS Policy on the vrrp ports."""
        try:
            self.network_driver.apply_qos_on_port(
                qos_policy_id, amp_data[constants.VRRP_PORT_ID])
        except Exception:
            if not is_revert:
                raise
            LOG.warning('Failed to undo qos policy %(qos_id)s '
                        'on vrrp port: %(port)s from '
                        'amphorae: %(amp)s',
                        {'qos_id': request_qos_id,
                         'port': amp_data[constants.VRRP_PORT_ID],
                         'amp': amp_data.get(constants.ID)})

    def execute(self, loadbalancer, amp_data=None, update_dict=None):
        """Apply qos policy on the vrrp ports which are related with vip."""
        qos_policy_id = loadbalancer['vip_qos_policy_id']
        if not qos_policy_id and (
            update_dict and (
                'vip' not in update_dict or
                'qos_policy_id' not in update_dict[constants.VIP])):
            return
        self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id)

    def revert(self, result, loadbalancer, amp_data=None, update_dict=None,
               *args, **kwargs):
        """Handle a failure to apply QoS to VIP"""
        try:
            request_qos_id = loadbalancer['vip_qos_policy_id']
            orig_lb = self.task_utils.get_current_loadbalancer_from_db(
                loadbalancer[constants.LOADBALANCER_ID])
            orig_qos_id = orig_lb.vip.qos_policy_id
            if request_qos_id != orig_qos_id:
                self._apply_qos_on_vrrp_port(loadbalancer, amp_data,
                                             orig_qos_id, is_revert=True,
                                             request_qos_id=request_qos_id)
        except Exception as e:
            LOG.error('Failed to remove QoS policy: %s from port: %s due '
                      'to error: %s', orig_qos_id,
                      amp_data[constants.VRRP_PORT_ID], str(e))


class DeletePort(BaseNetworkTask):
    """Task to delete a network port."""

    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
                    stop=tenacity.stop_after_attempt(
                        CONF.networking.max_retries),
                    wait=tenacity.wait_exponential(
                        multiplier=CONF.networking.retry_backoff,
                        min=CONF.networking.retry_interval,
                        max=CONF.networking.retry_max), reraise=True)
    def execute(self, port_id, passive_failure=False):
        """Delete the network port."""
        if port_id is None:
            return
        if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER,
                                             1) == 1:
            LOG.debug("Deleting network port %s", port_id)
        else:
            LOG.warning('Retrying network port %s delete attempt %s of %s.',
                        port_id,
                        self.execute.retry.statistics[
                            constants.ATTEMPT_NUMBER],
                        self.execute.retry.stop.max_attempt_number)
        # Let the Taskflow engine know we are working and alive
        # Don't use get with a default for 'attempt_number', we need to fail
        # if that number is missing.
        self.update_progress(
            self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
            self.execute.retry.stop.max_attempt_number)

        try:
            self.network_driver.delete_port(port_id)
        except Exception:
            if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
                    self.execute.retry.stop.max_attempt_number):
                LOG.warning('Network port delete for port id: %s failed. '
                            'Retrying.', port_id)
                raise
            if passive_failure:
                LOG.exception('Network port delete for port ID: %s failed. '
                              'This resource will be abandoned and should '
                              'manually be cleaned up once the '
                              'network service is functional.', port_id)
                # Let's at least attempt to disable it so if the instance
                # comes back from the dead it doesn't conflict with anything.
                try:
                    self.network_driver.admin_down_port(port_id)
                    LOG.info('Successfully disabled (admin down) network '
                             'port %s that failed to delete.', port_id)
                except Exception:
                    LOG.warning('Attempt to disable (admin down) network '
                                'port %s failed. The network service has '
                                'failed. Continuing.', port_id)
            else:
                LOG.exception('Network port delete for port ID: %s failed. '
                              'The network service has failed. '
                              'Aborting and reverting.', port_id)
                raise
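
# A self-contained sketch of the tenacity policy DeletePort uses above:
# retry on any exception, exponential backoff between attempts, a bounded
# attempt count, and the original exception re-raised once retries are
# exhausted. The numbers are illustrative stand-ins for the
# CONF.networking values.
def _example_tenacity_policy():
    import tenacity as _tenacity

    @_tenacity.retry(retry=_tenacity.retry_if_exception_type(),
                     stop=_tenacity.stop_after_attempt(3),
                     wait=_tenacity.wait_exponential(multiplier=1, min=1,
                                                     max=10),
                     reraise=True)
    def flaky_delete():
        raise IOError('port delete failed')

    try:
        flaky_delete()  # three attempts, then the IOError is re-raised
    except IOError:
        pass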

class CreateVIPBasePort(BaseNetworkTask):
    """Task to create the VIP base port for an amphora."""

    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
                    stop=tenacity.stop_after_attempt(
                        CONF.networking.max_retries),
                    wait=tenacity.wait_exponential(
                        multiplier=CONF.networking.retry_backoff,
                        min=CONF.networking.retry_interval,
                        max=CONF.networking.retry_max), reraise=True)
    def execute(self, vip, vip_sg_id, amphora_id):
        port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
        fixed_ips = [{constants.SUBNET_ID: vip[constants.SUBNET_ID]}]
        sg_id = []
        if vip_sg_id:
            sg_id = [vip_sg_id]
        port = self.network_driver.create_port(
            vip[constants.NETWORK_ID], name=port_name, fixed_ips=fixed_ips,
            secondary_ips=[vip[constants.IP_ADDRESS]],
            security_group_ids=sg_id,
            qos_policy_id=vip[constants.QOS_POLICY_ID])
        LOG.info('Created port %s with ID %s for amphora %s',
                 port_name, port.id, amphora_id)
        return port.to_dict(recurse=True)

    def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs):
        if isinstance(result, failure.Failure):
            return
        try:
            port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
            for port in result:
                self.network_driver.delete_port(port.id)
                LOG.info('Deleted port %s with ID %s for amphora %s due to '
                         'a revert.', port_name, port.id, amphora_id)
        except Exception as e:
            LOG.error('Failed to delete port %s. Resources may still be in '
                      'use for a port intended for amphora %s due to error '
                      '%s. Search for a port named %s',
                      result, amphora_id, str(e), port_name)


class AdminDownPort(BaseNetworkTask):

    def execute(self, port_id):
        try:
            self.network_driver.set_port_admin_state_up(port_id, False)
        except base.PortNotFound:
            return
        for i in range(CONF.networking.max_retries):
            port = self.network_driver.get_port(port_id)
            if port.status == constants.DOWN:
                LOG.debug('Disabled port: %s', port_id)
                return
            LOG.debug('Port %s is %s instead of DOWN, waiting.',
                      port_id, port.status)
            time.sleep(CONF.networking.retry_interval)
        LOG.error('Port %s failed to go DOWN. Port status is still %s. '
                  'Ignoring and continuing.', port_id, port.status)

    def revert(self, result, port_id, *args, **kwargs):
        if isinstance(result, failure.Failure):
            return
        try:
            self.network_driver.set_port_admin_state_up(port_id, True)
        except Exception as e:
            LOG.error('Failed to bring port %s admin up on revert due to: '
                      '%s.', port_id, str(e))


class GetVIPSecurityGroupID(BaseNetworkTask):

    def execute(self, loadbalancer_id):
        sg_name = utils.get_vip_security_group_name(loadbalancer_id)
        try:
            security_group = self.network_driver.get_security_group(sg_name)
            if security_group:
                return security_group.id
        except base.SecurityGroupNotFound:
            with excutils.save_and_reraise_exception() as ctxt:
                if self.network_driver.sec_grp_enabled:
                    LOG.error('VIP security group %s was not found.',
                              sg_name)
                else:
                    ctxt.reraise = False
        return None
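
# A minimal sketch of the oslo.utils pattern GetVIPSecurityGroupID uses
# above: save_and_reraise_exception re-raises by default, but setting
# ctxt.reraise = False swallows the exception so a fallback value can be
# returned instead. The exception and flag here are illustrative.
def _example_conditional_reraise(strict=True):
    from oslo_utils import excutils as _excutils

    try:
        raise LookupError('security group not found')
    except LookupError:
        with _excutils.save_and_reraise_exception() as ctxt:
            if not strict:
                ctxt.reraise = False
    return None  # only reached when strict=False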

==== octavia-6.2.2/octavia/controller/worker/v2/tasks/retry_tasks.py ====

# Copyright 2019 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from oslo_log import log as logging
from taskflow import retry

LOG = logging.getLogger(__name__)


class SleepingRetryTimesController(retry.Times):
    """A retry controller to attempt subflow retries a number of times.

    This retry controller overrides the Times on_failure to inject a
    sleep interval between retries.
    It also adds a log message when all of the retries are exhausted.

    :param attempts: number of attempts to retry the associated subflow
                     before giving up
    :type attempts: int
    :param name: Meaningful name for this atom, should be something that is
                 distinguishable and understandable for notification,
                 debugging, storing and any other similar purposes.
    :param provides: A set, string or list of items that
                     this will be providing (or could provide) to others,
                     used to correlate and associate the thing/s this atom
                     produces, if it produces anything at all.
    :param requires: A set or list of required inputs for this atom's
                     ``execute`` method.
    :param rebind: A dict of key/value pairs used to define argument
                   name conversions for inputs to this atom's ``execute``
                   method.
    :param revert_all: when provided this will cause the full flow to revert
                       when the number of attempts that have been tried has
                       been reached (when false, it will only locally revert
                       the associated subflow)
    :type revert_all: bool
    :param interval: Interval, in seconds, between retry attempts.
    :type interval: int
    """

    def __init__(self, attempts=1, name=None, provides=None, requires=None,
                 auto_extract=True, rebind=None, revert_all=False,
                 interval=1):
        super().__init__(attempts, name, provides, requires, auto_extract,
                         rebind, revert_all)
        self._interval = interval

    def on_failure(self, history, *args, **kwargs):
        if len(history) < self._attempts:
            LOG.warning('%s attempt %s of %s failed. Sleeping %s seconds '
                        'and retrying.',
                        self.name[self.name.startswith('retry-') and
                                  len('retry-'):], len(history),
                        self._attempts, self._interval)
            time.sleep(self._interval)
            return retry.RETRY
        return self._revert_action

    def revert(self, history, *args, **kwargs):
        LOG.error('%s retries with interval %s seconds have failed for %s. '
                  'Giving up.', len(history), self._interval, self.name)
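
# A hypothetical sketch of attaching SleepingRetryTimesController to a
# subflow: the controller retries the flow's tasks up to 'attempts' times,
# sleeping 'interval' seconds between tries. Flow and task names are
# illustrative, not from Octavia's real flows.
def _example_retried_subflow():
    from taskflow import task as tf_task
    from taskflow.patterns import linear_flow

    class TransientTask(tf_task.Task):
        def execute(self):
            pass

    return linear_flow.Flow(
        'example-subflow',
        retry=SleepingRetryTimesController(
            attempts=3, name='retry-example-subflow', interval=2)
    ).add(TransientTask())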

==== octavia-6.2.2/octavia/db/__init__.py ====

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

==== octavia-6.2.2/octavia/db/api.py ====

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import time

from oslo_config import cfg
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
from oslo_utils import excutils
from sqlalchemy.sql.expression import select

LOG = logging.getLogger(__name__)

_FACADE = None


def _create_facade_lazily():
    global _FACADE
    if _FACADE is None:
        _FACADE = db_session.EngineFacade.from_config(cfg.CONF,
                                                      sqlite_fk=True)
    return _FACADE


def get_engine():
    facade = _create_facade_lazily()
    return facade.get_engine()


def get_session(expire_on_commit=True, autocommit=True):
    """Helper method to grab session."""
    facade = _create_facade_lazily()
    return facade.get_session(expire_on_commit=expire_on_commit,
                              autocommit=autocommit)


@contextlib.contextmanager
def get_lock_session():
    """Context manager for using a locking (not auto-commit) session."""
    lock_session = get_session(autocommit=False)
    try:
        yield lock_session
        lock_session.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()


def wait_for_connection(exit_event):
    """Helper method to wait for DB connection"""
    down = True
    while down and not exit_event.is_set():
        try:
            LOG.debug('Trying to re-establish connection to database.')
            get_engine().scalar(select([1]))
            down = False
            LOG.debug('Connection to database re-established.')
        except Exception:
            retry_interval = cfg.CONF.database.retry_interval
            LOG.exception('Connection to database failed. Retrying in %s '
                          'seconds.', retry_interval)
            time.sleep(retry_interval)
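
# A minimal sketch of the get_lock_session contract: the block commits on
# success and rolls back (then re-raises) on failure, so callers never
# manage the transaction by hand. The repository call in the comment is
# hypothetical.
def _example_locked_update():
    with get_lock_session() as lock_session:
        # e.g. repo.update(lock_session, some_id, enabled=True)
        pass
    # reaching here means the session was committed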

==== octavia-6.2.2/octavia/db/base_models.py ====

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_db.sqlalchemy import models
from oslo_utils import strutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.ext import declarative
from sqlalchemy.orm import collections


class OctaviaBase(models.ModelBase):

    __data_model__ = None

    @staticmethod
    def _get_unique_key(obj):
        """Returns a unique key for passed object for data model building."""
        # First handle all objects with their own ID, then handle subordinate
        # objects.
        if obj.__class__.__name__ in ['Member', 'Pool', 'LoadBalancer',
                                      'Listener', 'Amphora', 'L7Policy',
                                      'L7Rule', 'Flavor', 'FlavorProfile',
                                      'AvailabilityZoneProfile']:
            return obj.__class__.__name__ + obj.id
        if obj.__class__.__name__ in ['SessionPersistence', 'HealthMonitor']:
            return obj.__class__.__name__ + obj.pool_id
        if obj.__class__.__name__ in ['ListenerStatistics']:
            return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
        if obj.__class__.__name__ in ['ListenerCidr']:
            return obj.__class__.__name__ + obj.listener_id + obj.cidr
        if obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
            return obj.__class__.__name__ + obj.load_balancer_id
        if obj.__class__.__name__ in ['AmphoraHealth']:
            return obj.__class__.__name__ + obj.amphora_id
        if obj.__class__.__name__ in ['SNI']:
            return (obj.__class__.__name__ +
                    obj.listener_id + obj.tls_container_id)
        if obj.__class__.__name__ in ['Quotas']:
            return obj.__class__.__name__ + obj.project_id
        if obj.__class__.__name__ in ['AvailabilityZone']:
            return obj.__class__.__name__ + obj.name
        raise NotImplementedError

    def to_data_model(self, _graph_nodes=None):
        """Converts to a data model graph.

        In order to make the resulting data model graph usable no matter how
        many internal references are followed, we generate a complete graph
        of OctaviaBase nodes connected to the object passed to this method.

        :param _graph_nodes: Used only for internal recursion of this
                             method. Should not be called from the outside.
                             Contains a dictionary of all OctaviaBase type
                             objects in the generated graph
        """
        _graph_nodes = _graph_nodes or {}
        if not self.__data_model__:
            raise NotImplementedError
        dm_kwargs = {}
        for column in self.__table__.columns:
            dm_kwargs[column.name] = getattr(self, column.name)

        attr_names = [attr_name for attr_name in dir(self)
                      if not attr_name.startswith('_')]
        # Appending early, as any unique ID should be defined already and
        # the rest of this object will get filled out more fully later on,
        # and we need to add ourselves to the _graph_nodes before we
        # attempt recursion.
        dm_self = self.__data_model__(**dm_kwargs)
        dm_key = self._get_unique_key(dm_self)
        _graph_nodes.update({dm_key: dm_self})
        for attr_name in attr_names:
            attr = getattr(self, attr_name)
            if isinstance(attr, OctaviaBase) and attr.__class__:
                # If this attr is already in the graph node list, just
                # reference it there and don't recurse.
                ukey = self._get_unique_key(attr)
                if ukey in _graph_nodes.keys():
                    setattr(dm_self, attr_name, _graph_nodes[ukey])
                else:
                    setattr(dm_self, attr_name, attr.to_data_model(
                        _graph_nodes=_graph_nodes))
            elif isinstance(attr, (collections.InstrumentedList, list)):
                setattr(dm_self, attr_name, [])
                listref = getattr(dm_self, attr_name)
                for item in attr:
                    if isinstance(item, OctaviaBase) and item.__class__:
                        ukey = self._get_unique_key(item)
                        if ukey in _graph_nodes.keys():
                            listref.append(_graph_nodes[ukey])
                        else:
                            listref.append(
                                item.to_data_model(_graph_nodes=_graph_nodes))
                    elif not isinstance(item, OctaviaBase):
                        listref.append(item)
        return dm_self

    @staticmethod
    def apply_filter(query, model, filters):
        translated_filters = {}
        child_map = {}
        # Convert enabled to proper type
        if 'enabled' in filters:
            filters['enabled'] = strutils.bool_from_string(
                filters['enabled'])
        for attr, name_map in model.__v2_wsme__._child_map.items():
            for k, v in name_map.items():
                if attr in filters and k in filters[attr]:
                    child_map.setdefault(attr, {}).update(
                        {k: filters[attr].pop(k)})
            filters.pop(attr, None)

        for k, v in model.__v2_wsme__._type_to_model_map.items():
            if k in filters:
                translated_filters[v] = filters.pop(k)
        translated_filters.update(filters)
        if translated_filters:
            query = query.filter_by(**translated_filters)
        for k, v in child_map.items():
            query = query.join(getattr(model, k)).filter_by(**v)
        return query


class LookupTableMixin(object):
    """Mixin to add to classes that are lookup tables."""
    name = sa.Column(sa.String(255), primary_key=True, nullable=False)
    description = sa.Column(sa.String(255), nullable=True)


class IdMixin(object):
    """Id mixin, add to subclasses that have an id."""
    id = sa.Column(sa.String(36), primary_key=True,
                   default=uuidutils.generate_uuid)


class ProjectMixin(object):
    """Tenant mixin, add to subclasses that have a project."""
    project_id = sa.Column(sa.String(36))


class NameMixin(object):
    """Name mixin to add to classes which need a name."""
    name = sa.Column(sa.String(255), nullable=True)


class TagMixin(object):
    """Tags mixin to add to classes which need tags.

    The class must realize the specified db relationship as well.
    """

    @property
    def tags(self):
        if self._tags:
            return [each_tag.tag for each_tag in self._tags]
        return []

    @tags.setter
    def tags(self, values):
        new_tags = []
        if values:
            for tag in values:
                tag_ref = Tags()
                tag_ref.resource_id = self.id
                tag_ref.tag = tag
                new_tags.append(tag_ref)
        self._tags = new_tags


BASE = declarative.declarative_base(cls=OctaviaBase)


class Tags(BASE):

    __tablename__ = "tags"

    resource_id = sa.Column(sa.String(36), primary_key=True)
    tag = sa.Column(sa.String(255), primary_key=True, index=True)
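
# A hypothetical model (not part of Octavia's schema) showing how the
# mixins above compose: IdMixin supplies a UUID primary key, NameMixin an
# optional name, and ProjectMixin the owning project id.
class _ExampleWidget(BASE, IdMixin, NameMixin, ProjectMixin):
    __tablename__ = 'example_widget'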

==== octavia-6.2.2/octavia/db/healthcheck.py ====

# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from octavia.i18n import _

LOG = logging.getLogger(__name__)


def check_database_connection(session):
    """This is a simple database connection check function.

    It will do a simple no-op query (low overhead) against the sqlalchemy
    session passed in.

    :param session: A Sql Alchemy database session.
    :returns: A (healthy, message) tuple: (True, None) when the check
              succeeds, (False, <error message>) when it fails.
    """
    try:
        session.execute('SELECT 1;')
        return True, None
    except Exception as e:
        message = _('Database health check failed due to: {err}.').format(
            err=str(e))
        LOG.error(message)
        return False, message
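
# A minimal sketch of calling the health check above from an API handler;
# the session helper comes from octavia.db.api (earlier in this archive),
# and the status-code wiring is hypothetical.
def _example_healthcheck_handler():
    from octavia.db import api as db_apis

    healthy, message = check_database_connection(db_apis.get_session())
    if healthy:
        return 200, 'OK'
    return 503, message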

==== octavia-6.2.2/octavia/db/migration/__init__.py ====

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

==== octavia-6.2.2/octavia/db/migration/alembic.ini ====

# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = %(here)s/alembic_migrations

# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false

sqlalchemy.url =

# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

==== octavia-6.2.2/octavia/db/migration/alembic_migrations/README.rst ====

The alembic_migrations/versions directory contains the migration scripts.
Before running a migration, ensure that the octavia database exists.

To run migrations you must first be in the octavia/db/migration directory.

To migrate to the most current version run:

    $ octavia-db-manage upgrade head

==== octavia-6.2.2/octavia/db/migration/alembic_migrations/env.py ====

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import with_statement

import sys

from alembic import context
from sqlalchemy import create_engine
from sqlalchemy import pool

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

try:
    octavia_config = config.octavia_config
except AttributeError:
    print("Error: Please use the octavia-db-manage command for octavia"
          " alembic actions.")
    sys.exit(1)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine,
    though an Engine is acceptable here as well. By skipping the Engine
    creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    context.configure(url=octavia_config.database.connection,
                      target_metadata=target_metadata)

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    engine = create_engine(
        octavia_config.database.connection,
        poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()

==== octavia-6.2.2/octavia/db/migration/alembic_migrations/script.py.mako ====

"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}

"""

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

def upgrade():
    ${upgrades if upgrades else "pass"}

==== octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/034756a182a2_amphora_add_image_id.py ====

# Copyright 2017 GoDaddy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""amphora add image id

Revision ID: 034756a182a2
Revises: 10d38216ad34
Create Date: 2018-02-26 17:38:37.971677

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '034756a182a2'
down_revision = '10d38216ad34'


def upgrade():
    op.add_column(
        u'amphora',
        sa.Column(u'image_id', sa.String(36), nullable=True)
    )
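# Roughly the DDL the migration above emits (assuming a MySQL backend;
# exact output varies by dialect):
#
#     ALTER TABLE amphora ADD COLUMN image_id VARCHAR(36) NULL;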

==== octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/034b2dc2f3e0_modernize_l7policy_fields.py ====

# Copyright 2017 GoDaddy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""modernize_l7policy_fields

Revision ID: 034b2dc2f3e0
Revises: fac584114642
Create Date: 2017-04-01 05:44:43.400535

"""

from alembic import op
import sqlalchemy as sa

from octavia.common import constants

# revision identifiers, used by Alembic.
revision = '034b2dc2f3e0'
down_revision = 'fac584114642'


def upgrade():
    # Add timing data
    op.add_column(
        u'l7policy',
        sa.Column(u'created_at', sa.DateTime(), nullable=True)
    )
    op.add_column(
        u'l7policy',
        sa.Column(u'updated_at', sa.DateTime(), nullable=True)
    )

    # Add project_id
    op.add_column(
        u'l7policy',
        sa.Column(u'project_id', sa.String(36), nullable=True)
    )

    # Add new operating_status column, setting existing rows to ONLINE
    op.add_column(
        u'l7policy',
        sa.Column(u'operating_status', sa.String(16),
                  nullable=False, server_default=constants.ONLINE)
    )
    # Remove the default, as we don't actually want one
    op.alter_column(u'l7policy', u'operating_status',
                    existing_type=sa.String(16), server_default=None)
    # Add the foreign key for operating_status_name
    op.create_foreign_key(
        u'fk_l7policy_operating_status_name', u'l7policy',
        u'operating_status', [u'operating_status'], [u'name']
    )

    op.drop_constraint('fk_health_monitor_provisioning_status_name',
                       'health_monitor', type_='foreignkey')
    op.drop_constraint('fk_l7policy_provisioning_status_name',
                       'l7policy', type_='foreignkey')
    op.drop_constraint('fk_l7rule_provisioning_status_name',
                       'l7rule', type_='foreignkey')
    op.drop_constraint('fk_member_provisioning_status_name',
                       'member', type_='foreignkey')
    op.drop_constraint('fk_pool_provisioning_status_name',
                       'pool', type_='foreignkey')

    # provisioning_status was mistakenly added as nullable, the fix is
    # similar
    op.alter_column(u'l7policy', u'provisioning_status', nullable=False,
                    existing_type=sa.String(16),
                    server_default=constants.ACTIVE)
    op.alter_column(u'l7policy', u'provisioning_status',
                    existing_type=sa.String(16), server_default=None)

    # Fix the rest of these that were also mistakenly set as nullable in:
    # 9b5473976d6d_add_provisioning_status_to_objects.py
    op.alter_column(u'health_monitor', u'provisioning_status',
                    nullable=False, existing_type=sa.String(16),
                    server_default=constants.ACTIVE)
    op.alter_column(u'health_monitor', u'provisioning_status',
                    existing_type=sa.String(16), server_default=None)
    op.alter_column(u'member', u'provisioning_status', nullable=False,
                    existing_type=sa.String(16),
                    server_default=constants.ACTIVE)
    op.alter_column(u'member', u'provisioning_status',
                    existing_type=sa.String(16), server_default=None)
    op.alter_column(u'pool', u'provisioning_status', nullable=False,
                    existing_type=sa.String(16),
                    server_default=constants.ACTIVE)
    op.alter_column(u'pool', u'provisioning_status',
                    existing_type=sa.String(16), server_default=None)
    op.alter_column(u'l7rule', u'provisioning_status', nullable=False,
                    existing_type=sa.String(16),
                    server_default=constants.ACTIVE)
    op.alter_column(u'l7rule', u'provisioning_status',
                    existing_type=sa.String(16), server_default=None)

    op.create_foreign_key(
        u'fk_health_monitor_provisioning_status_name', u'health_monitor',
        u'provisioning_status', [u'provisioning_status'], [u'name']
    )
    op.create_foreign_key(
        u'fk_l7policy_provisioning_status_name', u'l7policy',
        u'provisioning_status', [u'provisioning_status'], [u'name']
    )
    op.create_foreign_key(
        u'fk_l7rule_provisioning_status_name', u'l7rule',
        u'provisioning_status', [u'provisioning_status'], [u'name']
    )
    op.create_foreign_key(
        u'fk_member_provisioning_status_name', u'member',
        u'provisioning_status', [u'provisioning_status'], [u'name']
    )
    op.create_foreign_key(
        u'fk_pool_provisioning_status_name', u'pool',
        u'provisioning_status', [u'provisioning_status'], [u'name']
    )
path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/0aee2b450512_extend_api_to_accept_qos_policy_id.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/0aee2b450512_extend_api_to_accept_qos0000664000175000017500000000207600000000000033450 0ustar00zuulzuul00000000000000# Copyright 2017 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add QoS Policy ID column to VIP table Revision ID: 0aee2b450512 Revises: bf171d0d91c3 Create Date: 2017-02-07 20:47:52.405865 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '0aee2b450512' down_revision = 'bf171d0d91c3' def upgrade(): op.add_column('vip', sa.Column('qos_policy_id', sa.String(length=36), nullable=True, server_default=None)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/0f242cf02c74_add_provider_column.py0000664000175000017500000000212300000000000033073 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add provider column Revision ID: 0f242cf02c74 Revises: 0fd2c131923f Create Date: 2018-04-23 16:22:26.971048 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '0f242cf02c74' down_revision = '0fd2c131923f' def upgrade(): op.add_column( u'load_balancer', sa.Column(u'provider', sa.String(64), nullable=True) ) op.execute("UPDATE load_balancer set provider='amphora' where provider " "is null") ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/0fd2c131923f_add_timeout_fields_to_listener.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/0fd2c131923f_add_timeout_fields_to_li0000664000175000017500000000340200000000000033441 0ustar00zuulzuul00000000000000# Copyright 2018 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """add timeout fields to listener Revision ID: 0fd2c131923f Revises: ba35e0fb88e1 Create Date: 2018-03-23 03:34:26.657254 """ from alembic import op import sqlalchemy as sa from octavia.common import constants # revision identifiers, used by Alembic. revision = '0fd2c131923f' down_revision = 'ba35e0fb88e1' def upgrade(): op.add_column('listener', sa.Column('timeout_client_data', sa.Integer(), nullable=True, default=constants.DEFAULT_TIMEOUT_CLIENT_DATA)) op.add_column('listener', sa.Column('timeout_member_connect', sa.Integer(), nullable=True, default=constants.DEFAULT_TIMEOUT_MEMBER_CONNECT)) op.add_column('listener', sa.Column('timeout_member_data', sa.Integer(), nullable=True, default=constants.DEFAULT_TIMEOUT_MEMBER_DATA)) op.add_column('listener', sa.Column('timeout_tcp_inspect', sa.Integer(), nullable=True, default=constants.DEFAULT_TIMEOUT_TCP_INSPECT)) ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/10d38216ad34_add_timestamps_to_amphora.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/10d38216ad34_add_timestamps_to_amphor0000664000175000017500000000212200000000000033410 0ustar00zuulzuul00000000000000# Copyright 2018 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add timestamps to amphora Revision ID: 10d38216ad34 Revises: 0aee2b450512 Create Date: 2018-02-26 10:04:59.133772 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '10d38216ad34' down_revision = '0aee2b450512' def upgrade(): op.add_column( u'amphora', sa.Column(u'created_at', sa.DateTime(), nullable=True) ) op.add_column( u'amphora', sa.Column(u'updated_at', sa.DateTime(), nullable=True) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/11e4bb2bb8ef_fix_ipv6_vip.py0000664000175000017500000000166200000000000031714 0ustar00zuulzuul00000000000000# Copyright 2017 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix_IPv6_VIP Revision ID: 11e4bb2bb8ef Revises: 211982b05afc Create Date: 2019-01-28 08:35:35.333616 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
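# Widening vip.ip_address from the original String(36) to String(64) makes
# room for full IPv6 addresses (up to 45 characters in IPv4-mapped text
# form), which did not reliably fit in the old column.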
revision = '11e4bb2bb8ef'
down_revision = '211982b05afc'


def upgrade():
    op.alter_column(u'vip', u'ip_address', type_=sa.String(64))


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/13500e2e978d_update_url_and_name_size.py

# Copyright 2014 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

'''update url and name size

Revision ID: 13500e2e978d
Revises: 4c094013699a
Create Date: 2014-09-18 16:07:04.859812

'''

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '13500e2e978d'
down_revision = '4c094013699a'


def upgrade():
    op.alter_column(u'provisioning_status', u'name',
                    existing_type=sa.String(255))
    op.alter_column(u'operating_status', u'name',
                    existing_type=sa.String(255))
    op.alter_column(u'health_monitor_type', u'name',
                    existing_type=sa.String(255))
    op.alter_column(u'protocol', u'name',
                    existing_type=sa.String(255))
    op.alter_column(u'algorithm', u'name',
                    existing_type=sa.String(255))
    op.alter_column(u'session_persistence_type', u'name',
                    existing_type=sa.String(255))


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/14892634e228_update_vip.py

# Copyright 2014 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""update vip

Revision ID: 14892634e228
Revises: 3a1e1cdb7b27
Create Date: 2015-01-10 00:53:57.798213

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
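# The renames and drops below run inside op.batch_alter_table(), which
# rebuilds the table for backends (notably SQLite) that cannot ALTER
# existing columns in place.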
revision = '14892634e228'
down_revision = '3a1e1cdb7b27'


def upgrade():
    with op.batch_alter_table(u'vip') as batch_op:
        batch_op.alter_column(u'subnet_id', new_column_name=u'network_id',
                              existing_type=sa.String(36))
        batch_op.alter_column(u'net_port_id', new_column_name=u'port_id',
                              existing_type=sa.String(36))
        batch_op.drop_column(u'floating_ip_id')
        batch_op.drop_column(u'floating_ip_network_id')


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/186509101b9b_add_server_group_id_to_loadbalancer.py

# Copyright 2016 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""add_server_group_id_to_loadbalancer

Revision ID: 186509101b9b
Revises: 458c9ee2a011
Create Date: 2016-01-25 15:12:52.489652

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '186509101b9b'
down_revision = '458c9ee2a011'


def upgrade():
    op.add_column(u'load_balancer', sa.Column(u'server_group_id',
                                              sa.String(36), nullable=True))


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/1afc932f1ca2_l7rule_support_client_cert.py

# Copyright 2018 Huawei
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Extend the l7rule type to support client certificate cases

Revision ID: 1afc932f1ca2
Revises: ffad172e98c1
Create Date: 2018-10-03 20:47:52.405865

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql

# revision identifiers, used by Alembic.
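# Seeding pattern used below: build a lightweight table construct with
# sql.table()/sql.column(), then op.bulk_insert() one row per new
# l7rule_type name, leaving 'description' NULL.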
revision = '1afc932f1ca2'
down_revision = 'ffad172e98c1'

new_fields = ['SSL_CONN_HAS_CERT', 'SSL_VERIFY_RESULT', 'SSL_DN_FIELD']


def upgrade():
    insert_table = sql.table(
        u'l7rule_type',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )

    cows = [{'name': field} for field in new_fields]
    op.bulk_insert(insert_table, cows)


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/1e4c1d83044c_keepalived_configuration_datamodel.py

# Copyright 2015 Hewlett Packard Enterprise Development Company LP
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Keepalived configuration datamodel

Revision ID: 1e4c1d83044c
Revises: 5a3ee5472c31
Create Date: 2015-08-06 10:39:54.998797

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql

# revision identifiers, used by Alembic.
revision = '1e4c1d83044c'
down_revision = '5a3ee5472c31'


def upgrade():
    op.create_table(
        u'vrrp_auth_method',
        sa.Column(u'name', sa.String(36), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )
    insert_table = sql.table(
        u'vrrp_auth_method',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'PASS'},
            {'name': 'AH'}
        ]
    )
    op.create_table(
        u'vrrp_group',
        sa.Column(u'load_balancer_id', sa.String(36), nullable=False),
        sa.Column(u'vrrp_group_name', sa.String(36), nullable=True),
        sa.Column(u'vrrp_auth_type', sa.String(16), nullable=True),
        sa.Column(u'vrrp_auth_pass', sa.String(36), nullable=True),
        sa.Column(u'advert_int', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint(u'load_balancer_id'),
        sa.ForeignKeyConstraint([u'load_balancer_id'], [u'load_balancer.id'],
                                name=u'fk_vrrp_group_load_balancer_id'),
        sa.ForeignKeyConstraint([u'vrrp_auth_type'],
                                [u'vrrp_auth_method.name'],
                                name=u'fk_load_balancer_vrrp_auth_method_name')
    )
    op.add_column(
        u'listener',
        sa.Column(u'peer_port', sa.Integer(), nullable=True)
    )
    op.add_column(
        u'amphora',
        sa.Column(u'vrrp_interface', sa.String(16), nullable=True)
    )
    op.add_column(
        u'amphora',
        sa.Column(u'vrrp_id', sa.Integer(), nullable=True)
    )
    op.add_column(
        u'amphora',
        sa.Column(u'vrrp_priority', sa.Integer(), nullable=True)
    )


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/211982b05afc_add_flavor_id_to_lb.py

# Copyright 2018 Rackspace, US Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""add_flavor_id_to_lb

Revision ID: 211982b05afc
Revises: b9c703669314
Create Date: 2018-11-30 14:57:28.559884

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '211982b05afc'
down_revision = 'b9c703669314'


def upgrade():
    op.add_column('load_balancer', sa.Column('flavor_id',
                                             sa.String(36), nullable=True))


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/2351ea316465_adding_terminate_https_tls_ref_support.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Adding TERMINATED_HTTPS support and TLS ref ID char length increase

Revision ID: 2351ea316465
Revises: 357d17a6d5ac
Create Date: 2015-05-22 11:57:04.703910

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql

# revision identifiers, used by Alembic.
revision = '2351ea316465'
down_revision = '357d17a6d5ac'

new_protocol = 'TERMINATED_HTTPS'


def upgrade():
    insert_table = sql.table(
        u'protocol',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )

    op.bulk_insert(
        insert_table,
        [
            {'name': new_protocol}
        ]
    )

    op.alter_column(u'listener', u'tls_certificate_id',
                    existing_type=sa.String(255), nullable=True)


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/256852d5ff7c_add_lb_network_ip_to_amphora.py

# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""add lb_network_ip to amphora

Revision ID: 256852d5ff7c
Revises: 14892634e228
Create Date: 2015-01-13 16:18:57.359290

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '256852d5ff7c'
down_revision = '14892634e228'


def upgrade():
    op.add_column(u'amphora', sa.Column(u'lb_network_ip', sa.String(64),
                                        nullable=True))


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/27e54d00c3cd_add_monitor_address_and_port_to_member.py

# Copyright 2017 EayunStack, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""add monitor address and port to member

Revision ID: 27e54d00c3cd
Revises: 5309960964f8
Create Date: 2017-05-01 23:12:16.695581

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '27e54d00c3cd'
down_revision = '5309960964f8'


def upgrade():
    op.add_column(u'member',
                  sa.Column(u'monitor_address', sa.String(64),
                            nullable=True))
    op.add_column(u'member',
                  sa.Column(u'monitor_port', sa.Integer(),
                            nullable=True))


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/298eac0640a7_add_amphora_vrrp_port_id_and_ha_port_id.py

# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Add vrrp_port_id and ha_port_id to amphora

Revision ID: 298eac0640a7
Revises: 4fe8240425b4
Create Date: 2015-07-20 15:25:37.044098

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '298eac0640a7'
down_revision = '4fe8240425b4'


def upgrade():
    op.add_column(u'amphora', sa.Column(u'vrrp_port_id', sa.String(36),
                                        nullable=True))
    op.add_column(u'amphora', sa.Column(u'ha_port_id', sa.String(36),
                                        nullable=True))


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/29ff921a6eb_shared_pools.py

# Copyright 2016 Blue Box, an IBM Company
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Shared pools

Revision ID: 29ff921a6eb
Revises: 43287cd10fef
Create Date: 2015-12-09 10:32:12.712932

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '29ff921a6eb'
down_revision = '43287cd10fef'


def upgrade():
    conn = op.get_bind()

    # Minimal examples of the tables we need to manipulate
    listener = sa.sql.table(
        'listener',
        sa.sql.column('load_balancer_id', sa.String),
        sa.sql.column('default_pool_id', sa.String))
    pool = sa.sql.table(
        'pool',
        sa.sql.column('load_balancer_id', sa.String),
        sa.sql.column('id', sa.String))

    # This foreign key does not need to be unique anymore. To remove the
    # uniqueness but keep the foreign key we have to do some juggling.
    op.drop_constraint('fk_listener_pool_id', 'listener',
                       type_='foreignkey')
    op.drop_constraint('uq_listener_default_pool_id', 'listener',
                       type_='unique')
    op.create_foreign_key('fk_listener_pool_id', 'listener',
                          'pool', ['default_pool_id'], ['id'])

    op.add_column(u'pool', sa.Column('load_balancer_id', sa.String(36),
                                     sa.ForeignKey('load_balancer.id'),
                                     nullable=True))

    # Populate this new column appropriately
    select_obj = sa.select([listener.c.load_balancer_id,
                            listener.c.default_pool_id]).where(
        listener.c.default_pool_id.isnot(None))
    result = conn.execute(select_obj)

    for row in result:
        stmt = pool.update().values(load_balancer_id=row[0]).where(
            pool.c.id == row[1])
        op.execute(stmt)

# For existing installations, the above ETL should populate the above column
# using the following procedure:
#
# Get the output from this:
#
# SELECT default_pool_id, load_balancer_id l_id FROM listener WHERE
# default_pool_id IS NOT NULL;
#
# Then for every row returned run:
#
# UPDATE pool SET load_balancer_id = l_id WHERE id = default_pool_id;


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/2ad093f6353f_add_listener_client_ca_tls_certificate_.py

# Copyright 2018 Huawei
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Add listener client_ca_tls_certificate_id column

Revision ID: 2ad093f6353f
Revises: 11e4bb2bb8ef
Create Date: 2019-02-13 08:32:43.009997

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '2ad093f6353f'
down_revision = '11e4bb2bb8ef'


def upgrade():
    op.add_column(
        u'listener',
        sa.Column(u'client_ca_tls_certificate_id', sa.String(255),
                  nullable=True)
    )


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/357d17a6d5ac_update_lb_and_amphora_data_model_for_.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""update lb and amphora data model for active passive

Revision ID: 357d17a6d5ac
Revises: 298eac0640a7
Create Date: 2015-07-16 17:41:49.029145

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql

# revision identifiers, used by Alembic.
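# Pattern used below: create a lookup table, seed it with op.bulk_insert(),
# then attach FK-constrained columns (load_balancer.topology and
# amphora.role) that reference the seeded names.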
revision = '357d17a6d5ac'
down_revision = '298eac0640a7'


def upgrade():
    op.create_table(
        u'lb_topology',
        sa.Column(u'name', sa.String(36), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )
    insert_table = sql.table(
        u'lb_topology',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'SINGLE'},
            {'name': 'ACTIVE_STANDBY'}
        ]
    )
    op.create_table(
        u'amphora_roles',
        sa.Column(u'name', sa.String(36), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )
    insert_table = sql.table(
        u'amphora_roles',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'MASTER'},
            {'name': 'BACKUP'},
            {'name': 'STANDALONE'}
        ]
    )
    op.add_column(
        u'load_balancer',
        sa.Column(u'topology', sa.String(36),
                  sa.ForeignKey(u'lb_topology.name',
                                name=u'fk_lb_topology_name'),
                  nullable=True)
    )
    op.add_column(
        u'amphora',
        sa.Column(u'role', sa.String(36),
                  sa.ForeignKey(u'amphora_roles.name',
                                name=u'fk_amphora_roles_name'),
                  nullable=True)
    )


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/35dee79d5865_initial_create.py

# Copyright 2014 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

'''initial_create

Revision ID: 35dee79d5865
Revises: None
Create Date: 2014-08-15 11:01:14.897223

'''

from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql

# revision identifiers, used by Alembic.
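# Root of the migration chain: down_revision is None, so this revision
# builds the schema from scratch, creating the lookup tables first so the
# entity tables can reference them by name.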
revision = '35dee79d5865'
down_revision = None


def upgrade():
    # Create lookup tables
    op.create_table(
        u'health_monitor_type',
        sa.Column(u'name', sa.String(30), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )

    # Create temporary table for table data seeding
    insert_table = sql.table(
        u'health_monitor_type',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'HTTP'},
            {'name': 'HTTPS'},
            {'name': 'TCP'}
        ]
    )

    op.create_table(
        u'protocol',
        sa.Column(u'name', sa.String(30), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )
    insert_table = sql.table(
        u'protocol',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'HTTP'},
            {'name': 'HTTPS'},
            {'name': 'TCP'}
        ]
    )

    op.create_table(
        u'algorithm',
        sa.Column(u'name', sa.String(30), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )
    insert_table = sql.table(
        u'algorithm',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'ROUND_ROBIN'},
            {'name': 'LEAST_CONNECTIONS'},
            {'name': 'SOURCE_IP'}
        ]
    )

    op.create_table(
        u'session_persistence_type',
        sa.Column(u'name', sa.String(30), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )
    insert_table = sql.table(
        u'session_persistence_type',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'SOURCE_IP'},
            {'name': 'HTTP_COOKIE'},
            {'name': 'APP_COOKIE'}
        ]
    )

    op.create_table(
        u'provisioning_status',
        sa.Column(u'name', sa.String(30), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )
    insert_table = sql.table(
        u'provisioning_status',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'ACTIVE'},
            {'name': 'PENDING_CREATE'},
            {'name': 'PENDING_UPDATE'},
            {'name': 'PENDING_DELETE'},
            {'name': 'DELETED'},
            {'name': 'ERROR'}
        ]
    )

    op.create_table(
        u'operating_status',
        sa.Column(u'name', sa.String(30), primary_key=True),
        sa.Column(u'description', sa.String(255), nullable=True)
    )
    insert_table = sql.table(
        u'operating_status',
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(
        insert_table,
        [
            {'name': 'ONLINE'},
            {'name': 'OFFLINE'},
            {'name': 'DEGRADED'},
            {'name': 'ERROR'}
        ]
    )

    op.create_table(
        u'pool',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'description', sa.String(255), nullable=True),
        sa.Column(u'protocol', sa.String(16), nullable=False),
        sa.Column(u'lb_algorithm', sa.String(16), nullable=False),
        sa.Column(u'operating_status', sa.String(16), nullable=False),
        sa.Column(u'enabled', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint(u'id'),
        sa.ForeignKeyConstraint([u'protocol'], [u'protocol.name'],
                                name=u'fk_pool_protocol_name'),
        sa.ForeignKeyConstraint([u'lb_algorithm'], [u'algorithm.name'],
                                name=u'fk_pool_algorithm_name'),
        sa.ForeignKeyConstraint([u'operating_status'],
                                [u'operating_status.name'],
                                name=u'fk_pool_operating_status_name')
    )

    op.create_table(
        u'health_monitor',
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'type', sa.String(36), nullable=False),
        sa.Column(u'delay', sa.Integer(), nullable=False),
        sa.Column(u'timeout', sa.Integer(), nullable=False),
        sa.Column(u'fall_threshold', sa.Integer(), nullable=False),
        sa.Column(u'rise_threshold', sa.Integer(), nullable=False),
        sa.Column(u'http_method', sa.String(16), nullable=True),
        sa.Column(u'url_path', sa.String(255), nullable=True),
        sa.Column(u'expected_codes', sa.String(64), nullable=True),
        sa.Column(u'enabled', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint(u'pool_id'),
        sa.ForeignKeyConstraint([u'pool_id'], [u'pool.id'],
                                name=u'fk_health_monitor_pool_id'),
        sa.ForeignKeyConstraint(
            [u'type'], [u'health_monitor_type.name'],
            name=u'fk_health_monitor_health_monitor_type_name')
    )

    op.create_table(
        u'session_persistence',
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'type', sa.String(16), nullable=False),
        sa.Column(u'cookie_name', sa.String(255), nullable=True),
        sa.ForeignKeyConstraint(
            [u'type'], [u'session_persistence_type.name'],
            name=u'fk_session_persistence_session_persistence_type_name'),
        sa.ForeignKeyConstraint([u'pool_id'], [u'pool.id'],
                                name=u'fk_session_persistence_pool_id'),
        sa.PrimaryKeyConstraint(u'pool_id')
    )

    op.create_table(
        u'member',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'subnet_id', sa.String(36), nullable=True),
        sa.Column(u'address', sa.String(64), nullable=False),
        sa.Column(u'protocol_port', sa.Integer(), nullable=False),
        sa.Column(u'weight', sa.Integer(), nullable=True),
        sa.Column(u'operating_status', sa.String(16), nullable=False),
        sa.Column(u'enabled', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint(u'id'),
        sa.ForeignKeyConstraint([u'pool_id'], [u'pool.id'],
                                name=u'fk_member_pool_id'),
        sa.ForeignKeyConstraint([u'operating_status'],
                                [u'operating_status.name'],
                                name=u'fk_member_operating_status_name'),
        sa.UniqueConstraint(u'pool_id', u'address', u'protocol_port',
                            name=u'uq_member_pool_id_address_protocol_port')
    )

    op.create_table(
        u'load_balancer',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'description', sa.String(255), nullable=True),
        sa.Column(u'provisioning_status', sa.String(16), nullable=False),
        sa.Column(u'operating_status', sa.String(16), nullable=False),
        sa.Column(u'enabled', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint(u'id'),
        sa.ForeignKeyConstraint(
            [u'provisioning_status'], [u'provisioning_status.name'],
            name=u'fk_load_balancer_provisioning_status_name'),
        sa.ForeignKeyConstraint([u'operating_status'],
                                [u'operating_status.name'],
                                name=u'fk_load_balancer_operating_status_name')
    )

    op.create_table(
        u'vip',
        sa.Column(u'load_balancer_id', sa.String(36), nullable=False),
        sa.Column(u'ip_address', sa.String(36), nullable=True),
        sa.Column(u'net_port_id', sa.String(36), nullable=True),
        sa.Column(u'subnet_id', sa.String(36), nullable=True),
        sa.Column(u'floating_ip_id', sa.String(36), nullable=True),
        sa.Column(u'floating_ip_network_id', sa.String(36), nullable=True),
        sa.PrimaryKeyConstraint(u'load_balancer_id'),
        sa.ForeignKeyConstraint([u'load_balancer_id'], [u'load_balancer.id'],
                                name=u'fk_vip_load_balancer_id')
    )

    op.create_table(
        u'listener',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'description', sa.String(255), nullable=True),
        sa.Column(u'protocol', sa.String(16), nullable=False),
        sa.Column(u'protocol_port', sa.Integer(), nullable=False),
        sa.Column(u'connection_limit', sa.Integer(), nullable=True),
        sa.Column(u'load_balancer_id', sa.String(36), nullable=True),
        sa.Column(u'tls_certificate_id', sa.String(36), nullable=True),
        sa.Column(u'default_pool_id', sa.String(36), nullable=True),
        sa.Column(u'provisioning_status', sa.String(16), nullable=False),
        sa.Column(u'operating_status', sa.String(16), nullable=False),
        sa.Column(u'enabled', sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint([u'load_balancer_id'], [u'load_balancer.id'],
                                name=u'fk_listener_load_balancer_id'),
        sa.ForeignKeyConstraint([u'default_pool_id'], [u'pool.id'],
                                name=u'fk_listener_pool_id'),
        sa.ForeignKeyConstraint([u'protocol'], [u'protocol.name'],
                                name=u'fk_listener_protocol_name'),
        sa.ForeignKeyConstraint([u'provisioning_status'],
                                [u'provisioning_status.name'],
                                name=u'fk_listener_provisioning_status_name'),
        sa.ForeignKeyConstraint([u'operating_status'],
                                [u'operating_status.name'],
                                name=u'fk_listener_operating_status_name'),
        sa.UniqueConstraint(u'default_pool_id',
                            name=u'uq_listener_default_pool_id'),
        sa.UniqueConstraint(
            u'load_balancer_id', u'protocol_port',
            name=u'uq_listener_load_balancer_id_protocol_port'),
        sa.PrimaryKeyConstraint(u'id')
    )

    op.create_table(
        u'sni',
        sa.Column(u'listener_id', sa.String(36), nullable=False),
        sa.Column(u'tls_container_id', sa.String(36), nullable=False),
        sa.Column(u'position', sa.Integer, nullable=True),
        sa.ForeignKeyConstraint([u'listener_id'], [u'listener.id'],
                                name=u'fk_sni_listener_id'),
        sa.PrimaryKeyConstraint(u'listener_id', u'tls_container_id')
    )

    op.create_table(
        u'listener_statistics',
        sa.Column(u'listener_id', sa.String(36), nullable=False),
        sa.Column(u'bytes_in', sa.BigInteger(), nullable=False),
        sa.Column(u'bytes_out', sa.BigInteger(), nullable=False),
        sa.Column(u'active_connections', sa.Integer(), nullable=False),
        sa.Column(u'total_connections', sa.BigInteger(), nullable=False),
        sa.PrimaryKeyConstraint(u'listener_id'),
        sa.ForeignKeyConstraint([u'listener_id'], [u'listener.id'],
                                name=u'fk_listener_statistics_listener_id')
    )

    op.create_table(
        u'amphora',
        # id should come from the service providing the amphora (i.e. nova)
        sa.Column(u'id', sa.String(36), nullable=False, autoincrement=False),
        sa.Column(u'host_id', sa.String(36), nullable=False),
        sa.Column(u'status', sa.String(36), nullable=False),
        sa.PrimaryKeyConstraint(u'id'),
        sa.ForeignKeyConstraint(
            [u'status'], [u'provisioning_status.name'],
            name=u'fk_container_provisioning_status_name')
    )

    op.create_table(
        u'load_balancer_amphora',
        sa.Column(u'amphora_id', sa.String(36), nullable=False),
        sa.Column(u'load_balancer_id', sa.String(36), nullable=False),
        sa.ForeignKeyConstraint(
            [u'load_balancer_id'], [u'load_balancer.id'],
            name=u'fk_load_balancer_amphora_load_balancer_id'),
        sa.ForeignKeyConstraint([u'amphora_id'], [u'amphora.id'],
                                name=u'fk_load_balancer_amphora_id'),
        sa.PrimaryKeyConstraint(u'amphora_id', u'load_balancer_id')
    )


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/36b94648fef8_add_timestamp.py

# Copyright 2016 Catalyst IT
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""add timestamp

Revision ID: 36b94648fef8
Revises: 4d9cf7d32f2
Create Date: 2016-04-21 10:45:32.278433

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '36b94648fef8'
down_revision = '4d9cf7d32f2'

tables = ['member', 'pool', 'load_balancer', 'listener']


def upgrade():
    for table in tables:
        op.add_column(
            table,
            sa.Column(u'created_at', sa.DateTime(), nullable=True)
        )
        op.add_column(
            table,
            sa.Column(u'updated_at', sa.DateTime(), nullable=True)
        )


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/392fb85b4419_add_primary_key_to_spares_pool.py

# Copyright 2016 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""add primary key to spares_pool

Revision ID: 392fb85b4419
Revises: 46d914b2a5e5
Create Date: 2019-04-30 09:58:54.159823

"""

from alembic import op
from sqlalchemy.engine import reflection

from oslo_log import log as logging

# revision identifiers, used by Alembic.
revision = '392fb85b4419'
down_revision = '46d914b2a5e5'

LOG = logging.getLogger(__name__)


def upgrade():
    bind = op.get_bind()
    inspector = reflection.Inspector.from_engine(bind.engine)
    pk = inspector.get_pk_constraint('spares_pool')
    if not pk['constrained_columns']:
        op.create_primary_key(
            u'pk_spares_pool', u'spares_pool', [u'updated_at'])
    else:
        # Revision '46d914b2a5e5' has been updated to create the missing PK.
        # Depending on whether the env is already deployed or not, we may or
        # may not have to add the primary key here.
        LOG.info("The primary key in spares_pool already exists, continuing.")


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/3a1e1cdb7b27_rename_amphora_host_id.py

# Copyright 2014 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""rename amphora host id

Revision ID: 3a1e1cdb7b27
Revises: 4faaa983e7a9
Create Date: 2015-01-10 02:01:04.997336

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '3a1e1cdb7b27'
down_revision = '4faaa983e7a9'


def upgrade():
    op.alter_column(u'amphora', u'host_id', new_column_name='compute_id',
                    existing_type=sa.String(36), nullable=True)


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/3b199c848b96_create_no_monitor_operational_status.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Create NO_MONITOR operational_status

Revision ID: 3b199c848b96
Revises: 543f5d8e4e56
Create Date: 2015-09-03 17:11:03.724070

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '3b199c848b96'
down_revision = '543f5d8e4e56'


def upgrade():
    bind = op.get_bind()
    md = sa.MetaData()
    sa.Table('operating_status', md, autoload=True, autoload_with=bind)
    op.bulk_insert(md.tables['operating_status'], [{'name': 'NO_MONITOR'}])


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/3e5b37a0bdb9_add_vrrp_ip_and_ha_ip_to_amphora.py

# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Add vrrp_ip and ha_ip to amphora

Revision ID: 3e5b37a0bdb9
Revises: 92fe9857279
Create Date: 2015-03-24 18:17:36.998604

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
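# vrrp_ip and ha_ip are declared as String(64) rather than String(36),
# which leaves room for IPv6 as well as IPv4 addresses.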
revision = '3e5b37a0bdb9'
down_revision = '92fe9857279'


def upgrade():
    op.add_column(u'amphora', sa.Column(u'vrrp_ip', sa.String(64),
                                        nullable=True))
    op.add_column(u'amphora', sa.Column(u'ha_ip', sa.String(64),
                                        nullable=True))


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/3f8ff3be828e_create_quotas_table.py

# Copyright 2016 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""create quotas table

Revision ID: 3f8ff3be828e
Revises: 44a2414dd683
Create Date: 2016-09-01 13:59:20.723621

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '3f8ff3be828e'
down_revision = '44a2414dd683'


def upgrade():
    op.create_table(
        u'quotas',
        sa.Column(u'project_id', sa.String(36), primary_key=True,
                  nullable=False),
        sa.Column(u'health_monitor', sa.Integer(), nullable=True),
        sa.Column(u'load_balancer', sa.Integer(), nullable=True),
        sa.Column(u'listener', sa.Integer(), nullable=True),
        sa.Column(u'member', sa.Integer(), nullable=True),
        sa.Column(u'pool', sa.Integer(), nullable=True),
        sa.Column(u'in_use_health_monitor', sa.Integer(), nullable=True),
        sa.Column(u'in_use_load_balancer', sa.Integer(), nullable=True),
        sa.Column(u'in_use_listener', sa.Integer(), nullable=True),
        sa.Column(u'in_use_member', sa.Integer(), nullable=True),
        sa.Column(u'in_use_pool', sa.Integer(), nullable=True),
    )


octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/43287cd10fef_make_pool_lb_algorithm_larger.py

# Copyright 2016 Blue Box, an IBM Company
# Licensed under the Apache License, Version 2.0 (the "License");
# full text at http://www.apache.org/licenses/LICENSE-2.0

"""Make pool.lb_algorithm larger

Revision ID: 43287cd10fef
Revises: 6abb04f24c5
Create Date: 2016-01-14 10:05:27.803518

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
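# Some backends (notably MySQL) refuse to alter a column that participates
# in a foreign key, so the upgrade below drops fk_pool_algorithm_name,
# widens both sides of the relationship, and then recreates the constraint.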
revision = '43287cd10fef' down_revision = '6abb04f24c5' def upgrade(): op.drop_constraint( u'fk_pool_algorithm_name', u'pool', type_=u'foreignkey' ) op.alter_column(u'algorithm', u'name', nullable=False, existing_type=sa.String(255)) op.alter_column(u'pool', u'lb_algorithm', nullable=False, existing_type=sa.String(255)) op.create_foreign_key( u'fk_pool_algorithm_name', u'pool', u'algorithm', [u'lb_algorithm'], [u'name'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/443fe6676637_add_network_id_to_vip.py0000664000175000017500000000174500000000000033312 0ustar00zuulzuul00000000000000# Copyright 2017 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add a column network_id in table vip Revision ID: 443fe6676637 Revises: 3f8ff3be828e Create Date: 2017-02-06 15:21:25.637744 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '443fe6676637' down_revision = '3f8ff3be828e' def upgrade(): op.add_column(u'vip', sa.Column(u'network_id', sa.String(36), nullable=True)) ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/44a2414dd683_adding_name_column_to_member_and_health_.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/44a2414dd683_adding_name_column_to_me0000664000175000017500000000210500000000000033341 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """adding name column to member and health monitor Revision ID: 44a2414dd683 Revises: c11292016060 Create Date: 2016-12-19 13:14:58.879793 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '44a2414dd683' down_revision = 'c11292016060' tables = ['member', 'health_monitor'] def upgrade(): for table in tables: op.add_column( table, sa.Column(u'name', sa.String(255), nullable=True) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/458c9ee2a011_l7_policies_and_rules.py0000664000175000017500000001154600000000000033337 0ustar00zuulzuul00000000000000# Copyright 2015 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """L7 Policies and Rules Revision ID: 458c9ee2a011 Revises: 29ff921a6eb Create Date: 2016-01-07 11:45:45.391851 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. revision = '458c9ee2a011' down_revision = '29ff921a6eb' def upgrade(): # L7 Rule Types op.create_table( u'l7rule_type', sa.Column(u'name', sa.String(36), primary_key=True), sa.Column(u'description', sa.String(255), nullable=True) ) # Create temporary table for table data seeding insert_table = sql.table( u'l7rule_type', sql.column(u'name', sa.String), sql.column(u'description', sa.String) ) op.bulk_insert( insert_table, [ {'name': 'HOST_NAME'}, {'name': 'PATH'}, {'name': 'FILE_TYPE'}, {'name': 'HEADER'}, {'name': 'COOKIE'} ] ) # L7 Rule Compare Types op.create_table( u'l7rule_compare_type', sa.Column(u'name', sa.String(36), primary_key=True), sa.Column(u'description', sa.String(255), nullable=True) ) insert_table = sql.table( u'l7rule_compare_type', sql.column(u'name', sa.String), sql.column(u'description', sa.String) ) op.bulk_insert( insert_table, [ {'name': 'REGEX'}, {'name': 'STARTS_WITH'}, {'name': 'ENDS_WITH'}, {'name': 'CONTAINS'}, {'name': 'EQUAL_TO'} ] ) # L7 Policy Actions op.create_table( u'l7policy_action', sa.Column(u'name', sa.String(36), primary_key=True), sa.Column(u'description', sa.String(255), nullable=True) ) insert_table = sql.table( u'l7policy_action', sql.column(u'name', sa.String), sql.column(u'description', sa.String) ) op.bulk_insert( insert_table, [ {'name': 'REJECT'}, {'name': 'REDIRECT_TO_URL'}, {'name': 'REDIRECT_TO_POOL'} ] ) # L7 Policies op.create_table( u'l7policy', sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=True), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'listener_id', sa.String(36), nullable=False), sa.Column(u'action', sa.String(36), nullable=False), sa.Column(u'redirect_pool_id', sa.String(36), nullable=True), sa.Column(u'redirect_url', sa.String(255), nullable=True), sa.Column(u'position', sa.Integer, nullable=False), sa.Column(u'enabled', sa.Boolean(), default=True, nullable=False), sa.PrimaryKeyConstraint(u'id'), sa.ForeignKeyConstraint([u'listener_id'], [u'listener.id'], name=u'fk_l7policy_listener_id'), sa.ForeignKeyConstraint([u'redirect_pool_id'], [u'pool.id'], name=u'fk_l7policy_pool_id'), sa.ForeignKeyConstraint([u'action'], [u'l7policy_action.name'], name=u'fk_l7policy_l7policy_action_name') ) # L7 Rules op.create_table( u'l7rule', sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'l7policy_id', sa.String(36), nullable=False), sa.Column(u'type', sa.String(36), nullable=False), sa.Column(u'compare_type', sa.String(36), nullable=False), sa.Column(u'key', sa.String(255), nullable=True), sa.Column(u'value', sa.String(255), nullable=False), sa.Column(u'invert', sa.Boolean(), default=False, nullable=False), sa.PrimaryKeyConstraint(u'id'), sa.ForeignKeyConstraint([u'l7policy_id'], [u'l7policy.id'], name=u'fk_l7rule_l7policy_id'), sa.ForeignKeyConstraint([u'type'], [u'l7rule_type.name'], name=u'fk_l7rule_l7rule_type_name'), 
sa.ForeignKeyConstraint([u'compare_type'], [u'l7rule_compare_type.name'], name=u'fk_l7rule_l7rule_compare_type_name') ) ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/46d914b2a5e5_seed_the_spares_pool_table.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/46d914b2a5e5_seed_the_spares_pool_tab0000664000175000017500000000236300000000000033460 0ustar00zuulzuul00000000000000# Copyright 2019 Michael Johnson # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Seed the spares_pool table Revision ID: 46d914b2a5e5 Revises: 6ffc710674ef Create Date: 2019-04-03 14:03:25.596157 """ import datetime from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '46d914b2a5e5' down_revision = '6ffc710674ef' def upgrade(): # Create temporary table for table data seeding insert_table = sa.table( u'spares_pool', sa.column(u'updated_at', sa.DateTime), ) # Note: The date/time doesn't matter, we just need to seed the table. op.bulk_insert( insert_table, [ {'updated_at': datetime.datetime.now()} ] ) ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_ampho0000664000175000017500000000233000000000000033317 0ustar00zuulzuul00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add new states for amphora Revision ID: 48660b6643f0 Revises: 3e5b37a0bdb9 Create Date: 2015-01-20 13:31:30.017959 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. 
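# The upgrade below seeds an existing lookup table by building a
# lightweight sql.table() stub and handing it to op.bulk_insert().
# A minimal sketch of the same pattern as a reusable helper
# (hypothetical; not called by this migration):
def _seed_lookup_table(table_name, names):
    """Bulk-insert name rows into a name/description lookup table."""
    stub = sql.table(
        table_name,
        sql.column(u'name', sa.String),
        sql.column(u'description', sa.String)
    )
    op.bulk_insert(stub, [{'name': n} for n in names])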
revision = '48660b6643f0' down_revision = '3e5b37a0bdb9' def upgrade(): insert_table = sql.table( u'provisioning_status', sql.column(u'name', sa.String), sql.column(u'description', sa.String) ) op.bulk_insert( insert_table, [ {'name': 'READY'}, {'name': 'BOOTING'}, {'name': 'ALLOCATED'} ] ) ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4a6ec0ab7284_remove_fk_constraints_on_listener_.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4a6ec0ab7284_remove_fk_constraints_on0000664000175000017500000000250100000000000033611 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove FK constraints on listener_statistics because it will be cross-DB Revision ID: 4a6ec0ab7284 Revises: 62816c232310 Create Date: 2016-07-05 14:09:16.320931 """ from alembic import op # revision identifiers, used by Alembic. revision = '4a6ec0ab7284' down_revision = '62816c232310' def upgrade(): # OpenStack has decided that "down" migrations are not supported. # The downgrade() method has been omitted for this reason. op.drop_constraint('fk_listener_statistics_listener_id', 'listener_statistics', type_='foreignkey') op.drop_constraint('fk_listener_statistic_amphora_id', 'listener_statistics', type_='foreignkey') ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4aeb9e23ad43_add_draining_operating_status.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4aeb9e23ad43_add_draining_operating_s0000664000175000017500000000207400000000000033573 0ustar00zuulzuul00000000000000# Copyright 2017 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add DRAINING operating status Revision ID: 4aeb9e23ad43 Revises: e6672bda93bf Create Date: 2017-07-27 00:54:07.128617 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
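# Rather than declaring a sql.table() stub, the upgrade below reflects
# the live table definition from the database before seeding it, using
# the SQLAlchemy 1.x reflection API (autoload/autoload_with). A
# hypothetical helper showing the same idea:
def _reflect_table(name):
    """Reflect an existing table over the migration connection."""
    bind = op.get_bind()
    md = sa.MetaData()
    return sa.Table(name, md, autoload=True, autoload_with=bind)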
revision = '4aeb9e23ad43' down_revision = 'e6672bda93bf' def upgrade(): bind = op.get_bind() md = sa.MetaData() sa.Table('operating_status', md, autoload=True, autoload_with=bind) op.bulk_insert(md.tables['operating_status'], [{'name': 'DRAINING'}]) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4c094013699a_update_load_balancer_amphora.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4c094013699a_update_load_balancer_amp0000664000175000017500000000270300000000000033250 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. '''update load balancer amphora relationship Revision ID: 4c094013699a Revises: 35dee79d5865 Create Date: 2014-09-15 14:42:44.875448 ''' from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '4c094013699a' down_revision = '35dee79d5865' def upgrade(): op.add_column( u'amphora', sa.Column(u'load_balancer_id', sa.String(36), sa.ForeignKey(u'load_balancer.id', name=u'fk_amphora_load_balancer_id'), nullable=True) ) op.drop_table(u'load_balancer_amphora') op.drop_constraint( u'fk_container_provisioning_status_name', u'amphora', type_=u'foreignkey' ) op.create_foreign_key( u'fk_amphora_provisioning_status_name', u'amphora', u'provisioning_status', [u'status'], [u'name'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4d9cf7d32f2_insert_headers.py0000664000175000017500000000167000000000000032074 0ustar00zuulzuul00000000000000# Copyright 2016 VMware # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Insert headers Revision ID: 4d9cf7d32f2 Revises: 9bf4d21caaea Create Date: 2016-02-21 17:16:22.316744 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '4d9cf7d32f2' down_revision = '9bf4d21caaea' def upgrade(): op.add_column('listener', sa.Column('insert_headers', sa.PickleType())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4f65b4f91c39_amphora_add_flavor_id.py0000664000175000017500000000202100000000000033360 0ustar00zuulzuul00000000000000# Copyright (c) 2018 China Telecom Corporation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """amphora add flavor id Revision ID: 4f65b4f91c39 Revises: 80dba23a159f Create Date: 2018-07-16 09:59:07.169894 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '4f65b4f91c39' down_revision = '80dba23a159f' def upgrade(): op.add_column( u'amphora', sa.Column(u'compute_flavor', sa.String(255), nullable=True) ) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4faaa983e7a9_update_member_address_column.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4faaa983e7a9_update_member_address_co0000664000175000017500000000177200000000000033615 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update member address column Revision ID: 4faaa983e7a9 Revises: 13500e2e978d Create Date: 2014-09-29 11:22:16.565071 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '4faaa983e7a9' down_revision = '13500e2e978d' def upgrade(): op.alter_column(u'member', u'address', new_column_name=u'ip_address', existing_type=sa.String(64)) ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4fe8240425b4_update_vip_add_subnet_id.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/4fe8240425b4_update_vip_add_subnet_id0000664000175000017500000000206300000000000033363 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update vip add subnet id Revision ID: 4fe8240425b4 Revises: 48660b6643f0 Create Date: 2015-07-01 14:27:44.187179 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
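# The rename below runs under op.batch_alter_table(), which emits a
# plain ALTER TABLE on MySQL but falls back to a create/copy/drop table
# rebuild on backends such as SQLite that cannot alter columns in
# place. A sketch of the idiom with hypothetical names:
def _batch_rename_sketch():
    with op.batch_alter_table(u'example_table') as batch_op:
        batch_op.alter_column(u'old_name', new_column_name=u'new_name',
                              existing_type=sa.String(36))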
revision = '4fe8240425b4' down_revision = '48660b6643f0' def upgrade(): with op.batch_alter_table(u'vip') as batch_op: batch_op.alter_column(u'network_id', new_column_name=u'subnet_id', existing_type=sa.String(36)) ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/52377704420e_add_timestamps_to_healthmonitor.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/52377704420e_add_timestamps_to_health0000664000175000017500000000331100000000000033241 0ustar00zuulzuul00000000000000# Copyright 2017 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add timestamps and operating_status to healthmonitor Revision ID: 52377704420e Revises: d85ca7258d21 Create Date: 2017-04-13 08:58:18.078170 """ from alembic import op import sqlalchemy as sa from octavia.common import constants # revision identifiers, used by Alembic. revision = '52377704420e' down_revision = 'd85ca7258d21' def upgrade(): op.add_column( u'health_monitor', sa.Column(u'created_at', sa.DateTime(), nullable=True) ) op.add_column( u'health_monitor', sa.Column(u'updated_at', sa.DateTime(), nullable=True) ) op.add_column(u'health_monitor', sa.Column(u'operating_status', sa.String(16), nullable=False, server_default=constants.ONLINE) ) op.alter_column(u'health_monitor', u'operating_status', existing_type=sa.String(16), server_default=None) op.create_foreign_key( u'fk_health_monitor_operating_status_name', u'health_monitor', u'operating_status', [u'operating_status'], [u'name'] ) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/5309960964f8_add_proxy_protocol_for_pool.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/5309960964f8_add_proxy_protocol_for_p0000664000175000017500000000225400000000000033343 0ustar00zuulzuul00000000000000# Copyright 2017 EayunStack, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add proxy protocol for pool Revision ID: 5309960964f8 Revises: 52377704420e Create Date: 2017-04-27 01:13:38.064697 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. 
revision = '5309960964f8' down_revision = '52377704420e' new_protocol = 'PROXY' def upgrade(): insert_table = sql.table( u'protocol', sql.column(u'name', sa.String), sql.column(u'description', sa.String) ) op.bulk_insert( insert_table, [ {'name': new_protocol} ] ) ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/543f5d8e4e56_add_a_column_busy_in_table_amphora_health.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/543f5d8e4e56_add_a_column_busy_in_tab0000664000175000017500000000202100000000000033432 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add a column busy in table amphora health Revision ID: 543f5d8e4e56 Revises: 2351ea316465 Create Date: 2015-07-27 11:32:16.685383 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '543f5d8e4e56' down_revision = '2351ea316465' def upgrade(): op.add_column(u'amphora_health', sa.Column(u'busy', sa.Boolean(), nullable=False)) ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redi0000664000175000017500000000237500000000000033452 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add l7policy action redirect prefix Revision ID: 55874a4ceed6 Revises: 76aacf2e176c Create Date: 2018-09-09 20:35:38.780054 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. 
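# The revision below combines a schema change (op.add_column) with data
# seeding (op.bulk_insert) in a single upgrade(); Alembic simply runs
# the operations in order. A minimal sketch of the combined pattern
# (hypothetical table and value names):
def _add_and_seed_sketch():
    op.add_column(u'example',
                  sa.Column(u'note', sa.String(255), nullable=True))
    op.bulk_insert(
        sql.table(u'example_lookup', sql.column(u'name', sa.String)),
        [{'name': 'NEW_VALUE'}]
    )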
revision = '55874a4ceed6' down_revision = '76aacf2e176c' def upgrade(): # Add column redirect_prefix op.add_column( u'l7policy', sa.Column(u'redirect_prefix', sa.String(255), nullable=True) ) insert_table = sql.table( u'l7policy_action', sql.column(u'name', sa.String), sql.column(u'description', sa.String) ) op.bulk_insert( insert_table, [ {'name': 'REDIRECT_PREFIX'} ] ) ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__inf0000664000175000017500000000233700000000000033443 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add cert expiration info in amphora table Revision ID: 5a3ee5472c31 Revises: 3b199c848b96 Create Date: 2015-08-20 10:15:19.561066 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '5a3ee5472c31' down_revision = '3b199c848b96' def upgrade(): op.add_column(u'amphora', sa.Column(u'cert_expiration', sa.DateTime(timezone=True), nullable=True) ) op.add_column(u'amphora', sa.Column(u'cert_busy', sa.Boolean(), nullable=False, default=False)) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/62816c232310_fix_migration_for_mysql_5_7.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/62816c232310_fix_migration_for_mysql_0000664000175000017500000000173500000000000033304 0ustar00zuulzuul00000000000000# Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix migration for MySQL 5.7 Revision ID: 62816c232310 Revises: 36b94648fef8 Create Date: 2016-06-07 12:59:21.059619 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.
revision = '62816c232310' down_revision = '36b94648fef8' def upgrade(): op.alter_column(u'sni', u'tls_container_id', type_=sa.String(128), existing_type=sa.String(36), nullable=False) ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_ht0000664000175000017500000000173400000000000033363 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add L7policy Redirect http code Revision ID: 6742ca1b27c2 Revises: a7f187cd221f Create Date: 2018-12-13 09:35:38.780054 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '6742ca1b27c2' down_revision = 'a7f187cd221f' def upgrade(): # Add column redirect_http_code op.add_column( u'l7policy', sa.Column(u'redirect_http_code', sa.Integer(), nullable=True) ) ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/6abb04f24c5_tenant_id_to_project_id.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/6abb04f24c5_tenant_id_to_project_id.p0000664000175000017500000000266300000000000033541 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tenant id to project id Revision ID: 6abb04f24c5 Revises: 1e4c1d83044c Create Date: 2015-12-03 15:22:25.390595 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.
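# The tenant_id -> project_id rename below is spelled out once per
# table; an equivalent, behaviour-preserving loop over the same four
# tables would look like this (sketch only):
def _rename_tenant_id_sketch():
    for table in ('load_balancer', 'listener', 'pool', 'member'):
        op.alter_column(table, 'tenant_id',
                        new_column_name='project_id',
                        existing_type=sa.String(36))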
revision = '6abb04f24c5' down_revision = '1e4c1d83044c' def upgrade(): op.alter_column('load_balancer', 'tenant_id', new_column_name='project_id', existing_type=sa.String(36)) op.alter_column('listener', 'tenant_id', new_column_name='project_id', existing_type=sa.String(36)) op.alter_column('pool', 'tenant_id', new_column_name='project_id', existing_type=sa.String(36)) op.alter_column('member', 'tenant_id', new_column_name='project_id', existing_type=sa.String(36)) op.add_column('health_monitor', sa.Column('project_id', sa.String(36))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/6ffc710674ef_spares_pool_table.py0000664000175000017500000000205000000000000032652 0ustar00zuulzuul00000000000000# Copyright 2019 Michael Johnson # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Spares pool table Revision ID: 6ffc710674ef Revises: 7432f1d4ea83 Create Date: 2019-03-11 10:45:43.296236 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '6ffc710674ef' down_revision = '7432f1d4ea83' def upgrade(): op.create_table( u'spares_pool', sa.Column(u'updated_at', sa.DateTime(), primary_key=True, server_default=sa.func.current_timestamp())) ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/7432f1d4ea83_add_http_host_head_inject_for_http_health_check.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/7432f1d4ea83_add_http_host_head_injec0000664000175000017500000000206100000000000033414 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add http host head inject for http health check Revision ID: 7432f1d4ea83 Revises: 6742ca1b27c2 Create Date: 2018-09-09 20:35:38.780054 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.
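# Note: the upgrade below stores http_version as a Float, presumably so
# versions such as 1.0 and 1.1 compare numerically, and adds
# domain_name to carry the Host header injected into HTTP health checks
# (per this revision's file name). Both columns are nullable, so
# existing health_monitor rows need no backfill.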
revision = '7432f1d4ea83' down_revision = '6742ca1b27c2' def upgrade(): op.add_column( u'health_monitor', sa.Column(u'http_version', sa.Float(), nullable=True) ) op.add_column( u'health_monitor', sa.Column(u'domain_name', sa.String(255), nullable=True) ) ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/74aae261694c_extend_pool_for_backend_ca_and_crl.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/74aae261694c_extend_pool_for_backend_0000664000175000017500000000224500000000000033440 0ustar00zuulzuul00000000000000# Copyright 2019 Rackspace US Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """extend pool for backend CA and CRL Revision ID: 74aae261694c Revises: a1f689aecc1d Create Date: 2019-02-27 09:22:24.779576 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '74aae261694c' down_revision = 'a1f689aecc1d' def upgrade(): op.add_column(u'pool', sa.Column(u'ca_tls_certificate_id', sa.String(255), nullable=True)) op.add_column(u'pool', sa.Column(u'crl_container_id', sa.String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_protocol.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_proto0000664000175000017500000000356100000000000033763 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Extend the fields necessary for UDP support Revision ID: 76aacf2e176c Revises: ebbcc72b4e5e Create Date: 2018-01-01 20:47:52.405865 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. revision = '76aacf2e176c' down_revision = 'ebbcc72b4e5e' tables = [u'protocol', u'health_monitor_type'] new_fields = ['UDP', 'UDP-CONNECT'] def upgrade(): # New UDP protocol addition. # New UDP-CONNECT health monitor type addition.
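    # tables and new_fields are parallel module-level lists, so zip()
    # pairs 'protocol' with 'UDP' and 'health_monitor_type' with
    # 'UDP-CONNECT'; the loop below builds a stub table and bulk-inserts
    # one row for each pair.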
for table, new_field in zip(tables, new_fields): insert_table = sql.table( table, sql.column(u'name', sa.String), sql.column(u'description', sa.String) ) op.bulk_insert( insert_table, [ {'name': new_field} ] ) # Add two new columns to the session_persistence table op.add_column('session_persistence', sa.Column('persistence_timeout', sa.Integer(), nullable=True, server_default=None)) op.add_column('session_persistence', sa.Column('persistence_granularity', sa.String(length=64), nullable=True, server_default=None)) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/7c36b277bfb0_add_listener_ciphers_column.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/7c36b277bfb0_add_listener_ciphers_col0000664000175000017500000000176100000000000033533 0ustar00zuulzuul00000000000000# Copyright 2020 Dawson Coleman # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add listener ciphers column Revision ID: 7c36b277bfb0 Revises: 8ac4ed24df3a Create Date: 2020-03-11 02:23:49.097485 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '7c36b277bfb0' down_revision = '8ac4ed24df3a' def upgrade(): op.add_column( 'listener', sa.Column('tls_ciphers', sa.String(2048), nullable=True) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/80dba23a159f_tags_support.py0000664000175000017500000000215000000000000031671 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """tags_support Revision ID: 80dba23a159f Revises: 55874a4ceed6 Create Date: 2018-10-15 15:29:27.258640 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.
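# The tags table created below uses a composite primary key
# (resource_id, tag), so a tag cannot be attached twice to the same
# resource, while the extra index on tag speeds reverse lookups. A
# hypothetical read-side sketch ("which resources carry this tag?"):
def _resources_with_tag(connection, wanted_tag):
    tags = sa.table(u'tags',
                    sa.column(u'resource_id', sa.String),
                    sa.column(u'tag', sa.String))
    query = sa.select([tags.c.resource_id]).where(tags.c.tag == wanted_tag)
    return connection.execute(query)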
revision = '80dba23a159f' down_revision = '55874a4ceed6' def upgrade(): op.create_table( u'tags', sa.Column(u'resource_id', sa.String(36), primary_key=True, nullable=False), sa.Column(u'tag', sa.String(255), primary_key=True, nullable=False, index=True), ) ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/82b9402e71fd_update_vip_address_size.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/82b9402e71fd_update_vip_address_size.0000664000175000017500000000172600000000000033425 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Update vip address size Revision ID: 82b9402e71fd Revises: 4a6ec0ab7284 Create Date: 2016-07-17 14:36:36.698870 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '82b9402e71fd' down_revision = '4a6ec0ab7284' def upgrade(): op.alter_column(u'vip', u'ip_address', existing_type=sa.String(64)) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to_lb.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to0000664000175000017500000000231300000000000033673 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add availability_zone to lb Revision ID: 8ac4ed24df3a Revises: c761c8a71579 Create Date: 2019-11-13 08:37:39.392163 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8ac4ed24df3a' down_revision = 'c761c8a71579' def upgrade(): op.add_column(u'load_balancer', sa.Column(u'availability_zone', sa.String(255), nullable=True) ) op.create_foreign_key( u'fk_load_balancer_availability_zone_name', u'load_balancer', u'availability_zone', [u'availability_zone'], [u'name'] ) ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/8c0851bdf6c3_change_tls_container_id_length_in_sni_.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/8c0851bdf6c3_change_tls_container_id_0000664000175000017500000000202300000000000033502 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett-Packard Development Company, L.P.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """change_tls_container_id_length_in_sni_table Revision ID: 8c0851bdf6c3 Revises: 186509101b9b Create Date: 2016-03-23 19:08:53.148812 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8c0851bdf6c3' down_revision = '186509101b9b' def upgrade(): op.alter_column(u'sni', u'tls_container_id', type_=sa.String(128), existing_type=sa.String(36), nullable=False) ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/92fe9857279_create_healthmanager_table.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/92fe9857279_create_healthmanager_tabl0000664000175000017500000000222300000000000033371 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """create healthmanager table Revision ID: 92fe9857279 Revises: 256852d5ff7c Create Date: 2015-01-22 16:58:23.440247 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '92fe9857279' down_revision = '256852d5ff7c' def upgrade(): op.create_table( u'amphora_health', sa.Column(u'amphora_id', sa.String(36), nullable=False, primary_key=True), sa.Column(u'last_update', sa.DateTime(timezone=True), nullable=False) ) ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/9b5473976d6d_add_provisioning_status_to_objects.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/9b5473976d6d_add_provisioning_status_0000664000175000017500000000513200000000000033503 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Add provisioning_status to objects Revision ID: 9b5473976d6d Revises: 82b9402e71fd Create Date: 2016-09-20 21:46:26.843695 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '9b5473976d6d' down_revision = '82b9402e71fd' def upgrade(): op.add_column('health_monitor', sa.Column('provisioning_status', sa.String(16), nullable=True) ) op.create_foreign_key( u'fk_health_monitor_provisioning_status_name', u'health_monitor', u'provisioning_status', [u'provisioning_status'], [u'name'] ) op.add_column('l7policy', sa.Column('provisioning_status', sa.String(16), nullable=True) ) op.create_foreign_key( u'fk_l7policy_provisioning_status_name', u'l7policy', u'provisioning_status', [u'provisioning_status'], [u'name'] ) op.add_column('l7rule', sa.Column('provisioning_status', sa.String(16), nullable=True) ) op.create_foreign_key( u'fk_l7rule_provisioning_status_name', u'l7rule', u'provisioning_status', [u'provisioning_status'], [u'name'] ) op.add_column('member', sa.Column('provisioning_status', sa.String(16), nullable=True) ) op.create_foreign_key( u'fk_member_provisioning_status_name', u'member', u'provisioning_status', [u'provisioning_status'], [u'name'] ) op.add_column('pool', sa.Column('provisioning_status', sa.String(16), nullable=True) ) op.create_foreign_key( u'fk_pool_provisioning_status_name', u'pool', u'provisioning_status', [u'provisioning_status'], [u'name'] ) ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/9bf4d21caaea_adding_amphora_id_to_listener_.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/9bf4d21caaea_adding_amphora_id_to_lis0000664000175000017500000000364600000000000033722 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """adding Amphora ID to listener_statistics table Revision ID: 9bf4d21caaea Revises: 8c0851bdf6c3 Create Date: 2016-05-02 07:50:12.888263 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '9bf4d21caaea' down_revision = '8c0851bdf6c3' def upgrade(): op.add_column('listener_statistics', sa.Column('amphora_id', sa.String(36), nullable=False) ) op.drop_constraint('fk_listener_statistics_listener_id', 'listener_statistics', type_='foreignkey') op.drop_constraint('PRIMARY', 'listener_statistics', type_='primary') op.create_primary_key('pk_listener_statistics', 'listener_statistics', ['listener_id', 'amphora_id']) op.create_foreign_key('fk_listener_statistics_listener_id', 'listener_statistics', 'listener', ['listener_id'], ['id']) op.create_foreign_key('fk_listener_statistic_amphora_id', 'listener_statistics', 'amphora', ['amphora_id'], ['id']) ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/a1f689aecc1d_extend_pool_for_support_backend_reencryption.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/a1f689aecc1d_extend_pool_for_support_0000664000175000017500000000201600000000000034000 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Extend pool for support backend re-encryption Revision ID: a1f689aecc1d Revises: 1afc932f1ca2 Create Date: 2018-10-23 20:47:52.405865 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'a1f689aecc1d' down_revision = '1afc932f1ca2' def upgrade(): op.add_column(u'pool', sa.Column(u'tls_certificate_id', sa.String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/a5762a99609a_add_protocol_in_listener_keys.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/a5762a99609a_add_protocol_in_listener0000664000175000017500000000221000000000000033427 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add protocol in listener keys Revision ID: a5762a99609a Revises: 392fb85b4419 Create Date: 2019-06-28 14:02:11.415292 """ from alembic import op # revision identifiers, used by Alembic. 
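# The upgrade below issues raw MySQL DDL so the unique key is dropped
# and recreated within a single atomic ALTER TABLE. A hypothetical,
# backend-neutral (but two-step, non-atomic) equivalent would be:
def _portable_unique_key_sketch():
    op.drop_constraint(u'uq_listener_load_balancer_id_protocol_port',
                       u'listener', type_=u'unique')
    op.create_unique_constraint(
        u'uq_listener_load_balancer_id_protocol_port', u'listener',
        [u'load_balancer_id', u'protocol', u'protocol_port'])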
revision = 'a5762a99609a' down_revision = '392fb85b4419' def upgrade(): op.execute("ALTER TABLE `listener` " "DROP INDEX `uq_listener_load_balancer_id_protocol_port`, " "ADD UNIQUE KEY " "`uq_listener_load_balancer_id_protocol_port` " "(`load_balancer_id`, `protocol`, `protocol_port`)") ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/a7f187cd221f_add_tls_boolean_type_for_reencryption.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/a7f187cd221f_add_tls_boolean_type_for0000664000175000017500000000212600000000000033541 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add tls boolean type for backend re-encryption Revision ID: a7f187cd221f Revises: 74aae261694c Create Date: 2018-11-01 20:47:52.405865 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'a7f187cd221f' down_revision = '74aae261694c' def upgrade(): op.add_column(u'pool', sa.Column(u'tls_enabled', sa.Boolean(), server_default=sa.sql.expression.false(), nullable=False)) ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/b9c703669314_add_flavor_and_flavor_profile_table.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/b9c703669314_add_flavor_and_flavor_pr0000664000175000017500000000360500000000000033306 0ustar00zuulzuul00000000000000# Copyright 2017 Walmart Stores Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add flavor and flavor_profile table Revision ID: b9c703669314 Revises: 4f65b4f91c39 Create Date: 2018-01-02 16:05:29.745457 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
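# In the upgrade below, flavor_profile is created before flavor because
# flavor declares fk_flavor_flavor_profile_id; a referenced table must
# exist before any foreign key that points at it.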
revision = 'b9c703669314' down_revision = '4f65b4f91c39' def upgrade(): op.create_table( u'flavor_profile', sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=False), sa.Column(u'provider_name', sa.String(255), nullable=False), sa.Column(u'flavor_data', sa.String(4096), nullable=False), sa.PrimaryKeyConstraint(u'id')) op.create_table( u'flavor', sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=False), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'enabled', sa.Boolean(), nullable=False), sa.Column(u'flavor_profile_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint([u'flavor_profile_id'], [u'flavor_profile.id'], name=u'fk_flavor_flavor_profile_id'), sa.PrimaryKeyConstraint(u'id'), sa.UniqueConstraint(u'name', name=u'uq_flavor_name'),) ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/ba35e0fb88e1_add_backup_field_to_member.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/ba35e0fb88e1_add_backup_field_to_memb0000664000175000017500000000200300000000000033513 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add backup field to member Revision ID: ba35e0fb88e1 Revises: 034756a182a2 Create Date: 2018-03-14 00:46:16.281857 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'ba35e0fb88e1' down_revision = '034756a182a2' def upgrade(): op.add_column(u'member', sa.Column(u'backup', sa.Boolean(), nullable=False, default=False)) ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/bf171d0d91c3_amphora_add_cached_zone.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/bf171d0d91c3_amphora_add_cached_zone.0000664000175000017500000000177300000000000033360 0ustar00zuulzuul00000000000000# Copyright 2017 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add cached_zone to amphora Revision ID: bf171d0d91c3 Revises: 4aeb9e23ad43 Create Date: 2017-10-06 12:07:34.290451 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = 'bf171d0d91c3' down_revision = '4aeb9e23ad43' def upgrade(): op.add_column(u'amphora', sa.Column(u'cached_zone', sa.String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/c11292016060_add_request_errors_for_stats.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/c11292016060_add_request_errors_for_s0000664000175000017500000000206200000000000033263 0ustar00zuulzuul00000000000000# Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """adding request error number to listener_statistics table Revision ID: c11292016060 Revises: 9b5473976d6d Create Date: 2016-08-12 03:37:38.656962 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'c11292016060' down_revision = '9b5473976d6d' def upgrade(): op.add_column('listener_statistics', sa.Column('request_errors', sa.BigInteger(), nullable=False, default=0)) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/c761c8a71579_add_availability_zone_table.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/c761c8a71579_add_availability_zone_ta0000664000175000017500000000461200000000000033376 0ustar00zuulzuul00000000000000# Copyright 2017 Walmart Stores Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add availability_zone table Revision ID: c761c8a71579 Revises: e37941b010db Create Date: 2019-11-11 18:53:15.428386 """ from alembic import op import sqlalchemy as sa from octavia.common import constants # revision identifiers, used by Alembic. 
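# The upgrade below keeps the Table objects returned by
# op.create_table() and passes them straight to op.bulk_insert(),
# avoiding separate sql.table() stubs. It also seeds NIL_UUID
# placeholder rows so that DELETED load balancers keep a valid foreign
# key target for their availability zone.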
revision = 'c761c8a71579' down_revision = 'e37941b010db' def upgrade(): azp_table = op.create_table( u'availability_zone_profile', sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=False), sa.Column(u'provider_name', sa.String(255), nullable=False), sa.Column(u'availability_zone_data', sa.String(4096), nullable=False), sa.PrimaryKeyConstraint(u'id')) op.bulk_insert( azp_table, [ {'id': constants.NIL_UUID, 'name': 'DELETED-PLACEHOLDER', 'provider_name': 'DELETED', 'availability_zone_data': '{}'}, ] ) az_table = op.create_table( u'availability_zone', sa.Column(u'name', sa.String(255), nullable=False), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'enabled', sa.Boolean(), nullable=False), sa.Column(u'availability_zone_profile_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint([u'availability_zone_profile_id'], [u'availability_zone_profile.id'], name=u'fk_az_az_profile_id'), sa.PrimaryKeyConstraint(u'name'),) op.bulk_insert( az_table, [ {'name': constants.NIL_UUID, 'description': 'Placeholder for DELETED LBs with DELETED ' 'availability zones', 'enabled': False, 'availability_zone_profile_id': constants.NIL_UUID} ] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/d85ca7258d21_modernize_l7rule.py0000664000175000017500000000375000000000000032364 0ustar00zuulzuul00000000000000# Copyright 2017 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """modernize l7rule Revision ID: d85ca7258d21 Revises: 034b2dc2f3e0 Create Date: 2017-04-04 06:26:55.287198 """ from alembic import op import sqlalchemy as sa from octavia.common import constants # revision identifiers, used by Alembic. 
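# The upgrade below adds a NOT NULL column to a table that may already
# hold rows, so it supplies a temporary server_default
# (constants.ONLINE) to populate existing rows and then clears the
# default with a second alter_column. A minimal sketch of the idiom
# (hypothetical table name):
def _not_null_column_sketch():
    op.add_column(u'example',
                  sa.Column(u'state', sa.String(16), nullable=False,
                            server_default=constants.ONLINE))
    op.alter_column(u'example', u'state', existing_type=sa.String(16),
                    server_default=None)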
revision = 'd85ca7258d21' down_revision = '034b2dc2f3e0' def upgrade(): # Add timing data op.add_column( u'l7rule', sa.Column(u'created_at', sa.DateTime(), nullable=True) ) op.add_column( u'l7rule', sa.Column(u'updated_at', sa.DateTime(), nullable=True) ) # Add project_id op.add_column( u'l7rule', sa.Column(u'project_id', sa.String(36), nullable=True) ) # Add enabled op.add_column( u'l7rule', sa.Column(u'enabled', sa.Boolean(), nullable=False) ) # Add new operating_status column, setting existing rows to ONLINE op.add_column( u'l7rule', sa.Column(u'operating_status', sa.String(16), nullable=False, server_default=constants.ONLINE) ) # Remove the default, as we don't actually want one op.alter_column(u'l7rule', u'operating_status', existing_type=sa.String(16), server_default=None) # Add the foreign key for operating_status_name op.create_foreign_key( u'fk_l7rule_operating_status_name', u'l7rule', u'operating_status', [u'operating_status'], [u'name'] ) ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/da371b422669_allowed_cidr_for_listeners.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/da371b422669_allowed_cidr_for_listene0000664000175000017500000000242400000000000033402 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add CIDRs for listeners Revision ID: da371b422669 Revises: a5762a99609a Create Date: 2018-11-22 12:31:39.864238 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'da371b422669' down_revision = 'a5762a99609a' def upgrade(): op.create_table( u'listener_cidr', sa.Column(u'listener_id', sa.String(36), nullable=False), sa.Column(u'cidr', sa.String(64), nullable=False), sa.ForeignKeyConstraint([u'listener_id'], [u'listener.id'], name=u'fk_listener_cidr_listener_id'), sa.PrimaryKeyConstraint(u'listener_id', u'cidr') ) ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/dcf88e59aae4_add_lb_algorithm_source_ip_port.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/dcf88e59aae4_add_lb_algorithm_source_0000664000175000017500000000356500000000000033676 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
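# NOTE: d85ca7258d21 above uses a common Alembic idiom: add the NOT NULL
# column with a temporary server_default so existing rows are backfilled,
# then drop the default so new writers must set the value explicitly. A
# condensed illustration (the 'example' table name is hypothetical):
def _server_default_backfill_sketch():
    op.add_column('example', sa.Column('status', sa.String(16),
                                       nullable=False,
                                       server_default='ONLINE'))
    op.alter_column('example', 'status', existing_type=sa.String(16),
                    server_default=None)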
# """Add LB_ALGORITHM_SOURCE_IP_PORT Revision ID: dcf88e59aae4 Revises: da371b422669 Create Date: 2019-07-23 12:50:49.722003 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'dcf88e59aae4' down_revision = 'da371b422669' def migrate_pools(): conn = op.get_bind() lb_table = sa.sql.table( 'load_balancer', sa.sql.column('id', sa.String), sa.sql.column('provider', sa.String), sa.sql.column('provisioning_status', sa.String)) pool_table = sa.sql.table( 'pool', sa.sql.column('id', sa.String), sa.sql.column('load_balancer_id', sa.String), sa.sql.column('lb_algorithm', sa.String)) j = pool_table.join(lb_table, pool_table.c.load_balancer_id == lb_table.c.id) stmt = sa.select([pool_table.c.id]).select_from(j).where( lb_table.c.provider == 'ovn') result = conn.execute(stmt) for row in result: stmt = pool_table.update().values(lb_algorithm='SOURCE_IP_PORT').where( pool_table.c.id == row[0]) op.execute(stmt) def upgrade(): insert_table = sa.table( u'algorithm', sa.column(u'name', sa.String(255)), ) op.bulk_insert( insert_table, [ {'name': 'SOURCE_IP_PORT'} ] ) migrate_pools() ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/e37941b010db_add_lb_flavor_constraint.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/e37941b010db_add_lb_flavor_constraint0000664000175000017500000000523300000000000033453 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add lb flavor ID constraint Revision ID: e37941b010db Revises: dcf88e59aae4 Create Date: 2019-10-31 10:09:37.869653 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql from octavia.common import constants # revision identifiers, used by Alembic. revision = 'e37941b010db' down_revision = 'dcf88e59aae4' def upgrade(): insert_table = sql.table( u'flavor_profile', sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=False), sa.Column(u'provider_name', sa.String(255), nullable=False), sa.Column(u'flavor_data', sa.String(4096), nullable=False), ) op.bulk_insert( insert_table, [ {'id': constants.NIL_UUID, 'name': 'DELETED-PLACEHOLDER', 'provider_name': 'DELETED', 'flavor_data': '{}'}, ] ) insert_table = sql.table( u'flavor', sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=False), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'enabled', sa.Boolean(), nullable=False), sa.Column(u'flavor_profile_id', sa.String(36), nullable=False), ) op.bulk_insert( insert_table, [ {'id': constants.NIL_UUID, 'name': 'DELETED-PLACEHOLDER', 'description': 'Placeholder for DELETED LBs with DELETED flavors', 'enabled': False, 'flavor_profile_id': constants.NIL_UUID} ] ) # Make sure any existing load balancers with invalid flavor_id # map to a valid flavor. # Note: constant is not used here to not trigger security tool errors. 
op.execute("UPDATE load_balancer LEFT JOIN flavor ON " "load_balancer.flavor_id = flavor.id SET " "load_balancer.flavor_id = " "'00000000-0000-0000-0000-000000000000' WHERE " "flavor.id IS NULL and load_balancer.flavor_id IS NOT NULL") op.create_foreign_key('fk_loadbalancer_flavor_id', 'load_balancer', 'flavor', ['flavor_id'], ['id']) ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/e6672bda93bf_add_ping_and_tlshello_monitor_types.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/e6672bda93bf_add_ping_and_tlshello_mo0000664000175000017500000000227700000000000033605 0ustar00zuulzuul00000000000000# Copyright 2017 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add ping and tls-hello monitor types Revision ID: e6672bda93bf Revises: 27e54d00c3cd Create Date: 2017-06-21 16:13:09.615651 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. revision = 'e6672bda93bf' down_revision = '27e54d00c3cd' def upgrade(): insert_table = sql.table( u'health_monitor_type', sql.column(u'name', sa.String), sql.column(u'description', sa.String) ) op.bulk_insert( insert_table, [ {'name': 'PING'}, {'name': 'TLS-HELLO'} ] ) ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/ebbcc72b4e5e_add_octavia_owned_vip_column_to_vip_.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/ebbcc72b4e5e_add_octavia_owned_vip_co0000664000175000017500000000177600000000000033741 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add Octavia owned VIP column to VIP table Revision ID: ebbcc72b4e5e Revises: 0f242cf02c74 Create Date: 2018-07-09 17:25:30.137527 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = 'ebbcc72b4e5e' down_revision = '0f242cf02c74' def upgrade(): op.add_column( u'vip', sa.Column(u'octavia_owned', sa.Boolean(), nullable=True) ) ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/f21ae3f21adc_add_client_auth_option.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/f21ae3f21adc_add_client_auth_option.p0000664000175000017500000000324000000000000033577 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add Client Auth options Revision ID: f21ae3f21adc Revises: 2ad093f6353f Create Date: 2018-10-01 20:47:52.405865 """ from alembic import op import sqlalchemy as sa from octavia.common import constants # revision identifiers, used by Alembic. revision = 'f21ae3f21adc' down_revision = '2ad093f6353f' def upgrade(): op.create_table( u'client_authentication_mode', sa.Column(u'name', sa.String(10), primary_key=True), ) # Create temporary table for table data seeding insert_table = sa.table( u'client_authentication_mode', sa.column(u'name', sa.String), ) op.bulk_insert( insert_table, [ {'name': constants.CLIENT_AUTH_NONE}, {'name': constants.CLIENT_AUTH_OPTIONAL}, {'name': constants.CLIENT_AUTH_MANDATORY} ] ) op.add_column( u'listener', sa.Column(u'client_authentication', sa.String(10), sa.ForeignKey('client_authentication_mode.name'), server_default=constants.CLIENT_AUTH_NONE, nullable=False) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/fac584114642_.py0000664000175000017500000000331400000000000027064 0ustar00zuulzuul00000000000000# Copyright 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add ID column to Healthmonitor table Revision ID: fac584114642 Revises: fc5582da7d8a Create Date: 2017-02-07 20:47:52.405865 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
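# NOTE: f21ae3f21adc above follows the project's lookup-table pattern:
# create the reference table, seed the allowed values, then add the FK
# column with a seeded value as its server_default. Undoing it would have to
# run in reverse order (hypothetical sketch, not shipped upstream):
def _client_auth_downgrade_sketch():
    op.drop_column('listener', 'client_authentication')  # FK column first
    op.drop_table('client_authentication_mode')          # then the lookup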
revision = 'fac584114642' down_revision = 'fc5582da7d8a' def upgrade(): op.add_column('health_monitor', sa.Column('id', sa.String(length=36), nullable=True, )) op.drop_constraint('fk_health_monitor_pool_id', 'health_monitor', type_='foreignkey',) op.execute("UPDATE health_monitor SET id = pool_id") op.execute("ALTER TABLE health_monitor MODIFY id varchar(36) NOT NULL") op.execute("ALTER TABLE health_monitor DROP PRIMARY KEY," "ADD PRIMARY KEY(id);") op.create_foreign_key('fk_health_monitor_pool_id', 'health_monitor', 'pool', ['pool_id'], ['id']) op.create_index('uq_health_monitor_pool', 'health_monitor', ['pool_id'], unique=True) ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/fbd705961c3a_add_pool_ciphers_column.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/fbd705961c3a_add_pool_ciphers_column.0000664000175000017500000000175100000000000033454 0ustar00zuulzuul00000000000000# Copyright 2020 Dawson Coleman # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add pool ciphers column Revision ID: fbd705961c3a Revises: 7c36b277bfb0 Create Date: 2020-03-31 14:19:25.280946 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'fbd705961c3a' down_revision = '7c36b277bfb0' def upgrade(): op.add_column( 'pool', sa.Column('tls_ciphers', sa.String(2048), nullable=True) ) ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/fc5582da7d8a_create_amphora_build_rate_limit_tables.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/fc5582da7d8a_create_amphora_build_rat0000664000175000017500000000347500000000000033616 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """create_amphora_build_rate_limit_tables Revision ID: fc5582da7d8a Revises: 443fe6676637 Create Date: 2016-04-07 19:42:28.171902 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. 
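# NOTE: the raw ALTER TABLE statements in fac584114642 above (MODIFY,
# DROP PRIMARY KEY) are MySQL dialect; Alembic offers no single portable
# operation for swapping a primary key in place, which is presumably why raw
# SQL was used. A rough sketch of the same intent with Alembic ops
# (illustrative only; the old PK's constraint name varies by backend):
def _health_monitor_pk_sketch():
    op.alter_column('health_monitor', 'id',
                    existing_type=sa.String(36), nullable=False)
    op.drop_constraint('PRIMARY', 'health_monitor', type_='primary')
    op.create_primary_key('pk_health_monitor', 'health_monitor', ['id'])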
revision = 'fc5582da7d8a' down_revision = '443fe6676637' def upgrade(): op.create_table( u'amphora_build_slots', sa.Column(u'id', sa.Integer(), primary_key=True), sa.Column(u'slots_used', sa.Integer(), default=0) ) # Create temporary table for table data seeding insert_table = sql.table( u'amphora_build_slots', sql.column(u'id', sa.Integer), sql.column(u'slots_used', sa.Integer) ) op.bulk_insert( insert_table, [ {'id': 1, 'slots_used': 0} ] ) op.create_table( u'amphora_build_request', sa.Column(u'amphora_id', sa.String(36), nullable=True, primary_key=True), sa.Column(u'priority', sa.Integer()), sa.Column(u'created_time', sa.DateTime(timezone=True), nullable=False), sa.Column(u'status', sa.String(16), default='WAITING', nullable=False) ) def downgrade(): pass ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/ffad172e98c1_add_certificate_revoke_list_option.py 22 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/alembic_migrations/versions/ffad172e98c1_add_certificate_revoke_l0000664000175000017500000000203700000000000033567 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add certificate revoke revocation list field Revision ID: ffad172e98c1 Revises: f21ae3f21adc Create Date: 2018-10-01 20:47:52.405865 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'ffad172e98c1' down_revision = 'f21ae3f21adc' def upgrade(): op.add_column(u'listener', sa.Column(u'client_crl_container_id', sa.String(255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/migration/cli.py0000664000175000017500000001217600000000000020326 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
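# octavia-db-manage is a thin oslo.config-driven wrapper around Alembic; the
# subcommands are registered in add_command_parsers() below. Typical
# invocations, inferred from those parsers (the revision message is a
# free-form example):
#
#     octavia-db-manage upgrade head
#     octavia-db-manage upgrade --delta 1
#     octavia-db-manage revision -m "add widget table" --autogenerate
#     octavia-db-manage check_migration
#     octavia-db-manage upgrade_persistence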
import os from alembic import command as alembic_cmd from alembic import config as alembic_cfg from alembic import util as alembic_u from oslo_config import cfg from oslo_db import options from oslo_log import log from octavia.controller.worker.v2 import taskflow_jobboard_driver from octavia.i18n import _ CONF = cfg.CONF options.set_defaults(CONF) # Setting explicitly here needed for taskflow persistence successful # initialization options.set_defaults(CONF, max_pool_size=10, max_overflow=20, pool_timeout=10) log.set_defaults() log.register_options(CONF) log.setup(CONF, 'octavia-db-manage') def do_alembic_command(config, cmd, *args, **kwargs): try: getattr(alembic_cmd, cmd)(config, *args, **kwargs) except alembic_u.CommandError as e: alembic_u.err(str(e)) def do_check_migration(config, _cmd): do_alembic_command(config, 'branches') def add_alembic_subparser(sub, cmd): return sub.add_parser(cmd, help=getattr(alembic_cmd, cmd).__doc__) def do_upgrade(config, cmd): if not CONF.command.revision and not CONF.command.delta: raise SystemExit(_('You must provide a revision or relative delta')) revision = CONF.command.revision or '' if '-' in revision: raise SystemExit(_('Negative relative revision (downgrade) not ' 'supported')) delta = CONF.command.delta if delta: if '+' in revision: raise SystemExit(_('Use either --delta or relative revision, ' 'not both')) if delta < 0: raise SystemExit(_('Negative delta (downgrade) not supported')) revision = '%s+%d' % (revision, delta) do_alembic_command(config, cmd, revision, sql=CONF.command.sql) def no_downgrade(config, cmd): raise SystemExit(_("Downgrade no longer supported")) def do_stamp(config, cmd): do_alembic_command(config, cmd, CONF.command.revision, sql=CONF.command.sql) def do_revision(config, cmd): do_alembic_command(config, cmd, message=CONF.command.message, autogenerate=CONF.command.autogenerate, sql=CONF.command.sql) def do_persistence_upgrade(config, cmd): opt = cfg.StrOpt('persistence_connection', default='sqlite://') cfg.CONF.register_opts([opt], group='task_flow') persistence = taskflow_jobboard_driver.MysqlPersistenceDriver() persistence.initialize() def add_command_parsers(subparsers): for name in ['current', 'history', 'branches']: parser = add_alembic_subparser(subparsers, name) parser.set_defaults(func=do_alembic_command) help_text = (getattr(alembic_cmd, 'branches').__doc__ + ' and validate head file') parser = subparsers.add_parser('check_migration', help=help_text) parser.set_defaults(func=do_check_migration) parser = add_alembic_subparser(subparsers, 'upgrade') parser.add_argument('--delta', type=int) parser.add_argument('--sql', action='store_true') parser.add_argument('revision', nargs='?') parser.set_defaults(func=do_upgrade) parser = subparsers.add_parser( "upgrade_persistence", help="Run migrations for persistence backend") parser.set_defaults(func=do_persistence_upgrade) parser = subparsers.add_parser('downgrade', help="(No longer supported)") parser.add_argument('None', nargs='?', help="Downgrade not supported") parser.set_defaults(func=no_downgrade) parser = add_alembic_subparser(subparsers, 'stamp') parser.add_argument('--sql', action='store_true') parser.add_argument('revision') parser.set_defaults(func=do_stamp) parser = add_alembic_subparser(subparsers, 'revision') parser.add_argument('-m', '--message') parser.add_argument('--autogenerate', action='store_true') parser.add_argument('--sql', action='store_true') parser.set_defaults(func=do_revision) command_opt = cfg.SubCommandOpt('command', title='Command', 
help='Available commands', handler=add_command_parsers) CONF.register_cli_opt(command_opt) def main(): config = alembic_cfg.Config( os.path.join(os.path.dirname(__file__), 'alembic.ini') ) config.set_main_option('script_location', 'octavia.db.migration:alembic_migrations') # attach the octavia conf to the Alembic conf config.octavia_config = CONF CONF(project='octavia') CONF.command.func(config, CONF.command.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/models.py0000664000175000017500000007360100000000000017051 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # Copyright 2017 Walmart Stores Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import models import sqlalchemy as sa from sqlalchemy.ext import orderinglist from sqlalchemy import orm from sqlalchemy.orm import validates from sqlalchemy.sql import func from octavia.api.v2.types import amphora from octavia.api.v2.types import availability_zone_profile from octavia.api.v2.types import availability_zones from octavia.api.v2.types import flavor_profile from octavia.api.v2.types import flavors from octavia.api.v2.types import health_monitor from octavia.api.v2.types import l7policy from octavia.api.v2.types import l7rule from octavia.api.v2.types import listener from octavia.api.v2.types import load_balancer from octavia.api.v2.types import member from octavia.api.v2.types import pool from octavia.api.v2.types import quotas from octavia.common import constants from octavia.common import data_models from octavia.db import base_models from octavia.i18n import _ class ProvisioningStatus(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "provisioning_status" class OperatingStatus(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "operating_status" class Protocol(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "protocol" class Algorithm(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "algorithm" class AmphoraRoles(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "amphora_roles" class LBTopology(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "lb_topology" class SessionPersistenceType(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "session_persistence_type" class HealthMonitorType(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "health_monitor_type" class VRRPAuthMethod(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "vrrp_auth_method" class L7RuleType(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "l7rule_type" class L7RuleCompareType(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "l7rule_compare_type" class L7PolicyAction(base_models.BASE, base_models.LookupTableMixin): __tablename__ = "l7policy_action" class AmphoraBuildSlots(base_models.BASE): __tablename__ = "amphora_build_slots" id = 
sa.Column(sa.Integer(), primary_key=True) slots_used = sa.Column(sa.Integer()) class AmphoraBuildRequest(base_models.BASE): __tablename__ = "amphora_build_request" amphora_id = sa.Column(sa.String(36), nullable=True, primary_key=True) priority = sa.Column(sa.Integer()) created_time = sa.Column(sa.DateTime, default=func.now(), nullable=False) status = sa.Column(sa.String(16), default='WAITING', nullable=False) class SessionPersistence(base_models.BASE): __data_model__ = data_models.SessionPersistence __tablename__ = "session_persistence" pool_id = sa.Column( sa.String(36), sa.ForeignKey("pool.id", name="fk_session_persistence_pool_id"), nullable=False, primary_key=True) type = sa.Column( sa.String(36), sa.ForeignKey( "session_persistence_type.name", name="fk_session_persistence_session_persistence_type_name"), nullable=False) cookie_name = sa.Column(sa.String(255), nullable=True) persistence_timeout = sa.Column(sa.Integer(), nullable=True) persistence_granularity = sa.Column(sa.String(64), nullable=True) pool = orm.relationship("Pool", uselist=False, back_populates="session_persistence") class ListenerStatistics(base_models.BASE): __data_model__ = data_models.ListenerStatistics __tablename__ = "listener_statistics" listener_id = sa.Column( sa.String(36), primary_key=True, nullable=False) amphora_id = sa.Column( sa.String(36), primary_key=True, nullable=False) bytes_in = sa.Column(sa.BigInteger, nullable=False) bytes_out = sa.Column(sa.BigInteger, nullable=False) active_connections = sa.Column(sa.Integer, nullable=False) total_connections = sa.Column(sa.BigInteger, nullable=False) request_errors = sa.Column(sa.BigInteger, nullable=False) @validates('bytes_in', 'bytes_out', 'active_connections', 'total_connections', 'request_errors') def validate_non_negative_int(self, key, value): if value < 0: data = {'key': key, 'value': value} raise ValueError(_('The %(key)s field can not have ' 'negative value. 
' 'Current value is %(value)d.') % data) return value class Member(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, models.TimestampMixin, base_models.NameMixin, base_models.TagMixin): __data_model__ = data_models.Member __tablename__ = "member" __v2_wsme__ = member.MemberResponse __table_args__ = ( sa.UniqueConstraint('pool_id', 'ip_address', 'protocol_port', name='uq_member_pool_id_address_protocol_port'), ) pool_id = sa.Column( sa.String(36), sa.ForeignKey("pool.id", name="fk_member_pool_id"), nullable=False) subnet_id = sa.Column(sa.String(36), nullable=True) ip_address = sa.Column('ip_address', sa.String(64), nullable=False) protocol_port = sa.Column(sa.Integer, nullable=False) weight = sa.Column(sa.Integer, nullable=True) backup = sa.Column(sa.Boolean(), nullable=False) monitor_address = sa.Column(sa.String(64), nullable=True) monitor_port = sa.Column(sa.Integer, nullable=True) provisioning_status = sa.Column( sa.String(16), sa.ForeignKey("provisioning_status.name", name="fk_member_provisioning_status_name"), nullable=False) operating_status = sa.Column( sa.String(16), sa.ForeignKey("operating_status.name", name="fk_member_operating_status_name"), nullable=False) enabled = sa.Column(sa.Boolean(), nullable=False) pool = orm.relationship("Pool", back_populates="members") _tags = orm.relationship( 'Tags', single_parent=True, lazy='subquery', cascade='all,delete-orphan', primaryjoin='and_(foreign(Tags.resource_id)==Member.id)' ) class HealthMonitor(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, models.TimestampMixin, base_models.NameMixin, base_models.TagMixin): __data_model__ = data_models.HealthMonitor __tablename__ = "health_monitor" __v2_wsme__ = health_monitor.HealthMonitorResponse __table_args__ = ( sa.UniqueConstraint('pool_id', name='uq_health_monitor_pool'), ) type = sa.Column( sa.String(36), sa.ForeignKey("health_monitor_type.name", name="fk_health_monitor_health_monitor_type_name"), nullable=False) pool_id = sa.Column( sa.String(36), sa.ForeignKey("pool.id", name="fk_health_monitor_pool_id"), nullable=False) delay = sa.Column(sa.Integer, nullable=False) timeout = sa.Column(sa.Integer, nullable=False) fall_threshold = sa.Column(sa.Integer, nullable=False) rise_threshold = sa.Column(sa.Integer, nullable=False) http_method = sa.Column(sa.String(16), nullable=True) url_path = sa.Column(sa.String(2048), nullable=True) expected_codes = sa.Column(sa.String(64), nullable=True) enabled = sa.Column(sa.Boolean, nullable=False) pool = orm.relationship("Pool", uselist=False, back_populates="health_monitor") provisioning_status = sa.Column( sa.String(16), sa.ForeignKey("provisioning_status.name", name="fk_health_monitor_provisioning_status_name"), nullable=False) operating_status = sa.Column( sa.String(16), sa.ForeignKey("operating_status.name", name="fk_health_monitor_operating_status_name"), nullable=False) _tags = orm.relationship( 'Tags', single_parent=True, lazy='subquery', cascade='all,delete-orphan', primaryjoin='and_(foreign(Tags.resource_id)==HealthMonitor.id)' ) http_version = sa.Column(sa.Float, nullable=True) domain_name = sa.Column(sa.String(255), nullable=True) class Pool(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, models.TimestampMixin, base_models.NameMixin, base_models.TagMixin): __data_model__ = data_models.Pool __tablename__ = "pool" __v2_wsme__ = pool.PoolResponse description = sa.Column(sa.String(255), nullable=True) protocol = sa.Column( sa.String(16), sa.ForeignKey("protocol.name", name="fk_pool_protocol_name"), 
nullable=False) lb_algorithm = sa.Column( sa.String(255), sa.ForeignKey("algorithm.name", name="fk_pool_algorithm_name"), nullable=False) provisioning_status = sa.Column( sa.String(16), sa.ForeignKey("provisioning_status.name", name="fk_pool_provisioning_status_name"), nullable=False) operating_status = sa.Column( sa.String(16), sa.ForeignKey("operating_status.name", name="fk_pool_operating_status_name"), nullable=False) enabled = sa.Column(sa.Boolean, nullable=False) load_balancer_id = sa.Column( sa.String(36), sa.ForeignKey("load_balancer.id", name="fk_pool_load_balancer_id"), nullable=True) health_monitor = orm.relationship("HealthMonitor", uselist=False, cascade="delete", back_populates="pool") load_balancer = orm.relationship("LoadBalancer", uselist=False, back_populates="pools") members = orm.relationship("Member", uselist=True, cascade="delete", back_populates="pool") session_persistence = orm.relationship( "SessionPersistence", uselist=False, cascade="delete", back_populates="pool") _default_listeners = orm.relationship("Listener", uselist=True, back_populates="default_pool") l7policies = orm.relationship("L7Policy", uselist=True, back_populates="redirect_pool") _tags = orm.relationship( 'Tags', single_parent=True, lazy='subquery', cascade='all,delete-orphan', primaryjoin='and_(foreign(Tags.resource_id)==Pool.id)' ) tls_certificate_id = sa.Column(sa.String(255), nullable=True) ca_tls_certificate_id = sa.Column(sa.String(255), nullable=True) crl_container_id = sa.Column(sa.String(255), nullable=True) tls_enabled = sa.Column(sa.Boolean, default=False, nullable=False) tls_ciphers = sa.Column(sa.String(2048), nullable=True) # This property should be a unique list of any listeners that reference # this pool as its default_pool and any listeners referenced by enabled # L7Policies with at least one l7rule which also reference this pool. The # intent is that pool.listeners should be a unique list of listeners # *actually* using the pool. 
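# For example, a pool referenced only by a disabled policy, or by a policy
# with no rules, is deliberately absent. Hedged usage sketch against a
# loaded Pool row:
#
#     pool = session.query(Pool).get(pool_id)
#     for listener in pool.listeners:   # computed property, not a DB
#         ...                           # relationship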
@property def listeners(self): _listeners = self._default_listeners[:] _l_ids = [li.id for li in _listeners] l7_listeners = [p.listener for p in self.l7policies if len(p.l7rules) > 0 and p.enabled is True] for li in l7_listeners: if li.id not in _l_ids: _listeners.append(li) _l_ids.append(li.id) return _listeners class LoadBalancer(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, models.TimestampMixin, base_models.NameMixin, base_models.TagMixin): __data_model__ = data_models.LoadBalancer __tablename__ = "load_balancer" __v2_wsme__ = load_balancer.LoadBalancerResponse description = sa.Column(sa.String(255), nullable=True) provisioning_status = sa.Column( sa.String(16), sa.ForeignKey("provisioning_status.name", name="fk_load_balancer_provisioning_status_name"), nullable=False) operating_status = sa.Column( sa.String(16), sa.ForeignKey("operating_status.name", name="fk_load_balancer_operating_status_name"), nullable=False) topology = sa.Column( sa.String(36), sa.ForeignKey("lb_topology.name", name="fk_lb_topology_name"), nullable=True) enabled = sa.Column(sa.Boolean, nullable=False) amphorae = orm.relationship("Amphora", uselist=True, back_populates="load_balancer") server_group_id = sa.Column(sa.String(36), nullable=True) provider = sa.Column(sa.String(64), nullable=True) vip = orm.relationship('Vip', cascade='delete', uselist=False, backref=orm.backref('load_balancer', uselist=False)) pools = orm.relationship('Pool', cascade='delete', uselist=True, back_populates="load_balancer") listeners = orm.relationship('Listener', cascade='delete', uselist=True, back_populates='load_balancer') _tags = orm.relationship( 'Tags', single_parent=True, lazy='subquery', cascade='all,delete-orphan', primaryjoin='and_(foreign(Tags.resource_id)==LoadBalancer.id)' ) flavor_id = sa.Column( sa.String(36), sa.ForeignKey("flavor.id", name="fk_lb_flavor_id"), nullable=True) availability_zone = sa.Column( sa.String(255), sa.ForeignKey("availability_zone.name", name="fk_load_balancer_availability_zone_name"), nullable=True) class VRRPGroup(base_models.BASE): __data_model__ = data_models.VRRPGroup __tablename__ = "vrrp_group" load_balancer_id = sa.Column( sa.String(36), sa.ForeignKey("load_balancer.id", name="fk_vrrp_group_load_balancer_id"), nullable=False, primary_key=True) vrrp_group_name = sa.Column(sa.String(36), nullable=True) vrrp_auth_type = sa.Column(sa.String(16), sa.ForeignKey( "vrrp_auth_method.name", name="fk_load_balancer_vrrp_auth_method_name")) vrrp_auth_pass = sa.Column(sa.String(36), nullable=True) advert_int = sa.Column(sa.Integer(), nullable=True) load_balancer = orm.relationship("LoadBalancer", uselist=False, backref=orm.backref("vrrp_group", uselist=False, cascade="delete")) class Vip(base_models.BASE): __data_model__ = data_models.Vip __tablename__ = "vip" load_balancer_id = sa.Column( sa.String(36), sa.ForeignKey("load_balancer.id", name="fk_vip_load_balancer_id"), nullable=False, primary_key=True) ip_address = sa.Column(sa.String(64), nullable=True) port_id = sa.Column(sa.String(36), nullable=True) subnet_id = sa.Column(sa.String(36), nullable=True) network_id = sa.Column(sa.String(36), nullable=True) qos_policy_id = sa.Column(sa.String(36), nullable=True) octavia_owned = sa.Column(sa.Boolean(), nullable=True) class Listener(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, models.TimestampMixin, base_models.NameMixin, base_models.TagMixin): __data_model__ = data_models.Listener __tablename__ = "listener" __v2_wsme__ = listener.ListenerResponse __table_args__ = ( 
sa.UniqueConstraint( 'load_balancer_id', 'protocol', 'protocol_port', name='uq_listener_load_balancer_id_protocol_port'), ) description = sa.Column(sa.String(255), nullable=True) protocol = sa.Column( sa.String(16), sa.ForeignKey("protocol.name", name="fk_listener_protocol_name"), nullable=False) protocol_port = sa.Column(sa.Integer(), nullable=False) connection_limit = sa.Column(sa.Integer, nullable=True) load_balancer_id = sa.Column( sa.String(36), sa.ForeignKey("load_balancer.id", name="fk_listener_load_balancer_id"), nullable=True) tls_certificate_id = sa.Column(sa.String(255), nullable=True) default_pool_id = sa.Column( sa.String(36), sa.ForeignKey("pool.id", name="fk_listener_pool_id"), nullable=True) provisioning_status = sa.Column( sa.String(16), sa.ForeignKey("provisioning_status.name", name="fk_listener_provisioning_status_name"), nullable=False) operating_status = sa.Column( sa.String(16), sa.ForeignKey("operating_status.name", name="fk_listener_operating_status_name"), nullable=False) enabled = sa.Column(sa.Boolean(), nullable=False) load_balancer = orm.relationship("LoadBalancer", uselist=False, back_populates="listeners") default_pool = orm.relationship("Pool", uselist=False, back_populates="_default_listeners") sni_containers = orm.relationship( 'SNI', cascade='all,delete-orphan', uselist=True, backref=orm.backref('listener', uselist=False)) l7policies = orm.relationship( 'L7Policy', uselist=True, order_by='L7Policy.position', collection_class=orderinglist.ordering_list('position', count_from=1), cascade='delete', back_populates='listener') peer_port = sa.Column(sa.Integer(), nullable=True) insert_headers = sa.Column(sa.PickleType()) timeout_client_data = sa.Column(sa.Integer, nullable=True) timeout_member_connect = sa.Column(sa.Integer, nullable=True) timeout_member_data = sa.Column(sa.Integer, nullable=True) timeout_tcp_inspect = sa.Column(sa.Integer, nullable=True) client_ca_tls_certificate_id = sa.Column(sa.String(255), nullable=True) client_authentication = sa.Column( sa.String(10), sa.ForeignKey("client_authentication_mode.name", name="fk_listener_client_authentication_mode_name"), nullable=False, default=constants.CLIENT_AUTH_NONE) client_crl_container_id = sa.Column(sa.String(255), nullable=True) tls_ciphers = sa.Column(sa.String(2048), nullable=True) _tags = orm.relationship( 'Tags', single_parent=True, lazy='subquery', cascade='all,delete-orphan', primaryjoin='and_(foreign(Tags.resource_id)==Listener.id)' ) # This property should be a unique list of the default_pool and anything # referenced by enabled L7Policies with at least one rule that also # reference this listener. The intent is that listener.pools should be a # unique list of pools this listener is *actually* using. 
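# Concretely: the default_pool always counts; a redirect_pool counts only
# while its policy is enabled and has at least one rule; and each pool
# appears at most once even if several policies redirect to it.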
@property def pools(self): _pools = [] _p_ids = [] if self.default_pool: _pools.append(self.default_pool) _p_ids.append(self.default_pool.id) l7_pools = [p.redirect_pool for p in self.l7policies if p.redirect_pool is not None and len(p.l7rules) > 0 and p.enabled is True] for p in l7_pools: if p.id not in _p_ids: _pools.append(p) _p_ids.append(p.id) return _pools allowed_cidrs = orm.relationship( 'ListenerCidr', cascade='all,delete-orphan', uselist=True, backref=orm.backref('listener', uselist=False)) class SNI(base_models.BASE): __data_model__ = data_models.SNI __tablename__ = "sni" __table_args__ = ( sa.PrimaryKeyConstraint('listener_id', 'tls_container_id'), ) listener_id = sa.Column( sa.String(36), sa.ForeignKey("listener.id", name="fk_sni_listener_id"), nullable=False) tls_container_id = sa.Column(sa.String(128), nullable=False) position = sa.Column(sa.Integer(), nullable=True) class Amphora(base_models.BASE, base_models.IdMixin, models.TimestampMixin): __data_model__ = data_models.Amphora __tablename__ = "amphora" __v2_wsme__ = amphora.AmphoraResponse load_balancer_id = sa.Column( sa.String(36), sa.ForeignKey("load_balancer.id", name="fk_amphora_load_balancer_id"), nullable=True) compute_id = sa.Column(sa.String(36), nullable=True) lb_network_ip = sa.Column(sa.String(64), nullable=True) vrrp_ip = sa.Column(sa.String(64), nullable=True) ha_ip = sa.Column(sa.String(64), nullable=True) vrrp_port_id = sa.Column(sa.String(36), nullable=True) ha_port_id = sa.Column(sa.String(36), nullable=True) cert_expiration = sa.Column(sa.DateTime(timezone=True), default=None, nullable=True) cert_busy = sa.Column(sa.Boolean(), default=False, nullable=False) role = sa.Column( sa.String(36), sa.ForeignKey("amphora_roles.name", name="fk_amphora_roles_name"), nullable=True) status = sa.Column( sa.String(36), sa.ForeignKey("provisioning_status.name", name="fk_container_provisioning_status_name")) vrrp_interface = sa.Column(sa.String(16), nullable=True) vrrp_id = sa.Column(sa.Integer(), nullable=True) vrrp_priority = sa.Column(sa.Integer(), nullable=True) cached_zone = sa.Column(sa.String(255), nullable=True) image_id = sa.Column(sa.String(36), nullable=True) load_balancer = orm.relationship("LoadBalancer", uselist=False, back_populates='amphorae') compute_flavor = sa.Column(sa.String(255), nullable=True) class AmphoraHealth(base_models.BASE): __data_model__ = data_models.AmphoraHealth __tablename__ = "amphora_health" amphora_id = sa.Column( sa.String(36), nullable=False, primary_key=True) last_update = sa.Column(sa.DateTime, default=func.now(), nullable=False) busy = sa.Column(sa.Boolean(), default=False, nullable=False) class L7Rule(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, models.TimestampMixin, base_models.TagMixin): __data_model__ = data_models.L7Rule __tablename__ = "l7rule" __v2_wsme__ = l7rule.L7RuleResponse l7policy_id = sa.Column( sa.String(36), sa.ForeignKey("l7policy.id", name="fk_l7rule_l7policy_id"), nullable=False) type = sa.Column( sa.String(36), sa.ForeignKey( "l7rule_type.name", name="fk_l7rule_l7rule_type_name"), nullable=False) compare_type = sa.Column( sa.String(36), sa.ForeignKey( "l7rule_compare_type.name", name="fk_l7rule_l7rule_compare_type_name"), nullable=False) key = sa.Column(sa.String(255), nullable=True) value = sa.Column(sa.String(255), nullable=False) invert = sa.Column(sa.Boolean(), default=False, nullable=False) enabled = sa.Column(sa.Boolean(), nullable=False) l7policy = orm.relationship("L7Policy", uselist=False, back_populates="l7rules") 
provisioning_status = sa.Column( sa.String(16), sa.ForeignKey("provisioning_status.name", name="fk_l7rule_provisioning_status_name"), nullable=False) operating_status = sa.Column( sa.String(16), sa.ForeignKey("operating_status.name", name="fk_l7rule_operating_status_name"), nullable=False) _tags = orm.relationship( 'Tags', single_parent=True, lazy='subquery', cascade='all,delete-orphan', primaryjoin='and_(foreign(Tags.resource_id)==L7Rule.id)' ) class L7Policy(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, models.TimestampMixin, base_models.NameMixin, base_models.TagMixin): __data_model__ = data_models.L7Policy __tablename__ = "l7policy" __v2_wsme__ = l7policy.L7PolicyResponse description = sa.Column(sa.String(255), nullable=True) listener_id = sa.Column( sa.String(36), sa.ForeignKey("listener.id", name="fk_l7policy_listener_id"), nullable=False) action = sa.Column( sa.String(36), sa.ForeignKey( "l7policy_action.name", name="fk_l7policy_l7policy_action_name"), nullable=False) redirect_pool_id = sa.Column( sa.String(36), sa.ForeignKey("pool.id", name="fk_l7policy_pool_id"), nullable=True) redirect_url = sa.Column( sa.String(255), nullable=True) redirect_prefix = sa.Column( sa.String(255), nullable=True) redirect_http_code = sa.Column(sa.Integer, nullable=True) position = sa.Column(sa.Integer, nullable=False) enabled = sa.Column(sa.Boolean(), nullable=False) listener = orm.relationship("Listener", uselist=False, back_populates="l7policies") redirect_pool = orm.relationship("Pool", uselist=False, back_populates="l7policies") l7rules = orm.relationship("L7Rule", uselist=True, cascade="delete", back_populates="l7policy") provisioning_status = sa.Column( sa.String(16), sa.ForeignKey("provisioning_status.name", name="fk_l7policy_provisioning_status_name"), nullable=False) operating_status = sa.Column( sa.String(16), sa.ForeignKey("operating_status.name", name="fk_l7policy_operating_status_name"), nullable=False) _tags = orm.relationship( 'Tags', single_parent=True, lazy='subquery', cascade='all,delete-orphan', primaryjoin='and_(foreign(Tags.resource_id)==L7Policy.id)' ) class Quotas(base_models.BASE): __data_model__ = data_models.Quotas __tablename__ = "quotas" __v2_wsme__ = quotas.QuotaAllBase project_id = sa.Column(sa.String(36), primary_key=True) health_monitor = sa.Column(sa.Integer(), nullable=True) listener = sa.Column(sa.Integer(), nullable=True) load_balancer = sa.Column(sa.Integer(), nullable=True) member = sa.Column(sa.Integer(), nullable=True) pool = sa.Column(sa.Integer(), nullable=True) in_use_health_monitor = sa.Column(sa.Integer(), nullable=True) in_use_listener = sa.Column(sa.Integer(), nullable=True) in_use_load_balancer = sa.Column(sa.Integer(), nullable=True) in_use_member = sa.Column(sa.Integer(), nullable=True) in_use_pool = sa.Column(sa.Integer(), nullable=True) class FlavorProfile(base_models.BASE, base_models.IdMixin, base_models.NameMixin): __data_model__ = data_models.FlavorProfile __tablename__ = "flavor_profile" __v2_wsme__ = flavor_profile.FlavorProfileResponse provider_name = sa.Column(sa.String(255), nullable=False) flavor_data = sa.Column(sa.String(4096), nullable=False) class Flavor(base_models.BASE, base_models.IdMixin, base_models.NameMixin): __data_model__ = data_models.Flavor __tablename__ = "flavor" __v2_wsme__ = flavors.FlavorResponse __table_args__ = ( sa.UniqueConstraint('name', name='uq_flavor_name'), ) description = sa.Column(sa.String(255), nullable=True) enabled = sa.Column(sa.Boolean(), nullable=False) flavor_profile_id = sa.Column( 
sa.String(36), sa.ForeignKey("flavor_profile.id", name="fk_flavor_flavor_profile_id"), nullable=False) class AvailabilityZoneProfile(base_models.BASE, base_models.IdMixin, base_models.NameMixin): __data_model__ = data_models.AvailabilityZoneProfile __tablename__ = "availability_zone_profile" __v2_wsme__ = availability_zone_profile.AvailabilityZoneProfileResponse provider_name = sa.Column(sa.String(255), nullable=False) availability_zone_data = sa.Column(sa.String(4096), nullable=False) class AvailabilityZone(base_models.BASE, base_models.NameMixin): __data_model__ = data_models.AvailabilityZone __tablename__ = "availability_zone" __v2_wsme__ = availability_zones.AvailabilityZoneResponse __table_args__ = ( sa.PrimaryKeyConstraint('name'), ) description = sa.Column(sa.String(255), nullable=True) enabled = sa.Column(sa.Boolean(), nullable=False) availability_zone_profile_id = sa.Column( sa.String(36), sa.ForeignKey("availability_zone_profile.id", name="fk_az_az_profile_id"), nullable=False) class ClientAuthenticationMode(base_models.BASE): __tablename__ = "client_authentication_mode" name = sa.Column(sa.String(10), primary_key=True, nullable=False) class SparesPool(base_models.BASE): __tablename__ = "spares_pool" updated_at = sa.Column(sa.DateTime, primary_key=True, nullable=True) class ListenerCidr(base_models.BASE): __data_model__ = data_models.ListenerCidr __tablename__ = "listener_cidr" __table_args__ = ( sa.PrimaryKeyConstraint('listener_id', 'cidr'), ) listener_id = sa.Column( sa.String(36), sa.ForeignKey("listener.id", name="fk_listener_cidr_listener_id"), nullable=False) cidr = sa.Column(sa.String(64), nullable=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/prepare.py0000664000175000017500000002174300000000000017224 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
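# This module normalizes API-layer dictionaries before they reach the
# repositories: generating ids, stamping PENDING_CREATE/OFFLINE statuses and
# filling configuration-driven defaults. Hedged usage sketch:
#
#     from octavia.db import prepare
#     lb = prepare.create_load_balancer({'name': 'lb1',
#                                        'project_id': project_id})
#     # lb['id'] is now a generated UUID, lb['provisioning_status'] is
#     # PENDING_CREATE, and lb['topology'] holds the configured default.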
from oslo_config import cfg from oslo_utils import uuidutils from octavia.api.v2.types import l7rule from octavia.common import constants from octavia.common import exceptions from octavia.common import validate CONF = cfg.CONF def create_load_balancer_tree(lb_dict): listeners = lb_dict.pop('listeners') or [] prepped_lb = create_load_balancer(lb_dict) prepped_lb['listeners'] = [] for listener_dict in listeners: pool = listener_dict.pop('default_pool') or None listener_dict['project_id'] = prepped_lb.get('project_id') prepped_listener = create_listener(listener_dict, prepped_lb.get('id')) if pool: hm = pool.pop('health_monitor') or None members = pool.pop('members') or [] pool['project_id'] = prepped_lb.get('project_id') prepped_pool = create_pool(pool) pool_id = prepped_pool.get('id') prepped_pool['load_balancer_id'] = prepped_lb.get('id') if hm: hm['project_id'] = lb_dict.get('project_id') prepped_hm = create_health_monitor(hm, pool_id) prepped_pool['health_monitor'] = prepped_hm prepped_pool['members'] = [] for member_dict in members: member_dict['project_id'] = prepped_lb.get('project_id') prepped_pool['members'].append( create_member(member_dict, pool_id)) prepped_listener['default_pool'] = prepped_pool prepped_listener['default_pool_id'] = prepped_pool['id'] if listener_dict.get('l7policies'): prepped_l7policies = [] for l7policy in listener_dict.get('l7policies'): prepped_l7policy = create_l7policy( l7policy, prepped_lb.get('id'), listener_dict.get('id')) prepped_l7policies.append(prepped_l7policy) prepped_listener['l7policies'] = prepped_l7policies prepped_lb['listeners'].append(prepped_listener) return prepped_lb def create_load_balancer(lb_dict): if not lb_dict.get('id'): lb_dict['id'] = uuidutils.generate_uuid() if lb_dict.get('vip'): lb_dict['vip']['load_balancer_id'] = lb_dict.get('id') lb_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE lb_dict[constants.OPERATING_STATUS] = constants.OFFLINE # Set defaults here; they may be overridden by flavors later lb_dict['topology'] = CONF.controller_worker.loadbalancer_topology return lb_dict def create_listener(listener_dict, lb_id): if not listener_dict.get('id'): listener_dict['id'] = uuidutils.generate_uuid() if 'loadbalancer_id' in listener_dict: listener_dict['load_balancer_id'] = listener_dict.pop( 'loadbalancer_id') else: listener_dict['load_balancer_id'] = lb_id listener_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE listener_dict[constants.OPERATING_STATUS] = constants.OFFLINE # NOTE(blogan): Throwing away because we should not store secure data # in the database nor should we send it to a handler.
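    # tls_termination is therefore removed outright; immediately afterwards,
    # SNI references are normalized from either 'sni_containers' or
    # 'sni_container_refs' into {listener_id, tls_container_id} rows.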
if 'tls_termination' in listener_dict: del listener_dict['tls_termination'] if 'sni_containers' in listener_dict: sni_container_ids = listener_dict.pop('sni_containers') or [] elif 'sni_container_refs' in listener_dict: sni_container_ids = listener_dict.pop('sni_container_refs') or [] else: sni_container_ids = [] sni_containers = [{'listener_id': listener_dict.get('id'), 'tls_container_id': sni_container_id} for sni_container_id in sni_container_ids] listener_dict['sni_containers'] = sni_containers if 'client_authentication' not in listener_dict: listener_dict['client_authentication'] = constants.CLIENT_AUTH_NONE if (listener_dict['protocol'] == constants.PROTOCOL_TERMINATED_HTTPS and ('tls_ciphers' not in listener_dict or listener_dict['tls_ciphers'] is None)): listener_dict['tls_ciphers'] = ( CONF.api_settings.default_listener_ciphers) if listener_dict.get('timeout_client_data') is None: listener_dict['timeout_client_data'] = ( CONF.haproxy_amphora.timeout_client_data) if listener_dict.get('timeout_member_connect') is None: listener_dict['timeout_member_connect'] = ( CONF.haproxy_amphora.timeout_member_connect) if listener_dict.get('timeout_member_data') is None: listener_dict['timeout_member_data'] = ( CONF.haproxy_amphora.timeout_member_data) if listener_dict.get('timeout_tcp_inspect') is None: listener_dict['timeout_tcp_inspect'] = ( CONF.haproxy_amphora.timeout_tcp_inspect) return listener_dict def create_l7policy(l7policy_dict, lb_id, listener_id): l7policy_dict = validate.sanitize_l7policy_api_args(l7policy_dict, create=True) l7policy_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE l7policy_dict[constants.OPERATING_STATUS] = constants.OFFLINE if not l7policy_dict.get('id'): l7policy_dict['id'] = uuidutils.generate_uuid() l7policy_dict['listener_id'] = listener_id if l7policy_dict.get('redirect_pool'): pool_dict = l7policy_dict.pop('redirect_pool') prepped_pool = create_pool(pool_dict, lb_id) l7policy_dict['redirect_pool'] = prepped_pool l7policy_dict['redirect_pool_id'] = prepped_pool['id'] rules = l7policy_dict.pop('rules', None) if rules: l7policy_dict['l7rules'] = rules if l7policy_dict.get('l7rules'): if (len(l7policy_dict.get('l7rules')) > constants.MAX_L7RULES_PER_L7POLICY): raise exceptions.TooManyL7RulesOnL7Policy(id=l7policy_dict['id']) prepped_l7rules = [] for l7rule_dict in l7policy_dict.get('l7rules'): try: validate.l7rule_data(l7rule.L7RulePOST(**l7rule_dict)) except Exception as e: raise exceptions.L7RuleValidation(error=e) prepped_l7rule = create_l7rule(l7rule_dict, l7policy_dict['id']) prepped_l7rules.append(prepped_l7rule) return l7policy_dict def create_l7rule(l7rule_dict, l7policy_id): l7rule_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE l7rule_dict[constants.OPERATING_STATUS] = constants.OFFLINE if not l7rule_dict.get('id'): l7rule_dict['id'] = uuidutils.generate_uuid() l7rule_dict['l7policy_id'] = l7policy_id if 'enabled' not in l7rule_dict: l7rule_dict['enabled'] = True return l7rule_dict def create_pool(pool_dict, lb_id=None): if not pool_dict.get('id'): pool_dict['id'] = uuidutils.generate_uuid() if 'loadbalancer_id' in pool_dict: pool_dict['load_balancer_id'] = pool_dict.pop('loadbalancer_id') else: pool_dict['load_balancer_id'] = lb_id if pool_dict.get('session_persistence'): pool_dict['session_persistence']['pool_id'] = pool_dict.get('id') if 'members' in pool_dict and not pool_dict.get('members'): del pool_dict['members'] elif pool_dict.get('members'): prepped_members = [] for member_dict in pool_dict.get('members'): 
prepped_members.append(create_member(member_dict, pool_dict['id'])) if pool_dict['tls_enabled'] is True and pool_dict['tls_ciphers'] is None: pool_dict['tls_ciphers'] = CONF.api_settings.default_pool_ciphers pool_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE pool_dict[constants.OPERATING_STATUS] = constants.OFFLINE return pool_dict def create_member(member_dict, pool_id, has_health_monitor=False): member_dict['pool_id'] = pool_id member_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE if has_health_monitor: member_dict[constants.OPERATING_STATUS] = constants.OFFLINE else: member_dict[constants.OPERATING_STATUS] = constants.NO_MONITOR if 'backup' not in member_dict: member_dict['backup'] = False return member_dict def create_health_monitor(hm_dict, pool_id=None): hm_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE hm_dict[constants.OPERATING_STATUS] = constants.OFFLINE if pool_id: hm_dict['id'] = pool_id hm_dict['pool_id'] = pool_id else: if not hm_dict.get('id'): hm_dict['id'] = uuidutils.generate_uuid() return hm_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/db/repositories.py0000664000175000017500000026111000000000000020307 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Defines interface for DB access that Resource or Octavia Controllers may reference """ import datetime from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exception from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import uuidutils from sqlalchemy.orm import noload from sqlalchemy.orm import subqueryload from sqlalchemy.sql.expression import false from sqlalchemy.sql import func from octavia.common import constants as consts from octavia.common import data_models from octavia.common import exceptions from octavia.common import validate from octavia.db import models CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseRepository(object): model_class = None def count(self, session, **filters): """Retrieves a count of entities from the database. :param session: A Sql Alchemy database session. :param filters: Filters to decide which entities should be retrieved. :returns: int """ deleted = filters.pop('show_deleted', True) model = session.query(self.model_class).filter_by(**filters) if not deleted: if hasattr(self.model_class, 'status'): model = model.filter( self.model_class.status != consts.DELETED) else: model = model.filter( self.model_class.provisioning_status != consts.DELETED) return model.count() def create(self, session, **model_kwargs): """Base create method for a database entity. :param session: A Sql Alchemy database session. :param model_kwargs: Attributes of the model to insert. 
    def delete(self, session, **filters):
        """Deletes an entity from the database.

        :param session: A Sql Alchemy database session.
        :param filters: Filters to decide which entity should be deleted.
        :returns: None
        :raises: sqlalchemy.orm.exc.NoResultFound
        """
        model = session.query(self.model_class).filter_by(**filters).one()
        with session.begin(subtransactions=True):
            session.delete(model)
            session.flush()

    def delete_batch(self, session, ids=None):
        """Batch deletes by entity ids."""
        ids = ids or []
        for id in ids:
            self.delete(session, id=id)

    def update(self, session, id, **model_kwargs):
        """Updates an entity in the database.

        :param session: A Sql Alchemy database session.
        :param model_kwargs: Entity attributes that should be updated.
        :returns: octavia.common.data_model
        """
        with session.begin(subtransactions=True):
            tags = model_kwargs.pop('tags', None)
            if tags is not None:
                resource = session.query(self.model_class).get(id)
                resource.tags = tags
            session.query(self.model_class).filter_by(
                id=id).update(model_kwargs)

    def get(self, session, **filters):
        """Retrieves an entity from the database.

        :param session: A Sql Alchemy database session.
        :param filters: Filters to decide which entity should be retrieved.
        :returns: octavia.common.data_model
        """
        deleted = filters.pop('show_deleted', True)
        model = session.query(self.model_class).filter_by(**filters)

        if not deleted:
            if hasattr(self.model_class, 'status'):
                model = model.filter(
                    self.model_class.status != consts.DELETED)
            else:
                model = model.filter(
                    self.model_class.provisioning_status != consts.DELETED)

        model = model.first()

        if not model:
            return None

        return model.to_data_model()

    def get_all(self, session, pagination_helper=None,
                query_options=None, **filters):
        """Retrieves a list of entities from the database.

        :param session: A Sql Alchemy database session.
        :param pagination_helper: Helper to apply pagination and sorting.
        :param query_options: Optional query options to apply.
        :param filters: Filters to decide which entities should be retrieved.
        :returns: [octavia.common.data_model]
        """
        deleted = filters.pop('show_deleted', True)
        query = session.query(self.model_class).filter_by(**filters)
        if query_options:
            query = query.options(query_options)

        if not deleted:
            if hasattr(self.model_class, 'status'):
                query = query.filter(
                    self.model_class.status != consts.DELETED)
            else:
                query = query.filter(
                    self.model_class.provisioning_status != consts.DELETED)

        if pagination_helper:
            model_list, links = pagination_helper.apply(
                query, self.model_class)
        else:
            links = None
            model_list = query.all()

        data_model_list = [model.to_data_model() for model in model_list]
        return data_model_list, links

    def exists(self, session, id):
        """Determines whether an entity exists in the database by its id.

        :param session: A Sql Alchemy database session.
        :param id: id of entity to check for existence.
        :returns: octavia.common.data_model
        """
        return bool(session.query(self.model_class).filter_by(id=id).first())

    def get_all_deleted_expiring(self, session, exp_age):
        """Get all previously deleted resources that are now expiring.

        :param session: A Sql Alchemy database session.
        :param exp_age: A standard datetime delta which is used to see how
                        long a resource can live without updates before it
                        is considered expired
        :returns: A list of resource IDs
        """
        expiry_time = datetime.datetime.utcnow() - exp_age

        query = session.query(self.model_class).filter(
            self.model_class.updated_at < expiry_time)
        if hasattr(self.model_class, 'status'):
            query = query.filter_by(status=consts.DELETED)
        else:
            query = query.filter_by(provisioning_status=consts.DELETED)
        # Do not load any relationship
        query = query.options(noload('*'))
        model_list = query.all()

        id_list = [model.id for model in model_list]
        return id_list
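# Editor's sketch (hypothetical, not in the original source): the intended
# usage pattern for BaseRepository subclasses. 'get_session()' stands in
# for however the caller obtains a SQLAlchemy session.
#
#     repo = PoolRepository()
#     session = get_session()
#     # Filters map directly to model columns; show_deleted=False excludes
#     # rows whose (provisioning_)status is DELETED.
#     active_pools, links = repo.get_all(session, show_deleted=False,
#                                        project_id='some-project-id')
#     total = repo.count(session, project_id='some-project-id')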
class Repositories(object):
    def __init__(self):
        self.load_balancer = LoadBalancerRepository()
        self.vip = VipRepository()
        self.health_monitor = HealthMonitorRepository()
        self.session_persistence = SessionPersistenceRepository()
        self.pool = PoolRepository()
        self.member = MemberRepository()
        self.listener = ListenerRepository()
        self.listener_cidr = ListenerCidrRepository()
        self.listener_stats = ListenerStatisticsRepository()
        self.amphora = AmphoraRepository()
        self.sni = SNIRepository()
        self.amphorahealth = AmphoraHealthRepository()
        self.vrrpgroup = VRRPGroupRepository()
        self.l7rule = L7RuleRepository()
        self.l7policy = L7PolicyRepository()
        self.amp_build_slots = AmphoraBuildSlotsRepository()
        self.amp_build_req = AmphoraBuildReqRepository()
        self.quotas = QuotasRepository()
        self.flavor = FlavorRepository()
        self.flavor_profile = FlavorProfileRepository()
        self.spares_pool = SparesPoolRepository()
        self.availability_zone = AvailabilityZoneRepository()
        self.availability_zone_profile = AvailabilityZoneProfileRepository()

    def create_load_balancer_and_vip(self, session, lb_dict, vip_dict):
        """Inserts load balancer and vip entities into the database.

        Inserts load balancer and vip entities into the database in one
        transaction and returns the data model of the load balancer.

        :param session: A Sql Alchemy database session.
        :param lb_dict: Dictionary representation of a load balancer
        :param vip_dict: Dictionary representation of a vip
        :returns: octavia.common.data_models.LoadBalancer
        """
        with session.begin(subtransactions=True):
            if not lb_dict.get('id'):
                lb_dict['id'] = uuidutils.generate_uuid()
            lb = models.LoadBalancer(**lb_dict)
            session.add(lb)
            vip_dict['load_balancer_id'] = lb_dict['id']
            vip = models.Vip(**vip_dict)
            session.add(vip)
        return self.load_balancer.get(session, id=lb.id)
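    # Editor's sketch (hypothetical values, not part of the original
    # source): creating a load balancer and its VIP atomically through the
    # Repositories facade.
    #
    #     repos = Repositories()
    #     lb_dm = repos.create_load_balancer_and_vip(
    #         session,
    #         {'name': 'web-lb', 'project_id': 'some-project-id'},
    #         {'ip_address': '203.0.113.9', 'subnet_id': 'subnet-uuid'})
    #     # Both rows are inserted in one transaction and the returned
    #     # value is the LoadBalancer data model re-read from the database.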
    def create_pool_on_load_balancer(self, session, pool_dict,
                                     listener_id=None):
        """Inserts a pool and session persistence entity into the database.

        :param session: A Sql Alchemy database session.
        :param pool_dict: Dictionary representation of a pool
        :param listener_id: Optional listener id that will
                            reference this pool as its default_pool_id
        :returns: octavia.common.data_models.Pool
        """
        with session.begin(subtransactions=True):
            if not pool_dict.get('id'):
                pool_dict['id'] = uuidutils.generate_uuid()
            sp_dict = pool_dict.pop('session_persistence', None)
            db_pool = self.pool.create(session, **pool_dict)
            if sp_dict is not None and sp_dict != {}:
                sp_dict['pool_id'] = pool_dict['id']
                self.session_persistence.create(session, **sp_dict)
            if listener_id:
                self.listener.update(session, listener_id,
                                     default_pool_id=pool_dict['id'])

        # Immediate refresh, as we have found that sqlalchemy will sometimes
        # cache the above query and the pool object may miss the listener_id
        # information
        if listener_id:
            pool = session.query(models.Pool).filter_by(id=db_pool.id).first()
            session.refresh(pool)

        return self.pool.get(session, id=db_pool.id)

    def update_pool_and_sp(self, session, pool_id, pool_dict):
        """Updates a pool and session persistence entity in the database.

        :param session: A Sql Alchemy database session.
        :param pool_id: ID of the pool to update
        :param pool_dict: Dictionary representation of a pool
        :returns: octavia.common.data_models.Pool
        """
        with session.begin(subtransactions=True):
            if 'session_persistence' in pool_dict.keys():
                sp_dict = pool_dict.pop('session_persistence')
                if sp_dict is None or sp_dict == {}:
                    if self.session_persistence.exists(session, pool_id):
                        self.session_persistence.delete(session,
                                                        pool_id=pool_id)
                elif self.session_persistence.exists(session, pool_id):
                    self.session_persistence.update(session, pool_id,
                                                    **sp_dict)
                else:
                    sp_dict['pool_id'] = pool_id
                    self.session_persistence.create(session, **sp_dict)
            # If only the session_persistence is being updated, this will be
            # empty
            if pool_dict:
                self.pool.update(session, pool_id, **pool_dict)
        return self.pool.get(session, id=pool_id)
    def test_and_set_lb_and_listeners_prov_status(self, session, lb_id,
                                                  lb_prov_status,
                                                  listener_prov_status,
                                                  listener_ids=None,
                                                  pool_id=None,
                                                  l7policy_id=None):
        """Tests and sets a load balancer and listener provisioning status.

        Puts a lock on the load balancer table to check the status of a
        load balancer.  If the status is ACTIVE then the status of the load
        balancer and listener is updated and the method returns True.  If
        the status is not ACTIVE, then nothing is done and False is
        returned.

        :param session: A Sql Alchemy database session.
        :param lb_id: ID of the Load Balancer to check and lock
        :param lb_prov_status: Status to set Load Balancer and Listener if
                               check passes.
        :param listener_prov_status: Status to set Listeners if check passes
        :param listener_ids: List of IDs of listeners to check and lock
                             (only use this when relevant to the operation)
        :param pool_id: ID of the Pool to check and lock (only use this when
                        relevant to the operation)
        :param l7policy_id: ID of the L7Policy to check and lock (only use
                            this when relevant to the operation)
        :returns: bool
        """
        listener_ids = listener_ids or []
        # Always set the status requested, regardless of whether we have
        # listeners-- sometimes pools will be disassociated with a listener
        # and we still need the LB locked when Pools or subordinate objects
        # are changed.
        success = self.load_balancer.test_and_set_provisioning_status(
            session, lb_id, lb_prov_status)
        if not success:
            return success
        for listener_id in listener_ids:
            self.listener.update(session, listener_id,
                                 provisioning_status=listener_prov_status)
        if pool_id:
            self.pool.update(session, pool_id,
                             provisioning_status=lb_prov_status)
        if l7policy_id:
            self.l7policy.update(session, l7policy_id,
                                 provisioning_status=lb_prov_status)
        return success

    def check_quota_met(self, session, lock_session, _class, project_id,
                        count=1):
        """Checks and updates object quotas.

        This method makes sure the project has available quota
        for the resource and updates the quota to reflect the new usage.

        :param session: Context database session
        :param lock_session: Locking database session (autocommit=False)
        :param _class: Data model object requesting quota
        :param project_id: Project ID requesting quota
        :param count: Number of objects we're going to create (default=1)
        :returns: True if the quota would be exceeded, False if the quota
                  was available and has been reserved
        """
        LOG.debug('Checking quota for project: %(proj)s object: %(obj)s',
                  {'proj': project_id, 'obj': _class})

        # Under noauth everything is admin, so no quota
        if CONF.api_settings.auth_strategy == consts.NOAUTH:
            LOG.debug('Auth strategy is NOAUTH, skipping quota check.')
            return False

        if not project_id:
            raise exceptions.MissingProjectID()

        quotas = self.quotas.get(session, project_id=project_id)
        if not quotas:
            # Make sure we have a record to lock
            self.quotas.update(
                session,
                project_id,
                quota={})
        # Lock the project record in the database to block other quota checks
        #
        # Note: You cannot just use the current count as the in-use
        # value as we don't want to lock the whole resource table
        try:
            quotas = lock_session.query(models.Quotas).filter_by(
                project_id=project_id).with_for_update().first()
            if _class == data_models.LoadBalancer:
                # Decide which quota to use
                if quotas.load_balancer is None:
                    lb_quota = CONF.quotas.default_load_balancer_quota
                else:
                    lb_quota = quotas.load_balancer
                # Get the current in use count
                if not quotas.in_use_load_balancer:
                    # This is to handle the upgrade case
                    lb_count = session.query(models.LoadBalancer).filter(
                        models.LoadBalancer.project_id == project_id,
                        models.LoadBalancer.provisioning_status !=
                        consts.DELETED).count() + count
                else:
                    lb_count = quotas.in_use_load_balancer + count
                # Decide if the quota is met
                if lb_count <= lb_quota or lb_quota == consts.QUOTA_UNLIMITED:
                    quotas.in_use_load_balancer = lb_count
                    return False
                return True
            if _class == data_models.Listener:
                # Decide which quota to use
                if quotas.listener is None:
                    listener_quota = CONF.quotas.default_listener_quota
                else:
                    listener_quota = quotas.listener
                # Get the current in use count
                if not quotas.in_use_listener:
                    # This is to handle the upgrade case
                    listener_count = session.query(models.Listener).filter(
                        models.Listener.project_id == project_id,
                        models.Listener.provisioning_status !=
                        consts.DELETED).count() + count
                else:
                    listener_count = quotas.in_use_listener + count
                # Decide if the quota is met
                if (listener_count <= listener_quota or
                        listener_quota == consts.QUOTA_UNLIMITED):
                    quotas.in_use_listener = listener_count
                    return False
                return True
            if _class == data_models.Pool:
                # Decide which quota to use
                if quotas.pool is None:
                    pool_quota = CONF.quotas.default_pool_quota
                else:
                    pool_quota = quotas.pool
                # Get the current in use count
                if not quotas.in_use_pool:
                    # This is to handle the upgrade case
                    pool_count = session.query(models.Pool).filter(
                        models.Pool.project_id == project_id,
                        models.Pool.provisioning_status !=
                        consts.DELETED).count() + count
                else:
                    pool_count = quotas.in_use_pool + count
                # Decide if the quota is met
                if (pool_count <= pool_quota or
                        pool_quota == consts.QUOTA_UNLIMITED):
                    quotas.in_use_pool = pool_count
                    return False
                return True
            if _class == data_models.HealthMonitor:
                # Decide which quota to use
                if quotas.health_monitor is None:
                    hm_quota = CONF.quotas.default_health_monitor_quota
                else:
                    hm_quota = quotas.health_monitor
                # Get the current in use count
                if not quotas.in_use_health_monitor:
                    # This is to handle the upgrade case
                    hm_count = session.query(models.HealthMonitor).filter(
                        models.HealthMonitor.project_id == project_id,
                        models.HealthMonitor.provisioning_status !=
                        consts.DELETED).count() + count
                else:
                    hm_count = quotas.in_use_health_monitor + count
                # Decide if the quota is met
                if (hm_count <= hm_quota or
                        hm_quota == consts.QUOTA_UNLIMITED):
                    quotas.in_use_health_monitor = hm_count
                    return False
                return True
            if _class == data_models.Member:
                # Decide which quota to use
                if quotas.member is None:
                    member_quota = CONF.quotas.default_member_quota
                else:
                    member_quota = quotas.member
                # Get the current in use count
                if not quotas.in_use_member:
                    # This is to handle the upgrade case
                    member_count = session.query(models.Member).filter(
                        models.Member.project_id == project_id,
                        models.Member.provisioning_status !=
                        consts.DELETED).count() + count
                else:
                    member_count = quotas.in_use_member + count
                # Decide if the quota is met
                if (member_count <= member_quota or
                        member_quota == consts.QUOTA_UNLIMITED):
                    quotas.in_use_member = member_count
                    return False
                return True
        except db_exception.DBDeadlock:
            LOG.warning('Quota project lock timed out for project: %(proj)s',
                        {'proj': project_id})
            raise exceptions.ProjectBusyException()
        return False
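    # Editor's sketch: the reserve/release pattern callers of
    # check_quota_met() and decrement_quota() (below) are expected to
    # follow. The 'repos', 'session' and 'lock_session' names are
    # placeholders.
    #
    #     if repos.check_quota_met(session, lock_session,
    #                              data_models.Member, project_id):
    #         raise exceptions.QuotaException(
    #             resource=data_models.Member._name())
    #     # ... create the member; if the create later fails, release the
    #     # reserved quota again:
    #     repos.decrement_quota(lock_session, data_models.Member,
    #                           project_id)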
    def decrement_quota(self, lock_session, _class, project_id, quantity=1):
        """Decrements the object quota for a project

        :param lock_session: Locking database session (autocommit=False)
        :param _class: Data model object to decrement quota
        :param project_id: Project ID to decrement quota on
        :param quantity: Quantity of quota to decrement
        :returns: None
        """
        LOG.debug('Decrementing quota by: %(quant)s for project: %(proj)s '
                  'object: %(obj)s',
                  {'quant': quantity, 'proj': project_id, 'obj': _class})

        # Lock the project record in the database to block other quota checks
        try:
            quotas = lock_session.query(models.Quotas).filter_by(
                project_id=project_id).with_for_update().first()
            if not quotas:
                if not CONF.api_settings.auth_strategy == consts.NOAUTH:
                    LOG.error('Quota decrement on %(clss)s called on '
                              'project: %(proj)s with no quota record in '
                              'the database.',
                              {'clss': type(_class), 'proj': project_id})
                return
            if _class == data_models.LoadBalancer:
                if (quotas.in_use_load_balancer is not None and
                        quotas.in_use_load_balancer > 0):
                    quotas.in_use_load_balancer = (
                        quotas.in_use_load_balancer - quantity)
                else:
                    if not CONF.api_settings.auth_strategy == consts.NOAUTH:
                        LOG.warning('Quota decrement on %(clss)s called on '
                                    'project: %(proj)s that would cause a '
                                    'negative quota.',
                                    {'clss': type(_class),
                                     'proj': project_id})
            if _class == data_models.Listener:
                if (quotas.in_use_listener is not None and
                        quotas.in_use_listener > 0):
                    quotas.in_use_listener = (
                        quotas.in_use_listener - quantity)
                else:
                    if not CONF.api_settings.auth_strategy == consts.NOAUTH:
                        LOG.warning('Quota decrement on %(clss)s called on '
                                    'project: %(proj)s that would cause a '
                                    'negative quota.',
                                    {'clss': type(_class),
                                     'proj': project_id})
            if _class == data_models.Pool:
                if (quotas.in_use_pool is not None and
                        quotas.in_use_pool > 0):
                    quotas.in_use_pool = (
                        quotas.in_use_pool - quantity)
                else:
                    if not CONF.api_settings.auth_strategy == consts.NOAUTH:
                        LOG.warning('Quota decrement on %(clss)s called on '
                                    'project: %(proj)s that would cause a '
                                    'negative quota.',
                                    {'clss': type(_class),
                                     'proj': project_id})
            if _class == data_models.HealthMonitor:
                if (quotas.in_use_health_monitor is not None and
                        quotas.in_use_health_monitor > 0):
                    quotas.in_use_health_monitor = (
                        quotas.in_use_health_monitor - quantity)
                else:
                    if not CONF.api_settings.auth_strategy == consts.NOAUTH:
                        LOG.warning('Quota decrement on %(clss)s called on '
                                    'project: %(proj)s that would cause a '
                                    'negative quota.',
                                    {'clss': type(_class),
                                     'proj': project_id})
            if _class == data_models.Member:
                if (quotas.in_use_member is not None and
                        quotas.in_use_member > 0):
                    quotas.in_use_member = (
                        quotas.in_use_member - quantity)
                else:
                    if not CONF.api_settings.auth_strategy == consts.NOAUTH:
                        LOG.warning('Quota decrement on %(clss)s called on '
                                    'project: %(proj)s that would cause a '
                                    'negative quota.',
                                    {'clss': type(_class),
                                     'proj': project_id})
        except db_exception.DBDeadlock:
            LOG.warning('Quota project lock timed out for project: %(proj)s',
                        {'proj': project_id})
            raise exceptions.ProjectBusyException()
    def create_load_balancer_tree(self, session, lock_session, lb_dict):
        listener_dicts = lb_dict.pop('listeners', [])
        vip_dict = lb_dict.pop('vip')
        try:
            if self.check_quota_met(session, lock_session,
                                    data_models.LoadBalancer,
                                    lb_dict['project_id']):
                raise exceptions.QuotaException(
                    resource=data_models.LoadBalancer._name())
            lb_dm = self.create_load_balancer_and_vip(
                lock_session, lb_dict, vip_dict)
            for listener_dict in listener_dicts:
                # Add listener quota check
                if self.check_quota_met(session, lock_session,
                                        data_models.Listener,
                                        lb_dict['project_id']):
                    raise exceptions.QuotaException(
                        resource=data_models.Listener._name())
                pool_dict = listener_dict.pop('default_pool', None)
                l7policies_dict = listener_dict.pop('l7policies', None)
                sni_containers = listener_dict.pop('sni_containers', [])
                if pool_dict:
                    # Add pool quota check
                    if self.check_quota_met(session, lock_session,
                                            data_models.Pool,
                                            lb_dict['project_id']):
                        raise exceptions.QuotaException(
                            resource=data_models.Pool._name())
                    hm_dict = pool_dict.pop('health_monitor', None)
                    member_dicts = pool_dict.pop('members', [])
                    sp_dict = pool_dict.pop('session_persistence', None)
                    pool_dict['load_balancer_id'] = lb_dm.id
                    del pool_dict['listener_id']
                    pool_dm = self.pool.create(lock_session, **pool_dict)
                    if sp_dict:
                        sp_dict['pool_id'] = pool_dm.id
                        self.session_persistence.create(lock_session,
                                                        **sp_dict)
                    if hm_dict:
                        # Add hm quota check
                        if self.check_quota_met(session, lock_session,
                                                data_models.HealthMonitor,
                                                lb_dict['project_id']):
                            raise exceptions.QuotaException(
                                resource=data_models.HealthMonitor._name())
                        hm_dict['id'] = pool_dm.id
                        hm_dict['pool_id'] = pool_dm.id
                        self.health_monitor.create(lock_session, **hm_dict)
                    for r_member_dict in member_dicts:
                        # Add member quota check
                        if self.check_quota_met(session, lock_session,
                                                data_models.Member,
                                                lb_dict['project_id']):
                            raise exceptions.QuotaException(
                                resource=data_models.Member._name())
                        r_member_dict['pool_id'] = pool_dm.id
                        self.member.create(lock_session, **r_member_dict)
                    listener_dict['default_pool_id'] = pool_dm.id
                self.listener.create(lock_session, **listener_dict)
                for sni_container in sni_containers:
                    self.sni.create(lock_session, **sni_container)
                if l7policies_dict:
                    for policy_dict in l7policies_dict:
                        l7rules_dict = policy_dict.pop('l7rules')
                        if policy_dict.get('redirect_pool'):
                            # Add pool quota check
                            if self.check_quota_met(session, lock_session,
                                                    data_models.Pool,
                                                    lb_dict['project_id']):
                                raise exceptions.QuotaException(
                                    resource=data_models.Pool._name())
                            r_pool_dict = policy_dict.pop(
                                'redirect_pool')
                            r_hm_dict = r_pool_dict.pop('health_monitor',
                                                        None)
                            r_sp_dict = r_pool_dict.pop(
                                'session_persistence', None)
                            r_member_dicts = r_pool_dict.pop('members', [])
                            if 'listener_id' in r_pool_dict.keys():
                                del r_pool_dict['listener_id']
                            r_pool_dm = self.pool.create(lock_session,
                                                         **r_pool_dict)
                            if r_sp_dict:
                                r_sp_dict['pool_id'] = r_pool_dm.id
                                self.session_persistence.create(lock_session,
                                                                **r_sp_dict)
                            if r_hm_dict:
                                # Add hm quota check
                                if self.check_quota_met(
                                        session, lock_session,
                                        data_models.HealthMonitor,
                                        lb_dict['project_id']):
                                    res = data_models.HealthMonitor
                                    raise exceptions.QuotaException(
                                        resource=res._name())
                                r_hm_dict['id'] = r_pool_dm.id
                                r_hm_dict['pool_id'] = r_pool_dm.id
                                self.health_monitor.create(lock_session,
                                                           **r_hm_dict)
                            for r_member_dict in r_member_dicts:
                                # Add member quota check
                                if self.check_quota_met(
                                        session, lock_session,
                                        data_models.Member,
                                        lb_dict['project_id']):
                                    raise exceptions.QuotaException(
                                        resource=data_models.Member._name())
                                r_member_dict['pool_id'] = r_pool_dm.id
                                self.member.create(lock_session,
                                                   **r_member_dict)
                            policy_dict['redirect_pool_id'] = r_pool_dm.id
                        policy_dm = self.l7policy.create(lock_session,
                                                         **policy_dict)
                        for rule_dict in l7rules_dict:
                            rule_dict['l7policy_id'] = policy_dm.id
                            self.l7rule.create(lock_session, **rule_dict)
            lock_session.commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                lock_session.rollback()
        session.expire_all()
        return self.load_balancer.get(session, id=lb_dm.id)

    def get_amphora_stats(self, session, amp_id):
        """Gets the statistics for all listeners on an amphora.

        :param session: A Sql Alchemy database session.
        :param amp_id: The amphora ID to query.
        :returns: An amphora stats dictionary
        """
        with session.begin(subtransactions=True):
            columns = (models.ListenerStatistics.__table__.columns +
                       [models.Amphora.load_balancer_id])
            amp_records = (
                session.query(*columns)
                .filter(models.ListenerStatistics.amphora_id == amp_id)
                .filter(models.ListenerStatistics.amphora_id ==
                        models.Amphora.id).all())
            amp_stats = []
            for amp in amp_records:
                amp_stat = {consts.LOADBALANCER_ID: amp.load_balancer_id,
                            consts.LISTENER_ID: amp.listener_id,
                            'id': amp.amphora_id,
                            consts.ACTIVE_CONNECTIONS:
                                amp.active_connections,
                            consts.BYTES_IN: amp.bytes_in,
                            consts.BYTES_OUT: amp.bytes_out,
                            consts.REQUEST_ERRORS: amp.request_errors,
                            consts.TOTAL_CONNECTIONS:
                                amp.total_connections}
                amp_stats.append(amp_stat)
        return amp_stats
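# Editor's sketch (hypothetical input shape, inferred from the pops in
# create_load_balancer_tree() above): the method accepts a fully nested
# "single-create" graph and persists it in one lock_session, rolling the
# whole tree back on any quota or DB failure.
#
#     lb_dict = {
#         'project_id': 'some-project-id',
#         'vip': {'subnet_id': 'subnet-uuid'},
#         'listeners': [{
#             'protocol': 'HTTP', 'protocol_port': 80,
#             'default_pool': {'lb_algorithm': 'ROUND_ROBIN',
#                              'listener_id': None, 'members': []},
#         }],
#     }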
class LoadBalancerRepository(BaseRepository):
    model_class = models.LoadBalancer

    def get_all_API_list(self, session, pagination_helper=None, **filters):
        """Get a list of load balancers for the API list call.

        This get_all returns a data set that is only one level deep
        in the data graph. This is an optimized query for the API
        load balancer list method.

        :param session: A Sql Alchemy database session.
        :param pagination_helper: Helper to apply pagination and sorting.
        :param filters: Filters to decide which entities should be retrieved.
        :returns: [octavia.common.data_model]
        """
        # sub-query load the tables we need
        # no-load (blank) the tables we don't need
        query_options = (
            subqueryload(models.LoadBalancer.vip),
            subqueryload(models.LoadBalancer.amphorae),
            subqueryload(models.LoadBalancer.pools),
            subqueryload(models.LoadBalancer.listeners),
            subqueryload(models.LoadBalancer._tags),
            noload('*'))

        return super(LoadBalancerRepository, self).get_all(
            session, pagination_helper=pagination_helper,
            query_options=query_options, **filters)

    def test_and_set_provisioning_status(self, session, id, status,
                                         raise_exception=False):
        """Tests and sets a load balancer's provisioning status.

        Puts a lock on the load balancer table to check the status of a
        load balancer.  If the status is ACTIVE then the status of the load
        balancer is updated and the method returns True.  If the status is
        not ACTIVE, then nothing is done and False is returned.

        :param session: A Sql Alchemy database session.
        :param id: id of Load Balancer
        :param status: Status to set Load Balancer if check passes.
        :param raise_exception: If True, raise ImmutableObject on failure
        :returns: bool
        """
        with session.begin(subtransactions=True):
            lb = session.query(self.model_class).with_for_update().filter_by(
                id=id).one()
            is_delete = status == consts.PENDING_DELETE
            acceptable_statuses = (
                consts.DELETABLE_STATUSES
                if is_delete else consts.MUTABLE_STATUSES
            )
            if lb.provisioning_status not in acceptable_statuses:
                if raise_exception:
                    raise exceptions.ImmutableObject(
                        resource='Load Balancer', id=id)
                return False
            lb.provisioning_status = status
            session.add(lb)
            return True

    def set_status_for_failover(self, session, id, status,
                                raise_exception=False):
        """Tests and sets a load balancer provisioning status for failover.

        Puts a lock on the load balancer table to check the status of a
        load balancer.  If the status is ACTIVE or ERROR then the status of
        the load balancer is updated and the method returns True.
        Otherwise, nothing is done and False is returned.

        :param session: A Sql Alchemy database session.
        :param id: id of Load Balancer
        :param status: Status to set Load Balancer if check passes.
        :param raise_exception: If True, raise ImmutableObject on failure
        :returns: bool
        """
        with session.begin(subtransactions=True):
            lb = session.query(self.model_class).with_for_update().filter_by(
                id=id).one()
            if lb.provisioning_status not in consts.FAILOVERABLE_STATUSES:
                if raise_exception:
                    raise exceptions.ImmutableObject(
                        resource='Load Balancer', id=id)
                return False
            lb.provisioning_status = status
            session.add(lb)
            return True


class VipRepository(BaseRepository):
    model_class = models.Vip

    def update(self, session, load_balancer_id, **model_kwargs):
        """Updates a vip entity in the database by load_balancer_id."""
        with session.begin(subtransactions=True):
            session.query(self.model_class).filter_by(
                load_balancer_id=load_balancer_id).update(model_kwargs)
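# Editor's sketch: the optimistic-lock gate used before mutating a load
# balancer. Identifiers are placeholders.
#
#     lb_repo = LoadBalancerRepository()
#     if not lb_repo.test_and_set_provisioning_status(
#             session, lb_id, consts.PENDING_UPDATE,
#             raise_exception=False):
#         # Another operation is in flight; the LB was not in a mutable
#         # state, so its status was left untouched and False returned.
#         return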
class HealthMonitorRepository(BaseRepository):
    model_class = models.HealthMonitor

    def get_all_API_list(self, session, pagination_helper=None, **filters):
        """Get a list of health monitors for the API list call.

        This get_all returns a data set that is only one level deep
        in the data graph. This is an optimized query for the API
        health monitor list method.

        :param session: A Sql Alchemy database session.
        :param pagination_helper: Helper to apply pagination and sorting.
        :param filters: Filters to decide which entities should be retrieved.
        :returns: [octavia.common.data_model]
        """
        # sub-query load the tables we need
        # no-load (blank) the tables we don't need
        query_options = (
            subqueryload(models.HealthMonitor.pool),
            subqueryload(models.HealthMonitor._tags),
            noload('*'))

        return super(HealthMonitorRepository, self).get_all(
            session, pagination_helper=pagination_helper,
            query_options=query_options, **filters)


class SessionPersistenceRepository(BaseRepository):
    model_class = models.SessionPersistence

    def update(self, session, pool_id, **model_kwargs):
        """Updates a session persistence entity in the database by pool_id."""
        with session.begin(subtransactions=True):
            session.query(self.model_class).filter_by(
                pool_id=pool_id).update(model_kwargs)

    def exists(self, session, pool_id):
        """Checks if session persistence exists on a pool."""
        return bool(session.query(self.model_class).filter_by(
            pool_id=pool_id).first())


class ListenerCidrRepository(BaseRepository):
    model_class = models.ListenerCidr

    def create(self, session, listener_id, allowed_cidrs):
        if allowed_cidrs:
            with session.begin(subtransactions=True):
                for cidr in set(allowed_cidrs):
                    cidr_dict = {'listener_id': listener_id, 'cidr': cidr}
                    model = self.model_class(**cidr_dict)
                    session.add(model)

    def update(self, session, listener_id, allowed_cidrs):
        """Updates allowed CIDRs in the database by listener_id."""
        with session.begin(subtransactions=True):
            session.query(self.model_class).filter_by(
                listener_id=listener_id).delete()
            self.create(session, listener_id, allowed_cidrs)


class PoolRepository(BaseRepository):
    model_class = models.Pool

    def get_all_API_list(self, session, pagination_helper=None, **filters):
        """Get a list of pools for the API list call.

        This get_all returns a data set that is only one level deep
        in the data graph. This is an optimized query for the API
        pool list method.

        :param session: A Sql Alchemy database session.
        :param pagination_helper: Helper to apply pagination and sorting.
        :param filters: Filters to decide which entities should be retrieved.
        :returns: [octavia.common.data_model]
        """
        # sub-query load the tables we need
        # no-load (blank) the tables we don't need
        query_options = (
            subqueryload(models.Pool._default_listeners),
            subqueryload(models.Pool.health_monitor),
            subqueryload(models.Pool.l7policies),
            (subqueryload(models.Pool.l7policies).
             subqueryload(models.L7Policy.l7rules)),
            (subqueryload(models.Pool.l7policies).
             subqueryload(models.L7Policy.listener)),
            subqueryload(models.Pool.load_balancer),
            subqueryload(models.Pool.members),
            subqueryload(models.Pool.session_persistence),
            subqueryload(models.Pool._tags),
            noload('*'))

        return super(PoolRepository, self).get_all(
            session, pagination_helper=pagination_helper,
            query_options=query_options, **filters)

    def get_children_count(self, session, pool_id):
        hm_count = session.query(models.HealthMonitor).filter(
            models.HealthMonitor.pool_id == pool_id,
            models.HealthMonitor.provisioning_status !=
            consts.DELETED).count()
        member_count = session.query(models.Member).filter(
            models.Member.pool_id == pool_id,
            models.Member.provisioning_status != consts.DELETED).count()

        return (hm_count, member_count)
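# Editor's note: the get_all_API_list() methods in this module all share
# one idea -- explicitly subqueryload() the relationships the API will
# serialize and noload('*') everything else, so listing N objects costs a
# fixed, small number of queries instead of N+1 lazy loads. A minimal
# sketch of the same pattern with a hypothetical 'member_repo':
#
#     query_options = (
#         subqueryload(models.Member.pool),   # needed by the API view
#         noload('*'))                        # block all other lazy loads
#     members, links = member_repo.get_all(
#         session, query_options=query_options, pool_id='pool-uuid')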
class MemberRepository(BaseRepository):
    model_class = models.Member

    def get_all_API_list(self, session, pagination_helper=None, **filters):
        """Get a list of members for the API list call.

        This get_all returns a data set that is only one level deep
        in the data graph. This is an optimized query for the API
        member list method.

        :param session: A Sql Alchemy database session.
        :param pagination_helper: Helper to apply pagination and sorting.
        :param filters: Filters to decide which entities should be retrieved.
        :returns: [octavia.common.data_model]
        """
        # sub-query load the tables we need
        # no-load (blank) the tables we don't need
        query_options = (
            subqueryload(models.Member.pool),
            subqueryload(models.Member._tags),
            noload('*'))

        return super(MemberRepository, self).get_all(
            session, pagination_helper=pagination_helper,
            query_options=query_options, **filters)

    def delete_members(self, session, member_ids):
        """Batch deletes members from a pool."""
        self.delete_batch(session, member_ids)

    def update_pool_members(self, session, pool_id, **model_kwargs):
        """Updates all of the members of a pool.

        :param session: A Sql Alchemy database session.
        :param pool_id: ID of the pool to update members on.
        :param model_kwargs: Entity attributes that should be updated.
        :returns: octavia.common.data_model
        """
        with session.begin(subtransactions=True):
            session.query(self.model_class).filter_by(
                pool_id=pool_id).update(model_kwargs)


class ListenerRepository(BaseRepository):
    model_class = models.Listener

    def get_all_API_list(self, session, pagination_helper=None, **filters):
        """Get a list of listeners for the API list call.

        This get_all returns a data set that is only one level deep
        in the data graph. This is an optimized query for the API
        listener list method.

        :param session: A Sql Alchemy database session.
        :param pagination_helper: Helper to apply pagination and sorting.
        :param filters: Filters to decide which entities should be retrieved.
        :returns: [octavia.common.data_model]
        """
        # sub-query load the tables we need
        # no-load (blank) the tables we don't need
        query_options = (
            subqueryload(models.Listener.l7policies),
            subqueryload(models.Listener.load_balancer),
            subqueryload(models.Listener.sni_containers),
            subqueryload(models.Listener._tags),
            subqueryload(models.Listener.allowed_cidrs),
            noload('*'))

        return super(ListenerRepository, self).get_all(
            session, pagination_helper=pagination_helper,
            query_options=query_options, **filters)

    def _find_next_peer_port(self, session, lb_id):
        """Finds the next available peer port on the load balancer."""
        max_peer_port = 0
        load_balancer = session.query(models.LoadBalancer).filter_by(
            id=lb_id).first()
        for listener in load_balancer.listeners:
            if (listener.peer_port is not None and
                    listener.peer_port > max_peer_port):
                max_peer_port = listener.peer_port
        if max_peer_port == 0:
            return consts.HAPROXY_BASE_PEER_PORT
        return max_peer_port + 1

    def _pool_check(self, session, pool_id, listener_id=None,
                    lb_id=None):
        """Sanity checks for default_pool_id if specified."""
        # Pool must exist on same loadbalancer as listener
        pool_db = None
        if listener_id:
            lb_subquery = (session.query(self.model_class.load_balancer_id).
                           filter_by(id=listener_id).subquery())
            pool_db = (session.query(models.Pool).
                       filter_by(id=pool_id).
                       filter(models.LoadBalancer.id.in_(
                           lb_subquery)).first())
        elif lb_id:
            pool_db = (session.query(models.Pool).
                       filter_by(id=pool_id).
                       filter_by(load_balancer_id=lb_id).first())
        if not pool_db:
            raise exceptions.NotFound(
                resource=data_models.Pool._name(), id=pool_id)
        return pool_db
    def has_default_pool(self, session, id):
        """Checks if a listener has a default pool."""
        listener = self.get(session, id=id)
        return bool(listener.default_pool)

    def update(self, session, id, **model_kwargs):
        with session.begin(subtransactions=True):
            listener_db = session.query(self.model_class).filter_by(
                id=id).first()
            if not listener_db:
                raise exceptions.NotFound(
                    resource=data_models.Listener._name(), id=id)
            tags = model_kwargs.pop('tags', None)
            if tags is not None:
                resource = session.query(self.model_class).get(id)
                resource.tags = tags
            # Verify any newly specified default_pool_id exists
            default_pool_id = model_kwargs.get('default_pool_id')
            if default_pool_id:
                self._pool_check(session, default_pool_id, listener_id=id)
            if 'sni_containers' in model_kwargs:
                # sni_container_refs is being updated. It is either being set
                # or unset/cleared. We need to update in DB side.
                containers = model_kwargs.pop('sni_containers', []) or []
                listener_db.sni_containers = []
                if containers:
                    listener_db.sni_containers = [
                        models.SNI(listener_id=id,
                                   tls_container_id=container_ref)
                        for container_ref in containers]
            if 'allowed_cidrs' in model_kwargs:
                # allowed_cidrs is being updated. It is either being set or
                # unset/cleared. We need to update in DB side.
                allowed_cidrs = model_kwargs.pop('allowed_cidrs', []) or []
                listener_db.allowed_cidrs = []
                if allowed_cidrs:
                    listener_db.allowed_cidrs = [
                        models.ListenerCidr(listener_id=id, cidr=cidr)
                        for cidr in allowed_cidrs]
            listener_db.update(model_kwargs)

    def create(self, session, **model_kwargs):
        """Creates a new Listener with some validation."""
        with session.begin(subtransactions=True):
            listener_id = model_kwargs.get('id')
            allowed_cidrs = set(model_kwargs.pop('allowed_cidrs', []) or [])
            model_kwargs['allowed_cidrs'] = [
                models.ListenerCidr(listener_id=listener_id, cidr=cidr)
                for cidr in allowed_cidrs]
            model = self.model_class(**model_kwargs)
            if model.default_pool_id:
                model.default_pool = self._pool_check(
                    session, model.default_pool_id,
                    lb_id=model.load_balancer_id)
            if model.peer_port is None:
                model.peer_port = self._find_next_peer_port(
                    session, lb_id=model.load_balancer_id)
            session.add(model)
        return model.to_data_model()

    def prov_status_active_if_not_error(self, session, listener_id):
        """Update provisioning_status to ACTIVE if not already in ERROR."""
        with session.begin(subtransactions=True):
            (session.query(self.model_class).filter_by(id=listener_id).
             # Don't mark ERROR or already ACTIVE as ACTIVE
             filter(~self.model_class.provisioning_status.in_(
                 [consts.ERROR, consts.ACTIVE])).
             update({self.model_class.provisioning_status: consts.ACTIVE},
                    synchronize_session='fetch'))
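# Editor's sketch (hypothetical values): ListenerRepository.create()
# expands allowed_cidrs into ListenerCidr rows, validates any
# default_pool_id against the same load balancer, and auto-assigns the
# next free HAProxy peer port when none is given.
#
#     listener = listener_repo.create(
#         session, id='listener-uuid', load_balancer_id='lb-uuid',
#         protocol='TCP', protocol_port=443,
#         allowed_cidrs=['203.0.113.0/24'])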
class ListenerStatisticsRepository(BaseRepository):
    model_class = models.ListenerStatistics

    def replace(self, session, listener_id, amphora_id, **model_kwargs):
        """Replaces or inserts a listener statistics record."""
        with session.begin(subtransactions=True):
            count = session.query(self.model_class).filter_by(
                listener_id=listener_id, amphora_id=amphora_id).count()
            if count:
                session.query(self.model_class).filter_by(
                    listener_id=listener_id,
                    amphora_id=amphora_id).update(
                        model_kwargs, synchronize_session=False)
            else:
                model_kwargs['listener_id'] = listener_id
                model_kwargs['amphora_id'] = amphora_id
                self.create(session, **model_kwargs)

    def update(self, session, listener_id, **model_kwargs):
        """Updates a listener's statistics by a listener's id."""
        with session.begin(subtransactions=True):
            session.query(self.model_class).filter_by(
                listener_id=listener_id).update(model_kwargs)


class AmphoraRepository(BaseRepository):
    model_class = models.Amphora

    def get_all_API_list(self, session, pagination_helper=None, **filters):
        """Get a list of amphorae for the API list call.

        This get_all returns a data set that is only one level deep
        in the data graph. This is an optimized query for the API
        amphora list method.

        :param session: A Sql Alchemy database session.
        :param pagination_helper: Helper to apply pagination and sorting.
        :param filters: Filters to decide which entities should be retrieved.
        :returns: [octavia.common.data_model]
        """
        # sub-query load the tables we need
        # no-load (blank) the tables we don't need
        query_options = (
            subqueryload(models.Amphora.load_balancer),
            noload('*'))

        return super(AmphoraRepository, self).get_all(
            session, pagination_helper=pagination_helper,
            query_options=query_options, **filters)

    def associate(self, session, load_balancer_id, amphora_id):
        """Associates an amphora with a load balancer.

        :param session: A Sql Alchemy database session.
        :param load_balancer_id: The load balancer id to associate
        :param amphora_id: The amphora id to associate
        """
        with session.begin(subtransactions=True):
            load_balancer = session.query(models.LoadBalancer).filter_by(
                id=load_balancer_id).first()
            amphora = session.query(self.model_class).filter_by(
                id=amphora_id).first()
            load_balancer.amphorae.append(amphora)

    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
    def allocate_and_associate(self, session, load_balancer_id,
                               availability_zone=None):
        """Allocate an amphora for a load balancer.

        For v0.5 this is simple: find a free amp and associate the lb.
        In the future this needs to be enhanced.

        :param session: A Sql Alchemy database session.
        :param load_balancer_id: The load balancer id to associate
        :returns: The amphora ID for the load balancer or None
        """
        filters = {
            'status': 'READY',
            'load_balancer_id': None
        }
        if availability_zone:
            LOG.debug("Filtering amps by zone: %s", availability_zone)
            filters['cached_zone'] = availability_zone

        with session.begin(subtransactions=True):
            amp = session.query(self.model_class).with_for_update().filter_by(
                **filters).first()

            if amp is None:
                return None

            if availability_zone:
                LOG.debug("Found amp: %s in %s", amp.id, amp.cached_zone)
            amp.status = 'ALLOCATED'
            amp.load_balancer_id = load_balancer_id

        return amp.to_data_model()
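    # Editor's sketch: how a controller might consume the spares-pool
    # allocation above (identifiers are placeholders).
    #
    #     amp_repo = AmphoraRepository()
    #     amp = amp_repo.allocate_and_associate(session, lb_id,
    #                                           availability_zone='az1')
    #     if amp is None:
    #         # No READY spare amphora was available -- fall back to
    #         # booting a new one.
    #         pass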
    @staticmethod
    def get_lb_for_amphora(session, amphora_id):
        """Get the load balancer on an amphora.

        :param session: A Sql Alchemy database session.
        :param amphora_id: The amphora id to list the load balancers from
        :returns: octavia.common.data_model or None
        """
        with session.begin():
            db_lb = (
                # Get LB records
                session.query(models.LoadBalancer)
                # Joined to amphora records
                .filter(models.LoadBalancer.id ==
                        models.Amphora.load_balancer_id)
                # For just this amphora
                .filter(models.Amphora.id == amphora_id)
                # Where the amphora is not DELETED
                .filter(models.Amphora.status != consts.DELETED)
                # And the LB is also not DELETED
                .filter(models.LoadBalancer.provisioning_status !=
                        consts.DELETED)).first()
            if db_lb:
                return db_lb.to_data_model()
            return None

    def get_spare_amphora_count(self, session, availability_zone=None,
                                check_booting_amphora=False):
        """Get the count of the spare amphora.

        :returns: Number of current spare amphora.
        """
        filters = {
            'load_balancer_id': None
        }
        # For the jobboard-based controller, amphorae in the BOOTING or
        # PENDING_CREATE states can reach the READY state after a restart
        # of the housekeeping/worker services, so include amphorae in
        # these states in the query
        if check_booting_amphora:
            status = [consts.AMPHORA_READY,
                      consts.AMPHORA_BOOTING,
                      consts.PENDING_CREATE]
        else:
            status = [consts.AMPHORA_READY]

        if availability_zone is not None:
            filters['cached_zone'] = availability_zone

        with session.begin(subtransactions=True):
            count = session.query(self.model_class).filter_by(
                **filters).filter(self.model_class.status.in_(status)).count()
        return count

    def get_cert_expiring_amphora(self, session):
        """Retrieves an amphora whose cert is close to expiring.

        :param session: A Sql Alchemy database session.
        :returns: one amphora with expiring certificate
        """
        # get amphorae with certs that will expire within the
        # configured buffer period, so we can rotate their certs ahead of
        # time
        expired_seconds = CONF.house_keeping.cert_expiry_buffer
        expired_date = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=expired_seconds)

        with session.begin(subtransactions=True):
            amp = session.query(self.model_class).with_for_update().filter(
                self.model_class.status.notin_(
                    [consts.DELETED, consts.PENDING_DELETE]),
                self.model_class.cert_busy == false(),
                self.model_class.cert_expiration < expired_date
            ).first()

            if amp is None:
                return None

            amp.cert_busy = True

        return amp.to_data_model()
""" rows = session.execute( "SELECT load_balancer.id, load_balancer.enabled, " "load_balancer.provisioning_status AS lb_prov_status, " "load_balancer.operating_status AS lb_op_status, " "listener.id AS list_id, " "listener.operating_status AS list_op_status, " "listener.enabled AS list_enabled, " "listener.protocol AS list_protocol, " "pool.id AS pool_id, " "pool.operating_status AS pool_op_status, " "member.id AS member_id, " "member.operating_status AS mem_op_status from " "amphora JOIN load_balancer ON " "amphora.load_balancer_id = load_balancer.id LEFT JOIN " "listener ON load_balancer.id = listener.load_balancer_id " "LEFT JOIN pool ON load_balancer.id = pool.load_balancer_id " "LEFT JOIN member ON pool.id = member.pool_id WHERE " "amphora.id = :amp_id AND amphora.status != :deleted AND " "load_balancer.provisioning_status != :deleted;", {'amp_id': amphora_id, 'deleted': consts.DELETED}).fetchall() lb = {} listeners = {} pools = {} for row in rows: if not lb: lb['id'] = row['id'] lb['enabled'] = row['enabled'] == 1 lb['provisioning_status'] = row['lb_prov_status'] lb['operating_status'] = row['lb_op_status'] if row['list_id'] and row['list_id'] not in listeners: listener = {'operating_status': row['list_op_status'], 'protocol': row['list_protocol'], 'enabled': row['list_enabled']} listeners[row['list_id']] = listener if row['pool_id']: if row['pool_id'] in pools and row['member_id']: member = {'operating_status': row['mem_op_status']} pools[row['pool_id']]['members'][row['member_id']] = member else: pool = {'operating_status': row['pool_op_status'], 'members': {}} if row['member_id']: member = {'operating_status': row['mem_op_status']} pool['members'][row['member_id']] = member pools[row['pool_id']] = pool if listeners: lb['listeners'] = listeners if pools: lb['pools'] = pools return lb class AmphoraBuildReqRepository(BaseRepository): model_class = models.AmphoraBuildRequest def add_to_build_queue(self, session, amphora_id=None, priority=None): """Adds the build request to the table.""" with session.begin(subtransactions=True): model = self.model_class(amphora_id=amphora_id, priority=priority) session.add(model) def update_req_status(self, session, amphora_id=None): """Updates the request status.""" with session.begin(subtransactions=True): (session.query(self.model_class) .filter_by(amphora_id=amphora_id) .update({self.model_class.status: 'BUILDING'})) def get_highest_priority_build_req(self, session): """Fetches build request with highest priority and least created_time. priority 20 = failover (highest) priority 40 = create_loadbalancer priority 60 = sparespool (least) :param session: A Sql Alchemy database session. :returns amphora_id corresponding to highest priority and least created time in 'WAITING' status. """ with session.begin(subtransactions=True): return (session.query(self.model_class.amphora_id) .order_by(self.model_class.status.desc()) .order_by(self.model_class.priority.asc()) .order_by(self.model_class.created_time.asc()) .first())[0] def delete_all(self, session): "Deletes all the build requests." with session.begin(subtransactions=True): session.query(self.model_class).delete() class AmphoraBuildSlotsRepository(BaseRepository): model_class = models.AmphoraBuildSlots def get_used_build_slots_count(self, session): """Gets the number of build slots in use. :returns: Number of current build slots. 
""" with session.begin(subtransactions=True): count = session.query(self.model_class.slots_used).one() return count[0] def update_count(self, session, action='increment'): """Increments/Decrements/Resets the number of build_slots used.""" with session.begin(subtransactions=True): if action == 'increment': session.query(self.model_class).filter_by(id=1).update( {self.model_class.slots_used: self.get_used_build_slots_count(session) + 1}) elif action == 'decrement': session.query(self.model_class).filter_by(id=1).update( {self.model_class.slots_used: self.get_used_build_slots_count(session) - 1}) elif action == 'reset': session.query(self.model_class).filter_by(id=1).update( {self.model_class.slots_used: 0}) class SNIRepository(BaseRepository): model_class = models.SNI def update(self, session, listener_id=None, tls_container_id=None, **model_kwargs): """Updates an SNI entity in the database.""" if not listener_id and tls_container_id: raise exceptions.MissingArguments with session.begin(subtransactions=True): if listener_id: session.query(self.model_class).filter_by( listener_id=listener_id).update(model_kwargs) elif tls_container_id: session.query(self.model_class).filter_by( tls_container_id=tls_container_id).update(model_kwargs) class AmphoraHealthRepository(BaseRepository): model_class = models.AmphoraHealth def update(self, session, amphora_id, **model_kwargs): """Updates a healthmanager entity in the database by amphora_id.""" with session.begin(subtransactions=True): session.query(self.model_class).filter_by( amphora_id=amphora_id).update(model_kwargs) def replace(self, session, amphora_id, **model_kwargs): """replace or insert amphora into database.""" with session.begin(subtransactions=True): count = session.query(self.model_class).filter_by( amphora_id=amphora_id).count() if count: session.query(self.model_class).filter_by( amphora_id=amphora_id).update(model_kwargs, synchronize_session=False) else: model_kwargs['amphora_id'] = amphora_id self.create(session, **model_kwargs) def check_amphora_health_expired(self, session, amphora_id, exp_age=None): """check if a specific amphora is expired in the amphora_health table :param session: A Sql Alchemy database session. :param amphora_id: id of an amphora object :param exp_age: A standard datetime delta which is used to see for how long can an amphora live without updates before it is considered expired (default: CONF.house_keeping.amphora_expiry_age) :returns: boolean """ if not exp_age: exp_age = datetime.timedelta( seconds=CONF.house_keeping.amphora_expiry_age) expiry_time = datetime.datetime.utcnow() - exp_age amphora_model = ( session.query(models.AmphoraHealth) .filter_by(amphora_id=amphora_id) .filter(models.AmphoraHealth.last_update > expiry_time) ).first() # This will return a value if: # * there is an entry in the table for this amphora_id # AND # * the entry was last updated more recently than our expiry_time # Receiving any value means that the amp is unexpired. # In contrast, we receive no value if: # * there is no entry for this amphora_id # OR # * the entry was last updated before our expiry_time # In this case, the amphora is expired. return amphora_model is None def get_stale_amphora(self, session): """Retrieves a stale amphora from the health manager database. :param session: A Sql Alchemy database session. 
    def get_stale_amphora(self, session):
        """Retrieves a stale amphora from the health manager database.

        :param session: A Sql Alchemy database session.
        :returns: octavia.common.data_model or None
        """
        timeout = CONF.health_manager.heartbeat_timeout
        expired_time = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=timeout)

        amp = session.query(self.model_class).with_for_update().filter_by(
            busy=False).filter(
            self.model_class.last_update < expired_time).order_by(
            func.random()).first()

        if amp is None:
            return None

        amp.busy = True

        return amp.to_data_model()


class VRRPGroupRepository(BaseRepository):
    model_class = models.VRRPGroup

    def update(self, session, load_balancer_id, **model_kwargs):
        """Updates a VRRPGroup entry by load_balancer_id."""
        with session.begin(subtransactions=True):
            session.query(self.model_class).filter_by(
                load_balancer_id=load_balancer_id).update(model_kwargs)


class L7RuleRepository(BaseRepository):
    model_class = models.L7Rule

    def get_all_API_list(self, session, pagination_helper=None, **filters):
        """Get a list of L7 Rules for the API list call.

        This get_all returns a data set that is only one level deep
        in the data graph. This is an optimized query for the API
        L7 Rule list method.

        :param session: A Sql Alchemy database session.
        :param pagination_helper: Helper to apply pagination and sorting.
        :param filters: Filters to decide which entities should be retrieved.
        :returns: [octavia.common.data_model]
        """
        # sub-query load the tables we need
        # no-load (blank) the tables we don't need
        query_options = (
            subqueryload(models.L7Rule.l7policy),
            subqueryload(models.L7Rule._tags),
            noload('*'))

        return super(L7RuleRepository, self).get_all(
            session, pagination_helper=pagination_helper,
            query_options=query_options, **filters)

    def update(self, session, id, **model_kwargs):
        with session.begin(subtransactions=True):
            l7rule_db = session.query(self.model_class).filter_by(
                id=id).first()
            if not l7rule_db:
                raise exceptions.NotFound(
                    resource=data_models.L7Rule._name(), id=id)

            l7rule_dict = l7rule_db.to_data_model().to_dict()
            # Ignore values that are None
            for k, v in model_kwargs.items():
                if v is not None:
                    l7rule_dict.update({k: v})
            # Clear out the 'key' attribute for rule types that don't use it.
            if ('type' in l7rule_dict.keys() and
                    l7rule_dict['type'] in (consts.L7RULE_TYPE_HOST_NAME,
                                            consts.L7RULE_TYPE_PATH,
                                            consts.L7RULE_TYPE_FILE_TYPE)):
                l7rule_dict['key'] = None
                model_kwargs.update({'key': None})

            validate.l7rule_data(self.model_class(**l7rule_dict))
            l7rule_db.update(model_kwargs)

        l7rule_db = self.get(session, id=id)
        return l7rule_db

    def create(self, session, **model_kwargs):
        with session.begin(subtransactions=True):
            if not model_kwargs.get('id'):
                model_kwargs.update(id=uuidutils.generate_uuid())
            if model_kwargs.get('l7policy_id'):
                l7policy_db = session.query(models.L7Policy).filter_by(
                    id=model_kwargs.get('l7policy_id')).first()
                model_kwargs.update(l7policy=l7policy_db)
            l7rule = self.model_class(**model_kwargs)
            validate.l7rule_data(l7rule)
            session.add(l7rule)
        l7rule_db = self.get(session, id=l7rule.id)
        return l7rule_db
class L7PolicyRepository(BaseRepository):
    model_class = models.L7Policy

    def _pool_check(self, session, pool_id, lb_id, project_id):
        """Sanity checks for the redirect_pool if specified."""
        pool_db = (session.query(models.Pool).
                   filter_by(id=pool_id).
                   filter_by(project_id=project_id).
                   filter_by(load_balancer_id=lb_id).first())
        if not pool_db:
            raise exceptions.NotFound(
                resource=data_models.Pool._name(), id=pool_id)

    def _validate_l7policy_pool_data(self, session, l7policy):
        """Does validations on a given L7 policy."""
        if l7policy.action == consts.L7POLICY_ACTION_REDIRECT_TO_POOL:
            session.expire(session.query(models.Listener).filter_by(
                id=l7policy.listener_id).first())
            listener = (session.query(models.Listener).
                        filter_by(id=l7policy.listener_id).first())
            self._pool_check(session, l7policy.redirect_pool_id,
                             listener.load_balancer_id,
                             listener.project_id)

    def get_all(self, session, pagination_helper=None, **filters):
        deleted = filters.pop('show_deleted', True)
        query = session.query(self.model_class).filter_by(
            **filters)

        if not deleted:
            query = query.filter(
                self.model_class.provisioning_status != consts.DELETED)

        if pagination_helper:
            model_list, links = pagination_helper.apply(
                query, self.model_class)
        else:
            links = None
            model_list = query.order_by(self.model_class.position).all()

        data_model_list = [model.to_data_model() for model in model_list]
        return data_model_list, links

    def get_all_API_list(self, session, pagination_helper=None, **filters):
        deleted = filters.pop('show_deleted', True)
        query = session.query(self.model_class).filter_by(
            **filters)
        query = query.options(
            subqueryload(models.L7Policy.l7rules),
            subqueryload(models.L7Policy.listener),
            subqueryload(models.L7Policy.redirect_pool),
            subqueryload(models.L7Policy._tags),
            noload('*'))

        if not deleted:
            query = query.filter(
                self.model_class.provisioning_status != consts.DELETED)

        if pagination_helper:
            model_list, links = pagination_helper.apply(
                query, self.model_class)
        else:
            links = None
            model_list = query.order_by(self.model_class.position).all()

        data_model_list = [model.to_data_model() for model in model_list]
        return data_model_list, links
    def update(self, session, id, **model_kwargs):
        with session.begin(subtransactions=True):
            l7policy_db = session.query(self.model_class).filter_by(
                id=id).first()
            if not l7policy_db:
                raise exceptions.NotFound(
                    resource=data_models.L7Policy._name(), id=id)

            # Necessary to work around unexpected / idiotic behavior of
            # the SQLAlchemy Orderinglist extension if the position changes.
            position = model_kwargs.pop('position', None)
            if position == l7policy_db.position:
                position = None

            model_kwargs.update(listener_id=l7policy_db.listener_id)
            l7policy = self.model_class(
                **validate.sanitize_l7policy_api_args(model_kwargs))
            self._validate_l7policy_pool_data(session, l7policy)

            if l7policy.action:
                model_kwargs.update(action=l7policy.action)
                if l7policy.action == consts.L7POLICY_ACTION_REJECT:
                    model_kwargs.update(redirect_url=None)
                    model_kwargs.update(redirect_pool_id=None)
                    model_kwargs.update(redirect_prefix=None)
                    model_kwargs.update(redirect_http_code=None)
                elif (l7policy.action ==
                        consts.L7POLICY_ACTION_REDIRECT_TO_URL):
                    model_kwargs.update(redirect_pool_id=None)
                    model_kwargs.update(redirect_prefix=None)
                elif (l7policy.action ==
                        consts.L7POLICY_ACTION_REDIRECT_TO_POOL):
                    model_kwargs.update(redirect_url=None)
                    model_kwargs.update(redirect_prefix=None)
                    model_kwargs.update(redirect_http_code=None)
                elif (l7policy.action ==
                        consts.L7POLICY_ACTION_REDIRECT_PREFIX):
                    model_kwargs.update(redirect_url=None)
                    model_kwargs.update(redirect_pool_id=None)

            l7policy_db.update(model_kwargs)

        # Position manipulation must happen outside the other alterations
        # in the previous transaction
        if position is not None:
            listener = (session.query(models.Listener).
                        filter_by(id=l7policy_db.listener_id).first())
            # Immediate refresh, as we have found that sqlalchemy will
            # sometimes cache the above query
            session.refresh(listener)
            with session.begin(subtransactions=True):
                l7policy_db = listener.l7policies.pop(
                    l7policy_db.position - 1)
                listener.l7policies.insert(position - 1, l7policy_db)
                listener.l7policies.reorder()
                session.flush()

        return self.get(session, id=id)

    def create(self, session, **model_kwargs):
        with session.begin(subtransactions=True):
            # We must append the new policy to the end of the collection. We
            # later re-insert it wherever it was requested to appear in
            # order.  This is to work around unexpected / idiotic behavior
            # of the SQLAlchemy orderinglist extension.
            position = model_kwargs.pop('position', None)
            model_kwargs.update(position=consts.MAX_POLICY_POSITION)
            if not model_kwargs.get('id'):
                model_kwargs.update(id=uuidutils.generate_uuid())
            if model_kwargs.get('redirect_pool_id'):
                pool_db = session.query(models.Pool).filter_by(
                    id=model_kwargs.get('redirect_pool_id')).first()
                model_kwargs.update(redirect_pool=pool_db)
            if model_kwargs.get('listener_id'):
                listener_db = session.query(models.Listener).filter_by(
                    id=model_kwargs.get('listener_id')).first()
                model_kwargs.update(listener=listener_db)
            l7policy = self.model_class(
                **validate.sanitize_l7policy_api_args(model_kwargs,
                                                      create=True))
            self._validate_l7policy_pool_data(session, l7policy)
            session.add(l7policy)
            session.flush()

        # Must be done outside the transaction which creates the L7Policy
        listener = (session.query(models.Listener).
                    filter_by(id=l7policy.listener_id).first())
        # Immediate refresh, as we have found that sqlalchemy will sometimes
        # cache the above query
        session.refresh(listener)
        session.refresh(l7policy)

        if position is not None and position < len(listener.l7policies) + 1:
            with session.begin(subtransactions=True):
                # New L7Policy will always be at the end of the list
                l7policy_db = listener.l7policies.pop()
                listener.l7policies.insert(position - 1, l7policy_db)
                listener.l7policies.reorder()
                session.flush()

        l7policy.updated_at = None
        return self.get(session, id=l7policy.id)

    def delete(self, session, id, **filters):
        with session.begin(subtransactions=True):
            l7policy_db = session.query(self.model_class).filter_by(
                id=id).first()
            if not l7policy_db:
                raise exceptions.NotFound(
                    resource=data_models.L7Policy._name(), id=id)

            listener_id = l7policy_db.listener_id
            session.delete(l7policy_db)
            session.flush()

        # Must do reorder outside of the delete transaction.
        listener = (session.query(models.Listener).
                    filter_by(id=listener_id).first())
        # Immediate refresh, as we have found that sqlalchemy will sometimes
        # cache the above query
        session.refresh(listener)
        listener.l7policies.reorder()
        session.flush()
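# Editor's note: the position gymnastics in L7PolicyRepository exist
# because the SQLAlchemy orderinglist extension renumbers rows as the
# Python list is mutated. A sketch of the insert-at-position flow used by
# create()/update() above (1-based API position, 0-based list index):
#
#     policy = listener.l7policies.pop()        # new policy was appended
#     listener.l7policies.insert(position - 1, policy)
#     listener.l7policies.reorder()             # rewrite position columns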
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def update(self, session, project_id, **model_kwargs): with session.begin(subtransactions=True): kwargs_quota = model_kwargs['quota'] quotas = session.query(self.model_class).filter_by( project_id=project_id).with_for_update().first() if not quotas: quotas = models.Quotas(project_id=project_id) for key, val in kwargs_quota.items(): setattr(quotas, key, val) session.add(quotas) session.flush() return self.get(session, project_id=project_id) def delete(self, session, project_id): with session.begin(subtransactions=True): quotas = session.query(self.model_class).filter_by( project_id=project_id).with_for_update().first() if not quotas: raise exceptions.NotFound( resource=data_models.Quotas._name(), id=project_id) quotas.health_monitor = None quotas.load_balancer = None quotas.listener = None quotas.member = None quotas.pool = None session.flush() class _GetALLExceptDELETEDIdMixin(object): def get_all(self, session, pagination_helper=None, query_options=None, **filters): """Retrieves a list of entities from the database. This filters the "DELETED" placeholder from the list. :param session: A Sql Alchemy database session. :param pagination_helper: Helper to apply pagination and sorting. :param query_options: Optional query options to apply. :param filters: Filters to decide which entities should be retrieved. :returns: [octavia.common.data_model] """ query = session.query(self.model_class).filter_by(**filters) if query_options: query = query.options(query_options) if hasattr(self.model_class, 'id'): query = query.filter(self.model_class.id != consts.NIL_UUID) else: query = query.filter(self.model_class.name != consts.NIL_UUID) if pagination_helper: model_list, links = pagination_helper.apply( query, self.model_class) else: links = None model_list = query.all() data_model_list = [model.to_data_model() for model in model_list] return data_model_list, links class FlavorRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): model_class = models.Flavor def get_flavor_metadata_dict(self, session, flavor_id): with session.begin(subtransactions=True): flavor_metadata_json = ( session.query(models.FlavorProfile.flavor_data) .filter(models.Flavor.id == flavor_id) .filter( models.Flavor.flavor_profile_id == models.FlavorProfile.id) .one()[0]) result_dict = ({} if flavor_metadata_json is None else jsonutils.loads(flavor_metadata_json)) return result_dict def get_flavor_provider(self, session, flavor_id): with session.begin(subtransactions=True): return (session.query(models.FlavorProfile.provider_name) .filter(models.Flavor.id == flavor_id) .filter(models.Flavor.flavor_profile_id == models.FlavorProfile.id).one()[0]) def delete(self, serial_session, **filters): """Sets DELETED LBs flavor_id to NIL_UUID, then removes the flavor :param serial_session: A Sql Alchemy database transaction session. :param filters: Filters to decide which entity should be deleted. :returns: None :raises: odb_exceptions.DBReferenceError :raises: sqlalchemy.orm.exc.NoResultFound """ (serial_session.query(models.LoadBalancer). filter(models.LoadBalancer.flavor_id == filters['id']). filter(models.LoadBalancer.provisioning_status == consts.DELETED). update({models.LoadBalancer.flavor_id: consts.NIL_UUID}, synchronize_session=False)) flavor = (serial_session.query(self.model_class). 
filter_by(**filters).one()) serial_session.delete(flavor) class FlavorProfileRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): model_class = models.FlavorProfile class SparesPoolRepository(BaseRepository): model_class = models.SparesPool def get_for_update(self, lock_session): """Queries and locks the SparesPool record. This call will query for the SparesPool table record and lock it so that other processes cannot read or write it. :returns: expected_spares_count, updated_at """ row = lock_session.query(models.SparesPool).with_for_update().one() return row class AvailabilityZoneRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): model_class = models.AvailabilityZone def get_availability_zone_metadata_dict(self, session, availability_zone_name): with session.begin(subtransactions=True): availability_zone_metadata_json = ( session.query( models.AvailabilityZoneProfile.availability_zone_data) .filter(models.AvailabilityZone.name == availability_zone_name) .filter(models.AvailabilityZone.availability_zone_profile_id == models.AvailabilityZoneProfile.id) .one()[0]) result_dict = ( {} if availability_zone_metadata_json is None else jsonutils.loads(availability_zone_metadata_json)) return result_dict def get_availability_zone_provider(self, session, availability_zone_name): with session.begin(subtransactions=True): return (session.query(models.AvailabilityZoneProfile.provider_name) .filter( models.AvailabilityZone.name == availability_zone_name) .filter( models.AvailabilityZone.availability_zone_profile_id == models.AvailabilityZoneProfile.id).one()[0]) def update(self, session, name, **model_kwargs): """Updates an entity in the database. :param session: A Sql Alchemy database session. :param model_kwargs: Entity attributes that should be updated. :returns: octavia.common.data_model """ with session.begin(subtransactions=True): session.query(self.model_class).filter_by( name=name).update(model_kwargs) def delete(self, serial_session, **filters): """Special delete method for availability_zone. Sets DELETED LBs availability_zone to NIL_UUID, then removes the availability_zone. :param serial_session: A Sql Alchemy database transaction session. :param filters: Filters to decide which entity should be deleted. :returns: None :raises: odb_exceptions.DBReferenceError :raises: sqlalchemy.orm.exc.NoResultFound """ (serial_session.query(models.LoadBalancer). filter(models.LoadBalancer.availability_zone == filters[consts.NAME]). filter(models.LoadBalancer.provisioning_status == consts.DELETED). update({models.LoadBalancer.availability_zone: consts.NIL_UUID}, synchronize_session=False)) availability_zone = ( serial_session.query(self.model_class).filter_by(**filters).one()) serial_session.delete(availability_zone) class AvailabilityZoneProfileRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): model_class = models.AvailabilityZoneProfile ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4022167 octavia-6.2.2/octavia/distributor/0000775000175000017500000000000000000000000017172 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/distributor/__init__.py0000664000175000017500000000107400000000000021305 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4022167 octavia-6.2.2/octavia/distributor/drivers/0000775000175000017500000000000000000000000020650 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/distributor/drivers/__init__.py0000664000175000017500000000107400000000000022763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/distributor/drivers/driver_base.py0000664000175000017500000001300500000000000023506 0ustar00zuulzuul00000000000000# Copyright 2016 IBM Corp. # Copyright 2017 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc # This class describes the abstraction of a distributor interface. # Distributor implementations may be: a noop, a single hardware device, # a single amphora, or multiple amphora among other options. class DistributorDriver(object, metaclass=abc.ABCMeta): @abc.abstractmethod def get_create_distributor_subflow(self): """Get a subflow to create a distributor :requires: **load_balancer** (object) - Load balancer object associated with this distributor :provides: **distributor_id** (string) - The created distributor ID :returns: A TaskFlow Flow that will create the distributor This method will set up the TaskFlow Flow required to set up the database fields and create a distributor should the driver need to instantiate one. The flow must store the generated distributor ID in the flow. """ @abc.abstractmethod def get_delete_distributor_subflow(self): """Get a subflow that deletes a distributor :requires: **distributor_id** (string) - The ID of the distributor to delete :returns: A TaskFlow Flow that will delete the distributor This method will return a TaskFlow Flow that deletes the distributor (if applicable for the driver) and cleans up any associated database records.
""" @abc.abstractmethod def get_add_vip_subflow(self): """Get a subflow that adds a VIP to a distributor :requires: **distributor_id** (string) - The ID of the distributor to create the VIP on. :requires: **vip** (object) - The VIP object to create on the distributor. :requires: **vip_alg** (string) - The optional algorithm to use for this VIP. :requires: **vip_persistence** (string) - The persistence type for this VIP. :returns: A TaskFlow Flow that will add a VIP to the distributor This method will return a TaskFlow Flow that adds a VIP to the distributor by perfoming the necessary steps to plug the VIP and configure the distributor to start receiving requests on this VIP. """ @abc.abstractmethod def get_remove_vip_subflow(self): """Get a subflow that removes a VIP from a distributor :requires: **distributor_id** (string) - The ID of the distributor to remove the VIP from. :requires: **vip** (object) - The VIP object to remove from the distributor. :returns: A TaskFlow Flow that will remove a VIP from the distributor This method will return a TaskFlow Flow that removes the VIP from the distributor by reconfiguring the distributor and unplugging the associated port. """ @abc.abstractmethod def get_register_amphorae_subflow(self): """Get a subflow that Registers amphorae with the distributor :requires: **distributor_id** (string) - The ID of the distributor to register the amphora on :requires: **amphorae** (tuple) - Tuple of amphora objects to register with the distributor. :returns: A TaskFlow Flow that will register amphorae with the distributor This method will return a TaskFlow Flow that registers amphorae with the distributor so it can begin to receive requests from the distributor. Amphora should be ready to receive requests prior to this call being made. """ @abc.abstractmethod def get_drain_amphorae_subflow(self): """Get a subflow that drains connections from amphorae :requires: **distributor_id** (string) - The ID of the distributor to drain amphorae from :requires: **amphorae** (tuple) - Tuple of amphora objects to drain from distributor. :returns: A TaskFlow Flow that will drain the listed amphorae on the distributor This method will return a TaskFlow Flow that configures the distributor to stop sending new connections to the amphorae in the list. Existing connections will continue to pass traffic to the amphorae in this list. """ @abc.abstractmethod def get_unregister_amphorae_subflow(self): """Get a subflow that unregisters amphorae from a distributor :requires: **distributor_id** (string) - The ID of the distributor to unregister amphorae from :requires: **amphorae** (tuple) - Tuple of amphora objects to unregister from distributor. :returns: A TaskFlow Flow that will unregister amphorae from the distributor This method will return a TaskFlow Flow that unregisters amphorae from the distributor. Amphorae in this list will immediately stop receiving traffic. 
""" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4022167 octavia-6.2.2/octavia/distributor/drivers/noop_driver/0000775000175000017500000000000000000000000023176 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/distributor/drivers/noop_driver/__init__.py0000664000175000017500000000107400000000000025311 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/distributor/drivers/noop_driver/driver.py0000664000175000017500000001160700000000000025050 0ustar00zuulzuul00000000000000# Copyright 2016 IBM Corp. # Copyright 2017 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from taskflow.patterns import linear_flow from taskflow import task from oslo_log import log as logging from oslo_utils import uuidutils from octavia.distributor.drivers import driver_base LOG = logging.getLogger(__name__) class NoopProvidesRequiresTask(task.Task): def __init__(self, name, provides_dicts=None, requires=None): if provides_dicts is None: provides_dicts = {} super(NoopProvidesRequiresTask, self).__init__( name=name, provides=list(provides_dicts), requires=requires) self.provides_dict = provides_dicts def execute(self, *args, **kwargs): return self.provides_dict.values() class NoopManager(object): def __init__(self): super(NoopManager, self).__init__() def get_create_distributor_subflow(self): LOG.debug('Distributor %s create_distributor', self.__class__.__name__) create_distributor_flow = linear_flow.Flow('create-distributor') create_distributor_flow.add(NoopProvidesRequiresTask( 'create-distributor-task', requires=('load_balancer'), provides_dicts={'distributor_id': uuidutils.generate_uuid()})) return create_distributor_flow def get_delete_distributor_subflow(self): LOG.debug('Distributor %s delete_distributor', self.__class__.__name__) delete_distributor_flow = linear_flow.Flow('delete-distributor') delete_distributor_flow.add(NoopProvidesRequiresTask( 'delete-distributor-task', requires=('distributor_id'))) return delete_distributor_flow def get_add_vip_subflow(self): LOG.debug('Distributor %s add_vip', self.__class__.__name__) add_vip_flow = linear_flow.Flow('add-vip') add_vip_flow.add(NoopProvidesRequiresTask( 'add-vip-task', requires=('distributor_id', 'vip', 'vip_alg', 'vip_persistence'))) return add_vip_flow def get_remove_vip_subflow(self): LOG.debug('Distributor %s remove_vip', self.__class__.__name__) remove_vip_flow = linear_flow.Flow('remove-vip') remove_vip_flow.add(NoopProvidesRequiresTask('remove-vip-task', requires=('distributor_id', 'vip'))) return remove_vip_flow def get_register_amphorae_subflow(self): LOG.debug('Distributor %s register_amphorae', self.__class__.__name__) register_amphorae_flow = linear_flow.Flow('register_amphorae') register_amphorae_flow.add(NoopProvidesRequiresTask( 'register_amphorae_task', requires=('distributor_id', 'amphorae'))) return register_amphorae_flow def get_drain_amphorae_subflow(self): LOG.debug('Distributor %s drain_amphorae', self.__class__.__name__) drain_amphorae_flow = linear_flow.Flow('drain-amphorae') drain_amphorae_flow.add(NoopProvidesRequiresTask( 'drain_amphorae_task', requires=('distributor_id', 'amphorae'))) return drain_amphorae_flow def get_unregister_amphorae_subflow(self): LOG.debug('Distributor %s unregister_amphorae', self.__class__.__name__) unregister_amphorae_flow = linear_flow.Flow('unregister_amphora') unregister_amphorae_flow.add(NoopProvidesRequiresTask( 'unregister_amphorae_task', requires=('distributor_id', 'amphorae'))) return unregister_amphorae_flow class NoopDistributorDriver(driver_base.DistributorDriver): def __init__(self): super(NoopDistributorDriver, self).__init__() self.driver = NoopManager() def get_create_distributor_subflow(self): return self.driver.get_create_distributor_subflow() def get_delete_distributor_subflow(self): return self.driver.get_delete_distributor_subflow() def get_add_vip_subflow(self): return self.driver.get_add_vip_subflow() def get_remove_vip_subflow(self): return self.driver.get_remove_vip_subflow() def get_register_amphorae_subflow(self): return self.driver.get_register_amphorae_subflow() def get_drain_amphorae_subflow(self): 
return self.driver.get_drain_amphorae_subflow() def get_unregister_amphorae_subflow(self): return self.driver.get_unregister_amphorae_subflow() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4022167 octavia-6.2.2/octavia/hacking/0000775000175000017500000000000000000000000016224 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/hacking/__init__.py0000664000175000017500000000107400000000000020337 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/hacking/checks.py0000664000175000017500000002237700000000000020045 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for Octavia specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range O3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the O3xx value.
- List the new rule in the top level HACKING.rst file - Add test cases for each new rule to octavia/tests/unit/test_hacking.py """ import re from hacking import core _all_log_levels = {'critical', 'error', 'exception', 'info', 'warning'} _all_hints = {'_LC', '_LE', '_LI', '_', '_LW'} _log_translation_hint = re.compile( r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % { 'levels': '|'.join(_all_log_levels), 'hints': '|'.join(_all_hints), }) assert_trueinst_re = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " r"(\w|\.|\'|\"|\[|\])+\)\)") assert_equal_in_end_with_true_or_false_re = re.compile( r"assertEqual\((\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)") assert_equal_in_start_with_true_or_false_re = re.compile( r"assertEqual\((True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)") assert_equal_with_true_re = re.compile( r"assertEqual\(True,") assert_equal_with_false_re = re.compile( r"assertEqual\(False,") mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") assert_equal_end_with_none_re = re.compile(r"(.)*assertEqual\(.+, None\)") assert_equal_start_with_none_re = re.compile(r".*assertEqual\(None, .+\)") assert_not_equal_end_with_none_re = re.compile( r"(.)*assertNotEqual\(.+, None\)") assert_not_equal_start_with_none_re = re.compile( r"(.)*assertNotEqual\(None, .+\)") revert_must_have_kwargs_re = re.compile( r'[ ]*def revert\(.+,[ ](?!\*\*kwargs)\w+\):') untranslated_exception_re = re.compile(r"raise (?:\w*)\((.*)\)") no_eventlet_re = re.compile(r'(import|from)\s+[(]?eventlet') no_line_continuation_backslash_re = re.compile(r'.*(\\)\n') no_logging_re = re.compile(r'(import|from)\s+[(]?logging') def _translation_checks_not_enforced(filename): # Do not do these validations on tests return any(pat in filename for pat in ["/tests/", "rally-jobs/plugins/"]) @core.flake8ext def assert_true_instance(logical_line): """Check for assertTrue(isinstance(a, b)) sentences O316 """ if assert_trueinst_re.match(logical_line): yield (0, "O316: assertTrue(isinstance(a, b)) sentences not allowed. " "Use assertIsInstance instead.") @core.flake8ext def assert_equal_or_not_none(logical_line): """Check for assertEqual(A, None) or assertEqual(None, A) sentences, assertNotEqual(A, None) or assertNotEqual(None, A) sentences O318 """ msg = ("O318: assertEqual/assertNotEqual(A, None) or " "assertEqual/assertNotEqual(None, A) sentences not allowed") res = (assert_equal_start_with_none_re.match(logical_line) or assert_equal_end_with_none_re.match(logical_line) or assert_not_equal_start_with_none_re.match(logical_line) or assert_not_equal_end_with_none_re.match(logical_line)) if res: yield (0, msg) @core.flake8ext def assert_equal_true_or_false(logical_line): """Check for assertEqual(True, A) or assertEqual(False, A) sentences O323 """ res = (assert_equal_with_true_re.search(logical_line) or assert_equal_with_false_re.search(logical_line)) if res: yield (0, "O323: assertEqual(True, A) or assertEqual(False, A) " "sentences not allowed") @core.flake8ext def no_mutable_default_args(logical_line): msg = "O324: Method's default argument shouldn't be mutable!" 
if mutable_default_args.match(logical_line): yield (0, msg) @core.flake8ext def assert_equal_in(logical_line): """Check for assertEqual(A in B, True), assertEqual(True, A in B), assertEqual(A in B, False) or assertEqual(False, A in B) sentences O338 """ res = (assert_equal_in_start_with_true_or_false_re.search(logical_line) or assert_equal_in_end_with_true_or_false_re.search(logical_line)) if res: yield (0, "O338: Use assertIn/NotIn(A, B) rather than " "assertEqual(A in B, True/False) when checking collection " "contents.") @core.flake8ext def no_log_warn(logical_line): """Disallow 'LOG.warn(' O339 """ if logical_line.startswith('LOG.warn('): yield (0, "O339: Use LOG.warning() rather than LOG.warn()") @core.flake8ext def no_translate_logs(logical_line, filename): """O341 - Don't translate logs. Check for 'LOG.*(_(' and 'LOG.*(_Lx(' Translators don't provide translations for log messages, and operators asked not to translate them. * This check assumes that 'LOG' is a logger. :param logical_line: The logical line to check. :param filename: The file name where the logical line exists. :returns: None if the logical line passes the check, otherwise a tuple is yielded that contains the offending index in logical line and a message that describes the check validation failure. """ if _translation_checks_not_enforced(filename): return msg = "O341: Log messages should not be translated!" match = _log_translation_hint.match(logical_line) if match: yield (logical_line.index(match.group()), msg) @core.flake8ext def check_raised_localized_exceptions(logical_line, filename): """O342 - Untranslated exception message. :param logical_line: The logical line to check. :param filename: The file name where the logical line exists. :returns: None if the logical line passes the check, otherwise a tuple is yielded that contains the offending index in logical line and a message that describes the check validation failure. """ if _translation_checks_not_enforced(filename): return logical_line = logical_line.strip() raised_search = untranslated_exception_re.match(logical_line) if raised_search: exception_msg = raised_search.groups()[0] if exception_msg.startswith("\"") or exception_msg.startswith("\'"): msg = "O342: Untranslated exception message." yield (logical_line.index(exception_msg), msg) @core.flake8ext def check_no_eventlet_imports(logical_line): """O345 - Usage of Python eventlet module not allowed. :param logical_line: The logical line to check. :returns: None if the logical line passes the check, otherwise a tuple is yielded that contains the offending index in logical line and a message that describes the check validation failure. """ if no_eventlet_re.match(logical_line): msg = 'O345 Usage of Python eventlet module not allowed' yield logical_line.index('eventlet'), msg @core.flake8ext def check_line_continuation_no_backslash(logical_line, tokens): """O346 - Don't use backslashes for line continuation. :param logical_line: The logical line to check. Not actually used. :param tokens: List of tokens to check. :returns: None if the tokens don't contain any issues, otherwise a tuple is yielded that contains the offending index in the logical line and a message that describes the check validation failure.
""" backslash = None for token_type, text, start, end, orig_line in tokens: m = no_line_continuation_backslash_re.match(orig_line) if m: backslash = (start[0], m.start(1)) break if backslash is not None: msg = 'O346 Backslash line continuations not allowed' yield backslash, msg @core.flake8ext def revert_must_have_kwargs(logical_line): """O347 - Taskflow revert methods must have \\*\\*kwargs. :param logical_line: The logical line to check. :returns: None if the logical line passes the check, otherwise a tuple is yielded that contains the offending index in logical line and a message describe the check validation failure. """ if revert_must_have_kwargs_re.match(logical_line): msg = 'O347 Taskflow revert methods must have **kwargs' yield 0, msg @core.flake8ext def check_no_logging_imports(logical_line): """O348 - Usage of Python logging module not allowed. :param logical_line: The logical line to check. :returns: None if the logical line passes the check, otherwise a tuple is yielded that contains the offending index in logical line and a message describe the check validation failure. """ if no_logging_re.match(logical_line): msg = 'O348 Usage of Python logging module not allowed, use oslo_log' yield logical_line.index('logging'), msg ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/i18n.py0000664000175000017500000000140300000000000015747 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n as i18n _translators = i18n.TranslatorFactory(domain='octavia') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4022167 octavia-6.2.2/octavia/network/0000775000175000017500000000000000000000000016311 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/__init__.py0000664000175000017500000000107400000000000020424 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/base.py0000664000175000017500000003135000000000000017577 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from octavia.common import exceptions class NetworkException(exceptions.OctaviaException): pass class PlugVIPException(NetworkException): pass class UnplugVIPException(NetworkException): pass class AllocateVIPException(NetworkException): pass class DeallocateVIPException(NetworkException): pass class PlugNetworkException(NetworkException): pass class UnplugNetworkException(NetworkException): pass class VIPInUseException(NetworkException): pass class PortNotFound(NetworkException): pass class NetworkNotFound(NetworkException): pass class SubnetNotFound(NetworkException): pass class AmphoraNotFound(NetworkException): pass class PluggedVIPNotFound(NetworkException): pass class TimeoutException(NetworkException): pass class QosPolicyNotFound(NetworkException): pass class SecurityGroupNotFound(NetworkException): pass class CreatePortException(NetworkException): pass class AbstractNetworkDriver(object, metaclass=abc.ABCMeta): """This class defines the methods for a fully functional network driver. Implementations of this interface can expect a rollback to occur if any of the non-nullipotent methods raise an exception. """ @abc.abstractmethod def allocate_vip(self, load_balancer): """Allocates a virtual ip. Reserves it for later use as the frontend connection of a load balancer. :param load_balancer: octavia.common.data_models.LoadBalancer instance :return: octavia.common.data_models.VIP :raises: AllocateVIPException, PortNotFound, SubnetNotFound """ @abc.abstractmethod def create_port(self, network_id, name=None, fixed_ips=(), secondary_ips=(), security_group_ids=(), admin_state_up=True, qos_policy_id=None): """Creates a network port. fixed_ips = [{'subnet_id': <subnet_id>, ('ip_address': '<ip_address>')},] ip_address is optional in the fixed_ips dictionary. :param network_id: The network the port should be created on. :param name: The name to apply to the port. :param fixed_ips: A list of fixed IP dicts. :param secondary_ips: A list of secondary IPs to add to the port. :param security_group_ids: A list of security group IDs for the port. :param qos_policy_id: The QoS policy ID to apply to the port. :returns port: A port data model object. """ @abc.abstractmethod def deallocate_vip(self, vip): """Removes any resources that reserved this virtual ip. :param vip: octavia.common.data_models.VIP instance :return: None :raises: DeallocateVIPException, VIPInUseException, VIPConfigurationNotFound """ @abc.abstractmethod def delete_port(self, port_id): """Delete a network port. :param port_id: The port ID to delete. :returns: None """ @abc.abstractmethod def plug_vip(self, load_balancer, vip): """Plugs a virtual ip as the frontend connection of a load balancer.
Sets up the routing of traffic from the vip to the load balancer and its amphorae. :param load_balancer: octavia.common.data_models.LoadBalancer instance :param vip: octavia.common.data_models.VIP instance :return: dict consisting of amphora_id as key and bind_ip as value. bind_ip is the ip that the amphora should listen on to receive traffic to load balance. :raises: PlugVIPException, PortNotFound """ @abc.abstractmethod def unplug_vip(self, load_balancer, vip): """Unplugs a virtual ip as the frontend connection of a load balancer. Removes the routing of traffic from the vip to the load balancer and its amphorae. :param load_balancer: octavia.common.data_models.LoadBalancer instance :param vip: octavia.common.data_models.VIP instance :return: octavia.common.data_models.VIP instance :raises: UnplugVIPException, PluggedVIPNotFound """ @abc.abstractmethod def plug_network(self, compute_id, network_id, ip_address=None): """Connects an existing amphora to an existing network. :param compute_id: id of an amphora in the compute service :param network_id: id of a network :param ip_address: ip address to attempt to be assigned to interface :return: octavia.network.data_models.Interface instance :raises: PlugNetworkException, AmphoraNotFound, NetworkNotFound """ @abc.abstractmethod def unplug_network(self, compute_id, network_id, ip_address=None): """Disconnects an existing amphora from an existing network. If ip_address is not specified, all the interfaces plugged on network_id should be unplugged. :param compute_id: id of an amphora in the compute service :param network_id: id of a network :param ip_address: specific ip_address to unplug :return: None :raises: UnplugNetworkException, AmphoraNotFound, NetworkNotFound, NetworkException """ @abc.abstractmethod def get_plugged_networks(self, compute_id): """Retrieves the current plugged networking configuration. :param compute_id: id of an amphora in the compute service :return: [octavia.network.data_models.Instance] """ def update_vip(self, load_balancer, for_delete): """Hook for the driver to update the VIP information. This method will be called upon the change of a load_balancer configuration. It is an optional method to be implemented by drivers. It allows the driver to update any VIP information based on the state of the passed in load_balancer. :param load_balancer: octavia.common.data_models.LoadBalancer instance :param for_delete: Boolean indicating if this update is for a delete :raises: MissingVIPSecurityGroup :return: None """ @abc.abstractmethod def get_network(self, network_id, context=None): """Retrieves network from network id. :param network_id: id of a network to retrieve :param context: A request context :return: octavia.network.data_models.Network :raises: NetworkException, NetworkNotFound """ @abc.abstractmethod def get_subnet(self, subnet_id, context=None): """Retrieves subnet from subnet id. :param subnet_id: id of a subnet to retrieve :param context: A request context :return: octavia.network.data_models.Subnet :raises: NetworkException, SubnetNotFound """ @abc.abstractmethod def get_port(self, port_id, context=None): """Retrieves port from port id. :param port_id: id of a port to retrieve :param context: A request context :return: octavia.network.data_models.Port :raises: NetworkException, PortNotFound """ @abc.abstractmethod def get_network_by_name(self, network_name): """Retrieves network from network name.
:param network_name: name of a network to retrieve :return: octavia.network.data_models.Network :raises: NetworkException, NetworkNotFound """ @abc.abstractmethod def get_subnet_by_name(self, subnet_name): """Retrieves subnet from subnet name. :param subnet_name: name of a subnet to retrieve :return: octavia.network.data_models.Subnet :raises: NetworkException, SubnetNotFound """ @abc.abstractmethod def get_port_by_name(self, port_name): """Retrieves port from port name. :param port_name: name of a port to retrieve :return: octavia.network.data_models.Port :raises: NetworkException, PortNotFound """ @abc.abstractmethod def get_port_by_net_id_device_id(self, network_id, device_id): """Retrieves port from network id and device id. :param network_id: id of a network to filter by :param device_id: id of a network device to filter by :return: octavia.network.data_models.Port :raises: NetworkException, PortNotFound """ @abc.abstractmethod def get_security_group(self, sg_name): """Retrieves the security group by its name. :param sg_name: The security group name. :return: octavia.network.data_models.SecurityGroup, None if not enabled :raises: NetworkException, SecurityGroupNotFound """ @abc.abstractmethod def failover_preparation(self, amphora): """Prepare an amphora for failover. :param amphora: amphora object to failover :return: None :raises: PortNotFound """ @abc.abstractmethod def plug_port(self, amphora, port): """Plug a neutron port into a compute instance :param amphora: amphora object to plug the port into :param port: port to plug into the compute instance :return: None :raises: PlugNetworkException, AmphoraNotFound, NetworkNotFound """ @abc.abstractmethod def get_network_configs(self, load_balancer, amphora=None): """Retrieve network configurations This method assumes that a dictionary of AmphoraNetworkConfigs keyed off of the related amphora id is returned. The configs contain data pertaining to each amphora that is later used for finalization of the entire load balancer configuration. The data provided to these configs is left up to the driver; this means the driver is responsible for providing data that is appropriate for the amphora network configurations. Example return: {<amphora_id>: <AmphoraNetworkConfig>} :param load_balancer: The load_balancer configuration :param amphora: Optional amphora to only query. :return: dict of octavia.network.data_models.AmphoraNetworkConfig keyed off of the amphora id the config is associated with. :raises: NotFound, NetworkNotFound, SubnetNotFound, PortNotFound """ @abc.abstractmethod def wait_for_port_detach(self, amphora): """Waits for the amphora ports device_id to be unset. This method waits for the ports on an amphora device_id parameter to be '' or None which signifies that nova has finished detaching the port from the instance. :param amphora: Amphora to wait for ports to detach. :returns: None :raises TimeoutException: Port did not detach in interval. :raises PortNotFound: Port was not found by neutron.
""" @abc.abstractmethod def update_vip_sg(self, load_balancer, vip): """Updates the security group for a VIP :param load_balancer: Load Balancer to rpepare the VIP for :param vip: The VIP to plug """ @abc.abstractmethod def plug_aap_port(self, load_balancer, vip, amphora, subnet): """Plugs the AAP port to the amp :param load_balancer: Load Balancer to prepare the VIP for :param vip: The VIP to plug :param amphora: The amphora to plug the VIP into :param subnet: The subnet to plug the aap into """ @abc.abstractmethod def unplug_aap_port(self, vip, amphora, subnet): """Unplugs the AAP port to the amp :param vip: The VIP to plug :param amphora: The amphora to plug the VIP into :param subnet: The subnet to plug the aap into """ @abc.abstractmethod def qos_enabled(self): """Whether QoS is enabled :return: Boolean """ @abc.abstractmethod def get_network_ip_availability(self, network): """Retrieves network IP availability. :param network: octavia.network.data_models.Network :return: octavia.network.data_models.Network_IP_Availability :raises: NetworkException, NetworkNotFound """ @abc.abstractmethod def set_port_admin_state_up(self, port_id, state): """Set the admin state of a port. True is up, False is down. :param port_id: The port ID to update. :param state: True for up, False for down. :returns: None """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/data_models.py0000664000175000017500000001437100000000000021145 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from octavia.common import data_models class Interface(data_models.BaseDataModel): def __init__(self, id=None, compute_id=None, network_id=None, fixed_ips=None, port_id=None): self.id = id self.compute_id = compute_id self.network_id = network_id self.port_id = port_id self.fixed_ips = fixed_ips class Delta(data_models.BaseDataModel): def __init__(self, amphora_id=None, compute_id=None, add_nics=None, delete_nics=None): self.compute_id = compute_id self.amphora_id = amphora_id self.add_nics = add_nics self.delete_nics = delete_nics class Network(data_models.BaseDataModel): def __init__(self, id=None, name=None, subnets=None, project_id=None, admin_state_up=None, mtu=None, provider_network_type=None, provider_physical_network=None, provider_segmentation_id=None, router_external=None, port_security_enabled=None): self.id = id self.name = name self.subnets = subnets self.project_id = project_id self.admin_state_up = admin_state_up self.provider_network_type = provider_network_type self.provider_physical_network = provider_physical_network self.provider_segmentation_id = provider_segmentation_id self.router_external = router_external self.mtu = mtu self.port_security_enabled = port_security_enabled class Subnet(data_models.BaseDataModel): def __init__(self, id=None, name=None, network_id=None, project_id=None, gateway_ip=None, cidr=None, ip_version=None, host_routes=None): self.id = id self.name = name self.network_id = network_id self.project_id = project_id self.gateway_ip = gateway_ip self.cidr = cidr self.ip_version = ip_version self.host_routes = host_routes class Port(data_models.BaseDataModel): def __init__(self, id=None, name=None, device_id=None, device_owner=None, mac_address=None, network_id=None, status=None, project_id=None, admin_state_up=None, fixed_ips=None, network=None, qos_policy_id=None, security_group_ids=None): self.id = id self.name = name self.device_id = device_id self.device_owner = device_owner self.mac_address = mac_address self.network_id = network_id self.status = status self.project_id = project_id self.admin_state_up = admin_state_up self.fixed_ips = fixed_ips or [] self.network = network self.qos_policy_id = qos_policy_id self.security_group_ids = security_group_ids or [] def get_subnet_id(self, fixed_ip_address): for fixed_ip in self.fixed_ips: if fixed_ip.ip_address == fixed_ip_address: return fixed_ip.subnet_id return None class FixedIP(data_models.BaseDataModel): def __init__(self, subnet_id=None, ip_address=None, subnet=None): self.subnet_id = subnet_id self.ip_address = ip_address self.subnet = subnet class FloatingIP(data_models.BaseDataModel): def __init__(self, id=None, description=None, project_id=None, status=None, router_id=None, port_id=None, floating_network_id=None, floating_ip_address=None, fixed_ip_address=None, fixed_port_id=None): self.id = id self.description = description self.project_id = project_id self.status = status self.router_id = router_id self.port_id = port_id self.floating_network_id = floating_network_id self.floating_ip_address = floating_ip_address self.fixed_ip_address = fixed_ip_address self.fixed_port_id = fixed_port_id # Need to provide this for compatibility in case it is used as a VIP self.network_id = floating_network_id class AmphoraNetworkConfig(data_models.BaseDataModel): def __init__(self, amphora=None, vip_subnet=None, vip_port=None, vrrp_subnet=None, vrrp_port=None, ha_subnet=None, ha_port=None): self.amphora = amphora self.vip_subnet = vip_subnet self.vip_port = vip_port self.vrrp_subnet = vrrp_subnet 
self.vrrp_port = vrrp_port self.ha_subnet = ha_subnet self.ha_port = ha_port class HostRoute(data_models.BaseDataModel): def __init__(self, nexthop=None, destination=None): self.nexthop = nexthop self.destination = destination class QosPolicy(data_models.BaseDataModel): def __init__(self, id): self.id = id class Network_IP_Availability(data_models.BaseDataModel): def __init__(self, network_id=None, tenant_id=None, project_id=None, network_name=None, total_ips=None, used_ips=None, subnet_ip_availability=None): self.network_id = network_id self.tenant_id = tenant_id self.project_id = project_id self.network_name = network_name self.total_ips = total_ips self.used_ips = used_ips self.subnet_ip_availability = subnet_ip_availability class SecurityGroup(data_models.BaseDataModel): def __init__(self, id=None, project_id=None, name=None, description=None, security_group_rule_ids=None, tags=None, stateful=None): self.id = id self.project_id = project_id self.name = name self.description = description self.security_group_rule_ids = security_group_rule_ids or [] self.tags = tags or [] self.stateful = stateful ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4022167 octavia-6.2.2/octavia/network/drivers/0000775000175000017500000000000000000000000017767 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/drivers/__init__.py0000664000175000017500000000107400000000000022102 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4022167 octavia-6.2.2/octavia/network/drivers/neutron/0000775000175000017500000000000000000000000021461 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/drivers/neutron/__init__.py0000664000175000017500000000107400000000000023574 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
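# Illustrative sketch (not part of the original tree), referring back to
# octavia/network/data_models.py above: Port.get_subnet_id() maps a fixed IP address
# back to its subnet and returns None when the address is not on the port. The IDs and
# addresses here are hypothetical.
#
#     from octavia.network import data_models as n_data_models
#
#     port = n_data_models.Port(fixed_ips=[n_data_models.FixedIP(
#         subnet_id='subnet-1', ip_address='10.0.0.5')])
#     assert port.get_subnet_id('10.0.0.5') == 'subnet-1'
#     assert port.get_subnet_id('10.0.0.9') is None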
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/drivers/neutron/allowed_address_pairs.py0000664000175000017500000011616500000000000026377 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ipaddress import time from neutronclient.common import exceptions as neutron_client_exceptions from novaclient import exceptions as nova_client_exceptions from oslo_config import cfg from oslo_log import log as logging from stevedore import driver as stevedore_driver from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.common import utils as common_utils from octavia.i18n import _ from octavia.network import base from octavia.network import data_models as n_data_models from octavia.network.drivers.neutron import base as neutron_base from octavia.network.drivers.neutron import utils LOG = logging.getLogger(__name__) AAP_EXT_ALIAS = 'allowed-address-pairs' PROJECT_ID_ALIAS = 'project-id' OCTAVIA_OWNER = 'Octavia' CONF = cfg.CONF class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver): def __init__(self): super(AllowedAddressPairsDriver, self).__init__() self._check_aap_loaded() self.compute = stevedore_driver.DriverManager( namespace='octavia.compute.drivers', name=CONF.controller_worker.compute_driver, invoke_on_load=True ).driver def _check_aap_loaded(self): if not self._check_extension_enabled(AAP_EXT_ALIAS): raise base.NetworkException( 'The {alias} extension is not enabled in neutron. 
This ' 'driver cannot be used with the {alias} extension ' 'disabled.'.format(alias=AAP_EXT_ALIAS)) def _get_interfaces_to_unplug(self, interfaces, network_id, ip_address=None): ret = [] for interface in interfaces: if interface.network_id == network_id: if ip_address: for fixed_ip in interface.fixed_ips: if ip_address == fixed_ip.ip_address: ret.append(interface) else: ret.append(interface) return ret def _get_plugged_interface(self, compute_id, network_id, lb_network_ip): interfaces = self.get_plugged_networks(compute_id) for interface in interfaces: is_correct_interface = interface.network_id == network_id for ip in interface.fixed_ips: if ip.ip_address == lb_network_ip: is_correct_interface = False if is_correct_interface: return interface return None def _plug_amphora_vip(self, amphora, subnet): # We need a vip port owned by Octavia for Act/Stby and failover try: port = {constants.PORT: { constants.NAME: 'octavia-lb-vrrp-' + amphora.id, constants.NETWORK_ID: subnet.network_id, constants.FIXED_IPS: [{'subnet_id': subnet.id}], constants.ADMIN_STATE_UP: True, constants.DEVICE_OWNER: OCTAVIA_OWNER}} new_port = self.neutron_client.create_port(port) new_port = utils.convert_port_dict_to_model(new_port) LOG.debug('Created vip port: %(port_id)s for amphora: %(amp)s', {'port_id': new_port.id, 'amp': amphora.id}) except Exception: message = _('Error creating the base (VRRP) port for the VIP with ' 'port details: {}').format(port) LOG.exception(message) raise base.PlugVIPException(message) try: interface = self.plug_port(amphora, new_port) except Exception: message = _('Error plugging amphora (compute_id: {compute_id}) ' 'into vip network {network_id}.').format( compute_id=amphora.compute_id, network_id=subnet.network_id) LOG.exception(message) try: if new_port: self.neutron_client.delete_port(new_port.id) LOG.debug('Deleted base (VRRP) port %s due to plug_port ' 'failure.', new_port.id) except Exception: LOG.exception('Failed to delete base (VRRP) port %s after ' 'plug_port failed. 
This resource is being ' 'abandoned and should be manually deleted when ' 'neutron is functional.', new_port.id) raise base.PlugVIPException(message) return interface def _add_vip_address_pair(self, port_id, vip_address): try: self._add_allowed_address_pair_to_port(port_id, vip_address) except neutron_client_exceptions.PortNotFoundClient as e: raise base.PortNotFound(str(e)) except Exception: message = _('Error adding allowed address pair {ip} ' 'to port {port_id}.').format(ip=vip_address, port_id=port_id) LOG.exception(message) raise base.PlugVIPException(message) def _get_lb_security_group(self, load_balancer_id): sec_grp_name = common_utils.get_vip_security_group_name( load_balancer_id) sec_grps = self.neutron_client.list_security_groups(name=sec_grp_name) if sec_grps and sec_grps.get(constants.SECURITY_GROUPS): return sec_grps.get(constants.SECURITY_GROUPS)[0] return None def _get_ethertype_for_ip(self, ip): address = ipaddress.ip_address(ip) return 'IPv6' if address.version == 6 else 'IPv4' def _update_security_group_rules(self, load_balancer, sec_grp_id): rules = self.neutron_client.list_security_group_rules( security_group_id=sec_grp_id) updated_ports = [] listener_peer_ports = [] for listener in load_balancer.listeners: if (listener.provisioning_status in [constants.PENDING_DELETE, constants.DELETED]): continue protocol = constants.PROTOCOL_TCP.lower() if listener.protocol == constants.PROTOCOL_UDP: protocol = constants.PROTOCOL_UDP.lower() if listener.allowed_cidrs: for ac in listener.allowed_cidrs: port = (listener.protocol_port, protocol, ac.cidr) updated_ports.append(port) else: port = (listener.protocol_port, protocol, None) updated_ports.append(port) listener_peer_ports.append(listener.peer_port) # As the peer port will hold the tcp connection for keepalived and # haproxy session synchronization, so here the security group rule # should be just related with tcp protocol only. To avoid adding # duplicate rules, peer_port info should be added if updated_ports # does not have the peer_port entry with allowed_cidr 0.0.0.0/0 tcp_lower = constants.PROTOCOL_TCP.lower() for peer_port in listener_peer_ports: if (peer_port, tcp_lower, "0.0.0.0/0") not in updated_ports: updated_ports.append((peer_port, tcp_lower, None)) # Just going to use port_range_max for now because we can assume that # port_range_max and min will be the same since this driver is # responsible for creating these rules old_ports = [] for rule in rules.get('security_group_rules', []): # Don't remove egress rules and don't confuse other protocols with # None ports with the egress rules. 
VRRP uses protocol 51 and 112 if (rule.get('direction') == 'egress' or rule.get('protocol').upper() not in [constants.PROTOCOL_TCP, constants.PROTOCOL_UDP]): continue old_ports.append((rule.get('port_range_max'), rule.get('protocol').lower(), rule.get('remote_ip_prefix'))) add_ports = set(updated_ports) - set(old_ports) del_ports = set(old_ports) - set(updated_ports) for rule in rules.get('security_group_rules', []): if (rule.get('protocol', '') and rule.get('protocol', '').lower() in ['tcp', 'udp'] and (rule.get('port_range_max'), rule.get('protocol'), rule.get('remote_ip_prefix')) in del_ports): rule_id = rule.get(constants.ID) try: self.neutron_client.delete_security_group_rule(rule_id) except neutron_client_exceptions.NotFound: LOG.info("Security group rule %s not found, will assume " "it is already deleted.", rule_id) ethertype = self._get_ethertype_for_ip(load_balancer.vip.ip_address) for port_protocol in add_ports: self._create_security_group_rule(sec_grp_id, port_protocol[1], port_min=port_protocol[0], port_max=port_protocol[0], ethertype=ethertype, cidr=port_protocol[2]) # Currently we are using the VIP network for VRRP # so we need to open up the protocols for it if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: try: self._create_security_group_rule( sec_grp_id, constants.VRRP_PROTOCOL_NUM, direction='ingress', ethertype=ethertype) except neutron_client_exceptions.Conflict: # It's ok if this rule already exists pass except Exception as e: raise base.PlugVIPException(str(e)) try: self._create_security_group_rule( sec_grp_id, constants.AUTH_HEADER_PROTOCOL_NUMBER, direction='ingress', ethertype=ethertype) except neutron_client_exceptions.Conflict: # It's ok if this rule already exists pass except Exception as e: raise base.PlugVIPException(str(e)) def _add_vip_security_group_to_port(self, load_balancer_id, port_id, sec_grp_id=None): sec_grp_id = (sec_grp_id or self._get_lb_security_group(load_balancer_id).get( constants.ID)) try: self._add_security_group_to_port(sec_grp_id, port_id) except base.PortNotFound: raise except base.NetworkException as e: raise base.PlugVIPException(str(e)) def _delete_vip_security_group(self, sec_grp): """Deletes a security group in neutron. Retries upon an exception because removing a security group from a neutron port does not happen immediately. """ attempts = 0 while attempts <= CONF.networking.max_retries: try: self.neutron_client.delete_security_group(sec_grp) LOG.info("Deleted security group %s", sec_grp) return except neutron_client_exceptions.NotFound: LOG.info("Security group %s not found, will assume it is " "already deleted", sec_grp) return except Exception: LOG.warning("Attempt %(attempt)s to remove security group " "%(sg)s failed.", {'attempt': attempts + 1, 'sg': sec_grp}) attempts += 1 time.sleep(CONF.networking.retry_interval) message = _("All attempts to remove security group {0} have " "failed.").format(sec_grp) LOG.exception(message) raise base.DeallocateVIPException(message) def _delete_security_group(self, vip, port): if self.sec_grp_enabled: sec_grp = self._get_lb_security_group(vip.load_balancer.id) if sec_grp: sec_grp_id = sec_grp.get(constants.ID) LOG.info( "Removing security group %(sg)s from port %(port)s", {'sg': sec_grp_id, constants.PORT: vip.port_id}) raw_port = None try: if port: raw_port = self.neutron_client.show_port(port.id) except Exception: LOG.warning('Unable to get port information for port ' '%s. 
Continuing to delete the security ' 'group.', port.id) if raw_port: sec_grps = raw_port.get( constants.PORT, {}).get(constants.SECURITY_GROUPS, []) if sec_grp_id in sec_grps: sec_grps.remove(sec_grp_id) port_update = {constants.PORT: { constants.SECURITY_GROUPS: sec_grps}} try: self.neutron_client.update_port(port.id, port_update) except neutron_client_exceptions.PortNotFoundClient: LOG.warning('Unable to update port information ' 'for port %s. Continuing to delete ' 'the security group since port not ' 'found', port.id) try: self._delete_vip_security_group(sec_grp_id) except base.DeallocateVIPException: # Try to delete any leftover ports on this security group. # Because this security group is created and managed by us, # it *should* only return ports that we own / can delete. LOG.warning('Failed to delete security group on first ' 'pass: %s', sec_grp_id) extra_ports = self._get_ports_by_security_group(sec_grp_id) for extra_port in extra_ports: port_id = extra_port.get(constants.ID) try: LOG.warning('Deleting extra port %s on security ' 'group %s...', port_id, sec_grp_id) self.neutron_client.delete_port(port_id) except Exception: LOG.warning('Failed to delete extra port %s on ' 'security group %s.', port_id, sec_grp_id) # Now try it again self._delete_vip_security_group(sec_grp_id) def deallocate_vip(self, vip): """Delete the vrrp_port (instance port) in case nova didn't This can happen if a failover has occurred. """ for amphora in vip.load_balancer.amphorae: try: self.neutron_client.delete_port(amphora.vrrp_port_id) except (neutron_client_exceptions.NotFound, neutron_client_exceptions.PortNotFoundClient): LOG.debug('VIP instance port %s already deleted. Skipping.', amphora.vrrp_port_id) try: port = self.get_port(vip.port_id) except base.PortNotFound: LOG.warning("Can't deallocate VIP because the vip port %s " "cannot be found in neutron. " "Continuing cleanup.", vip.port_id) port = None self._delete_security_group(vip, port) if port and port.device_owner == OCTAVIA_OWNER: try: self.neutron_client.delete_port(vip.port_id) except (neutron_client_exceptions.NotFound, neutron_client_exceptions.PortNotFoundClient): LOG.debug('VIP port %s already deleted. 
Skipping.', vip.port_id) except Exception: message = _('Error deleting VIP port_id {port_id} from ' 'neutron').format(port_id=vip.port_id) LOG.exception(message) raise base.DeallocateVIPException(message) elif port: LOG.info("Port %s will not be deleted by Octavia as it was " "not created by Octavia.", vip.port_id) def update_vip_sg(self, load_balancer, vip): if self.sec_grp_enabled: sec_grp = self._get_lb_security_group(load_balancer.id) if not sec_grp: sec_grp_name = common_utils.get_vip_security_group_name( load_balancer.id) sec_grp = self._create_security_group(sec_grp_name) self._update_security_group_rules(load_balancer, sec_grp.get(constants.ID)) self._add_vip_security_group_to_port(load_balancer.id, vip.port_id, sec_grp.get(constants.ID)) return sec_grp.get(constants.ID) return None def plug_aap_port(self, load_balancer, vip, amphora, subnet): interface = self._get_plugged_interface( amphora.compute_id, subnet.network_id, amphora.lb_network_ip) if not interface: interface = self._plug_amphora_vip(amphora, subnet) self._add_vip_address_pair(interface.port_id, vip.ip_address) if self.sec_grp_enabled: self._add_vip_security_group_to_port(load_balancer.id, interface.port_id) vrrp_ip = None for fixed_ip in interface.fixed_ips: is_correct_subnet = fixed_ip.subnet_id == subnet.id is_management_ip = fixed_ip.ip_address == amphora.lb_network_ip if is_correct_subnet and not is_management_ip: vrrp_ip = fixed_ip.ip_address break return data_models.Amphora( id=amphora.id, compute_id=amphora.compute_id, vrrp_ip=vrrp_ip, ha_ip=vip.ip_address, vrrp_port_id=interface.port_id, ha_port_id=vip.port_id) # todo (xgerman): Delete later def plug_vip(self, load_balancer, vip): self.update_vip_sg(load_balancer, vip) plugged_amphorae = [] subnet = self.get_subnet(vip.subnet_id) for amphora in filter( lambda amp: amp.status == constants.AMPHORA_ALLOCATED, load_balancer.amphorae): plugged_amphorae.append(self.plug_aap_port(load_balancer, vip, amphora, subnet)) return plugged_amphorae def _validate_fixed_ip(self, fixed_ips, subnet_id, ip_address): """Validate an IP address exists in a fixed_ips dict :param fixed_ips: A port fixed_ips dict :param subnet_id: The subnet that should contain the IP :param ip_address: The IP address to validate :returns: True if the ip address is in the dict, False if not """ for fixed_ip in fixed_ips: normalized_fixed_ip = ipaddress.ip_address( fixed_ip.ip_address).compressed normalized_ip = ipaddress.ip_address(ip_address).compressed if (fixed_ip.subnet_id == subnet_id and normalized_fixed_ip == normalized_ip): return True return False @staticmethod def _fixed_ips_to_list_of_dicts(fixed_ips): list_of_dicts = [] for fixed_ip in fixed_ips: list_of_dicts.append(fixed_ip.to_dict()) return list_of_dicts def allocate_vip(self, load_balancer): if load_balancer.vip.port_id: try: port = self.get_port(load_balancer.vip.port_id) fixed_ip_found = self._validate_fixed_ip( port.fixed_ips, load_balancer.vip.subnet_id, load_balancer.vip.ip_address) if (port.network_id == load_balancer.vip.network_id and fixed_ip_found): LOG.info('Port %s already exists. Nothing to be done.', load_balancer.vip.port_id) return self._port_to_vip(port, load_balancer) LOG.error('Neutron VIP mismatch. Expected ip %s on ' 'subnet %s in network %s. Neutron has fixed_ips %s ' 'in network %s.
Deleting and recreating the VIP ' 'port.', load_balancer.vip.ip_address, load_balancer.vip.subnet_id, load_balancer.vip.network_id, self._fixed_ips_to_list_of_dicts(port.fixed_ips), port.network_id) if load_balancer.vip.octavia_owned: self.delete_port(load_balancer.vip.port_id) else: raise base.AllocateVIPException( 'VIP port {0} is broken, but is owned by project {1} ' 'so will not be recreated. Aborting VIP allocation.' .format(port.id, port.project_id)) except base.AllocateVIPException as e: # Catch this explicitly because otherwise we blame Neutron LOG.error(getattr(e, constants.MESSAGE, None)) raise except base.PortNotFound: LOG.warning('VIP port %s is missing from neutron. Rebuilding.', load_balancer.vip.port_id) except Exception as e: message = _('Neutron is failing to service requests due to: ' '{}. Aborting.').format(str(e)) LOG.error(message) raise base.AllocateVIPException( message, orig_msg=getattr(e, constants.MESSAGE, None), orig_code=getattr(e, constants.STATUS_CODE, None),) fixed_ip = {} if load_balancer.vip.subnet_id: fixed_ip['subnet_id'] = load_balancer.vip.subnet_id if load_balancer.vip.ip_address: fixed_ip[constants.IP_ADDRESS] = load_balancer.vip.ip_address # Make sure we are backward compatible with older neutron if self._check_extension_enabled(PROJECT_ID_ALIAS): project_id_key = 'project_id' else: project_id_key = 'tenant_id' # It can be assumed that network_id exists port = {constants.PORT: { constants.NAME: 'octavia-lb-' + load_balancer.id, constants.NETWORK_ID: load_balancer.vip.network_id, constants.ADMIN_STATE_UP: False, 'device_id': 'lb-{0}'.format(load_balancer.id), constants.DEVICE_OWNER: OCTAVIA_OWNER, project_id_key: load_balancer.project_id}} if fixed_ip: port[constants.PORT][constants.FIXED_IPS] = [fixed_ip] try: new_port = self.neutron_client.create_port(port) except Exception as e: message = _('Error creating neutron port on network ' '{network_id} due to {e}.').format( network_id=load_balancer.vip.network_id, e=str(e)) LOG.exception(message) raise base.AllocateVIPException( message, orig_msg=getattr(e, constants.MESSAGE, None), orig_code=getattr(e, constants.STATUS_CODE, None), ) new_port = utils.convert_port_dict_to_model(new_port) return self._port_to_vip(new_port, load_balancer, octavia_owned=True) def unplug_aap_port(self, vip, amphora, subnet): interface = self._get_plugged_interface( amphora.compute_id, subnet.network_id, amphora.lb_network_ip) if not interface: # Thought about raising PluggedVIPNotFound exception but # then that wouldn't evaluate all amphorae, so just continue LOG.debug('Cannot get amphora %s interface, skipped', amphora.compute_id) return try: self.unplug_network(amphora.compute_id, subnet.network_id) except Exception: pass try: aap_update = {constants.PORT: { constants.ALLOWED_ADDRESS_PAIRS: [] }} self.neutron_client.update_port(interface.port_id, aap_update) except Exception: message = _('Error unplugging VIP. Could not clear ' 'allowed address pairs from port ' '{port_id}.').format(port_id=vip.port_id) LOG.exception(message) raise base.UnplugVIPException(message) # Delete the VRRP port if we created it try: port = self.get_port(amphora.vrrp_port_id) if port.name.startswith('octavia-lb-vrrp-'): self.neutron_client.delete_port(amphora.vrrp_port_id) except (neutron_client_exceptions.NotFound, neutron_client_exceptions.PortNotFoundClient): pass except Exception as e: LOG.error('Failed to delete port. 
Resources may still be in ' 'use for port: %(port)s due to error: %(except)s', {constants.PORT: amphora.vrrp_port_id, 'except': str(e)}) def unplug_vip(self, load_balancer, vip): try: subnet = self.get_subnet(vip.subnet_id) except base.SubnetNotFound: msg = ("Can't unplug vip because vip subnet {0} was not " "found").format(vip.subnet_id) LOG.exception(msg) raise base.PluggedVIPNotFound(msg) for amphora in filter( lambda amp: amp.status == constants.AMPHORA_ALLOCATED, load_balancer.amphorae): self.unplug_aap_port(vip, amphora, subnet) def plug_network(self, compute_id, network_id, ip_address=None): try: interface = self.compute.attach_network_or_port( compute_id=compute_id, network_id=network_id, ip_address=ip_address) except exceptions.NotFound as e: if 'Instance' in str(e): raise base.AmphoraNotFound(str(e)) if 'Network' in str(e): raise base.NetworkNotFound(str(e)) raise base.PlugNetworkException(str(e)) except Exception: message = _('Error plugging amphora (compute_id: {compute_id}) ' 'into network {network_id}.').format( compute_id=compute_id, network_id=network_id) LOG.exception(message) raise base.PlugNetworkException(message) return self._nova_interface_to_octavia_interface(compute_id, interface) def unplug_network(self, compute_id, network_id, ip_address=None): interfaces = self.get_plugged_networks(compute_id) if not interfaces: msg = ('Amphora with compute id {compute_id} does not have any ' 'plugged networks').format(compute_id=compute_id) raise base.NetworkNotFound(msg) unpluggers = self._get_interfaces_to_unplug(interfaces, network_id, ip_address=ip_address) for index, unplugger in enumerate(unpluggers): self.compute.detach_port( compute_id=compute_id, port_id=unplugger.port_id) def update_vip(self, load_balancer, for_delete=False): sec_grp = self._get_lb_security_group(load_balancer.id) if sec_grp: self._update_security_group_rules(load_balancer, sec_grp.get(constants.ID)) elif not for_delete: raise exceptions.MissingVIPSecurityGroup(lb_id=load_balancer.id) else: LOG.warning('VIP security group missing when updating the VIP for ' 'delete on load balancer: %s. 
Skipping update ' 'because this is for delete.', load_balancer.id) def failover_preparation(self, amphora): if self.dns_integration_enabled: self._failover_preparation(amphora) def _failover_preparation(self, amphora): interfaces = self.get_plugged_networks(compute_id=amphora.compute_id) ports = [] for interface_ in interfaces: port = self.get_port(port_id=interface_.port_id) ips = port.fixed_ips lb_network = False for ip in ips: if ip.ip_address == amphora.lb_network_ip: lb_network = True if not lb_network: ports.append(port) for port in ports: try: self.neutron_client.update_port( port.id, {constants.PORT: {'dns_name': ''}}) except (neutron_client_exceptions.NotFound, neutron_client_exceptions.PortNotFoundClient): raise base.PortNotFound() def plug_port(self, amphora, port): try: interface = self.compute.attach_network_or_port( compute_id=amphora.compute_id, network_id=None, ip_address=None, port_id=port.id) plugged_interface = self._nova_interface_to_octavia_interface( amphora.compute_id, interface) except exceptions.NotFound as e: if 'Instance' in str(e): raise base.AmphoraNotFound(str(e)) if 'Network' in str(e): raise base.NetworkNotFound(str(e)) raise base.PlugNetworkException(str(e)) except nova_client_exceptions.Conflict: LOG.info('Port %(portid)s is already plugged, ' 'skipping', {'portid': port.id}) plugged_interface = n_data_models.Interface( compute_id=amphora.compute_id, network_id=port.network_id, port_id=port.id, fixed_ips=port.fixed_ips) except Exception: message = _('Error plugging amphora (compute_id: ' '{compute_id}) into port ' '{port_id}.').format( compute_id=amphora.compute_id, port_id=port.id) LOG.exception(message) raise base.PlugNetworkException(message) return plugged_interface def _get_amp_net_configs(self, amp, amp_configs, vip_subnet, vip_port): if amp.status != constants.DELETED: LOG.debug("Retrieving network details for amphora %s", amp.id) vrrp_port = self.get_port(amp.vrrp_port_id) vrrp_subnet = self.get_subnet( vrrp_port.get_subnet_id(amp.vrrp_ip)) vrrp_port.network = self.get_network(vrrp_port.network_id) ha_port = self.get_port(amp.ha_port_id) ha_subnet = self.get_subnet( ha_port.get_subnet_id(amp.ha_ip)) amp_configs[amp.id] = n_data_models.AmphoraNetworkConfig( amphora=amp, vip_subnet=vip_subnet, vip_port=vip_port, vrrp_subnet=vrrp_subnet, vrrp_port=vrrp_port, ha_subnet=ha_subnet, ha_port=ha_port ) def get_network_configs(self, loadbalancer, amphora=None): vip_subnet = self.get_subnet(loadbalancer.vip.subnet_id) vip_port = self.get_port(loadbalancer.vip.port_id) amp_configs = {} if amphora: self._get_amp_net_configs(amphora, amp_configs, vip_subnet, vip_port) else: for amp in loadbalancer.amphorae: try: self._get_amp_net_configs(amp, amp_configs, vip_subnet, vip_port) except Exception as e: LOG.warning('Getting network configurations for amphora ' '%(amp)s failed due to %(err)s.', {'amp': amp.id, 'err': str(e)}) return amp_configs # TODO(johnsom) This may be dead code now. Remove in failover for v2 patch def wait_for_port_detach(self, amphora): """Waits for the amphora ports device_id to be unset. This method waits for the ports on an amphora device_id parameter to be '' or None which signifies that nova has finished detaching the port from the instance. :param amphora: Amphora to wait for ports to detach. :returns: None :raises TimeoutException: Port did not detach in interval. :raises PortNotFound: Port was not found by neutron. 
""" interfaces = self.get_plugged_networks(compute_id=amphora.compute_id) ports = [] port_detach_timeout = CONF.networking.port_detach_timeout for interface_ in interfaces: port = self.get_port(port_id=interface_.port_id) ips = port.fixed_ips lb_network = False for ip in ips: if ip.ip_address == amphora.lb_network_ip: lb_network = True if not lb_network: ports.append(port) for port in ports: try: neutron_port = self.neutron_client.show_port( port.id).get(constants.PORT) device_id = neutron_port['device_id'] start = int(time.time()) while device_id: time.sleep(CONF.networking.retry_interval) neutron_port = self.neutron_client.show_port( port.id).get(constants.PORT) device_id = neutron_port['device_id'] timed_out = int(time.time()) - start >= port_detach_timeout if device_id and timed_out: message = ('Port %s failed to detach (device_id %s) ' 'within the required time (%s s).' % (port.id, device_id, port_detach_timeout)) raise base.TimeoutException(message) except (neutron_client_exceptions.NotFound, neutron_client_exceptions.PortNotFoundClient): pass def delete_port(self, port_id): """delete a neutron port. :param port_id: The port ID to delete. :returns: None """ try: self.neutron_client.delete_port(port_id) except (neutron_client_exceptions.NotFound, neutron_client_exceptions.PortNotFoundClient): LOG.debug('VIP instance port %s already deleted. Skipping.', port_id) except Exception as e: raise exceptions.NetworkServiceError(net_error=str(e)) def set_port_admin_state_up(self, port_id, state): """Set the admin state of a port. True is up, False is down. :param port_id: The port ID to update. :param state: True for up, False for down. :returns: None """ try: self.neutron_client.update_port( port_id, {constants.PORT: {constants.ADMIN_STATE_UP: state}}) except (neutron_client_exceptions.NotFound, neutron_client_exceptions.PortNotFoundClient) as e: raise base.PortNotFound(str(e)) except Exception as e: raise exceptions.NetworkServiceError(net_error=str(e)) def create_port(self, network_id, name=None, fixed_ips=(), secondary_ips=(), security_group_ids=(), admin_state_up=True, qos_policy_id=None): """Creates a network port. fixed_ips = [{'subnet_id': , ('ip_addrss': ')},] ip_address is optional in the fixed_ips dictionary. :param network_id: The network the port should be created on. :param name: The name to apply to the port. :param fixed_ips: A list of fixed IP dicts. :param secondary_ips: A list of secondary IPs to add to the port. :param security_group_ids: A list of security group IDs for the port. :param qos_policy_id: The QoS policy ID to apply to the port. :returns port: A port data model object. 
""" try: aap_list = [] for ip in secondary_ips: aap_list.append({constants.IP_ADDRESS: ip}) port = {constants.NETWORK_ID: network_id, constants.ADMIN_STATE_UP: admin_state_up, constants.DEVICE_OWNER: OCTAVIA_OWNER} if aap_list: port[constants.ALLOWED_ADDRESS_PAIRS] = aap_list if fixed_ips: port[constants.FIXED_IPS] = fixed_ips if name: port[constants.NAME] = name if qos_policy_id: port[constants.QOS_POLICY_ID] = qos_policy_id if security_group_ids: port[constants.SECURITY_GROUPS] = security_group_ids new_port = self.neutron_client.create_port({constants.PORT: port}) LOG.debug('Created port: %(port)s', {constants.PORT: new_port}) return utils.convert_port_dict_to_model(new_port) except Exception as e: message = _('Error creating a port on network ' '{network_id} due to {error}.').format( network_id=network_id, error=str(e)) LOG.exception(message) raise base.CreatePortException(message) def get_security_group(self, sg_name): """Retrieves the security group by it's name. :param sg_name: The security group name. :return: octavia.network.data_models.SecurityGroup, None if not enabled :raises: NetworkException, SecurityGroupNotFound """ try: if self.sec_grp_enabled and sg_name: sec_grps = self.neutron_client.list_security_groups( name=sg_name) if sec_grps and sec_grps.get(constants.SECURITY_GROUPS): sg_dict = sec_grps.get(constants.SECURITY_GROUPS)[0] return utils.convert_security_group_dict_to_model(sg_dict) message = _('Security group {name} not found.').format( name=sg_name) raise base.SecurityGroupNotFound(message) return None except base.SecurityGroupNotFound: raise except Exception as e: message = _('Error when getting security group {name} due to ' '{error}').format(name=sg_name, error=str(e)) LOG.exception(message) raise base.NetworkException(message) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/drivers/neutron/base.py0000664000175000017500000002743700000000000022762 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutronclient.common import exceptions as neutron_client_exceptions from oslo_config import cfg from oslo_log import log as logging from octavia.common import clients from octavia.common import data_models from octavia.i18n import _ from octavia.network import base from octavia.network import data_models as network_models from octavia.network.drivers.neutron import utils LOG = logging.getLogger(__name__) DNS_INT_EXT_ALIAS = 'dns-integration' SEC_GRP_EXT_ALIAS = 'security-group' QOS_EXT_ALIAS = 'qos' CONF = cfg.CONF class BaseNeutronDriver(base.AbstractNetworkDriver): def __init__(self): self.neutron_client = clients.NeutronAuth.get_neutron_client( endpoint=CONF.neutron.endpoint, region=CONF.neutron.region_name, endpoint_type=CONF.neutron.endpoint_type, service_name=CONF.neutron.service_name, insecure=CONF.neutron.insecure, ca_cert=CONF.neutron.ca_certificates_file ) self._check_extension_cache = {} self.sec_grp_enabled = self._check_extension_enabled(SEC_GRP_EXT_ALIAS) self.dns_integration_enabled = self._check_extension_enabled( DNS_INT_EXT_ALIAS) self._qos_enabled = self._check_extension_enabled(QOS_EXT_ALIAS) self.project_id = self.neutron_client.get_auth_info().get( 'auth_tenant_id') def _check_extension_enabled(self, extension_alias): if extension_alias in self._check_extension_cache: status = self._check_extension_cache[extension_alias] LOG.debug('Neutron extension %(ext)s cached as %(status)s', { 'ext': extension_alias, 'status': 'enabled' if status else 'disabled' }) else: try: self.neutron_client.show_extension(extension_alias) LOG.debug('Neutron extension %(ext)s found enabled', {'ext': extension_alias}) self._check_extension_cache[extension_alias] = True except neutron_client_exceptions.NotFound: LOG.debug('Neutron extension %(ext)s is not enabled', {'ext': extension_alias}) self._check_extension_cache[extension_alias] = False return self._check_extension_cache[extension_alias] def _port_to_vip(self, port, load_balancer, octavia_owned=False): fixed_ip = None for port_fixed_ip in port.fixed_ips: if port_fixed_ip.subnet_id == load_balancer.vip.subnet_id: fixed_ip = port_fixed_ip break if fixed_ip: return data_models.Vip(ip_address=fixed_ip.ip_address, subnet_id=fixed_ip.subnet_id, network_id=port.network_id, port_id=port.id, load_balancer=load_balancer, load_balancer_id=load_balancer.id, octavia_owned=octavia_owned) return data_models.Vip(ip_address=None, subnet_id=None, network_id=port.network_id, port_id=port.id, load_balancer=load_balancer, load_balancer_id=load_balancer.id, octavia_owned=octavia_owned) def _nova_interface_to_octavia_interface(self, compute_id, nova_interface): fixed_ips = [utils.convert_fixed_ip_dict_to_model(fixed_ip) for fixed_ip in nova_interface.fixed_ips] return network_models.Interface(compute_id=compute_id, network_id=nova_interface.net_id, port_id=nova_interface.port_id, fixed_ips=fixed_ips) def _port_to_octavia_interface(self, compute_id, port): fixed_ips = [utils.convert_fixed_ip_dict_to_model(fixed_ip) for fixed_ip in port.get('fixed_ips', [])] return network_models.Interface(compute_id=compute_id, network_id=port['network_id'], port_id=port['id'], fixed_ips=fixed_ips) def _add_allowed_address_pair_to_port(self, port_id, ip_address): aap = { 'port': { 'allowed_address_pairs': [ {'ip_address': ip_address} ] } } self.neutron_client.update_port(port_id, aap) def _add_security_group_to_port(self, sec_grp_id, port_id): port_update = {'port': {'security_groups': [sec_grp_id]}} # Note: Neutron accepts the SG even if it already exists try: 
self.neutron_client.update_port(port_id, port_update) except neutron_client_exceptions.PortNotFoundClient as e: raise base.PortNotFound(str(e)) except Exception as e: raise base.NetworkException(str(e)) def _get_ports_by_security_group(self, sec_grp_id): all_ports = self.neutron_client.list_ports(project_id=self.project_id) filtered_ports = [] for port in all_ports.get('ports', []): if sec_grp_id in port.get('security_groups', []): filtered_ports.append(port) return filtered_ports def _create_security_group(self, name): new_sec_grp = {'security_group': {'name': name}} sec_grp = self.neutron_client.create_security_group(new_sec_grp) return sec_grp['security_group'] def _create_security_group_rule(self, sec_grp_id, protocol, direction='ingress', port_min=None, port_max=None, ethertype='IPv6', cidr=None): rule = { 'security_group_rule': { 'security_group_id': sec_grp_id, 'direction': direction, 'protocol': protocol, 'port_range_min': port_min, 'port_range_max': port_max, 'ethertype': ethertype, 'remote_ip_prefix': cidr, } } self.neutron_client.create_security_group_rule(rule) def apply_qos_on_port(self, qos_id, port_id): body = { 'port': {'qos_policy_id': qos_id} } try: self.neutron_client.update_port(port_id, body) except neutron_client_exceptions.PortNotFoundClient as e: raise base.PortNotFound(str(e)) except Exception as e: raise base.NetworkException(str(e)) def get_plugged_networks(self, compute_id): # List neutron ports associated with the Amphora try: ports = self.neutron_client.list_ports(device_id=compute_id) except Exception: LOG.debug('Error retrieving plugged networks for compute ' 'device %s.', compute_id) ports = {'ports': []} return [self._port_to_octavia_interface( compute_id, port) for port in ports['ports']] def _get_resource(self, resource_type, resource_id, context=None): neutron_client = self.neutron_client if context and not CONF.networking.allow_invisible_resource_usage: neutron_client = clients.NeutronAuth.get_user_neutron_client( context) try: resource = getattr(neutron_client, 'show_%s' % resource_type)(resource_id) return getattr(utils, 'convert_%s_dict_to_model' % resource_type)(resource) except neutron_client_exceptions.NotFound: message = _('{resource_type} not found ' '({resource_type} id: {resource_id}).').format( resource_type=resource_type, resource_id=resource_id) raise getattr(base, '%sNotFound' % ''.join( [w.capitalize() for w in resource_type.split('_')]))(message) except Exception: message = _('Error retrieving {resource_type} ' '({resource_type} id: {resource_id}.').format( resource_type=resource_type, resource_id=resource_id) LOG.exception(message) raise base.NetworkException(message) def _get_resources_by_filters(self, resource_type, unique_item=False, **filters): """Retrieves item(s) from filters. By default, a list is returned. If unique_item set to True, only the first resource is returned. 
""" try: resource = getattr(self.neutron_client, 'list_%ss' % resource_type)(**filters) conversion_function = getattr( utils, 'convert_%s_dict_to_model' % resource_type) if not resource['%ss' % resource_type]: # no items found raise neutron_client_exceptions.NotFound() if unique_item: return conversion_function(resource['%ss' % resource_type][0]) return list(map(conversion_function, resource['%ss' % resource_type])) except neutron_client_exceptions.NotFound: message = _('{resource_type} not found ' '({resource_type} Filters: {filters}.').format( resource_type=resource_type, filters=filters) raise getattr(base, '%sNotFound' % ''.join( [w.capitalize() for w in resource_type.split('_')]))(message) except Exception: message = _('Error retrieving {resource_type} ' '({resource_type} Filters: {filters}.').format( resource_type=resource_type, filters=filters) LOG.exception(message) raise base.NetworkException(message) def get_network(self, network_id, context=None): return self._get_resource('network', network_id, context=context) def get_subnet(self, subnet_id, context=None): return self._get_resource('subnet', subnet_id, context=context) def get_port(self, port_id, context=None): return self._get_resource('port', port_id, context=context) def get_network_by_name(self, network_name): return self._get_resources_by_filters( 'network', unique_item=True, name=network_name) def get_subnet_by_name(self, subnet_name): return self._get_resources_by_filters( 'subnet', unique_item=True, name=subnet_name) def get_port_by_name(self, port_name): return self._get_resources_by_filters( 'port', unique_item=True, name=port_name) def get_port_by_net_id_device_id(self, network_id, device_id): return self._get_resources_by_filters( 'port', unique_item=True, network_id=network_id, device_id=device_id) def get_qos_policy(self, qos_policy_id): return self._get_resource('qos_policy', qos_policy_id) def qos_enabled(self): return self._qos_enabled def get_network_ip_availability(self, network): return self._get_resource('network_ip_availability', network.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/drivers/neutron/utils.py0000664000175000017500000001271300000000000023177 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from octavia.common import constants from octavia.network import data_models as network_models def convert_subnet_dict_to_model(subnet_dict): subnet = subnet_dict.get('subnet', subnet_dict) subnet_hrs = subnet.get('host_routes', []) host_routes = [network_models.HostRoute(nexthop=hr.get('nexthop'), destination=hr.get('destination')) for hr in subnet_hrs] return network_models.Subnet(id=subnet.get(constants.ID), name=subnet.get(constants.NAME), network_id=subnet.get('network_id'), project_id=subnet.get(constants.TENANT_ID), gateway_ip=subnet.get('gateway_ip'), cidr=subnet.get('cidr'), ip_version=subnet.get('ip_version'), host_routes=host_routes ) def convert_port_dict_to_model(port_dict): port = port_dict.get('port', port_dict) fixed_ips = [network_models.FixedIP(subnet_id=fixed_ip.get('subnet_id'), ip_address=fixed_ip.get('ip_address')) for fixed_ip in port.get('fixed_ips', [])] return network_models.Port( id=port.get(constants.ID), name=port.get(constants.NAME), device_id=port.get('device_id'), device_owner=port.get('device_owner'), mac_address=port.get('mac_address'), network_id=port.get('network_id'), status=port.get('status'), project_id=port.get(constants.TENANT_ID), admin_state_up=port.get('admin_state_up'), fixed_ips=fixed_ips, qos_policy_id=port.get('qos_policy_id'), security_group_ids=port.get(constants.SECURITY_GROUPS, []) ) def convert_network_dict_to_model(network_dict): nw = network_dict.get('network', network_dict) return network_models.Network( id=nw.get(constants.ID), name=nw.get(constants.NAME), subnets=nw.get('subnets'), project_id=nw.get(constants.TENANT_ID), admin_state_up=nw.get('admin_state_up'), mtu=nw.get('mtu'), provider_network_type=nw.get('provider:network_type'), provider_physical_network=nw.get('provider:physical_network'), provider_segmentation_id=nw.get('provider:segmentation_id'), router_external=nw.get('router:external'), port_security_enabled=nw.get('port_security_enabled') ) def convert_fixed_ip_dict_to_model(fixed_ip_dict): fixed_ip = fixed_ip_dict.get('fixed_ip', fixed_ip_dict) return network_models.FixedIP(subnet_id=fixed_ip.get('subnet_id'), ip_address=fixed_ip.get('ip_address')) def convert_qos_policy_dict_to_model(qos_policy_dict): qos_policy = qos_policy_dict.get('policy', qos_policy_dict) return network_models.QosPolicy(id=qos_policy.get(constants.ID)) # We can't use "floating_ip" because we need to match the neutron client method def convert_floatingip_dict_to_model(floating_ip_dict): floating_ip = floating_ip_dict.get('floatingip', floating_ip_dict) return network_models.FloatingIP( id=floating_ip.get(constants.ID), description=floating_ip.get(constants.DESCRIPTION), project_id=floating_ip.get(constants.PROJECT_ID, floating_ip.get(constants.TENANT_ID)), status=floating_ip.get('status'), router_id=floating_ip.get('router_id'), port_id=floating_ip.get('port_id'), floating_network_id=floating_ip.get('floating_network_id'), floating_ip_address=floating_ip.get('floating_ip_address'), fixed_ip_address=floating_ip.get('fixed_ip_address'), fixed_port_id=floating_ip.get('fixed_port_id') ) def convert_network_ip_availability_dict_to_model( network_ip_availability_dict): nw_ip_avail = network_ip_availability_dict.get( 'network_ip_availability', network_ip_availability_dict) ip_avail = network_models.Network_IP_Availability.from_dict(nw_ip_avail) ip_avail.subnet_ip_availability = nw_ip_avail.get('subnet_ip_availability') return ip_avail def convert_security_group_dict_to_model(security_group_dict): sg_rule_ids = [rule.get(constants.ID) for rule in 
security_group_dict.get(constants.SECURITY_GROUP_RULES, [])] return network_models.SecurityGroup( id=security_group_dict.get(constants.ID), project_id=security_group_dict.get( constants.PROJECT_ID, security_group_dict.get(constants.TENANT_ID)), name=security_group_dict.get(constants.NAME), description=security_group_dict.get(constants.DESCRIPTION), security_group_rule_ids=sg_rule_ids, tags=security_group_dict.get(constants.TAGS, []), stateful=security_group_dict.get('stateful')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4022167 octavia-6.2.2/octavia/network/drivers/noop_driver/0000775000175000017500000000000000000000000022315 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/drivers/noop_driver/__init__.py0000664000175000017500000000107400000000000024430 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/network/drivers/noop_driver/driver.py0000664000175000017500000004561500000000000024175 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
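# [Editor's note] The NoopManager below implements every driver method by
# recording its arguments in the networkconfigconfig dict and returning
# canned data, so unit tests can assert on calls without a live neutron.
# A minimal sketch of that record-and-return pattern (all names here are
# illustrative, not Octavia's):

import uuid


class NoopSketch:
    """Records each call so a test can assert on it afterwards."""

    def __init__(self):
        self.calls = {}

    def plug(self, compute_id, network_id):
        # Record the call, then return a canned identifier instead of
        # touching any real API.
        self.calls[(compute_id, network_id)] = 'plug'
        return str(uuid.uuid4())


driver = NoopSketch()
port_id = driver.plug('amp-1', 'net-1')
assert driver.calls[('amp-1', 'net-1')] == 'plug'
assert port_id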
from oslo_log import log as logging from oslo_utils import uuidutils from octavia.common import data_models from octavia.network import base as driver_base from octavia.network import data_models as network_models LOG = logging.getLogger(__name__) class NoopManager(object): def __init__(self): super(NoopManager, self).__init__() self.networkconfigconfig = {} self._qos_extension_enabled = True def allocate_vip(self, loadbalancer): LOG.debug("Network %s no-op, allocate_vip loadbalancer %s", self.__class__.__name__, loadbalancer) self.networkconfigconfig[loadbalancer.id] = ( loadbalancer, 'allocate_vip') subnet_id = uuidutils.generate_uuid() network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() ip_address = '198.51.100.1' if loadbalancer.vip: subnet_id = loadbalancer.vip.subnet_id or subnet_id network_id = loadbalancer.vip.network_id or network_id port_id = loadbalancer.vip.port_id or port_id ip_address = loadbalancer.vip.ip_address or ip_address return data_models.Vip(ip_address=ip_address, subnet_id=subnet_id, network_id=network_id, port_id=port_id, load_balancer_id=loadbalancer.id) def deallocate_vip(self, vip): LOG.debug("Network %s no-op, deallocate_vip vip %s", self.__class__.__name__, vip.ip_address) self.networkconfigconfig[vip.ip_address] = (vip, 'deallocate_vip') def plug_vip(self, loadbalancer, vip): LOG.debug("Network %s no-op, plug_vip loadbalancer %s, vip %s", self.__class__.__name__, loadbalancer.id, vip.ip_address) self.update_vip_sg(loadbalancer, vip) amps = [] for amphora in loadbalancer.amphorae: amps.append(self.plug_aap_port(loadbalancer, vip, amphora, None)) self.networkconfigconfig[(loadbalancer.id, vip.ip_address)] = (loadbalancer, vip, 'plug_vip') return amps def update_vip_sg(self, load_balancer, vip): LOG.debug("Network %s no-op, update_vip_sg loadbalancer %s, vip %s", self.__class__.__name__, load_balancer.id, vip.ip_address) self.networkconfigconfig[(load_balancer.id, vip.ip_address)] = (load_balancer, vip, 'update_vip_sg') def plug_aap_port(self, load_balancer, vip, amphora, subnet): LOG.debug("Network %s no-op, plug_aap_port loadbalancer %s, vip %s," " amphora %s, subnet %s", self.__class__.__name__, load_balancer.id, vip.ip_address, amphora, subnet) self.networkconfigconfig[(amphora.id, vip.ip_address)] = ( load_balancer, vip, amphora, subnet, 'plug_aap_port') return data_models.Amphora( id=amphora.id, compute_id=amphora.compute_id, vrrp_ip='198.51.100.1', ha_ip='198.51.100.1', vrrp_port_id=uuidutils.generate_uuid(), ha_port_id=uuidutils.generate_uuid() ) def unplug_vip(self, loadbalancer, vip): LOG.debug("Network %s no-op, unplug_vip loadbalancer %s, vip %s", self.__class__.__name__, loadbalancer.id, vip.ip_address) self.networkconfigconfig[(loadbalancer.id, vip.ip_address)] = (loadbalancer, vip, 'unplug_vip') def unplug_aap_port(self, vip, amphora, subnet): LOG.debug("Network %s no-op, unplug_aap_port vip %s amp: %s " "subnet: %s", self.__class__.__name__, vip.ip_address, amphora.id, subnet.id) self.networkconfigconfig[(amphora.id, vip.ip_address)] = (vip, amphora, subnet, 'unplug_aap_port') def plug_network(self, compute_id, network_id, ip_address=None): LOG.debug("Network %s no-op, plug_network compute_id %s, network_id " "%s, ip_address %s", self.__class__.__name__, compute_id, network_id, ip_address) self.networkconfigconfig[(compute_id, network_id, ip_address)] = ( compute_id, network_id, ip_address, 'plug_network') return network_models.Interface( id=uuidutils.generate_uuid(), compute_id=compute_id, network_id=network_id, 
fixed_ips=[], port_id=uuidutils.generate_uuid() ) def unplug_network(self, compute_id, network_id, ip_address=None): LOG.debug("Network %s no-op, unplug_network compute_id %s, " "network_id %s", self.__class__.__name__, compute_id, network_id) self.networkconfigconfig[(compute_id, network_id, ip_address)] = ( compute_id, network_id, ip_address, 'unplug_network') def get_plugged_networks(self, compute_id): LOG.debug("Network %s no-op, get_plugged_networks amphora_id %s", self.__class__.__name__, compute_id) self.networkconfigconfig[compute_id] = ( compute_id, 'get_plugged_networks') return [] def update_vip(self, loadbalancer, for_delete=False): LOG.debug("Network %s no-op, update_vip loadbalancer %s " "with for delete %s", self.__class__.__name__, loadbalancer, for_delete) self.networkconfigconfig[loadbalancer.id] = ( loadbalancer, for_delete, 'update_vip') def get_network(self, network_id): LOG.debug("Network %s no-op, get_network network_id %s", self.__class__.__name__, network_id) self.networkconfigconfig[network_id] = (network_id, 'get_network') network = network_models.Network(id=uuidutils.generate_uuid(), port_security_enabled=True) class ItIsInsideMe(network_models.Subnet): def to_dict(self, **kwargs): return [{}] def __contains__(self, item): return True def __iter__(self): yield uuidutils.generate_uuid() network.subnets = ItIsInsideMe() return network def get_subnet(self, subnet_id): LOG.debug("Subnet %s no-op, get_subnet subnet_id %s", self.__class__.__name__, subnet_id) self.networkconfigconfig[subnet_id] = (subnet_id, 'get_subnet') return network_models.Subnet(id=uuidutils.generate_uuid()) def get_port(self, port_id): LOG.debug("Port %s no-op, get_port port_id %s", self.__class__.__name__, port_id) self.networkconfigconfig[port_id] = (port_id, 'get_port') return network_models.Port(id=uuidutils.generate_uuid()) def get_network_by_name(self, network_name): LOG.debug("Network %s no-op, get_network_by_name network_name %s", self.__class__.__name__, network_name) self.networkconfigconfig[network_name] = (network_name, 'get_network_by_name') return network_models.Network(id=uuidutils.generate_uuid(), port_security_enabled=True) def get_subnet_by_name(self, subnet_name): LOG.debug("Subnet %s no-op, get_subnet_by_name subnet_name %s", self.__class__.__name__, subnet_name) self.networkconfigconfig[subnet_name] = (subnet_name, 'get_subnet_by_name') return network_models.Subnet(id=uuidutils.generate_uuid()) def get_port_by_name(self, port_name): LOG.debug("Port %s no-op, get_port_by_name port_name %s", self.__class__.__name__, port_name) self.networkconfigconfig[port_name] = (port_name, 'get_port_by_name') return network_models.Port(id=uuidutils.generate_uuid()) def get_port_by_net_id_device_id(self, network_id, device_id): LOG.debug("Port %s no-op, get_port_by_net_id_device_id network_id %s" " device_id %s", self.__class__.__name__, network_id, device_id) self.networkconfigconfig[(network_id, device_id)] = ( network_id, device_id, 'get_port_by_net_id_device_id') return network_models.Port(id=uuidutils.generate_uuid()) def get_security_group(self, sg_name): LOG.debug("Network %s no-op, get_security_group name %s", self.__class__.__name__, sg_name) self.networkconfigconfig[(sg_name)] = (sg_name, 'get_security_group') return network_models.SecurityGroup(id=uuidutils.generate_uuid()) def failover_preparation(self, amphora): LOG.debug("failover %s no-op, failover_preparation, amphora id %s", self.__class__.__name__, amphora.id) def plug_port(self, amphora, port): LOG.debug("Network %s no-op, 
plug_port amphora.id %s, port_id " "%s", self.__class__.__name__, amphora.id, port.id) self.networkconfigconfig[(amphora.id, port.id)] = ( amphora, port, 'plug_port') def _get_amp_net_configs(self, amp, amp_configs, vip_subnet, vip_port): vrrp_port = self.get_port(amp.vrrp_port_id) ha_port = self.get_port(amp.ha_port_id) amp_configs[amp.id] = network_models.AmphoraNetworkConfig( amphora=amp, vrrp_subnet=self.get_subnet( vrrp_port.get_subnet_id(amp.vrrp_ip)), vrrp_port=vrrp_port, ha_subnet=self.get_subnet( ha_port.get_subnet_id(amp.ha_ip)), ha_port=ha_port) def get_network_configs(self, loadbalancer, amphora=None): amphora_id = amphora.id if amphora else None LOG.debug("Network %s no-op, get_network_configs loadbalancer id " "%s amphora id: %s", self.__class__.__name__, loadbalancer.id, amphora_id) self.networkconfigconfig[(loadbalancer.id)] = ( loadbalancer, 'get_network_configs') vip_subnet = self.get_subnet(loadbalancer.vip.subnet_id) vip_port = self.get_port(loadbalancer.vip.port_id) amp_configs = {} if amphora: self._get_amp_net_configs(amphora, amp_configs, vip_subnet, vip_port) else: for amp in loadbalancer.amphorae: self._get_amp_net_configs(amp, amp_configs, vip_subnet, vip_port) return amp_configs def wait_for_port_detach(self, amphora): LOG.debug("failover %s no-op, wait_for_port_detach, amphora id %s", self.__class__.__name__, amphora.id) def get_qos_policy(self, qos_policy_id): LOG.debug("Qos Policy %s no-op, get_qos_policy qos_policy_id %s", self.__class__.__name__, qos_policy_id) self.networkconfigconfig[qos_policy_id] = (qos_policy_id, 'get_qos_policy') return qos_policy_id def apply_qos_on_port(self, qos_id, port_id): LOG.debug("Network %s no-op, apply_qos_on_port qos_id %s, port_id " "%s", self.__class__.__name__, qos_id, port_id) self.networkconfigconfig[(qos_id, port_id)] = ( qos_id, port_id, 'apply_qos_on_port') def qos_enabled(self): return self._qos_extension_enabled def get_network_ip_availability(self, network): LOG.debug("Network %s no-op, network_ip_availability network_id %s", self.__class__.__name__, network.id) self.networkconfigconfig[(network.id, 'ip_availability')] = ( network.id, 'get_network_ip_availability') ip_avail = network_models.Network_IP_Availability( network_id=network.id) subnet_ip_availability = [] network.subnets = list(network.subnets) for subnet_id in network.subnets: subnet_ip_availability.append({'subnet_id': subnet_id, 'used_ips': 0, 'total_ips': 254}) ip_avail.subnet_ip_availability = subnet_ip_availability return ip_avail def delete_port(self, port_id): LOG.debug("Network %s no-op, delete_port port_id %s", self.__class__.__name__, port_id) self.networkconfigconfig[port_id] = (port_id, 'delete_port') def set_port_admin_state_up(self, port_id, state): LOG.debug("Network %s no-op, set_port_admin_state_up port_id %s, " "state %s", self.__class__.__name__, port_id, state) self.networkconfigconfig[(port_id, state)] = (port_id, state, 'admin_down_port') def create_port(self, network_id, name=None, fixed_ips=(), secondary_ips=(), security_group_ids=(), admin_state_up=True, qos_policy_id=None): LOG.debug("Network %s no-op, create_port network_id %s", self.__class__.__name__, network_id) if not name: name = 'no-op-port' port_id = uuidutils.generate_uuid() project_id = uuidutils.generate_uuid() fixed_ip_obj_list = [] for fixed_ip in fixed_ips: if fixed_ip and not fixed_ip.get('ip_address'): fixed_ip_obj_list.append( network_models.FixedIP(subnet_id=fixed_ip.get('subnet_id'), ip_address='198.51.100.56')) else: fixed_ip_obj_list.append( 
network_models.FixedIP( subnet_id=fixed_ip.get('subnet_id'), ip_address=fixed_ip.get('ip_address'))) if not fixed_ip_obj_list: fixed_ip_obj_list = [network_models.FixedIP( subnet_id=uuidutils.generate_uuid(), ip_address='198.51.100.56')] self.networkconfigconfig[(network_id, 'create_port')] = ( network_id, name, fixed_ip_obj_list, secondary_ips, security_group_ids, admin_state_up, qos_policy_id) return network_models.Port( id=port_id, name=name, device_id='no-op-device-id', device_owner='Octavia', mac_address='00:00:5E:00:53:05', network_id=network_id, status='UP', project_id=project_id, admin_state_up=admin_state_up, fixed_ips=fixed_ip_obj_list, qos_policy_id=qos_policy_id, security_group_ids=security_group_ids) class NoopNetworkDriver(driver_base.AbstractNetworkDriver): def __init__(self): super(NoopNetworkDriver, self).__init__() self.driver = NoopManager() def allocate_vip(self, loadbalancer): return self.driver.allocate_vip(loadbalancer) def deallocate_vip(self, vip): self.driver.deallocate_vip(vip) def plug_vip(self, loadbalancer, vip): return self.driver.plug_vip(loadbalancer, vip) def unplug_vip(self, loadbalancer, vip): self.driver.unplug_vip(loadbalancer, vip) def plug_network(self, amphora_id, network_id, ip_address=None): return self.driver.plug_network(amphora_id, network_id, ip_address) def unplug_network(self, amphora_id, network_id, ip_address=None): self.driver.unplug_network(amphora_id, network_id, ip_address=ip_address) def get_plugged_networks(self, amphora_id): return self.driver.get_plugged_networks(amphora_id) def update_vip(self, loadbalancer, for_delete=False): self.driver.update_vip(loadbalancer, for_delete) def get_network(self, network_id, context=None): return self.driver.get_network(network_id) def get_subnet(self, subnet_id, context=None): return self.driver.get_subnet(subnet_id) def get_port(self, port_id, context=None): return self.driver.get_port(port_id) def get_qos_policy(self, qos_policy_id): return self.driver.get_qos_policy(qos_policy_id) def get_network_by_name(self, network_name): return self.driver.get_network_by_name(network_name) def get_subnet_by_name(self, subnet_name): return self.driver.get_subnet_by_name(subnet_name) def get_port_by_name(self, port_name): return self.driver.get_port_by_name(port_name) def get_port_by_net_id_device_id(self, network_id, device_id): return self.driver.get_port_by_net_id_device_id(network_id, device_id) def get_security_group(self, sg_name): return self.driver.get_security_group(sg_name) def failover_preparation(self, amphora): self.driver.failover_preparation(amphora) def plug_port(self, amphora, port): return self.driver.plug_port(amphora, port) def get_network_configs(self, loadbalancer, amphora=None): return self.driver.get_network_configs(loadbalancer, amphora) def wait_for_port_detach(self, amphora): self.driver.wait_for_port_detach(amphora) def apply_qos_on_port(self, qos_id, port_id): self.driver.apply_qos_on_port(qos_id, port_id) def update_vip_sg(self, load_balancer, vip): self.driver.update_vip_sg(load_balancer, vip) def plug_aap_port(self, load_balancer, vip, amphora, subnet): return self.driver.plug_aap_port(load_balancer, vip, amphora, subnet) def unplug_aap_port(self, vip, amphora, subnet): self.driver.unplug_aap_port(vip, amphora, subnet) def qos_enabled(self): return self.driver.qos_enabled() def get_network_ip_availability(self, network): return self.driver.get_network_ip_availability(network) def delete_port(self, port_id): self.driver.delete_port(port_id) def set_port_admin_state_up(self, 
port_id, state): self.driver.set_port_admin_state_up(port_id, state) def create_port(self, network_id, name=None, fixed_ips=(), secondary_ips=(), security_group_ids=(), admin_state_up=True, qos_policy_id=None): return self.driver.create_port( network_id, name, fixed_ips, secondary_ips, security_group_ids, admin_state_up, qos_policy_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/opts.py0000664000175000017500000000546500000000000016171 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import itertools import operator from keystoneauth1 import loading as ks_loading from oslo_config import cfg import octavia.certificates.common.local import octavia.common.config from octavia.common import constants def list_opts(): return [ ('DEFAULT', itertools.chain(octavia.common.config.core_opts)), ('api_settings', octavia.common.config.api_opts), ('amphora_agent', octavia.common.config.amphora_agent_opts), ('compute', octavia.common.config.compute_opts), ('networking', octavia.common.config.networking_opts), ('oslo_messaging', octavia.common.config.oslo_messaging_opts), ('haproxy_amphora', octavia.common.config.haproxy_amphora_opts), ('health_manager', octavia.common.config.healthmanager_opts), ('controller_worker', octavia.common.config.controller_worker_opts), ('task_flow', octavia.common.config.task_flow_opts), ('certificates', itertools.chain( octavia.common.config.certificate_opts, octavia.certificates.common.local.certgen_opts)), ('house_keeping', octavia.common.config.house_keeping_opts), ('keepalived_vrrp', octavia.common.config.keepalived_vrrp_opts), ('nova', octavia.common.config.nova_opts), ('neutron', octavia.common.config.neutron_opts), ('glance', octavia.common.config.glance_opts), ('quotas', octavia.common.config.quota_opts), ('audit', octavia.common.config.audit_opts), ('driver_agent', octavia.common.config.driver_agent_opts), add_auth_opts(), ] def add_auth_opts(): opts = ks_loading.register_session_conf_options( cfg.CONF, constants.SERVICE_AUTH) opt_list = copy.deepcopy(opts) opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0]) # NOTE(mhickey): There are a lot of auth plugins, we just generate # the config options for a few common ones plugins = ['password', 'v2password', 'v3password'] for name in plugins: for plugin_option in ks_loading.get_auth_plugin_conf_options(name): if all(option.name != plugin_option.name for option in opt_list): opt_list.append(plugin_option) opt_list.sort(key=operator.attrgetter('name')) return (constants.SERVICE_AUTH, opt_list) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4062166 octavia-6.2.2/octavia/policies/0000775000175000017500000000000000000000000016427 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 
octavia-6.2.2/octavia/policies/__init__.py0000664000175000017500000000357300000000000020550 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from octavia.policies import amphora from octavia.policies import availability_zone from octavia.policies import availability_zone_profile from octavia.policies import base from octavia.policies import flavor from octavia.policies import flavor_profile from octavia.policies import healthmonitor from octavia.policies import l7policy from octavia.policies import l7rule from octavia.policies import listener from octavia.policies import loadbalancer from octavia.policies import member from octavia.policies import pool from octavia.policies import provider from octavia.policies import provider_availability_zone from octavia.policies import provider_flavor from octavia.policies import quota def list_rules(): return itertools.chain( base.list_rules(), flavor.list_rules(), flavor_profile.list_rules(), availability_zone.list_rules(), availability_zone_profile.list_rules(), healthmonitor.list_rules(), l7policy.list_rules(), l7rule.list_rules(), listener.list_rules(), loadbalancer.list_rules(), member.list_rules(), pool.list_rules(), provider.list_rules(), quota.list_rules(), amphora.list_rules(), provider_flavor.list_rules(), provider_availability_zone.list_rules(), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/amphora.py0000664000175000017500000000446600000000000020442 0ustar00zuulzuul00000000000000# Copyright 2017 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
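# [Editor's note] policies/__init__.py above stitches the per-resource rule
# lists together with itertools.chain, so each policy module (like this
# amphora one) only needs to expose list_rules(). A tiny self-contained
# sketch of that aggregation, with plain lists standing in for the real
# modules' RuleDefault lists:

import itertools

# Hypothetical per-module rule lists (the real ones hold RuleDefaults).
amphora_rules = ['amphora:get_all', 'amphora:get_one']
flavor_rules = ['flavor:get_all']


def list_all_rules():
    # chain() yields each module's rules lazily, in order.
    return itertools.chain(amphora_rules, flavor_rules)


assert list(list_all_rules()) == [
    'amphora:get_all', 'amphora:get_one', 'flavor:get_all']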
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AMPHORA, action=constants.RBAC_GET_ALL), constants.RULE_API_ADMIN, "List Amphorae", [{'method': 'GET', 'path': '/v2/octavia/amphorae'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AMPHORA, action=constants.RBAC_GET_ONE), constants.RULE_API_ADMIN, "Show Amphora details", [{'method': 'GET', 'path': '/v2/octavia/amphorae/{amphora_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AMPHORA, action=constants.RBAC_PUT_CONFIG), constants.RULE_API_ADMIN, "Update Amphora Agent Configuration", [{'method': 'PUT', 'path': '/v2/octavia/amphorae/{amphora_id}/config'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AMPHORA, action=constants.RBAC_PUT_FAILOVER), constants.RULE_API_ADMIN, "Failover Amphora", [{'method': 'PUT', 'path': '/v2/octavia/amphorae/{amphora_id}/failover'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AMPHORA, action=constants.RBAC_GET_STATS), constants.RULE_API_ADMIN, "Show Amphora statistics", [{'method': 'GET', 'path': '/v2/octavia/amphorae/{amphora_id}/stats'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/availability_zone.py0000664000175000017500000000464200000000000022514 0ustar00zuulzuul00000000000000# Copyright 2019 Verizon Media # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
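# [Editor's note] Each DocumentedRuleDefault in these modules pairs a rule
# name with a check string and the API operations it guards. A minimal
# sketch of how such defaults are typically registered and enforced with
# oslo.policy; the rule name and role below are made up for illustration
# and are not Octavia's:

from oslo_config import cfg
from oslo_policy import policy as oslo_policy

enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.register_default(oslo_policy.RuleDefault(
    'example_api:thing:get_all', 'role:observer',
    description='List things (illustrative rule only)'))

# enforce() evaluates the check string against the caller's credentials.
creds = {'roles': ['observer'], 'project_id': 'p1'}
assert enforcer.enforce('example_api:thing:get_all', {}, creds)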
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AVAILABILITY_ZONE, action=constants.RBAC_GET_ALL), constants.RULE_API_READ, "List Availability Zones", [{'method': 'GET', 'path': '/v2.0/lbaas/availabilityzones'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AVAILABILITY_ZONE, action=constants.RBAC_POST), constants.RULE_API_ADMIN, "Create an Availability Zone", [{'method': 'POST', 'path': '/v2.0/lbaas/availabilityzones'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AVAILABILITY_ZONE, action=constants.RBAC_PUT), constants.RULE_API_ADMIN, "Update an Availability Zone", [{'method': 'PUT', 'path': '/v2.0/lbaas/availabilityzones/{availability_zone_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AVAILABILITY_ZONE, action=constants.RBAC_GET_ONE), constants.RULE_API_READ, "Show Availability Zone details", [{'method': 'GET', 'path': '/v2.0/lbaas/availabilityzones/{availability_zone_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_AVAILABILITY_ZONE, action=constants.RBAC_DELETE), constants.RULE_API_ADMIN, "Remove an Availability Zone", [{'method': 'DELETE', 'path': '/v2.0/lbaas/availabilityzones/{availability_zone_id}'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/availability_zone_profile.py0000664000175000017500000000501700000000000024231 0ustar00zuulzuul00000000000000# Copyright 2019 Verizon Media # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy

from octavia.common import constants

rules = [
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(
            rbac_obj=constants.RBAC_AVAILABILITY_ZONE_PROFILE,
            action=constants.RBAC_GET_ALL),
        constants.RULE_API_ADMIN,
        "List Availability Zone Profiles",
        [{'method': 'GET', 'path': '/v2.0/lbaas/availabilityzoneprofiles'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(
            rbac_obj=constants.RBAC_AVAILABILITY_ZONE_PROFILE,
            action=constants.RBAC_POST),
        constants.RULE_API_ADMIN,
        "Create an Availability Zone Profile",
        [{'method': 'POST', 'path': '/v2.0/lbaas/availabilityzoneprofiles'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(
            rbac_obj=constants.RBAC_AVAILABILITY_ZONE_PROFILE,
            action=constants.RBAC_PUT),
        constants.RULE_API_ADMIN,
        "Update an Availability Zone Profile",
        [{'method': 'PUT',
          'path': '/v2.0/lbaas/availabilityzoneprofiles/'
                  '{availability_zone_profile_id}'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(
            rbac_obj=constants.RBAC_AVAILABILITY_ZONE_PROFILE,
            action=constants.RBAC_GET_ONE),
        constants.RULE_API_ADMIN,
        "Show Availability Zone Profile details",
        [{'method': 'GET',
          'path': '/v2.0/lbaas/availabilityzoneprofiles/'
                  '{availability_zone_profile_id}'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(
            rbac_obj=constants.RBAC_AVAILABILITY_ZONE_PROFILE,
            action=constants.RBAC_DELETE),
        constants.RULE_API_ADMIN,
        "Remove an Availability Zone Profile",
        [{'method': 'DELETE',
          'path': '/v2.0/lbaas/availabilityzoneprofiles/'
                  '{availability_zone_profile_id}'}]
    ),
]


def list_rules():
    return rules


octavia-6.2.2/octavia/policies/base.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

rules = [
    # The default is to not allow access unless the auth_strategy is 'noauth'.
    # Users must be a member of one of the following roles to have access to
    # the load-balancer API:
    #
    # role:load-balancer_observer
    #     User has access to load-balancer read-only APIs
    # role:load-balancer_global_observer
    #     User has access to load-balancer read-only APIs including resources
    #     owned by others.
    # role:load-balancer_member
    #     User has access to load-balancer read and write APIs
    # role:load-balancer_admin
    #     User is considered an admin for all load-balancer APIs including
    #     resources owned by others.
    # role:admin
    #     User is admin to all APIs
    policy.RuleDefault('context_is_admin',
                       'role:admin or role:load-balancer_admin'),

    # Note: 'is_admin:True' is a policy rule that takes into account the
    # auth_strategy == noauth configuration setting.
    # It is equivalent to 'rule:context_is_admin or {auth_strategy == noauth}'

    policy.RuleDefault('load-balancer:owner', 'project_id:%(project_id)s'),

    # API access roles
    policy.RuleDefault('load-balancer:admin',
                       'is_admin:True or '
                       'role:admin or '
                       'role:load-balancer_admin'),
    policy.RuleDefault('load-balancer:observer_and_owner',
                       'role:load-balancer_observer and '
                       'rule:load-balancer:owner'),
    policy.RuleDefault('load-balancer:global_observer',
                       'role:load-balancer_global_observer'),
    policy.RuleDefault('load-balancer:member_and_owner',
                       'role:load-balancer_member and '
                       'rule:load-balancer:owner'),

    # API access methods
    policy.RuleDefault('load-balancer:read',
                       'rule:load-balancer:observer_and_owner or '
                       'rule:load-balancer:global_observer or '
                       'rule:load-balancer:member_and_owner or '
                       'rule:load-balancer:admin'),
    policy.RuleDefault('load-balancer:read-global',
                       'rule:load-balancer:global_observer or '
                       'rule:load-balancer:admin'),
    policy.RuleDefault('load-balancer:write',
                       'rule:load-balancer:member_and_owner or '
                       'rule:load-balancer:admin'),
    policy.RuleDefault('load-balancer:read-quota',
                       'rule:load-balancer:observer_and_owner or '
                       'rule:load-balancer:global_observer or '
                       'rule:load-balancer:member_and_owner or '
                       'role:load-balancer_quota_admin or '
                       'rule:load-balancer:admin'),
    policy.RuleDefault('load-balancer:read-quota-global',
                       'rule:load-balancer:global_observer or '
                       'role:load-balancer_quota_admin or '
                       'rule:load-balancer:admin'),
    policy.RuleDefault('load-balancer:write-quota',
                       'role:load-balancer_quota_admin or '
                       'rule:load-balancer:admin'),
]


def list_rules():
    return rules


octavia-6.2.2/octavia/policies/flavor.py

# Copyright 2017 Walmart Stores Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
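# ---------------------------------------------------------------------------
# Editorial note (illustrative, hedged): operators can override any of the
# defaults defined in these modules in an oslo.policy override file. For
# example, to restrict flavor listing to the admin rule instead of its
# default read rule, a policy.yaml entry along these lines could be used --
# the concrete rule name assumes the usual '{rbac_obj}{action}' expansion of
# the constants and should be verified against a generated sample file:
#
#     "os_load-balancer_api:flavor:get_all": "rule:load-balancer:admin"
#
# ---------------------------------------------------------------------------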
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR, action=constants.RBAC_GET_ALL), constants.RULE_API_READ, "List Flavors", [{'method': 'GET', 'path': '/v2.0/lbaas/flavors'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR, action=constants.RBAC_POST), constants.RULE_API_ADMIN, "Create a Flavor", [{'method': 'POST', 'path': '/v2.0/lbaas/flavors'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR, action=constants.RBAC_PUT), constants.RULE_API_ADMIN, "Update a Flavor", [{'method': 'PUT', 'path': '/v2.0/lbaas/flavors/{flavor_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR, action=constants.RBAC_GET_ONE), constants.RULE_API_READ, "Show Flavor details", [{'method': 'GET', 'path': '/v2.0/lbaas/flavors/{flavor_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR, action=constants.RBAC_DELETE), constants.RULE_API_ADMIN, "Remove a Flavor", [{'method': 'DELETE', 'path': '/v2.0/lbaas/flavors/{flavor_id}'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/flavor_profile.py0000664000175000017500000000456200000000000022021 0ustar00zuulzuul00000000000000# Copyright 2017 Walmart Stores Inc.. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR_PROFILE, action=constants.RBAC_GET_ALL), constants.RULE_API_ADMIN, "List Flavor Profiles", [{'method': 'GET', 'path': '/v2.0/lbaas/flavorprofiles'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR_PROFILE, action=constants.RBAC_POST), constants.RULE_API_ADMIN, "Create a Flavor Profile", [{'method': 'POST', 'path': '/v2.0/lbaas/flavorprofiles'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR_PROFILE, action=constants.RBAC_PUT), constants.RULE_API_ADMIN, "Update a Flavor Profile", [{'method': 'PUT', 'path': '/v2.0/lbaas/flavorprofiles/{flavor_profile_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR_PROFILE, action=constants.RBAC_GET_ONE), constants.RULE_API_ADMIN, "Show Flavor Profile details", [{'method': 'GET', 'path': '/v2.0/lbaas/flavorprofiles/{flavor_profile_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_FLAVOR_PROFILE, action=constants.RBAC_DELETE), constants.RULE_API_ADMIN, "Remove a Flavor Profile", [{'method': 'DELETE', 'path': '/v2.0/lbaas/flavorprofiles/{flavor_profile_id}'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/healthmonitor.py0000664000175000017500000000532000000000000021656 0ustar00zuulzuul00000000000000# Copyright 2017 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
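# ---------------------------------------------------------------------------
# Editorial note (illustrative, hedged): because every entry in these
# modules is a DocumentedRuleDefault (name, default check string,
# human-readable description and the API operations it guards), oslo.policy
# tooling can render a documented sample policy file from them. Assuming the
# project exposes its rules under an 'oslo.policy.policies' entry point
# named 'octavia', the invocation would look like:
#
#     oslopolicy-sample-generator --namespace octavia \
#         --output-file octavia.policy.yaml.sample
#
# ---------------------------------------------------------------------------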
from oslo_policy import policy

from octavia.common import constants

rules = [
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_HEALTHMONITOR,
                                    action=constants.RBAC_GET_ALL),
        constants.RULE_API_READ,
        "List Health Monitors",
        [{'method': 'GET', 'path': '/v2/lbaas/healthmonitors'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_HEALTHMONITOR,
                                    action=constants.RBAC_GET_ALL_GLOBAL),
        constants.RULE_API_READ_GLOBAL,
        "List Health Monitors including resources owned by others",
        [{'method': 'GET', 'path': '/v2/lbaas/healthmonitors'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_HEALTHMONITOR,
                                    action=constants.RBAC_POST),
        constants.RULE_API_WRITE,
        "Create a Health Monitor",
        [{'method': 'POST', 'path': '/v2/lbaas/healthmonitors'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_HEALTHMONITOR,
                                    action=constants.RBAC_GET_ONE),
        constants.RULE_API_READ,
        "Show Health Monitor details",
        [{'method': 'GET',
          'path': '/v2/lbaas/healthmonitors/{healthmonitor_id}'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_HEALTHMONITOR,
                                    action=constants.RBAC_PUT),
        constants.RULE_API_WRITE,
        "Update a Health Monitor",
        [{'method': 'PUT',
          'path': '/v2/lbaas/healthmonitors/{healthmonitor_id}'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_HEALTHMONITOR,
                                    action=constants.RBAC_DELETE),
        constants.RULE_API_WRITE,
        "Remove a Health Monitor",
        [{'method': 'DELETE',
          'path': '/v2/lbaas/healthmonitors/{healthmonitor_id}'}]
    ),
]


def list_rules():
    return rules


octavia-6.2.2/octavia/policies/l7policy.py

# Copyright 2017 Rackspace, US Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from octavia.common import constants

rules = [
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7POLICY,
                                    action=constants.RBAC_GET_ALL),
        constants.RULE_API_READ,
        "List L7 Policies",
        [{'method': 'GET', 'path': '/v2/lbaas/l7policies'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7POLICY,
                                    action=constants.RBAC_GET_ALL_GLOBAL),
        constants.RULE_API_READ_GLOBAL,
        "List L7 Policies including resources owned by others",
        [{'method': 'GET', 'path': '/v2/lbaas/l7policies'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7POLICY,
                                    action=constants.RBAC_POST),
        constants.RULE_API_WRITE,
        "Create an L7 Policy",
        [{'method': 'POST', 'path': '/v2/lbaas/l7policies'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7POLICY,
                                    action=constants.RBAC_GET_ONE),
        constants.RULE_API_READ,
        "Show L7 Policy details",
        [{'method': 'GET', 'path': '/v2/lbaas/l7policies/{l7policy_id}'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7POLICY,
                                    action=constants.RBAC_PUT),
        constants.RULE_API_WRITE,
        "Update an L7 Policy",
        [{'method': 'PUT', 'path': '/v2/lbaas/l7policies/{l7policy_id}'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7POLICY,
                                    action=constants.RBAC_DELETE),
        constants.RULE_API_WRITE,
        "Remove an L7 Policy",
        [{'method': 'DELETE', 'path': '/v2/lbaas/l7policies/{l7policy_id}'}]
    ),
]


def list_rules():
    return rules


octavia-6.2.2/octavia/policies/l7rule.py

# Copyright 2017 Rackspace, US Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from octavia.common import constants

rules = [
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7RULE,
                                    action=constants.RBAC_GET_ALL),
        constants.RULE_API_READ,
        "List L7 Rules",
        [{'method': 'GET',
          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7RULE,
                                    action=constants.RBAC_POST),
        constants.RULE_API_WRITE,
        "Create an L7 Rule",
        [{'method': 'POST',
          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7RULE,
                                    action=constants.RBAC_GET_ONE),
        constants.RULE_API_READ,
        "Show L7 Rule details",
        [{'method': 'GET',
          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7RULE,
                                    action=constants.RBAC_PUT),
        constants.RULE_API_WRITE,
        "Update an L7 Rule",
        [{'method': 'PUT',
          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}'}]
    ),
    policy.DocumentedRuleDefault(
        '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_L7RULE,
                                    action=constants.RBAC_DELETE),
        constants.RULE_API_WRITE,
        "Remove an L7 Rule",
        [{'method': 'DELETE',
          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}'}]
    ),
]


def list_rules():
    return rules


octavia-6.2.2/octavia/policies/listener.py

# Copyright 2017 Rackspace, US Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
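# ---------------------------------------------------------------------------
# Editorial sketch (not part of the shipped module): every rule name in
# these files is built by concatenating an object prefix constant with an
# action suffix constant. A quick, hedged way to see the concrete names the
# listener module registers (the printed values depend entirely on
# octavia.common.constants and are not asserted here):
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from octavia.policies import listener as listener_policies

    # Print the fully expanded rule name of each listener policy default.
    for rule in listener_policies.list_rules():
        print(rule.name)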
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LISTENER, action=constants.RBAC_GET_ALL), constants.RULE_API_READ, "List Listeners", [{'method': 'GET', 'path': '/v2/lbaas/listeners'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LISTENER, action=constants.RBAC_GET_ALL_GLOBAL), constants.RULE_API_READ_GLOBAL, "List Listeners including resources owned by others", [{'method': 'GET', 'path': '/v2/lbaas/listeners'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LISTENER, action=constants.RBAC_POST), constants.RULE_API_WRITE, "Create a Listener", [{'method': 'POST', 'path': '/v2/lbaas/listeners'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LISTENER, action=constants.RBAC_GET_ONE), constants.RULE_API_READ, "Show Listener details", [{'method': 'GET', 'path': '/v2/lbaas/listeners/{listener_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LISTENER, action=constants.RBAC_PUT), constants.RULE_API_WRITE, "Update a Listener", [{'method': 'PUT', 'path': '/v2/lbaas/listeners/{listener_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LISTENER, action=constants.RBAC_DELETE), constants.RULE_API_WRITE, "Remove a Listener", [{'method': 'DELETE', 'path': '/v2/lbaas/listeners/{listener_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LISTENER, action=constants.RBAC_GET_STATS), constants.RULE_API_READ, "Show Listener statistics", [{'method': 'GET', 'path': '/v2/lbaas/listeners/{listener_id}/stats'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/loadbalancer.py0000664000175000017500000000733500000000000021420 0ustar00zuulzuul00000000000000# Copyright 2017 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_GET_ALL), constants.RULE_API_READ, "List Load Balancers", [{'method': 'GET', 'path': '/v2/lbaas/loadbalancers'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_GET_ALL_GLOBAL), constants.RULE_API_READ_GLOBAL, "List Load Balancers including resources owned by others", [{'method': 'GET', 'path': '/v2/lbaas/loadbalancers'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_POST), constants.RULE_API_WRITE, "Create a Load Balancer", [{'method': 'POST', 'path': '/v2/lbaas/loadbalancers'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_GET_ONE), constants.RULE_API_READ, "Show Load Balancer details", [{'method': 'GET', 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_PUT), constants.RULE_API_WRITE, "Update a Load Balancer", [{'method': 'PUT', 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_DELETE), constants.RULE_API_WRITE, "Remove a Load Balancer", [{'method': 'DELETE', 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_GET_STATS), constants.RULE_API_READ, "Show Load Balancer statistics", [{'method': 'GET', 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}/stats'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_GET_STATUS), constants.RULE_API_READ, "Show Load Balancer status", [{'method': 'GET', 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}/status'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_LOADBALANCER, action=constants.RBAC_PUT_FAILOVER), constants.RULE_API_ADMIN, "Failover a Load Balancer", [{'method': 'PUT', 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}/failover'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/member.py0000664000175000017500000000446200000000000020256 0ustar00zuulzuul00000000000000# Copyright 2017 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_MEMBER, action=constants.RBAC_GET_ALL), constants.RULE_API_READ, "List Members of a Pool", [{'method': 'GET', 'path': '/v2/lbaas/pools/{pool_id}/members'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_MEMBER, action=constants.RBAC_POST), constants.RULE_API_WRITE, "Create a Member", [{'method': 'POST', 'path': '/v2/lbaas/pools/{pool_id}/members'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_MEMBER, action=constants.RBAC_GET_ONE), constants.RULE_API_READ, "Show Member details", [{'method': 'GET', 'path': '/v2/lbaas/pools/{pool_id}/members/{member_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_MEMBER, action=constants.RBAC_PUT), constants.RULE_API_WRITE, "Update a Member", [{'method': 'PUT', 'path': '/v2/lbaas/pools/{pool_id}/members/{member_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_MEMBER, action=constants.RBAC_DELETE), constants.RULE_API_WRITE, "Remove a Member", [{'method': 'DELETE', 'path': '/v2/lbaas/pools/{pool_id}/members/{member_id}'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/pool.py0000664000175000017500000000500300000000000017750 0ustar00zuulzuul00000000000000# Copyright 2017 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_POOL, action=constants.RBAC_GET_ALL), constants.RULE_API_READ, "List Pools", [{'method': 'GET', 'path': '/v2/lbaas/pools'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_POOL, action=constants.RBAC_GET_ALL_GLOBAL), constants.RULE_API_READ_GLOBAL, "List Pools including resources owned by others", [{'method': 'GET', 'path': '/v2/lbaas/pools'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_POOL, action=constants.RBAC_POST), constants.RULE_API_WRITE, "Create a Pool", [{'method': 'POST', 'path': '/v2/lbaas/pools'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_POOL, action=constants.RBAC_GET_ONE), constants.RULE_API_READ, "Show Pool details", [{'method': 'GET', 'path': '/v2/lbaas/pools/{pool_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_POOL, action=constants.RBAC_PUT), constants.RULE_API_WRITE, "Update a Pool", [{'method': 'PUT', 'path': '/v2/lbaas/pools/{pool_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_POOL, action=constants.RBAC_DELETE), constants.RULE_API_WRITE, "Remove a Pool", [{'method': 'DELETE', 'path': '/v2/lbaas/pools/{pool_id}'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/provider.py0000664000175000017500000000201400000000000020630 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_PROVIDER, action=constants.RBAC_GET_ALL), constants.RULE_API_READ, "List enabled providers", [{'method': 'GET', 'path': '/v2/lbaas/providers'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/provider_availability_zone.py0000664000175000017500000000217000000000000024420 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format( rbac_obj=constants.RBAC_PROVIDER_AVAILABILITY_ZONE, action=constants.RBAC_GET_ALL), constants.RULE_API_ADMIN, "List the provider availability zone capabilities.", [{'method': 'GET', 'path': '/v2/lbaas/providers/{provider}/' 'availability_zone_capabilities'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/provider_flavor.py0000664000175000017500000000211500000000000022203 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_PROVIDER_FLAVOR, action=constants.RBAC_GET_ALL), constants.RULE_API_ADMIN, "List the provider flavor capabilities.", [{'method': 'GET', 'path': '/v2/lbaas/providers/{provider}/flavor_capabilities'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/policies/quota.py0000664000175000017500000000517000000000000020135 0ustar00zuulzuul00000000000000# Copyright 2017 Rackspace, US Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
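# ---------------------------------------------------------------------------
# Editorial note (illustrative, hedged): the quota rules below mix the usual
# owner/observer rules with a dedicated 'load-balancer_quota_admin' role
# (see base.py). One way to sanity-check how a given token would evaluate
# against a rendered policy file is oslo.policy's checker tool; the file
# names here are placeholders, not shipped artifacts:
#
#     oslopolicy-checker --policy /etc/octavia/policy.yaml \
#         --access sample_token_response.json
#
# ---------------------------------------------------------------------------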
from oslo_policy import policy from octavia.common import constants rules = [ policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_QUOTA, action=constants.RBAC_GET_ALL), constants.RULE_API_READ_QUOTA, "List Quotas", [{'method': 'GET', 'path': '/v2/lbaas/quotas'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_QUOTA, action=constants.RBAC_GET_ALL_GLOBAL), constants.RULE_API_READ_QUOTA_GLOBAL, "List Quotas including resources owned by others", [{'method': 'GET', 'path': '/v2/lbaas/quotas'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_QUOTA, action=constants.RBAC_GET_ONE), constants.RULE_API_READ_QUOTA, "Show Quota details", [{'method': 'GET', 'path': '/v2/lbaas/quotas/{project_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_QUOTA, action=constants.RBAC_PUT), constants.RULE_API_WRITE_QUOTA, "Update a Quota", [{'method': 'PUT', 'path': '/v2/lbaas/quotas/{project_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_QUOTA, action=constants.RBAC_DELETE), constants.RULE_API_WRITE_QUOTA, "Reset a Quota", [{'method': 'DELETE', 'path': '/v2/lbaas/quotas/{project_id}'}] ), policy.DocumentedRuleDefault( '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_QUOTA, action=constants.RBAC_GET_DEFAULTS), constants.RULE_API_READ_QUOTA, "Show Default Quota for a Project", [{'method': 'GET', 'path': '/v2/lbaas/quotas/{project_id}/default'}] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4062166 octavia-6.2.2/octavia/tests/0000775000175000017500000000000000000000000015762 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/__init__.py0000664000175000017500000000107400000000000020075 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4062166 octavia-6.2.2/octavia/tests/common/0000775000175000017500000000000000000000000017252 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/common/__init__.py0000664000175000017500000000107400000000000021365 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/common/constants.py0000664000175000017500000002772200000000000021652 0ustar00zuulzuul00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from octavia.common import constants class MockNovaInterface(object): net_id = None port_id = None fixed_ips = [] MOCK_NETWORK_ID = 'mock-network-1' MOCK_NETWORK_ID2 = 'mock-network-2' MOCK_NETWORK_NAME = 'TestNet1' MOCK_SUBNET_ID = 'mock-subnet-1' MOCK_SUBNET_ID2 = 'mock-subnet-2' MOCK_SUBNET_NAME = 'TestSubnet1' MOCK_PORT_ID = 'mock-port-1' MOCK_PORT_ID2 = 'mock-port-2' MOCK_PORT_NAME = 'TestPort1' MOCK_PORT_NAME2 = 'TestPort2' MOCK_COMPUTE_ID = 'mock-compute-1' MOCK_IP_ADDRESS = '10.0.0.1' MOCK_IP_ADDRESS2 = '10.0.0.2' MOCK_GATEWAY_IP = '10.0.0.3' MOCK_IP_VERSION = 4 MOCK_CIDR = '10.0.0.0/24' MOCK_MAC_ADDR = 'fe:16:3e:00:95:5c' MOCK_MAC_ADDR2 = 'fe:16:3e:00:95:5d' MOCK_PROJECT_ID = 'mock-project-1' MOCK_HOST_ROUTES = [] MOCK_SUBNET = {'subnet': {'id': MOCK_SUBNET_ID, 'network_id': MOCK_NETWORK_ID, 'name': MOCK_SUBNET_NAME, 'tenant_id': MOCK_PROJECT_ID, 'gateway_ip': MOCK_GATEWAY_IP, 'cidr': MOCK_CIDR, 'ip_version': MOCK_IP_VERSION, 'host_routes': MOCK_HOST_ROUTES}} MOCK_SUBNET2 = {'subnet': {'id': MOCK_SUBNET_ID2, 'network_id': MOCK_NETWORK_ID2}} MOCK_HOST_ROUTES = [] MOCK_NOVA_INTERFACE = MockNovaInterface() MOCK_NOVA_INTERFACE.net_id = MOCK_NETWORK_ID MOCK_NOVA_INTERFACE.port_id = MOCK_PORT_ID MOCK_NOVA_INTERFACE.fixed_ips = [{'ip_address': MOCK_IP_ADDRESS}] MOCK_NOVA_INTERFACE2 = MockNovaInterface() MOCK_NOVA_INTERFACE2.net_id = MOCK_NETWORK_ID2 MOCK_NOVA_INTERFACE2.port_id = MOCK_PORT_ID2 MOCK_NOVA_INTERFACE2.fixed_ips = [{'ip_address': MOCK_IP_ADDRESS2}] MOCK_DEVICE_OWNER = 'Moctavia' MOCK_DEVICE_ID = 'Moctavia123' MOCK_DEVICE_ID2 = 'Moctavia124' MOCK_SECURITY_GROUP_ID = 'security-group-1' MOCK_SECURITY_GROUP_NAME = 'SecurityGroup1' MOCK_SECURITY_GROUP = { "id": MOCK_SECURITY_GROUP_ID, "name": MOCK_SECURITY_GROUP_NAME, "tenant_id": MOCK_PROJECT_ID, "description": "", "security_group_rules": [{ "id": "85f1c72b-cdd4-484f-a9c8-b3205f4e6f53", "tenant_id": MOCK_PROJECT_ID, "security_group_id": MOCK_SECURITY_GROUP_ID, "ethertype": "IPv4", "direction": "ingress", "protocol": "tcp", "port_range_min": 80, "port_range_max": 80, "remote_ip_prefix": None, "remote_group_id": None, "description": "", "tags": [], "created_at": "2020-03-12T20:44:48Z", "updated_at": "2020-03-12T20:44:48Z", "revision_number": 0, "project_id": MOCK_PROJECT_ID }, { "id": "aa16ae5f-eac2-40b5-994b-5169a06228a4", "tenant_id": MOCK_PROJECT_ID, "security_group_id": "6530d536-3083-4d5c-a4a9-272ac7b8f3de", "ethertype": "IPv4", "direction": "egress", "protocol": None, "port_range_min": None, "port_range_max": None, "remote_ip_prefix": None, "remote_group_id": None, "description": None, "tags": [], "created_at": "2020-03-12T20:43:31Z", 
"updated_at": "2020-03-12T20:43:31Z", "revision_number": 0, "project_id": MOCK_PROJECT_ID, }], "tags": [], "created_at": "2020-03-12T20:43:31Z", "updated_at": "2020-03-12T20:44:48Z", "revision_number": 3, "project_id": MOCK_PROJECT_ID} MOCK_ADMIN_STATE_UP = True MOCK_STATUS = 'ACTIVE' MOCK_MTU = 1500 MOCK_NETWORK_TYPE = 'flat' MOCK_SEGMENTATION_ID = 1 MOCK_ROUTER_EXTERNAL = False MOCK_NEUTRON_PORT = {'port': {'network_id': MOCK_NETWORK_ID, 'device_id': MOCK_DEVICE_ID, 'device_owner': MOCK_DEVICE_OWNER, 'id': MOCK_PORT_ID, 'name': MOCK_PORT_NAME, 'tenant_id': MOCK_PROJECT_ID, 'admin_state_up': MOCK_ADMIN_STATE_UP, 'status': MOCK_STATUS, 'mac_address': MOCK_MAC_ADDR, 'fixed_ips': [{'ip_address': MOCK_IP_ADDRESS, 'subnet_id': MOCK_SUBNET_ID}], 'security_groups': [MOCK_SECURITY_GROUP_ID]}} MOCK_NEUTRON_QOS_POLICY_ID = 'mock-qos-id' MOCK_QOS_POLICY_ID1 = 'qos1-id' MOCK_QOS_POLICY_ID2 = 'qos2-id' MOCK_NEUTRON_PORT2 = {'port': {'network_id': MOCK_NETWORK_ID2, 'device_id': MOCK_DEVICE_ID2, 'device_owner': MOCK_DEVICE_OWNER, 'id': MOCK_PORT_ID2, 'name': MOCK_PORT_NAME2, 'tenant_id': MOCK_PROJECT_ID, 'admin_state_up': MOCK_ADMIN_STATE_UP, 'status': MOCK_STATUS, 'mac_address': MOCK_MAC_ADDR2, 'fixed_ips': [{'ip_address': MOCK_IP_ADDRESS2, 'subnet_id': MOCK_SUBNET_ID2}]}} MOCK_NETWORK = {'network': {'id': MOCK_NETWORK_ID, 'name': MOCK_NETWORK_NAME, 'tenant_id': MOCK_PROJECT_ID, 'admin_state_up': MOCK_ADMIN_STATE_UP, 'subnets': [MOCK_SUBNET_ID], 'mtu': MOCK_MTU, 'provider:network_type': 'flat', 'provider:physical_network': MOCK_NETWORK_NAME, 'provider:segmentation_id': MOCK_SEGMENTATION_ID, 'router:external': MOCK_ROUTER_EXTERNAL}} MOCK_FIXED_IP = {'fixed_ip': {'subnet_id': MOCK_SUBNET_ID, 'ip_address': MOCK_IP_ADDRESS}} MOCK_FLOATING_IP_ID = 'floating-ip-1' MOCK_FLOATING_IP_DESC = 'TestFloatingIP1' MOCK_ROUTER_ID = 'mock-router-1' MOCK_FLOATING_IP = {'floatingip': {'id': MOCK_FLOATING_IP_ID, 'description': MOCK_FLOATING_IP_DESC, 'tenant_id': MOCK_PROJECT_ID, 'status': MOCK_STATUS, 'port_id': MOCK_PORT_ID, 'router_id': MOCK_ROUTER_ID, 'floating_network_id': MOCK_NETWORK_ID, 'floating_ip_address': MOCK_IP_ADDRESS, 'fixed_ip_address': MOCK_IP_ADDRESS2, 'fixed_port_id': MOCK_PORT_ID2}} MOCK_AMP_ID1 = 'amp1-id' MOCK_AMP_ID2 = 'amp2-id' MOCK_AMP_COMPUTE_ID1 = 'amp1-compute-id' MOCK_AMP_COMPUTE_ID2 = 'amp2-compute-id' MOCK_MANAGEMENT_SUBNET_ID = 'mgmt-subnet-1' MOCK_MANAGEMENT_NET_ID = 'mgmt-net-1' MOCK_MANAGEMENT_PORT_ID1 = 'mgmt-port-1' MOCK_MANAGEMENT_PORT_ID2 = 'mgmt-port-2' # These IPs become lb_network_ip MOCK_MANAGEMENT_IP1 = '99.99.99.1' MOCK_MANAGEMENT_IP2 = '99.99.99.2' MOCK_MANAGEMENT_FIXED_IPS1 = [{'ip_address': MOCK_MANAGEMENT_IP1, 'subnet_id': MOCK_MANAGEMENT_SUBNET_ID}] MOCK_MANAGEMENT_FIXED_IPS2 = [{'ip_address': MOCK_MANAGEMENT_IP2, 'subnet_id': MOCK_MANAGEMENT_SUBNET_ID}] MOCK_MANAGEMENT_INTERFACE1 = MockNovaInterface() MOCK_MANAGEMENT_INTERFACE1.net_id = MOCK_MANAGEMENT_NET_ID MOCK_MANAGEMENT_INTERFACE1.port_id = MOCK_MANAGEMENT_PORT_ID1 MOCK_MANAGEMENT_INTERFACE1.fixed_ips = MOCK_MANAGEMENT_FIXED_IPS1 MOCK_MANAGEMENT_INTERFACE2 = MockNovaInterface() MOCK_MANAGEMENT_INTERFACE2.net_id = MOCK_MANAGEMENT_NET_ID MOCK_MANAGEMENT_INTERFACE2.port_id = MOCK_MANAGEMENT_PORT_ID2 MOCK_MANAGEMENT_INTERFACE2.fixed_ips = MOCK_MANAGEMENT_FIXED_IPS2 MOCK_MANAGEMENT_PORT1 = {'port': {'network_id': MOCK_MANAGEMENT_NET_ID, 'device_id': MOCK_AMP_COMPUTE_ID1, 'device_owner': MOCK_DEVICE_OWNER, 'id': MOCK_MANAGEMENT_PORT_ID1, 'fixed_ips': MOCK_MANAGEMENT_FIXED_IPS1}} MOCK_MANAGEMENT_PORT2 = {'port': 
{'network_id': MOCK_MANAGEMENT_NET_ID, 'device_id': MOCK_AMP_COMPUTE_ID2, 'device_owner': MOCK_DEVICE_OWNER, 'id': MOCK_MANAGEMENT_PORT_ID2, 'fixed_ips': MOCK_MANAGEMENT_FIXED_IPS2}} MOCK_VIP_SUBNET_ID = 'vip-subnet-1' MOCK_VIP_NET_ID = 'vip-net-1' MOCK_VRRP_PORT_ID1 = 'vrrp-port-1' MOCK_VRRP_PORT_ID2 = 'vrrp-port-2' # These IPs become vrrp_ip MOCK_VRRP_IP1 = '55.55.55.1' MOCK_VRRP_IP2 = '55.55.55.2' MOCK_VRRP_FIXED_IPS1 = [{'ip_address': MOCK_VRRP_IP1, 'subnet_id': MOCK_VIP_SUBNET_ID}] MOCK_VRRP_FIXED_IPS2 = [{'ip_address': MOCK_VRRP_IP2, 'subnet_id': MOCK_VIP_SUBNET_ID}] MOCK_VRRP_INTERFACE1 = MockNovaInterface() MOCK_VRRP_INTERFACE1.net_id = MOCK_VIP_NET_ID MOCK_VRRP_INTERFACE1.port_id = MOCK_VRRP_PORT_ID1 MOCK_VRRP_INTERFACE1.fixed_ips = MOCK_VRRP_FIXED_IPS1 MOCK_VRRP_INTERFACE2 = MockNovaInterface() MOCK_VRRP_INTERFACE2.net_id = MOCK_VIP_NET_ID MOCK_VRRP_INTERFACE2.port_id = MOCK_VRRP_PORT_ID2 MOCK_VRRP_INTERFACE2.fixed_ips = MOCK_VRRP_FIXED_IPS2 MOCK_VRRP_PORT1 = {'port': {'network_id': MOCK_VIP_NET_ID, 'device_id': MOCK_AMP_COMPUTE_ID1, 'device_owner': MOCK_DEVICE_OWNER, 'id': MOCK_VRRP_PORT_ID1, 'fixed_ips': MOCK_VRRP_FIXED_IPS1}} MOCK_VRRP_PORT2 = {'port': {'network_id': MOCK_VIP_NET_ID, 'device_id': MOCK_AMP_COMPUTE_ID2, 'device_owner': MOCK_DEVICE_OWNER, 'id': MOCK_VRRP_PORT_ID2, 'fixed_ips': MOCK_VRRP_FIXED_IPS2}} MOCK_NETWORK_TOTAL_IPS = 254 MOCK_NETWORK_USED_IPS = 0 MOCK_SUBNET_TOTAL_IPS = 254 MOCK_SUBNET_USED_IPS = 0 MOCK_SUBNET_IP_AVAILABILITY = [{'used_ips': MOCK_SUBNET_USED_IPS, 'subnet_id': MOCK_SUBNET_ID, 'total_ips': MOCK_SUBNET_TOTAL_IPS}] MOCK_NETWORK_IP_AVAILABILITY = {'network_ip_availability': ( {'network_id': MOCK_NETWORK_ID, 'tenant_id': MOCK_PROJECT_ID, 'network_name': MOCK_NETWORK_NAME, 'total_ips': MOCK_NETWORK_TOTAL_IPS, 'used_ips': MOCK_NETWORK_USED_IPS, 'subnet_ip_availability': MOCK_SUBNET_IP_AVAILABILITY})} INVALID_LISTENER_POOL_PROTOCOL_MAP = { constants.PROTOCOL_HTTP: [constants.PROTOCOL_HTTPS, constants.PROTOCOL_TCP, constants.PROTOCOL_TERMINATED_HTTPS, constants.PROTOCOL_UDP], constants.PROTOCOL_HTTPS: [constants.PROTOCOL_HTTP, constants.PROTOCOL_TERMINATED_HTTPS, constants.PROTOCOL_UDP], constants.PROTOCOL_TCP: [constants.PROTOCOL_TERMINATED_HTTPS, constants.PROTOCOL_UDP], constants.PROTOCOL_TERMINATED_HTTPS: [constants.PROTOCOL_HTTPS, constants.PROTOCOL_TCP, constants.PROTOCOL_UDP], constants.PROTOCOL_UDP: [constants.PROTOCOL_TCP, constants.PROTOCOL_HTTP, constants.PROTOCOL_HTTPS, constants.PROTOCOL_TERMINATED_HTTPS, constants.PROTOCOL_PROXY]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/common/data_model_helpers.py0000664000175000017500000000545000000000000023443 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
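# ---------------------------------------------------------------------------
# Editorial sketch (illustrative only): the helpers below build cross-linked
# LoadBalancer/Vip/Amphora data models with seeded, deterministic ids so
# unit tests can create fixtures cheaply. A hedged usage example:
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from octavia.tests.common import data_model_helpers as dmh

    lb = dmh.generate_load_balancer_tree()
    # The tree is fully linked: the VIP and both amphorae reference the
    # load balancer that owns them.
    print(lb.id, lb.vip.ip_address, [amp.id for amp in lb.amphorae])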
from octavia.common import constants from octavia.common import data_models from octavia.tests.common import constants as ut_constants def generate_load_balancer_tree(): vip = generate_vip() amps = [generate_amphora(), generate_amphora()] lb = generate_load_balancer(vip=vip, amphorae=amps) return lb LB_SEED = 0 def generate_load_balancer(vip=None, amphorae=None, topology=constants.TOPOLOGY_SINGLE): amphorae = amphorae or [] global LB_SEED LB_SEED += 1 lb = data_models.LoadBalancer(id='lb{0}-id'.format(LB_SEED), project_id='2', name='lb{0}'.format(LB_SEED), description='lb{0}'.format(LB_SEED), vip=vip, topology=topology, amphorae=amphorae) for amp in lb.amphorae: amp.load_balancer = lb amp.load_balancer_id = lb.id amp.status = constants.AMPHORA_ALLOCATED if vip: vip.load_balancer = lb vip.load_balancer_id = lb.id return lb VIP_SEED = 0 def generate_vip(load_balancer=None): global VIP_SEED VIP_SEED += 1 vip = data_models.Vip(ip_address='10.0.0.{0}'.format(VIP_SEED), subnet_id=ut_constants.MOCK_VIP_SUBNET_ID, port_id='vrrp-port-{0}'.format(VIP_SEED), load_balancer=load_balancer) if load_balancer: vip.load_balancer_id = load_balancer.id return vip AMP_SEED = 0 def generate_amphora(load_balancer=None): global AMP_SEED AMP_SEED += 1 amp = data_models.Amphora(id='amp{0}-id'.format(AMP_SEED), compute_id='amp{0}-compute-id'.format(AMP_SEED), status='ACTIVE', lb_network_ip='99.99.99.{0}'.format(AMP_SEED), vrrp_ip='55.55.55.{0}'.format(AMP_SEED), vrrp_port_id='vrrp_port-{0}-id'.format(AMP_SEED), load_balancer=load_balancer) if load_balancer: amp.load_balancer_id = load_balancer.id return amp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/common/sample_certs.py0000664000175000017500000014352000000000000022312 0ustar00zuulzuul00000000000000# Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
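# ---------------------------------------------------------------------------
# Editorial sketch (illustrative only): X509_CERT_SHA1 below is the SHA-1
# fingerprint of X509_CERT. Assuming the 'cryptography' package Octavia
# already depends on is available, the value can be re-derived and checked
# like this:
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes

    from octavia.tests.common import sample_certs

    cert = x509.load_pem_x509_certificate(sample_certs.X509_CERT,
                                          default_backend())
    # bytes.hex() yields the same lowercase hex form used by X509_CERT_SHA1.
    fingerprint = cert.fingerprint(hashes.SHA1()).hex()
    print(fingerprint == sample_certs.X509_CERT_SHA1, fingerprint)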
# import base64 import pkg_resources X509_CERT_CN = 'www.example.com' X509_CERT_SHA1 = '9965834d856a7e24459522af0b91df69323947b3' X509_CERT = b"""-----BEGIN CERTIFICATE----- MIIE8TCCAtmgAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwIzEhMB8GA1UEAwwYY2Et aW50QHNiYWx1a29mZi5pYm0uY29tMB4XDTE2MDkyNzA4MjkzNFoXDTI2MDkyNTA4 MjkzNFowGjEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0B AQEFAAOCAQ8AMIIBCgKCAQEA34asqEe1MexBKGmBcrco08LYYFfJjpmW8m1yKJsm S2nmHNhJy4Fl+3cPDyHYOiVxnsaMIv1Q8ZMRpjYH2LhvzLt2doyMiiJrqA3ScdhZ VlGKaURvASSj9dmbRBMqdXZBvTZnMH4aSkL4DalU7NiW+jbMb5Gmf+bozE4ZAOES 6eXsP5+yEhJvzgmT/RvD/2w7EtCtrRnnAlMwHJACqozRQYXuY8iLw7YJZtk35wyc EJRilXIcKUCuwQfHG6akd6da8PIzEZ5bbsYLtpslIoh53vG3htXTp7eGDp+MXzlr yB0+QqjXuOMR1ml1sNwVMpHO4oUFuXFGvuIYnT2QhYerdwIDAQABo4IBNjCCATIw CQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4QgENBCYWJE9w ZW5TU0wgR2VuZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUVXOS 1PSqVuhOP1OKBMNfSHfhAsAwgZgGA1UdIwSBkDCBjYAUN1MP5SS5ZJyrWuPVSkEF KK2SnXShcaRvMG0xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAw DgYDVQQHDAdTZWF0dGxlMQwwCgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rlci1j YS10ZXN0QHNiYWx1a29mZi5pYm0uY29tggIQADAOBgNVHQ8BAf8EBAMCBaAwEwYD VR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggIBAFcxJtGRESeflY6+ WNp9q3LGYP+uzyUvjimdFQzKRi+Sq0Mi2YI7agpvtE9IZHov+JwzPaBXM4yC6Cap lI88cE0KNowkjkON4g99F8m9WvaXRChtlJ53BizRkGKgw4Zg/0PAbpjLN7IU/Zrm hOwyhBxmewMX3WAk76xvgFXTVN7c9FnCRvuN/6xO+CUb/a8fcNASdD+8aw+iS7iq gvV1WGeGY8n8F19NggWSiRyb/z4Y1VoqaeIPfD9kjFrGApEGpiZphbzl9jSX8cPQ YbDbbxBsUyfxtMK1aVx258ow92NRsDsoLGELpzF1AekzfQDWtHOpqkaPNunV2l4f UGRi5J5stDi80Zf1t5JiFkHRXLeWAPa16AifF4WhmAaw0+zxINUqYH1/kt7LQP62 PT5g3TK1S7TLvqfouw69AQUZAezBUfEkfy1816WGpuntWEIe3x4sCviqVHdjDtE6 Pntzq5bvIIQ6/em2y5gvG68yOXYNTWmxOVaXPJ60eilbPyCD8UrkSMbqX+ZlAfFJ dsAnySgPfz47dhd9jHulx4/rBZfPx330DNiO/wQZxQMTbjhlTJViojfQuNRaBT4E Vi/XwUwVUqRURyQtuP8QJdPh9KD7uX6xHjqBALdwzCYAFaqelPue7TJ7R/I5+02A DV8BnY7U3zPtHtPf6i8vdYwgAOJG -----END CERTIFICATE-----""" X509_CERT_KEY = b"""-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEA34asqEe1MexBKGmBcrco08LYYFfJjpmW8m1yKJsmS2nmHNhJ y4Fl+3cPDyHYOiVxnsaMIv1Q8ZMRpjYH2LhvzLt2doyMiiJrqA3ScdhZVlGKaURv ASSj9dmbRBMqdXZBvTZnMH4aSkL4DalU7NiW+jbMb5Gmf+bozE4ZAOES6eXsP5+y EhJvzgmT/RvD/2w7EtCtrRnnAlMwHJACqozRQYXuY8iLw7YJZtk35wycEJRilXIc KUCuwQfHG6akd6da8PIzEZ5bbsYLtpslIoh53vG3htXTp7eGDp+MXzlryB0+QqjX uOMR1ml1sNwVMpHO4oUFuXFGvuIYnT2QhYerdwIDAQABAoIBACVPOmSAS5tAnwOa 0LOQJO1ruWgjXw5BTrO6VvK2Kuctju5Dn9WrDJWzorzY3lmeRF/HLj7s32TjMm/2 1spyxp56r+RLw22PHz8Wx4ifHxJMW/kEJi8fqYpwvvzW4iBnE8P8X671bXf1w6es GvPJlzG+kdMRkaQJq9PmOUAvUVPe7+xLuouU+7q4DAiq4oXMoidVbhm0FC5k4LB2 q+oMzcdMiQ6rQfQB1uh2s659zwW7wAtRMgx4LeY+lIpyf0Bh83Ibi4JybH+DQk8g AkrEXDE9aslNx9ZXVdfdQiCRDklbg41HejZPRhsRntH0v4cnjOGCYrVDfTKEatka ltWYyXECgYEA+LFxGZH9vn2nh2PtAs5wnsjc/79ZOKJwq6j4stuRr8MBazRtMhP1 T1g6pEkBjEoUUomcpH/eG49PrB2G9oYIfhJqkRo07m5Fyom3r35/V9Q/biqcGLEM EodujvziHbUQDFxO7jLigRjsVoG4Uo0TXT6V8KzKxHGgpdCvYKNP3A8CgYEA5hfv 829n55dkNUFU33VKhlyLD1+mAUdPkjRHYiOianv1m8C5z8rzjxs+Fa8Xl+Ujsr0m JpRvOiNEwm/b6bF4NLKOhaBPK2IAYzGPwy2yhXELcxHuxNArJ4kVp+YdwvvRSWCa 767r/CBS7gCCM5bXlU3saMS03goZd+l4fo778hkCgYBxkVZ8vtaJbwhaI5/QcEWt vTxu7groegXJ3lf0FaDqCrtTIZXcEJEtsrTU4SH71riBGKaX2GytWTyg9Lr1STAH opFXwgf5+hGU9F8VnUa57QsqW/r8q50/uOkcEw+PUWgKvPyuej5FhgQnXQW3bQUy x6nhRocyPlGGZ04va2TEsQKBgDlIpFh61+d0bWJEzZiEXvVsfMJrEa0nz8uacFsi fAD+s3r/VENDR7fNFHvZh4otZeHN7X2VXsuelDPEHX/kywRzn7/s1Uj7sRUA9cWl ztgh+LPBNyyQlu3U1ythwu8UOlqGTox1hBLVCVBvl/q4BxwItl6u+kh9QzHzUihP +LGhAoGAGRjYSOy4aiFK1Ds/5doEcBj3eGsfNx0va85UYDMnoMxkw+qHlFOCzrG1 nUBaaqVibLaROn4V1QnlSOA2vjc2jMMDKMfnjawtqBC018tQDVcE75sun7UzyxtS 
OWaQy6KhqrKpPy3tS1wt1vAYPWZw/EIo4dDXYBo55REI5mSBZrM= -----END RSA PRIVATE KEY-----""" X509_CERT_KEY_ENCRYPTED = b"""-----BEGIN RSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED DEK-Info: AES-256-CBC,086BA545587FF5F6F4DD9AACC122603A mWGjuSlBRAU3QruM/CI7m2LCN7wTHp00V9XbSDHXQ9D2MqgRJTf09iRlSsH7GxKF jbc0TWvrYkojb5BZBg3PePfRUxhiwGu7hYk+GLbRQsA0iL53wA2a7aPnwzBFuwky u/d0bK39n8QoIj+vUgVe/7C4Xj6eRC9SGlGBO5syCQqx/KCmovv+cqKG+hti9KFm e7KAsdd/7noEQNwo2Don+0gZwDc7lKzR29NlyqnASkbllKMzGaMXVfdDPGjC62AF 3rT0HllONHo7McmRfbCWs7nMEvKFgxKvHoP2/0ph5DD+DOKFCnLSfWdK22EgG9TT UUcNiNCY/A88M2GnHdYjBMVokL/sQ4LAsf9Tz7aO1D6c2p50t9gBhDpOwKwgYGJu sp2FLO3/HzHS30s8kfOg2ZDzRm5jOlFsK8XY175xUGrsCkSQmQPY11b7v8baBHp7 KOA6xeJHD7+K1oKvxAqlU7Lwfmm0lbS9/JnIDiDel7oTHESk5mqSUZkkyWze+iNb S/3J/8mtnHl72UpULoWkvSfE5xTu5W7uhXqCOayiOeiUpalKG+gwUZI1lgvDlXn/ 2LpFEFY/y21NWGIm9c1lxZdOzJpnfzvXw+27lGPjNhtjhro0wIFjQ7YCTyq7Ky36 qPdJfU+206vkX7tzETyGPh0oO/1eP5b1QjJrtP7tMNS45yn4yzjICNhC5NXAXgbU F5bUkWqhQDJ6UDa6hCrJ6bf63AdnqTtJ4layKyl6dz06qrVNpCyGTNNhJykdlSq/ PkVes4X4yh6TA5pJowV2bVnM8nqN7H8TXbEetF9MP3ImYnVzDTnBWugWT1cVA45h GyV/j4VHBqwPojGhRwFDM9reQ38tTrmss4l0hxC6B5ivIJtUvCqNa+E/cKecopmb 5fAdiROnS548tXuPzsz1EtcVor7k1i//SJJrSgqpaQb8E36uYw6r8yXQ6zhOyoUF Pz4OVN9WR21G5R4sAjHV9U2l6ulgzwpE7O7Z5fSuTzZBttFX4U0OZZDfrDIF6jNB jrd2RBacsjsm0PRGw2qrMZlPmhhHl0prfIPOrkRffre3wDk7POOoa2U/+CKcn86Y 780WrIGL6jMp31D8HDmLZbvsWtzKjTqMIsqo3gsFwCgtu9PKZ/z/sQGND6f9b8u9 gpt/osBxSi5b7lHE34InizhzakEMtQ/bshO4WAayGY3Kaf0dG89mwQEOOzUw54Xk x9F+hzYGb42IaTHO+h+mMznB4sh0iLyekt7eybwYGX/1/Oz8WQ/EfDHYu16XG681 Zb5ev/6rojAWe6yib3MEWVjVcsoNUUA+51+hEO4UKEliNX0FvOe3q0aflqPVzi/0 VVB3erVNQ/5uunGdZVzjgef0EbhFlHANjIcSD8N80NEaG2JmhVBd6kc7Ev1uoCK5 r3kHNhyy/fipKamS84mhjTq3rgSeUCndf/TI+HSvJwQaA3sm1Bu5UuErjf9Qpq5P osar1zVgWl2jEUejqwnt4626J8g0MG92amHHsHG1htzjAzaTqtMlORdUmWgppYVs dlGLDA9eMkmOBo1WdQYZDDnCcNVdT6MoeKmDsqmM6+ma4vpHuelYmDJ5l0a3hGbF -----END RSA PRIVATE KEY-----""" X509_CERT_KEY_PASSPHRASE = """asdf""" X509_CERT_CN_2 = 'www2.example.com' X509_CERT_2 = b"""-----BEGIN CERTIFICATE----- MIIEbjCCAlagAwIBAgICEAIwDQYJKoZIhvcNAQELBQAwIzEhMB8GA1UEAwwYY2Et aW50QHNiYWx1a29mZi5pYm0uY29tMB4XDTE2MDkyOTIzNDk0MFoXDTI2MDkyNzIz NDk0MFowGzEZMBcGA1UEAwwQd3d3Mi5leGFtcGxlLmNvbTCBnzANBgkqhkiG9w0B AQEFAAOBjQAwgYkCgYEAp8q9ybIlTP+Aka1jaLE22gE784t3rQ0KC83ODSY0283R QX6BfHrAVTj1ctyvz0D6hxXiYXwi9mXXHvBzzxScPxImQ7jbvYyP0CtagQ4QGj7w +XVWY94bY7X5cF5NlGHl0EIHBO2G0wc455Mgzlakkfoa7k9YJM37hfwlBV6IX9UC AwEAAaOCATYwggEyMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCG SAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUw HQYDVR0OBBYEFLc+lXNhKO+47kWgrMirpmSU2FMWMIGYBgNVHSMEgZAwgY2AFDdT D+UkuWScq1rj1UpBBSitkp10oXGkbzBtMQswCQYDVQQGEwJVUzETMBEGA1UECAwK V2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEMMAoGA1UECgwDSUJNMSkwJwYD VQQDDCBtYXN0ZXItY2EtdGVzdEBzYmFsdWtvZmYuaWJtLmNvbYICEAAwDgYDVR0P AQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IC AQCpLBSdj9eeXWZNFXZzhtrI34f+oPcyZeERo04gBcNik6nSL+iwgv7TqqEnH+X2 lSCXAjxQEsXHSf6Ox/YWlXhr20jCoTpZBQIB0kbu3wBfowxNFIwKKyIbpt4pbJzC Hnx2EsOVkxywAzE9aos7JyXk4ya2U30v/m54shC0Jxxpp8KqNAUQ535NCM+563eN 8GXAV8uS6UwTnDwepU79xixDmk1XIsMlJP0e6ROsNFBSdZ1QwCjOwbA7clAdlpch f7eF0mJTXKkrFUBVqZh2iGFQ4lasoXeTM6yR3be/tO12NdM1tGT9HT88WeRpRin5 73pTSETUMy9+80T57DxpGNOVkBLI1AhRWkqQ7kgyNmm9jajZVyZTuSPhXpQAunxs XxS9gPqe7LuBoRXsxLEGrXJ4h5+D3OBr4KGMHcFbI3o71ZzgDMWQ8Hyik7j6BE3J zXmoSZjbvJBiz3asU74/a3dH2XkNOdzErN8RkMRzL8Z1TdgL+SRndXMpSM8cI44v jpyx6T1AdxgMrstDuPX6U0EMl2WoEvkwtePUc3hBYCkm376yVbtbJcAqndFW2lAY HULxFHp3QLrnbQEvPIcD0EWppJ1GMqb/Gv8jORzOks56UtOIfavrzGrcvRSKoC4Q lDApYKCiRvvBSVfgpoiVungh2NWSmNW5bn2uOkPt+vTjcA== 
-----END CERTIFICATE-----""" X509_CERT_KEY_2 = b"""-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQCnyr3JsiVM/4CRrWNosTbaATvzi3etDQoLzc4NJjTbzdFBfoF8 esBVOPVy3K/PQPqHFeJhfCL2Zdce8HPPFJw/EiZDuNu9jI/QK1qBDhAaPvD5dVZj 3htjtflwXk2UYeXQQgcE7YbTBzjnkyDOVqSR+hruT1gkzfuF/CUFXohf1QIDAQAB AoGBAJevcq8ZuxrGiAYqBwon2nxuTTI4TLJGbWSKYtIEThi/EYLxyEEt/x2L5mg2 FUF5boIcSJD0Ve8F1dmEak00RqJO96V7riNe3a0SGhYjak6okEXB1W75LfFQ7Jik I2Wkdg+M2gdcHNKXmVWrO83aR+zWFXv0yHINANQLaUhunW4BAkEA1TKfKbdSkTkn T98j6rGM73nd56yCIK45CZmHg33ICyKjH/fUiNpHmZtBxCgrYTeFOJtLEW4QENy8 vusxB1zbQQJBAMl6eOliCfr6y85pCIKGFHL/Mwzij41xRPv50wWkFpdNOPW8lqlK SGZHdMn6fWi8e02tkcIQqeRrg2cU6WsrA5UCQCMBqeLGqDcSRGM4B4S83KBhyU8G I2PMV68812R+3b7+U/ymy+4gsUsGlDjqQ5016ZkO3reg8+Bg7lkG80j7NUECQHJr DPNs68IOX2OPHngRcNeFuhYdK+zlYInAbGiNsQ6nmitjuCPXvZnoBpkVmda7A0Mv yNDu6ayAqhUGOTDVMqkCQG9Vk7xjpe8iLkI4h7PaxaqiSwY+pyY3QoErlumALffM t3c9Zw9YGbij+605loxv5jREFeSQMYgp2GK7rO7DTbI= -----END RSA PRIVATE KEY-----""" X509_CERT_KEY_ENCRYPTED_2 = b"""-----BEGIN RSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED DEK-Info: AES-256-CBC,3CAEB474D1526248CA20B5E4F84A6BB7 t6D+JPlgkUbxQzP4dSifigWPrY64diq85Kl3R+XSjh4az3hsvi6dZCEna7f+G+UH Bgoak3/EPVcTe5g09ewxfnkBvjej78ktc4fUDWqcPwl6xwSbqVkz+ejEe8MAOR4d VN5bG559HXD1AYbhr3XyONpUrNlyQrdtaxNjPtt2U77aPfEo96/sEaYA3KXKq6pd NEXU0K/4MSRP2sErybUubyJBz6XJLZ3LwILXRONV41GvFmnDGJ20I1X+IzlV/YDo HpFKspuTrDzXttlMFMcQVdCWX450Zs988FWa4vwN0Ma1sgl8VwjcbWDAgx5tM1Ml +t0PT1yL2kIGIPbnsVoPphIet+qjZZmmOFCRwfvXiYSTf9FZ8eawnqQrmoSN5iNt T63Aidf1dV0nHk+IZxkdgzm3C7ffeIPG4yMx6px8NnJzp7lCMx76FudeeqUx0ezC Del0Thfh8/N7RX7mUP7HdybXIrR9Gp+p9WUelag6DpMgCcGWNvTtk8NUK+3TXAax Ud+eZLP6k5LXiqhwSuWb0/r6I7OSgseOBsSvAw8PVfDsg6LwyhLqLmOLgxVas1Ay EXJVqD0QviMl9aXBK/kpsg6rdhJCBJ6WQlytS73Iyx0plD38SwAS84d6B4ASLHye wXyd6UrKQ3c6hQV8c9jzHvllaEafF3WUjacwuwmNOlBuWh7887JsFeYqbEIlY82u pVM7cDTfJhEggpKK+q3muntMeLTVaIKcqvYoITbVoRJG8F4Zc29cibZjz19zshBM OEUKHsL+I+kFr0SBLY8UnAOjIt9AjJLgo3uVC13fj6omO4EeXQjY82GKo70RRszs -----END RSA PRIVATE KEY-----""" X509_CERT_KEY_PASSPHRASE_2 = """asdf""" # Wildcard cert for testing X509_CERT_CN_3 = '*.www3.example.com' X509_CERT_3 = b"""-----BEGIN CERTIFICATE----- MIIFJTCCAw2gAwIBAgICEAUwDQYJKoZIhvcNAQELBQAwIzEhMB8GA1UEAwwYY2Et aW50QHNiYWx1a29mZi5pYm0uY29tMB4XDTE2MDkzMDE3MDkyNloXDTI2MDkyODE3 MDkyNlowHTEbMBkGA1UEAwwSKi53d3czLmV4YW1wbGUuY29tMIIBIjANBgkqhkiG 9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6v/973etopk2Vz95DUcx8X6hLfJ5m8s+scn7 nMZ37fSqAGPF0veGpqyqxorwh+GYLjlrvZkhVi7IZJAsLU2ztG4+MEoYzbyhgJer FmepBC7xPIJEjh8FKhtpvxVOMFcXJ1CZT89Ww0rVPnaoE09DS0DRo5s+lW0dD6Ta QW0S/6RCZ5RpD1q5MP86JvTspkWhhKY29eEMFZQYDwc9HEPE+C2scapGM6reP+Ix c/Q8806BUXNkLoXvGo+LqmeONquCUGCXL9HLP70Osp2jfqgTT3RfOFx3k5OaASeZ MhHRqntdReYXN16PhMU/eDvKr42QxCwNAVLDSrkJGG8eChOgVwIDAQABo4IBZzCC AWMwCQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4QgENBCYW JE9wZW5TU0wgR2VuZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU eOw2E5rYzvCuhuAR11GtoyT/qgswgZgGA1UdIwSBkDCBjYAUN1MP5SS5ZJyrWuPV SkEFKK2SnXShcaRvMG0xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9u MRAwDgYDVQQHDAdTZWF0dGxlMQwwCgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rl ci1jYS10ZXN0QHNiYWx1a29mZi5pYm0uY29tggIQADAOBgNVHQ8BAf8EBAMCBaAw EwYDVR0lBAwwCgYIKwYBBQUHAwEwLwYDVR0RBCgwJoIQd3d3My5leGFtcGxlLmNv bYISKi53d3czLmV4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4ICAQBvDBAbwipb h1bgfOIo8Wv17QdyFDVvzj23eO+BbYbUNF+JD7HAcq/Z53RSj9Kv76NS48OLqWUk RM81TPiHimolFvF6AZLXeYKVpl48nCQRQixHSkmW0I8BlpQ5/Cl1WUqFRcDAl3i8 lMLpAUhMb8dt4d2hviktK+L5CiBLdmKCnlz0LOK/4GuF4Z586jrrWyjw/GBYvmXX 0ujjli4J6WMJXVZ1IIwIM438N0eG6wKRNBbJQl5tJjKVX56hSkVdgQPz0gjhNGlJ VvImaAtLORgBUqXXs2PhcZ5HHeSd/dF2pJeOYC6P4qjb2BqhDHwDKjsSDD2sPoMF 
fvI6pQ0zPCpx7waCxpk+UxshJk3CG1XoWdlWZmDBLMl2KjDH0nWM7nI6oWPXK8K1 R+iBL4IUp8p/ZvGGcGeP2dUpm6AKcz45kYEWPm5OtB10eUaCQPkeUvWRmY35f0e5 /7LlFF1VDlRlxJPkroxrDDm5IIWS1VPTnelXzvBKenqTFFbQUzS1mmEEY/pPEKvS Z8NAha3g0/jex5sT6KwB0JI8fvyCzfCS8U9/n4N87IrFcKThw+KMWkR3qjZD0iz1 LwW88v99ZsWWIkE6O22+MmJGs4kxPXBFhlDUCC9zPBn2UBK8dXSYL0+F3O7cjWQ7 UUddoYPP4r24JRrqzBEldSDzWeNSORpUkg== -----END CERTIFICATE-----""" X509_CERT_KEY_3 = b"""-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA6v/973etopk2Vz95DUcx8X6hLfJ5m8s+scn7nMZ37fSqAGPF 0veGpqyqxorwh+GYLjlrvZkhVi7IZJAsLU2ztG4+MEoYzbyhgJerFmepBC7xPIJE jh8FKhtpvxVOMFcXJ1CZT89Ww0rVPnaoE09DS0DRo5s+lW0dD6TaQW0S/6RCZ5Rp D1q5MP86JvTspkWhhKY29eEMFZQYDwc9HEPE+C2scapGM6reP+Ixc/Q8806BUXNk LoXvGo+LqmeONquCUGCXL9HLP70Osp2jfqgTT3RfOFx3k5OaASeZMhHRqntdReYX N16PhMU/eDvKr42QxCwNAVLDSrkJGG8eChOgVwIDAQABAoIBAQCbp336bKn9BkCh H7C9o8en7rkx5ua307KuLlxTpn3vhlxIL83rg/qTUdE3+vWA+2NCUtRTXCdhn1Eq kvg/9bSvMUpNz/aH54aN12gCSh0AYVt2Oc7Q2Ckij8/GOoV0rWrvpoo1+967Mkj2 u79uMtUe9ksldAHLFd/m6cmLBoVL/6rxByO9JsQjb+qFcNcLmNwTsGWttAT1a/Sa Cy6JESzJzL6jMB1hNr/UI4nh8CkD2Ox+G6efs6FyMtayOP/AVwr8jSywVWZ+9tiX kidCNS5xzazt1aMeJcu1h3yzYt2PvNHVE17T5imQGDUKuhmH/PZdySldnAU2srm5 b6tGNAJpAoGBAPcjPNJHnUSYh5GooeOPCG8QtnhwEDbuwcOzoeFvJBNlSs2XF25O cXPjUx5HVpJqeBTiOX2zvWr6jK6AggC8K+eF7ElecEeCEFf4feO6iv9n97bzntmi lPlfKBkQOYfUA/Syva6CiLuz+dZS8zYIDiB6C5/hhIFi+O5fG+hny8ILAoGBAPNt VBxjz8bl/oaj6wm5oVk7HMBABBbAbPcH31YelF/cEFe+sTMk07Dpqebq3BQcEQAZ YgERoZeqA7Ix47N2LUfvUWba8Kg83JvwSYV2WRLxbGBMsubUHBX3J7+2d7mMbaUb NycvS3K+M5HYDOdGuXwObJod54pl0D+8Kk6QHXZlAoGAOPfLdmGBtCVA4uevYag/ 9nIwzbRvWGpTCgynXTLkawAnbRrOEOROLLuTFmC1aQzX32CChiJfoIBe237N+ONn b3gkjokRcrpdkBm80zjV/6f0pxyjWmGq24z+zkA6MsBBpS9qoAaBBFupVKlMXQEg WIYpldJDXBv3a+NKqJj8lB8CgYA0rjlgt30U11tg+gJ4pVK0w+Ux+eion9Y1E+AS fCWyJSboRl2E6vhhNgBN+A/OzlAjjFq4Tn+BGgsYulFD2lRsV+9u6fzg++LmYHcY ygb24YaJxK+G4up9GnLgu3Vnk2t7Ksuh0EtstprkejQ4rQahQWHhbI1aVzRdRrSF Mg0ePQKBgFn2yh/gKf0InDtR6IlIG9HVI+lMKxyU5iRH/9MQ7GS+sSjiAXdOtGJJ 1QT9hTtPzR9SBXqu6yWJiIvYAfnrmea6kMb9XH5L/XIciZA86DapUl+TWicpI6jH KX8jFiCL+HcZX+pqAaUuifgwnqd88EX7MPoU6Yjq02To9ZAPA+SA -----END RSA PRIVATE KEY-----""" X509_CERT_KEY_ENCRYPTED_3 = b"""-----BEGIN RSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED DEK-Info: AES-256-CBC,088E3835B2238C332FC7AED391C9CF8D qKPZwlDrkX/0J8Un29cmvUuVi4FzSWRRQe+m63/ezh1vzv8IOtmw87lDffPhbJ/C mLAj09l9Z5I2yLWb8tIJoYOzFjxU/lKI/M85EPEpuAggzpqCf4uwor7MUx+++fKv aJZuwgbmfb4//A9iNfSriWfrrS7dzVm2RrlNtj0ityiigzTSfY8oiL7fGTfocdx3 F0mXGEumySEMge3RF/I6/bAa8c7T4JRUc4yN1kzMftxH1H36QMjV+2thyo8UrB4Y cBIIUncFbTvnhvjqszQrCrh08Tes+AtxUXevCLRiaLqOlnF1LIyic/PRob+yqXKw jqUyjaj48MvJwxP1l4NtzzPz1QM4aaXZAq8O79DVHxaE9d6Qe9fw8cASFGrTzU2l 8jbHo+loXcG8LWp6Cqdvv0VMuK44G5TIOuFmuZ1YHgawCtihocoqUOyXJ3JryMUN 0RiaF3ybkplMEVtZBtTYkhx6vYnq7KX3CghpigcOajvzZ5jj6JMXkcenFSU87VaW tfzbZRk5LYuJOLu9MN3joftvD4m/mnFVXM4SF5ypTUW5PRSZGEa0im4LPWq9LH3s lrgh44jVxqfyAxtyVC8Mf3X7tOmm2dlHWLB8kBcqHfcJjRaZQeD25V2DbmCAp7NN UsUKT0ftRfSKGTmsSfPv62mFo4RrdI+/Xws1iOY8V1LekGvKc6zpSwYfQnrwIUi2 7PkslX0UyXaN7j020anNE9LV2NnccAWX/lkGCoUn8EPPrAum5wopLxm02caNKUlK RM+Te+LJeexLadkFStDentCmH3m9GoehDvWBLHGbdb/5sXqvxuemBxkyhjqXvOau +cyDRmfUtLf8ik9PvzP/dQqBn93fMkWRlJ6zRjn5q4lG+qKbw43UDWuYMmSBQd5Z ZUuTaT7bymQyPLUFmjkQlQm+WOFgCg26WuaXn8sXvQCtK3Ha9v0C21gJWQ7PnhKh hXFwuD0Jfu7G2Pie5ToBhsxC5PNYyVYZQOCJ3ZcvH1Hv8RCvIDPHMFdZohJVGwdA 8X90Z433Nv1ke2jAjMX9+Ph4txxRYwcV3IpfdyAFk6cjukdkBrcPPFARZiOSeNwO XskiNT6E0KUAc1KNyhsBRTSxmNkkzfqe4hzEkLukWBsyJ11/jmgKJqApyKZfePGR /kDGbJVbSlMvftmBNCkT9owMDjKmwHvo5iiV+rkhWEq3jaISu3+qtTj11S4+bRS8 vlh3G+BjSvpA2SBbXKWM0UrSnxtLow41kIZTZJ+5QnuQ9LYER1CAuMxlqManBWq9 
JwHGmLHqcLVPxDXo2fTsDHAZlw6TD3pC53WDYbAZC7SsePyNvbPk9P8YG47F2IZP ioxamytTKal/abrfrU8izw1HM87LNVQ4yAGCIlbj+0utN+aZfFDXgm+/FafraANr Ti580sCEkDrRrzAp0lG3AcSGTM83Jxz5Sz1o6xdWDBdshfcPIJgv9g6NlzPWzy3/ 39Xhe11dMDqKOdiY+KtdDCT4R3rp49Zctc8KopEX9yjzmPm8aekgyzIG8ak4ka6I V1OqZUUKNVGYtDAMDqqDEKNp3Y1mmeD8637oWVTQvbVJpatVIVoKb+MtKrGkVf0d -----END RSA PRIVATE KEY-----""" X509_CERT_KEY_PASSPHRASE_3 = """asdf""" # The following intermediates were used to sign all of the above # certificates and keys. Listing the same information various # ways so that we can test the different ways users may load # intermediate certificate chains into barbican. X509_IMDS_LIST = [ b"""-----BEGIN CERTIFICATE----- MIIFcjCCA1qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCVVMx EzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDDAKBgNVBAoM A0lCTTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2ZmLmlibS5jb20w HhcNMTYwOTI3MDgxODMzWhcNMjYwOTI1MDgxODMzWjAjMSEwHwYDVQQDDBhjYS1p bnRAc2JhbHVrb2ZmLmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK AoICAQC8KiZi1t9vX8IBtiOgOXHwyTo4ljYOqz92BqyIj4r/YJNHxeJ7JHNiM29e u6K/LdJleOkDXV+m0LDchn6pMf1/VJ5/Zpr7eWTqyKSaP5zTVbIOI1It60MQpsSi GbcfVPJ7TrwzNKtyBXREHwC+mEvIuEwDRHd8DljJG5J2CpS3Re/CgxR8DrKXnT6Z XHikog3yYJ7vULtxz8ktgOjM7uFh27YmHU4o1WyeAmMwpesVkqY7E7frbIYYbQo5 B1D1eWqM3KldqOQqvq34kPkf0vdfXxPurysNJrEaOXzDRdI6GiXaCKtnSEF0urCR denIDWMeqq2w5H1ekNgpK3XeFFxQYkhvXN85HiRmPO9wrd4qdEs1nvTIVVEDpB5F Fe4aFQmsAe57Ll1DTKZcja30VD1uJ5PbeoysHhN270+IbFeXK3x/icS5F1QdfE/p YIA0L3JRSY88IXw4giHnlLnYb55NwLac3EXmr2Qks/T87/gbk5gk9G+0XK3FSRxF +MDdmRiLAKLSb4Ej3wX1dXnSgelx5cBZ0n+NBY/865oauui27/OIaL7ZaDCDZU/t jIJDy/uQkuAjH4UVF4m5PqRaykqrjbyRJeADbL2E7CxamOsgyAfzhgIt04hpKkvZ oCCTRREeNp9rRITQiGMsfCUuxackDajsW/pnFD7m1ZKej8fcdQIDAQABo2YwZDAd BgNVHQ4EFgQUN1MP5SS5ZJyrWuPVSkEFKK2SnXQwHwYDVR0jBBgwFoAUhmmo2HE3 7bnky9h7IQ5phCFGU18wEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMC AYYwDQYJKoZIhvcNAQELBQADggIBABGdyLUs9Pm9SXpSQLSY4BWnYdZvUoS5rhJ7 coVrkTm3bBmI6haSWyoSy/l0Qno+7rXz1YruKYqKaYTAakuDL8C/J2VuL/p29nyC lH0rDKNuz+6jU8xLVmcZwBtsKK1QxNgAdQ3DWygtmXzZ/tigfIVVIyaTkOvOjovu IxTWAbmJu/xbkq4zZEaYJ0xobK8LGq3ZDauCsPNykqH/jwLwYWHQbKPmppTkNff7 unXrxQ+eSH/a1aCCldZI/NZywjZpNUdylEcnZhWshiWChD6j+CgrirdO0JeH9sGR 0La71VqujFWvVJUYYSbb7l4KFBLFw8Od5Z5rpYXm/qTHd6OvyS3qajse8ardqN0g 2Hunu0AtJ99JBHxzTP6blAcuTTrwS2XjB83/7k5YfN0jGbqQOYCJMTZ3pk3JkrZi pxhjY1ZX1N8Opb7IwgjIXwzNy/joL7smUNBQlTPDN1IfM5b83NGNSDKaS1pWiqaL XO6erkwabZxCVfGgvIk9hE4x6+Cu+jdOLTpAwq1mcQroAp1+CInHrZeHdnhz0zR8 4CUmddOos2WYTF+OvRfel32rBCaKlH6Ssij0JGxSYT24WXygsCdpDXfimg3O4Fk2 sJlV015O7iIu22bowsDcF9RfvkdHNULrClWI12sRspXF9VmRjbDyG4eASBiulJQV bk9D26vP -----END CERTIFICATE-----""", b"""-----BEGIN CERTIFICATE----- MIIFwDCCA6igAwIBAgIJAJLWg/Z3x5xpMA0GCSqGSIb3DQEBCwUAMG0xCzAJBgNV BAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxlMQww CgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rlci1jYS10ZXN0QHNiYWx1a29mZi5p Ym0uY29tMB4XDTE2MDkyNzA4MDU1M1oXDTI2MDkyNTA4MDU1M1owbTELMAkGA1UE BhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDDAK BgNVBAoMA0lCTTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2ZmLmli bS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDdYtZguUzRpohJ 8GI2/KCXZxd6FzmZtqrKz1JhZxSV56WhYnYljzQgRsPX8lxUWC/nSm13cjitfKG/ TvDNTs6bb9t7VkYM0k0ewvArcNRSSR/YHO0r7fWv7XkwTvt3yFupWkeNBaqDChaZ vlblcQxNUgXI3r/dOJDhOlfzhF0LML5FIIHgkgQCHAUQ62OfLkmXqNPYAKa9K/TE 4UGtG9LYT0dy3AwKUpvXfnKJSEgrRd8Nul7Rp6BgYWoJg6pZD4GLFiqT2bxphJJa AYulgtF1jDHeZgyunm7WrwZvxPC8AIcFcksRMxB5XOEo8PBXaGHxbIjl+PCw6WpF 5g7ZO95keYonpQ8nK9Vcn7BgWcQUY5SuZCaMTk79Hs/kD1upc22IHg//t1qy+0i2 SNTxj7n7mkynBHoKSrlVviUkyZHQYniuAGciYYKTfRy0F1LaM3QOUF3XA9j+2g1j 
CWolMPWpzWFTOkBwoCmCs0MX7FaYvsAeLx4rDVLRQWzvKZKGTubDBWS8wBsAq0hD v4b3r4k6cIz9a4PYNFARsnShkKHwln9lM5HjPHUNSZ6oaaIdi4wEf0xwipMiEi+x h3Ukztq6pBGlNbdxdlBP3PVap0AI81alswLWqCL5yBHzv0NQp+x7/EODJDcvE6sK PRmBVTzO9Y20KMlHrcdlPiNbBDhJ+QIDAQABo2MwYTAdBgNVHQ4EFgQUhmmo2HE3 7bnky9h7IQ5phCFGU18wHwYDVR0jBBgwFoAUhmmo2HE37bnky9h7IQ5phCFGU18w DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD ggIBAAbqGW0/XCmvptoh/6oHH8OvMUbn4m8Y9DqEnlWkfadhYkYaOh5SAZ51mD7O uwi+he8gV0evMgmW9liDw/i/2IWfHLT56yHSQqj8kRYRu8ZEdfC8IQg7Pj8mCJru JKtWWfCHWnoqxPmCkYfWrb2wb5K3Me33zMdvtdMg3Hzxg9UoRFvqyRJtYj7coK8A 3uWiX6vjDZTG+x7SF03zB40n6pR2mq+i7gqXeZBxV6VrIuYMQFVUk2VICe9hlsLs MFzq5Y3/P9evKMAI8JoxLLVlmI29pLY6A6VCiAFfyjiflXGtFRGNfHyo6FTMPzoL fGb0R/jAli47CVhvI7JyNqGMb6Oa4jqoVw5+RMmrgkaI5RhOplcTnqnxuEBqvxpk utnLNFTZ4LLRjYyaGYiYybZF9NG/OkCbTzT4fwLxqHqa4HCzijnbdAZbLtGC2aL/ SXMqHf1EHZmii9NZ/ndseom0l2+eVMaR8auZsSrpSbgzBB+UssVcBTD79Qb8LBQy C6WXGJPCEOfOYsxdZMDbD7q9CqgT5P4kI8VfryB5iqaLfDtUwjT8GPoTybFiWHMk 0DiS1quLYFZK2QhyFY2D1VLweyTQl8Hb/yYbxmd9QZDpDGCaIRkDt5H+rX17+MG2 n3yPHeLbGBLg9jphH7MMmsn57Z9fYjJADOOLFKG+W6txAQV3 -----END CERTIFICATE-----"""] X509_IMDS = b'\n'.join(X509_IMDS_LIST) PKCS7_PEM = b"""This line of spam should be ignored, as should the next line. -----BEGIN PKCS7----- MIILZwYJKoZIhvcNAQcCoIILWDCCC1QCAQExADALBgkqhkiG9w0BBwGgggs6MIIF cjCCA1qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCVVMxEzAR BgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDDAKBgNVBAoMA0lC TTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2ZmLmlibS5jb20wHhcN MTYwOTI3MDgxODMzWhcNMjYwOTI1MDgxODMzWjAjMSEwHwYDVQQDDBhjYS1pbnRA c2JhbHVrb2ZmLmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC AQC8KiZi1t9vX8IBtiOgOXHwyTo4ljYOqz92BqyIj4r/YJNHxeJ7JHNiM29eu6K/ LdJleOkDXV+m0LDchn6pMf1/VJ5/Zpr7eWTqyKSaP5zTVbIOI1It60MQpsSiGbcf VPJ7TrwzNKtyBXREHwC+mEvIuEwDRHd8DljJG5J2CpS3Re/CgxR8DrKXnT6ZXHik og3yYJ7vULtxz8ktgOjM7uFh27YmHU4o1WyeAmMwpesVkqY7E7frbIYYbQo5B1D1 eWqM3KldqOQqvq34kPkf0vdfXxPurysNJrEaOXzDRdI6GiXaCKtnSEF0urCRdenI DWMeqq2w5H1ekNgpK3XeFFxQYkhvXN85HiRmPO9wrd4qdEs1nvTIVVEDpB5FFe4a FQmsAe57Ll1DTKZcja30VD1uJ5PbeoysHhN270+IbFeXK3x/icS5F1QdfE/pYIA0 L3JRSY88IXw4giHnlLnYb55NwLac3EXmr2Qks/T87/gbk5gk9G+0XK3FSRxF+MDd mRiLAKLSb4Ej3wX1dXnSgelx5cBZ0n+NBY/865oauui27/OIaL7ZaDCDZU/tjIJD y/uQkuAjH4UVF4m5PqRaykqrjbyRJeADbL2E7CxamOsgyAfzhgIt04hpKkvZoCCT RREeNp9rRITQiGMsfCUuxackDajsW/pnFD7m1ZKej8fcdQIDAQABo2YwZDAdBgNV HQ4EFgQUN1MP5SS5ZJyrWuPVSkEFKK2SnXQwHwYDVR0jBBgwFoAUhmmo2HE37bnk y9h7IQ5phCFGU18wEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYw DQYJKoZIhvcNAQELBQADggIBABGdyLUs9Pm9SXpSQLSY4BWnYdZvUoS5rhJ7coVr kTm3bBmI6haSWyoSy/l0Qno+7rXz1YruKYqKaYTAakuDL8C/J2VuL/p29nyClH0r DKNuz+6jU8xLVmcZwBtsKK1QxNgAdQ3DWygtmXzZ/tigfIVVIyaTkOvOjovuIxTW AbmJu/xbkq4zZEaYJ0xobK8LGq3ZDauCsPNykqH/jwLwYWHQbKPmppTkNff7unXr xQ+eSH/a1aCCldZI/NZywjZpNUdylEcnZhWshiWChD6j+CgrirdO0JeH9sGR0La7 1VqujFWvVJUYYSbb7l4KFBLFw8Od5Z5rpYXm/qTHd6OvyS3qajse8ardqN0g2Hun u0AtJ99JBHxzTP6blAcuTTrwS2XjB83/7k5YfN0jGbqQOYCJMTZ3pk3JkrZipxhj Y1ZX1N8Opb7IwgjIXwzNy/joL7smUNBQlTPDN1IfM5b83NGNSDKaS1pWiqaLXO6e rkwabZxCVfGgvIk9hE4x6+Cu+jdOLTpAwq1mcQroAp1+CInHrZeHdnhz0zR84CUm ddOos2WYTF+OvRfel32rBCaKlH6Ssij0JGxSYT24WXygsCdpDXfimg3O4Fk2sJlV 015O7iIu22bowsDcF9RfvkdHNULrClWI12sRspXF9VmRjbDyG4eASBiulJQVbk9D 26vPMIIFwDCCA6igAwIBAgIJAJLWg/Z3x5xpMA0GCSqGSIb3DQEBCwUAMG0xCzAJ BgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxl MQwwCgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rlci1jYS10ZXN0QHNiYWx1a29m Zi5pYm0uY29tMB4XDTE2MDkyNzA4MDU1M1oXDTI2MDkyNTA4MDU1M1owbTELMAkG A1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUx 
DDAKBgNVBAoMA0lCTTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2Zm LmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDdYtZguUzR pohJ8GI2/KCXZxd6FzmZtqrKz1JhZxSV56WhYnYljzQgRsPX8lxUWC/nSm13cjit fKG/TvDNTs6bb9t7VkYM0k0ewvArcNRSSR/YHO0r7fWv7XkwTvt3yFupWkeNBaqD ChaZvlblcQxNUgXI3r/dOJDhOlfzhF0LML5FIIHgkgQCHAUQ62OfLkmXqNPYAKa9 K/TE4UGtG9LYT0dy3AwKUpvXfnKJSEgrRd8Nul7Rp6BgYWoJg6pZD4GLFiqT2bxp hJJaAYulgtF1jDHeZgyunm7WrwZvxPC8AIcFcksRMxB5XOEo8PBXaGHxbIjl+PCw 6WpF5g7ZO95keYonpQ8nK9Vcn7BgWcQUY5SuZCaMTk79Hs/kD1upc22IHg//t1qy +0i2SNTxj7n7mkynBHoKSrlVviUkyZHQYniuAGciYYKTfRy0F1LaM3QOUF3XA9j+ 2g1jCWolMPWpzWFTOkBwoCmCs0MX7FaYvsAeLx4rDVLRQWzvKZKGTubDBWS8wBsA q0hDv4b3r4k6cIz9a4PYNFARsnShkKHwln9lM5HjPHUNSZ6oaaIdi4wEf0xwipMi Ei+xh3Ukztq6pBGlNbdxdlBP3PVap0AI81alswLWqCL5yBHzv0NQp+x7/EODJDcv E6sKPRmBVTzO9Y20KMlHrcdlPiNbBDhJ+QIDAQABo2MwYTAdBgNVHQ4EFgQUhmmo 2HE37bnky9h7IQ5phCFGU18wHwYDVR0jBBgwFoAUhmmo2HE37bnky9h7IQ5phCFG U18wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQEL BQADggIBAAbqGW0/XCmvptoh/6oHH8OvMUbn4m8Y9DqEnlWkfadhYkYaOh5SAZ51 mD7Ouwi+he8gV0evMgmW9liDw/i/2IWfHLT56yHSQqj8kRYRu8ZEdfC8IQg7Pj8m CJruJKtWWfCHWnoqxPmCkYfWrb2wb5K3Me33zMdvtdMg3Hzxg9UoRFvqyRJtYj7c oK8A3uWiX6vjDZTG+x7SF03zB40n6pR2mq+i7gqXeZBxV6VrIuYMQFVUk2VICe9h lsLsMFzq5Y3/P9evKMAI8JoxLLVlmI29pLY6A6VCiAFfyjiflXGtFRGNfHyo6FTM PzoLfGb0R/jAli47CVhvI7JyNqGMb6Oa4jqoVw5+RMmrgkaI5RhOplcTnqnxuEBq vxpkutnLNFTZ4LLRjYyaGYiYybZF9NG/OkCbTzT4fwLxqHqa4HCzijnbdAZbLtGC 2aL/SXMqHf1EHZmii9NZ/ndseom0l2+eVMaR8auZsSrpSbgzBB+UssVcBTD79Qb8 LBQyC6WXGJPCEOfOYsxdZMDbD7q9CqgT5P4kI8VfryB5iqaLfDtUwjT8GPoTybFi WHMk0DiS1quLYFZK2QhyFY2D1VLweyTQl8Hb/yYbxmd9QZDpDGCaIRkDt5H+rX17 +MG2n3yPHeLbGBLg9jphH7MMmsn57Z9fYjJADOOLFKG+W6txAQV3oQAxAA== -----END PKCS7----- More spam here, too. Should be ignored.""" # Needed because we want PKCS7_DER to be raw bytes, not base64 encoded def b64decode(thing): return base64.decodebytes(bytes(thing, encoding='UTF-8')) PKCS7_DER = b64decode( 'MIILZwYJKoZIhvcNAQcCoIILWDCCC1QCAQExADALBgkqhkiG9w0BBwGgggs6MIIF' + 'cjCCA1qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCVVMxEzAR' + 'BgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDDAKBgNVBAoMA0lC' + 'TTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2ZmLmlibS5jb20wHhcN' + 'MTYwOTI3MDgxODMzWhcNMjYwOTI1MDgxODMzWjAjMSEwHwYDVQQDDBhjYS1pbnRA' + 'c2JhbHVrb2ZmLmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC' + 'AQC8KiZi1t9vX8IBtiOgOXHwyTo4ljYOqz92BqyIj4r/YJNHxeJ7JHNiM29eu6K/' + 'LdJleOkDXV+m0LDchn6pMf1/VJ5/Zpr7eWTqyKSaP5zTVbIOI1It60MQpsSiGbcf' + 'VPJ7TrwzNKtyBXREHwC+mEvIuEwDRHd8DljJG5J2CpS3Re/CgxR8DrKXnT6ZXHik' + 'og3yYJ7vULtxz8ktgOjM7uFh27YmHU4o1WyeAmMwpesVkqY7E7frbIYYbQo5B1D1' + 'eWqM3KldqOQqvq34kPkf0vdfXxPurysNJrEaOXzDRdI6GiXaCKtnSEF0urCRdenI' + 'DWMeqq2w5H1ekNgpK3XeFFxQYkhvXN85HiRmPO9wrd4qdEs1nvTIVVEDpB5FFe4a' + 'FQmsAe57Ll1DTKZcja30VD1uJ5PbeoysHhN270+IbFeXK3x/icS5F1QdfE/pYIA0' + 'L3JRSY88IXw4giHnlLnYb55NwLac3EXmr2Qks/T87/gbk5gk9G+0XK3FSRxF+MDd' + 'mRiLAKLSb4Ej3wX1dXnSgelx5cBZ0n+NBY/865oauui27/OIaL7ZaDCDZU/tjIJD' + 'y/uQkuAjH4UVF4m5PqRaykqrjbyRJeADbL2E7CxamOsgyAfzhgIt04hpKkvZoCCT' + 'RREeNp9rRITQiGMsfCUuxackDajsW/pnFD7m1ZKej8fcdQIDAQABo2YwZDAdBgNV' + 'HQ4EFgQUN1MP5SS5ZJyrWuPVSkEFKK2SnXQwHwYDVR0jBBgwFoAUhmmo2HE37bnk' + 'y9h7IQ5phCFGU18wEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYw' + 'DQYJKoZIhvcNAQELBQADggIBABGdyLUs9Pm9SXpSQLSY4BWnYdZvUoS5rhJ7coVr' + 'kTm3bBmI6haSWyoSy/l0Qno+7rXz1YruKYqKaYTAakuDL8C/J2VuL/p29nyClH0r' + 'DKNuz+6jU8xLVmcZwBtsKK1QxNgAdQ3DWygtmXzZ/tigfIVVIyaTkOvOjovuIxTW' + 'AbmJu/xbkq4zZEaYJ0xobK8LGq3ZDauCsPNykqH/jwLwYWHQbKPmppTkNff7unXr' + 
'xQ+eSH/a1aCCldZI/NZywjZpNUdylEcnZhWshiWChD6j+CgrirdO0JeH9sGR0La7' + '1VqujFWvVJUYYSbb7l4KFBLFw8Od5Z5rpYXm/qTHd6OvyS3qajse8ardqN0g2Hun' + 'u0AtJ99JBHxzTP6blAcuTTrwS2XjB83/7k5YfN0jGbqQOYCJMTZ3pk3JkrZipxhj' + 'Y1ZX1N8Opb7IwgjIXwzNy/joL7smUNBQlTPDN1IfM5b83NGNSDKaS1pWiqaLXO6e' + 'rkwabZxCVfGgvIk9hE4x6+Cu+jdOLTpAwq1mcQroAp1+CInHrZeHdnhz0zR84CUm' + 'ddOos2WYTF+OvRfel32rBCaKlH6Ssij0JGxSYT24WXygsCdpDXfimg3O4Fk2sJlV' + '015O7iIu22bowsDcF9RfvkdHNULrClWI12sRspXF9VmRjbDyG4eASBiulJQVbk9D' + '26vPMIIFwDCCA6igAwIBAgIJAJLWg/Z3x5xpMA0GCSqGSIb3DQEBCwUAMG0xCzAJ' + 'BgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxl' + 'MQwwCgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rlci1jYS10ZXN0QHNiYWx1a29m' + 'Zi5pYm0uY29tMB4XDTE2MDkyNzA4MDU1M1oXDTI2MDkyNTA4MDU1M1owbTELMAkG' + 'A1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUx' + 'DDAKBgNVBAoMA0lCTTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2Zm' + 'LmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDdYtZguUzR' + 'pohJ8GI2/KCXZxd6FzmZtqrKz1JhZxSV56WhYnYljzQgRsPX8lxUWC/nSm13cjit' + 'fKG/TvDNTs6bb9t7VkYM0k0ewvArcNRSSR/YHO0r7fWv7XkwTvt3yFupWkeNBaqD' + 'ChaZvlblcQxNUgXI3r/dOJDhOlfzhF0LML5FIIHgkgQCHAUQ62OfLkmXqNPYAKa9' + 'K/TE4UGtG9LYT0dy3AwKUpvXfnKJSEgrRd8Nul7Rp6BgYWoJg6pZD4GLFiqT2bxp' + 'hJJaAYulgtF1jDHeZgyunm7WrwZvxPC8AIcFcksRMxB5XOEo8PBXaGHxbIjl+PCw' + '6WpF5g7ZO95keYonpQ8nK9Vcn7BgWcQUY5SuZCaMTk79Hs/kD1upc22IHg//t1qy' + '+0i2SNTxj7n7mkynBHoKSrlVviUkyZHQYniuAGciYYKTfRy0F1LaM3QOUF3XA9j+' + '2g1jCWolMPWpzWFTOkBwoCmCs0MX7FaYvsAeLx4rDVLRQWzvKZKGTubDBWS8wBsA' + 'q0hDv4b3r4k6cIz9a4PYNFARsnShkKHwln9lM5HjPHUNSZ6oaaIdi4wEf0xwipMi' + 'Ei+xh3Ukztq6pBGlNbdxdlBP3PVap0AI81alswLWqCL5yBHzv0NQp+x7/EODJDcv' + 'E6sKPRmBVTzO9Y20KMlHrcdlPiNbBDhJ+QIDAQABo2MwYTAdBgNVHQ4EFgQUhmmo' + '2HE37bnky9h7IQ5phCFGU18wHwYDVR0jBBgwFoAUhmmo2HE37bnky9h7IQ5phCFG' + 'U18wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQEL' + 'BQADggIBAAbqGW0/XCmvptoh/6oHH8OvMUbn4m8Y9DqEnlWkfadhYkYaOh5SAZ51' + 'mD7Ouwi+he8gV0evMgmW9liDw/i/2IWfHLT56yHSQqj8kRYRu8ZEdfC8IQg7Pj8m' + 'CJruJKtWWfCHWnoqxPmCkYfWrb2wb5K3Me33zMdvtdMg3Hzxg9UoRFvqyRJtYj7c' + 'oK8A3uWiX6vjDZTG+x7SF03zB40n6pR2mq+i7gqXeZBxV6VrIuYMQFVUk2VICe9h' + 'lsLsMFzq5Y3/P9evKMAI8JoxLLVlmI29pLY6A6VCiAFfyjiflXGtFRGNfHyo6FTM' + 'PzoLfGb0R/jAli47CVhvI7JyNqGMb6Oa4jqoVw5+RMmrgkaI5RhOplcTnqnxuEBq' + 'vxpkutnLNFTZ4LLRjYyaGYiYybZF9NG/OkCbTzT4fwLxqHqa4HCzijnbdAZbLtGC' + '2aL/SXMqHf1EHZmii9NZ/ndseom0l2+eVMaR8auZsSrpSbgzBB+UssVcBTD79Qb8' + 'LBQyC6WXGJPCEOfOYsxdZMDbD7q9CqgT5P4kI8VfryB5iqaLfDtUwjT8GPoTybFi' + 'WHMk0DiS1quLYFZK2QhyFY2D1VLweyTQl8Hb/yYbxmd9QZDpDGCaIRkDt5H+rX17' + '+MG2n3yPHeLbGBLg9jphH7MMmsn57Z9fYjJADOOLFKG+W6txAQV3oQAxAA==') # Keys for the above CA certs, logged here to make it simple to sign other # certs for testing purposes in the future. 
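# The two keys below are not parsed by any test; per the note above they are
# kept so new fixtures can be signed by hand. What follows is a hedged,
# illustrative sketch of doing that (it assumes a reasonably recent
# "cryptography" package; the helper name and parameters are illustrative
# additions, not part of the fixture set, and nothing here calls it):
def _sign_example_cert(signing_key_pem, subject_cn, days_valid=365):
    """Illustrative only: build a throwaway cert signed by the given key.

    For a proper chain the issuer name would be taken from the signing CA's
    certificate; a self-issued name is used here to keep the sketch short.
    """
    import datetime
    from cryptography import x509
    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.x509.oid import NameOID
    # Load the PEM RSA key (e.g. INTERMEDIATE_KEY below) and build a minimal
    # subject containing only a CN, matching the fixtures in this module.
    key = serialization.load_pem_private_key(signing_key_pem, password=None)
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, subject_cn)])
    now = datetime.datetime.utcnow()
    return (x509.CertificateBuilder()
            .subject_name(name)
            .issuer_name(name)
            .public_key(key.public_key())
            .serial_number(x509.random_serial_number())
            .not_valid_before(now)
            .not_valid_after(now + datetime.timedelta(days=days_valid))
            .sign(key, hashes.SHA256()))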
INTERMEDIATE_KEY = b"""-----BEGIN RSA PRIVATE KEY----- MIIJJwIBAAKCAgEAvComYtbfb1/CAbYjoDlx8Mk6OJY2Dqs/dgasiI+K/2CTR8Xi eyRzYjNvXruivy3SZXjpA11fptCw3IZ+qTH9f1Sef2aa+3lk6sikmj+c01WyDiNS LetDEKbEohm3H1Tye068MzSrcgV0RB8AvphLyLhMA0R3fA5YyRuSdgqUt0XvwoMU fA6yl50+mVx4pKIN8mCe71C7cc/JLYDozO7hYdu2Jh1OKNVsngJjMKXrFZKmOxO3 62yGGG0KOQdQ9XlqjNypXajkKr6t+JD5H9L3X18T7q8rDSaxGjl8w0XSOhol2gir Z0hBdLqwkXXpyA1jHqqtsOR9XpDYKSt13hRcUGJIb1zfOR4kZjzvcK3eKnRLNZ70 yFVRA6QeRRXuGhUJrAHuey5dQ0ymXI2t9FQ9bieT23qMrB4Tdu9PiGxXlyt8f4nE uRdUHXxP6WCANC9yUUmPPCF8OIIh55S52G+eTcC2nNxF5q9kJLP0/O/4G5OYJPRv tFytxUkcRfjA3ZkYiwCi0m+BI98F9XV50oHpceXAWdJ/jQWP/OuaGrrotu/ziGi+ 2Wgwg2VP7YyCQ8v7kJLgIx+FFReJuT6kWspKq428kSXgA2y9hOwsWpjrIMgH84YC LdOIaSpL2aAgk0URHjafa0SE0IhjLHwlLsWnJA2o7Fv6ZxQ+5tWSno/H3HUCAwEA AQKCAgA2onJ03nkP6Jj3UEB15FgeFv+NsKfPiI+roHJ2UF+GmS8Kdv20zbem+tJK imbN5esiRYI61ODSGeRQk8ixLe+yCgrfaRZ1ECFqPon0s6XAgzBpBH53EMlvS0zq 2FaghVTG0uy4XYGuYMEKion3zVar2D9R745V+gBznErhdV8K/AaKzu6ius3GUgT8 GKp6+wbbqoxbZnCWskNyr+xonK/abyYrWPT5zEZ2drEAThy9LdCQdMBBXkhtTTPb rTEnpXJ3phaTOFfPxX/UHZwIToQ/L+cktb3lWqevuqNsO5i4ACGfdkb2fTdsQkzE X51a1fBC1kIKi72POLVa9uCJdBX9TafN7vObGdVtrO/rzqS6PhaD85JcQ/6ns4Cx 8+zERCrNlSJ4sGkmSVXF5nFXwgZ5WgZVAbf7vyCBdBT4GqV0H5Yq0kxu2OPd5qvD ZXesU2bkRhNpWG0LkjhM5mNE2lcBlBM+e93ZUSvP+KA83paLv6lNMmILG3DUbIpG +juDZQgmTKAR2emsr4JBvJpp5XrczbFvxdr6Kn7UqVGFkqNFyMBBAeE0tdp1biLO XCEptvvc0gh273csaaMHfyaDjOnvHQ0MJ+p0Z1WRNnvuoDd2rCclZ3suL0XYMZ2z 0je5yhJrnlbduFv7pDugG6mbLgmcTMFvBlKYQdjhnkRPtIDfAQKCAQEA3BQg/nLB BupvYnoDzX0hgF5IYCm65DDg9zgXlyKrvbE+jAYpCgSA9aIkQVhL8DcxXLGj2AMV LMtKtYn2vMz5uP5ooWxXsmlJphl6OLiWqpWOq0mW3J+jLCsjShUUWfHiwMkSvDw0 CQvTRkXkJVeGduv63wH7jDcsB7NalpcYFQOk8s3L4tv+Yqm72bU26wB1BXGm6BSx FeA03u4MvFI/rebyNEiVqFo9r0kBTpvHELuNpZTCotYdGZiJ3qgauJNzv1ga1KH8 jjeXaR6YoP6xiD7TQvV02ZZ28VBSFmYmFKP5nlwHqCf4K5nq0rbaJ1OIJMx+J7Nj hW5Li6OqRlWDwQKCAQEA2uCDEXABrwLd5JFDaAGqyIPzhtgTzOrddPEwkVY88S5n Pv2zRnrCYx+e7erIgwtRVCny+CdH/AcQrd3jzxTjvUnDwsfWfG/vjnVCbxt/nJPL cab1wumUQYfDYYZwdxOCs/y77V5sXskzmM/cr2ftPaVAWliKQoiMBq1qe2GX+6+v pwuLd31bf2o2h5Ki1CbvjNPPwVycqOVuNRU4Kv+p74pdDdys8FHjtdXkkwnNyOI+ 4CWZ00ep4rGMw6jbs/REnSNmY6o2eCUjceYmb0B25U1c7VvU4rKaO5gGKP4i2YsG zJ3LITduk9HEiy2+fHDg5+jS5A+2sa7+jr9KRLr1tQKCAQBAEnwomDAqW0ZiXTOO N8fxH4HivUNz++cDEomLrsRhTaDwEzsPd5Az8yiq/ZHeNbNhUTZqRTt89xKhF7GF 9gceHLNJi8lcX9rZSMGUkekLcM9AfwQ05yUSTtzPTKPIK+vgRKn3s29S0uaHpwFs /bZgGQPIuUMK52UiOM0+2m5gb9ct+UxKl9HP16qhD2kVseRf2V7GYn/L5qJ95MBA c5Gmh34sSpWHlf4vcgm3YRLrhC8Q7eZckgmW0hoRgM+GvScNphDppt9oaDbkm8vD 02LMT74h2GRUlMG8L642Zzbe+35I5HI2Oqy9xOngvut0V5VjYUW5OTjYN+w2k0eX gn4BAoIBAEYrB5nFgKSslJo7/5IORsP1mVK3XtGo0co1sE5HiU4AhFTrXFfR7xN8 BaVcSV/Jdw82S5BGd4tScIRsyCWRGtmKFloRWq+V6wSOjvezpxt5PhV3Yv5Th5Xi gj53rQJfnN06vryAMtnIQuRQbv1EogfLPHA6RkjCIbHaUnKvfNvRHMy+pL1v0K9u S4D2/4Bn4xAQr1/b4tU6iDQ4U0NlpwMGJoLVJhP9DLU0lwyUbgZikammJERZixsD tI7dSWHNg1mlCaQV41RtA4n2MIgl8Hfeb1YgxITQoSVNvVvS7TU0nr9mLsK9VJPL Aelkhta6EUAHoeQ/LWCVK0J0DMkv7qkCggEAfYXt3IxEcAWGDse2GccNZ5t2LRxR hIm6yPHpzmyJklEEoqCKltjFpr5Ltl6GWn/BuE/iHjBUL/PYpvQ2Mjz3VYR5cYe7 W6Q8E45GTKX5A3YgAklRRKpd3BmS4pA3D6L0zt/CxWRZ/qIssGkOhV1E0/v7TgZx mOk14aL/0t9PWKYjlqn9TJlmO8ZrTcMSpZ3fRFznIAgk1avexggrhShtrgjy+7uh qH3e8e1WlIfA7FAqE1Dtae97oV/5wM9qp1rnijwq5jlZX+AqYq7GQ8J5x2ypGhZX +N7I5RuaLjkJJs3i/EzCDwp8F3ZXZRiILaWSaGZlrZ8jgVtlNhNfVYVFuQ== -----END RSA PRIVATE KEY-----""" CA_KEY = b"""-----BEGIN RSA PRIVATE KEY----- -----BEGIN RSA PRIVATE KEY----- MIIJKwIBAAKCAgEA3WLWYLlM0aaISfBiNvygl2cXehc5mbaqys9SYWcUleeloWJ2 JY80IEbD1/JcVFgv50ptd3I4rXyhv07wzU7Om2/be1ZGDNJNHsLwK3DUUkkf2Bzt K+31r+15ME77d8hbqVpHjQWqgwoWmb5W5XEMTVIFyN6/3TiQ4TpX84RdCzC+RSCB 
4JIEAhwFEOtjny5Jl6jT2ACmvSv0xOFBrRvS2E9HctwMClKb135yiUhIK0XfDbpe 0aegYGFqCYOqWQ+BixYqk9m8aYSSWgGLpYLRdYwx3mYMrp5u1q8Gb8TwvACHBXJL ETMQeVzhKPDwV2hh8WyI5fjwsOlqReYO2TveZHmKJ6UPJyvVXJ+wYFnEFGOUrmQm jE5O/R7P5A9bqXNtiB4P/7dasvtItkjU8Y+5+5pMpwR6Ckq5Vb4lJMmR0GJ4rgBn ImGCk30ctBdS2jN0DlBd1wPY/toNYwlqJTD1qc1hUzpAcKApgrNDF+xWmL7AHi8e Kw1S0UFs7ymShk7mwwVkvMAbAKtIQ7+G96+JOnCM/WuD2DRQEbJ0oZCh8JZ/ZTOR 4zx1DUmeqGmiHYuMBH9McIqTIhIvsYd1JM7auqQRpTW3cXZQT9z1WqdACPNWpbMC 1qgi+cgR879DUKfse/xDgyQ3LxOrCj0ZgVU8zvWNtCjJR63HZT4jWwQ4SfkCAwEA AQKCAgEA2q4m1KQ1HWJCfcbVPTuN5gAPUKpgW1X0nyDrXwtTaj/HfAKmcbNi6f78 tPLSAP6bUvxR5QsOsU/K9g4kDqkprKBxTQOLbl7NjvVAB6kMEbvpmK/6FsqXRZBt hSp/e3KOGFr1EnfmVkpAyN0bOMjSPg4naKOfIgYeFlxrREAbKFKdn+rcX9fb3bmP x4a8gSBX0VcS6uq5yWMCBPf8x+IUA1dMXEjAG/I9vj9JJBIiN5xtGEJgJvhNkuam t383ZYHLlHfw1trdId2yMvYT2wm9nT8+g1CKdnJJSgbZdM40fYCH3vlm7TZjr33v a2GUBsM0/CUZlRCxsA7gyurVAAADS6UtfOF2lcLIxeC8FDdL/p4ytF3sYND4f0kp +gQn5+vTnfijfEqWbHWnkn9V8MSZd3ihVn74d2544SOLJ5j+i1vYfieBj4gXhOiA TMudpGh7wKOy/ajLRtSxtM1uYGtycA1e1jaBX8eXwfPyJemYlurK7DEyH2BlVbJY EUCGYvR96VNpDLpvBwB0+G4E1LJOpt+h4si03mQIfnX3um6hBmUGzGwyr14i7Qh6 mPT2i/xdZtUFD1Hp2cFCwVvkGzhorgM+ICgLOFF2FOuzBrC+zrQNj6Aom1bWakdw x/lNKSYmzypsCQC20lCme4SRyRfn/Tz/ylN95uvZvU5gr7Lhf4ECggEBAPwKNCGI 45B16n7CqnTziO3nZHO2xeKEyzLXbKK2f/AvYiQnZxRG8aY+DqoqnnBbXCXnd7LW nDjoOYI3l75n/F33+2IiyJUNSF2noLEu1T1gQVaFuU6p8bwCJ5rShuxXMVAGkw3z /bcTuaZIJU4KTNCP4A9wgyB40uwRrYiEQMaYXjeph71PTOEA7XseuiOhHnaiCaeg KVivOD9vR532Ai2mCmi/6oBtT/AjnbWLXXNJRp0OfPZ2nZ/Z8j0zmCMmbhMtpQe0 Utk5LaABCqRh6ZRp4bvoqgR7yrOAH1NUPPJhdrQywAl0UiXgnjhNixDp4kP/TLvE 70Z2i+u3ayssEnMCggEBAODdVUYSw3F+CQ8T+IGaTAU8lzE5fgxGZcxsnzrnzbCQ PLSuzx9FJJ8+gYCkD3FUg8vduN8uyy3YY13LFjOiJ6+TczglgmGgZM+2qxQ5mFyT 9FVijPUNtUuhJm3SBvHpJlzmcR/uNiIws55M+RbGSKB7kotf5FchZ2YBhZzpr7lG jn6x15ts6iSlxHnR5QAPvqgCOhUJnk8CiDaypx12MXRP/A/KZX8XAeRFIMmKSC6f O7kRY/xpSKxuyvACDybxhXbGP86t07ZXpXU8PmgU6yjnsGxQOg4iLlReI3jiaa7m TTeiNjW3Ra2pOBd5BWn3ecVvf4UHJsJs59euYWha2uMCggEBAMbLlYrN2hBbsXYC PUi5vktHs+fBRhwA+fVEDZ/Zqtfbx+EUYy2PN5MUZ6S4sPeWV/xdsgARXm9UW+fl yX6Zrmi/7Dvfi65lJ6sXSJv4gKFEhsSj/SGa0dylJm/rlhhcPb0NMnhS9s+sc0ZA qYwAe84VbXlAGW1HX7ZryyBekGkUTVxCD5q2LcFbZfUyq0bnEowoCs14eqREsGz4 bNie7eDrklJE7cYWcnLK5N4I6tC//z5p6w7LSFCJK5QyWdF/wlrGKeEFzkMf4mjN 6YL257H0QeRhA5k9uwgSCqNDUj8ruOExFl9erFzL6oAmSYYxtBJGEFQaZVCCuKJX reQDgxkCggEBANjfn6b94dJMJKCOOleFYVyLm6a2NIPSQcxv1wMRHqjz9BivoMQg A7oypuRTVzGOua6eIfffQcbttKh5fug9tNj59V5pjt5pu9E59LaE9hYT/Mt9SUXv +rL+sfmpX1lh7MYc225YaY2AOzyqMHNuug1OIYCa87e1V+xh+2PjXr/q9PPswm39 FbZSyrRTY/IzPUb9Hte7dxvs7UMT+2nG3Nu5aPox0sJIhmKK6Zx36jZNDWTpCO4g /R6RnNjuo36D4p0zh8bmkBKFZec0O1xXEJdbHiTZG6UWAmkMglnMxPES3daSdIZK RMHBO4AoELirHp71cp/yzccnElRKs1faiNECggEBAJg1b53r259txjDUxY922imF JXySNhRHlrQ6BYyfHJWMMEVNasd86wrc8VKgIqQcxtdfIL1dGxOu31xzmxsSmfjR 0aG51uHi/erTKeV0C3/bdZ8TgeTKhxXAVZXLuJ4i6HvdF1lAZmE0stXd7vA0bedJ 7RYKKnuW16qo05oNx/ADdjfCaOHA0cCfyPv294CQn0z4hyEVAbBykU/j6v0WbzS5 r187A8Q9L5pB57JnuY9nO7MvrINJVNbLPYjanqrkqvwDjiPkzETVm50mVtFYLWgw 8m7OLXEaFVJ4XA3C8e78bzDhSMvQTc8QVYmwj24gQ/uolftqdM4lEKpUucw/ECs= -----END RSA PRIVATE KEY-----""" # An expired self-signed cert for testing. 
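# X509_EXPIRED just below carries a deliberately short validity window back
# in 2016. A hedged, illustrative check that the fixture really is past its
# notAfter date (assumes the "cryptography" package; this helper is an
# illustrative addition and is not called by any test):
def _check_fixture_expired(pem_bytes):
    """Illustrative only: return True if the PEM cert's notAfter is past."""
    import datetime
    from cryptography import x509
    cert = x509.load_pem_x509_certificate(pem_bytes)
    # not_valid_after is a naive UTC datetime, so compare against utcnow().
    return cert.not_valid_after < datetime.datetime.utcnow()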
X509_EXPIRED_CN = 'www.example.com' X509_EXPIRED = b"""-----BEGIN CERTIFICATE----- MIIDfzCCAmegAwIBAgIJAKByYqy5fAfLMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV BAYTAlVTMQ0wCwYDVQQIDAREZWFkMRAwDgYDVQQHDAdUb3RhbGx5MQwwCgYDVQQK DANJQk0xGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAeFw0xNjA5MjQxODAxNTRa Fw0xNjA5MjUxODAxNTRaMFYxCzAJBgNVBAYTAlVTMQ0wCwYDVQQIDAREZWFkMRAw DgYDVQQHDAdUb3RhbGx5MQwwCgYDVQQKDANJQk0xGDAWBgNVBAMMD3d3dy5leGFt cGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKZXgoeI7tzM KLUs0Fp9qEnILZTH2Xj5Q/j0KTkLD5A3VLROJof1lMb8voAkF16jnCC+A4RuphEO QtEUAUwlbvYv0rrSEWKYtkGKpEAg7mH05/BiiLSuveIQido6u4659FJ3bgYNE/P0 xb8vMuxy4M7JH1OF8XReI05UfLqGr5isjri/IS4ofZy97aMciDdqeAs+yDg6lCpk e0UcPLmJw5tIMg30Pl0AsxkD9U5JejAHEOvYgNgCyk9lo8uf/S41pzmU4Wc9TmL0 WDunicpqngmajV+V45VN6t4NDHo093kyZ/4gJcqRfsNQ2DQRyFzd8Yjllz36dO9B HT2NhI9yKhECAwEAAaNQME4wHQYDVR0OBBYEFBRND67rjYxqeUFH3p9+vSoQS1Qe MB8GA1UdIwQYMBaAFBRND67rjYxqeUFH3p9+vSoQS1QeMAwGA1UdEwQFMAMBAf8w DQYJKoZIhvcNAQELBQADggEBAFOcwM8mlTsP+sw4yhxcPD72qiIn4DRI++17Yoeu eVJWO5ZlelOaBVdMFV573/7LR0j4y22RNPiFRCj+oG+w/kQLVRBnxj/LhZj3T+sQ DIlahXIWCroPqVXEbRejxOLugNLS7SoWp9pKqWXPawkyHIS0Ht7LyZQYm9Pt7PKc uerOX3Qzt+W2nmgxA3mHhL76tCRqDATdn5guLH1F0g29WB614oI43kSt4WW0i4JT S+aDmoFsO3i/E+x+qm5H0swjU9dLCvdMjo0VUpk5f1aJJ10xpeKTUYOB55haalJI j+/EXRZyEna+vPrS8mCl0GMvlFm0ZWFdWaWPR7l3J/J4is0= -----END CERTIFICATE-----""" X509_EXPIRED_KEY = b"""-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEApleCh4ju3MwotSzQWn2oScgtlMfZePlD+PQpOQsPkDdUtE4m h/WUxvy+gCQXXqOcIL4DhG6mEQ5C0RQBTCVu9i/SutIRYpi2QYqkQCDuYfTn8GKI tK694hCJ2jq7jrn0UnduBg0T8/TFvy8y7HLgzskfU4XxdF4jTlR8uoavmKyOuL8h Lih9nL3toxyIN2p4Cz7IODqUKmR7RRw8uYnDm0gyDfQ+XQCzGQP1Tkl6MAcQ69iA 2ALKT2Wjy5/9LjWnOZThZz1OYvRYO6eJymqeCZqNX5XjlU3q3g0MejT3eTJn/iAl ypF+w1DYNBHIXN3xiOWXPfp070EdPY2Ej3IqEQIDAQABAoIBAD4c/0jNASTBt5Gv oj2oHmcusJaV6ccajR8xTRNX5f/cKW0KoaizM1L6ncgLsg5M2cgALCAPkUNdJ+Ya qkFc2Qpk4TORrZw7mhLvSlYH9fvuD43bvWB6v7zioBc1R0QMfAcvQY5Q49p81DqH zWQtoXSV9XSi1360iEp/kfO0x20ip9rP7qDOKuN5gdvRa8sXKD+jnmp17e1rx+fS U0UoReBUbn4iLbOdEVyH9HSqTB+p5nPq63KJBioJZMGhLNntKMAff8uMiVhhb7Io vIIHgoIfFce9YwC4fn+0UDrBCAx+SAyw2cmmMyXIqhd3c2Ca7zFmezSuC3H5Y4si 535VO2ECgYEA2/7I8QOkrRx+Bd2sxe6n+jeA6yRVqBb+bE6rZUUQUlSAFqoM8RKJ K8cRjePmtkd9UkGrfDN6XTyqKD5Vt1Cd7FNl5Q08C/WP5VUOaKgdq3MkeOoJT8xf c0LWAoRw5InP7n6TRASExekagQEIMMOHZFtwSjz+HauLqohrk6CaBRcCgYEAwZDK J0mYspt8Wwlwrv0ouQG6tCy0NkWCdNs4EbT12qH6FKsdUuvJku+Zr1amCq/8apTn pdn2YlRDp5+jqsKf0dui5M2zC088XJov3VF1Ujm4BtSVwRRhi7BxM9BCv1txUs20 e2XPKV7RKexOL6iWPWDIcB6ZFhJdxQI5mOF9ExcCgYEAmLHPZvnQYxdToV6rfPaZ QOMlaBBgI7tR/HreG/xDx+E+xnxhXzIuY2RYmtOEXyBfq6hJDnvsgqqIsEYT2Jjs BAwevUziUKqwpczTo3CMp2PT/Nj0fZ6s4aOSR00FzpqY6ECSlrNMNNIGw2Oj+7S7 VLziw6Rx/MYEuujVQjJGtSECgYAXlwC8BwEgC0j9g0oDWYEEAwzw9l3EG3aJrUnJ BqfLzF/A8xWwzGGVkbPGJaY4uTfm+Vy93rFjTGeuXwtAPVXi6oSQo+0FHNP7aSMa Mto8eiJOWswhas10i71QFjp8PbWy5LTxMPgtT4voMw9YSZB9zHTBDUmU4gohf2Lr mdd3YwKBgHu4IlMxt40w+Bn5xasvACB5iaO5EBKO7rp0ba0Po3t9SG9iPSr8Yruq Qv1cDRGlM5jHboqSM2ju2/b/Wc2ezdjoktrwgG+ElQuptwwNIsFrooHMLMY3B53k Je8uvLnAPRLL95ZhclaSw2vAxmaiGIsm7WGhjnRQ2Vntgd6fNgY9 -----END RSA PRIVATE KEY-----""" # Other certificates and keys used in tests. 
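# ALT_EXT_CRT below carries an unusually rich subjectAltName extension (DNS
# names, IP addresses, a URI and several directoryName entries), which is
# what the "CNFromSubject"/"cnFromAltName" naming in the cert refers to. A
# hedged, illustrative way to pull out the DNS entries for inspection
# (assumes the "cryptography" package; this helper is an illustrative
# addition and is not used by the tests):
def _list_dns_sans(pem_bytes):
    """Illustrative only: return the DNSName SAN values of a PEM cert."""
    from cryptography import x509
    cert = x509.load_pem_x509_certificate(pem_bytes)
    # Fetch the subjectAltName extension and keep only the DNSName entries.
    ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
    return ext.value.get_values_for_type(x509.DNSName)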
ALT_EXT_CRT = b"""-----BEGIN CERTIFICATE----- MIIGqjCCBZKgAwIBAgIJAIApBg8slSSiMA0GCSqGSIb3DQEBBQUAMIGLMQswCQYD VQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4w HAYDVQQKDBVPcGVuU3RhY2sgRXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24g TGJhYXMxHjAcBgNVBAMMFXd3dy5DTkZyb21TdWJqZWN0Lm9yZzAeFw0xNTA1MjEy MDMzMjNaFw0yNTA1MTgyMDMzMjNaMIGLMQswCQYDVQQGEwJVUzEOMAwGA1UECAwF VGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4wHAYDVQQKDBVPcGVuU3RhY2sg RXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24gTGJhYXMxHjAcBgNVBAMMFXd3 dy5DTkZyb21TdWJqZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBALL1nmbDPUDps84i1sM3rhHrc+Dlu0N/wKQWKZFeiWUtF/pot19V3o0yXDps g7W5RkLMTFkZEcnQpyGdpAGjTjzmNXMZw99EzxsmrR3l6hUEISifVbvEuftYZT6j PxM5ML6WAjFNaBEZPWtZi8CgX5xdjdrDNndwyHob49n7Nc/h1kVqqBqMILabTqC6 yEcxS/B+DugVuuYbEdYYYElQUMfM+mUdULrSqIVl2n5AvvSFjWzWzfgPyp4QKn+f 7HVRT62bh/XjQ88n1tMYNAEqixRZTPgqY1LFl9VJVgRp9fdL6ttMurOR3C0STJ5q CdKBL7LrpbY4u8dEragRC6YAyI8CAwEAAaOCAw0wggMJMAkGA1UdEwQCMAAwCwYD VR0PBAQDAgXgMIIC7QYDVR0RBIIC5DCCAuCCGHd3dy5ob3N0RnJvbUROU05hbWUx LmNvbYIYd3d3Lmhvc3RGcm9tRE5TTmFtZTIuY29tghh3d3cuaG9zdEZyb21ETlNO YW1lMy5jb22CGHd3dy5ob3N0RnJvbUROU05hbWU0LmNvbYcECgECA4cQASNFZ4mr ze/3s9WR5qLEgIYWaHR0cDovL3d3dy5leGFtcGxlLmNvbaSBjzCBjDELMAkGA1UE BhMCVVMxDjAMBgNVBAgMBVRleGFzMRQwEgYDVQQHDAtTYW4gQW50b25pbzEeMBwG A1UECgwVT3BlblN0YWNrIEV4cGVyaW1lbnRzMRYwFAYDVQQLDA1OZXV0cm9uIExi YWFzMR8wHQYDVQQDDBZ3d3cuY25Gcm9tQWx0TmFtZTEub3JnpIGPMIGMMQswCQYD VQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4w HAYDVQQKDBVPcGVuU3RhY2sgRXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24g TGJhYXMxHzAdBgNVBAMMFnd3dy5jbkZyb21BbHROYW1lMi5vcmekgY8wgYwxCzAJ BgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEUMBIGA1UEBwwLU2FuIEFudG9uaW8x HjAcBgNVBAoMFU9wZW5TdGFjayBFeHBlcmltZW50czEWMBQGA1UECwwNTmV1dHJv biBMYmFhczEfMB0GA1UEAwwWd3d3LmNuRnJvbUFsdE5hbWUzLm9yZ6SBjzCBjDEL MAkGA1UEBhMCVVMxDjAMBgNVBAgMBVRleGFzMRQwEgYDVQQHDAtTYW4gQW50b25p bzEeMBwGA1UECgwVT3BlblN0YWNrIEV4cGVyaW1lbnRzMRYwFAYDVQQLDA1OZXV0 cm9uIExiYWFzMR8wHQYDVQQDDBZ3d3cuY25Gcm9tQWx0TmFtZTQub3JnMA0GCSqG SIb3DQEBBQUAA4IBAQCS6iDn6R3C+qJLZibaqrBSkM9yu5kwRsQ6lQ+DODvVYGWq eGkkh5o2c6WbJlH44yF280+HvnJcuISD7epPHJN0vUM9+WMtXfEli9avFHgu2JxP 3P0ixK2kaJnqKQkSEdnA/v/eWP1Cd2v6rbKCIo9d2gSP0cnpdtlX9Zk3SzEh0V7s RjSdfZoAvz0aAnpDHlTerLcz5T2aiRae2wSt/RLA3qDO1Ji05tWvQBmKuepxS6A1 tL4Drm+OCXJwTrE7ClTMCwcrZnLl4tI+Z+X3DV92WQB8ldST/QFjz1hgs/4zrADA elu2c/X7MR4ObOjhDfaVGQ8kMhYf5hx69qyNDsGi -----END CERTIFICATE-----""" ALT_EXT_CRT_KEY = b""" -----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAsvWeZsM9QOmzziLWwzeuEetz4OW7Q3/ApBYpkV6JZS0X+mi3 X1XejTJcOmyDtblGQsxMWRkRydCnIZ2kAaNOPOY1cxnD30TPGyatHeXqFQQhKJ9V u8S5+1hlPqM/EzkwvpYCMU1oERk9a1mLwKBfnF2N2sM2d3DIehvj2fs1z+HWRWqo GowgtptOoLrIRzFL8H4O6BW65hsR1hhgSVBQx8z6ZR1QutKohWXafkC+9IWNbNbN +A/KnhAqf5/sdVFPrZuH9eNDzyfW0xg0ASqLFFlM+CpjUsWX1UlWBGn190vq20y6 s5HcLRJMnmoJ0oEvsuultji7x0StqBELpgDIjwIDAQABAoIBAC3DX6FZtfU+jgtd n1vGhk3wzu4o8S0+ow2S2UhiS3JDCMmxM4s+ky26Phl2nGvBGDWGttNl9MWOBN80 x7bfgudR20M2yH70wp1n04c8vxJmvu/7ZtogYYrjvOg6qKuKyWtDQwZGjCErOiiU eodku25qAhd6Khh7D9kh/q9EbSteYFXsqJiNrY4ul1+cROMZpHx63xY6AzPmkvSU garkgY4rw9E71t7it2laWkRKVsd+kEjayritdEEliNMVFFtrGEgplYkmLxGf0HLi ROFVMCLRW/P12JpXllFPrBb8rlPL4w1c/s+yStohT0K+o4FLXhsf/inxmfc9XnZX dJm0k/ECgYEA47FpV1caMk+TNPfu318VCGRmjwpXdmkNaUiX2Uvs3xIKQ6KJmpo3 sj0YjQEmQVz8s6geStvU1LdPxgsWZfbDt31M6SNwylh82ABQF1bZyrcMRxM8bHhe bhDITM1dAn6aROkS1cBpfR9NJOFD850lmJvBGR9ORVBGyucTKH5uXxkCgYEAyTU0 zQKW2aU3J7mTCC9cp+eSD3fubJpa3ML5XfQ8YNID4PsxWglNKPcOTC4yaSfxVmyk S0WIQUazCstszQsvwy9YyHtpkMq+0lyCPvrYnmRV0zx5zT155V2zcEh/oj64eoee W5kvJSs/x6vT+lEN0TDEJ2gKEaJuBt6JG6P04ecCgYBSNw1CbEEZSYJt7dhi74I4 
tYgSvjk2mFgvW/b4j2HIaksqgNYO7QCPa2AiCfg2Qc09UcceYKJI7Kfxaq97wc6J wsSyqglgBvONSw+gXcvmVpIoV9nJkO0H8SdiFAUxkWVC3KXgaMmuVE8WsgBHRsb8 g8EFwTgR7xqgyS8xv/U6gQKBgQCdUr/dSJgAx6EPq5degAHXu0ZGWAUR38MJ+F2Y 6/5FyhCEWoRlHP66+CmywTBjbnrSk5IG1PBL8ebOmu6QiJ2o5R1rbKvHLe/0dabV bbfwaQ1+ZDvskZP9Fr3WHqnFh3shO2dDwcvOKTnuetj9UWEXXyUQltXAohubvWbB OPqhowKBgB3t2oUSFJI8fSNQnQNkcespJTddr0oLEwgsIl4Q7rdFHLr+/c46svjJ kPMtpfxDQvkgK2aWpS4OP0E2vSU/IfMEDmlypfKe2SaTtFehZSUwR4R1/ZhSL3iS iMwJYgm98P27s4TEMdhlPNVJrj1FrD+4VrgpOsoM20EkZnTvel9s -----END RSA PRIVATE KEY-----""" ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE = 'test_passphrase' ENCRYPTED_PKCS8_CRT_KEY = b"""-----BEGIN ENCRYPTED PRIVATE KEY----- MIIE6TAbBgkqhkiG9w0BBQMwDgQIT04zko6pmJICAggABIIEyL/79sqzTQ7BsEjY ao2Uhh3//mpNJfCDhjSZOmWL7s4+161cEqpxrfxo4bHH8fkZ60VZUQP8CjwwQUhP 4iwpv2bYbQwzlttZwTC6s28wh7FRtgVoVPTwvXJa6fl2zAjLtsjwLZ/556ez9xIJ 67hxkIK2EzGQaeEKI1+vVF5EKsgKiPEmgspOBxRPoVWTx49NooiakGnwaBoDyTob 8FMr8mF1EheNQ4kl1bPrl+csD7PPnfbWUdNVvMljEhS3cYamQDPEWyAzvaIr0rHh /6h80L/G2+0fensrTspWJcjX+XDBwQPk+YMic0TJ3KvkC7p2iNJhjNrjhQ+APZWq xYrjfcmdK0RaaoqN+1zeE1P2kWIJx9CQZVMeGhVzzcmPwJPDnJFpkU+8cgTWnUr/ Fh8YtDoDzLiAUcmV1Kk7LYtYPHuU8epuz5PYm49TbWzdS7PX5wqFAFmrVt5jysm4 D/Ox0r4KV1t7D/1gc1WRIu8oUXkIglCHWNpTyMK0kFPctAf/ua+DUFRE4eSx3rsX ZKIymdF9v/WF1Ud0tsNeudQbVeXWS6UCR8m/rqe81W4npQm/uqUNla+6yaYUmHlk tvw/m6pt+jKhn0XIRkMwHrTpIaMVvInMg0xpkRuc7Xj5A7vNnkypZRNZJHgy7WWC 6GpOCWJOltYaNy7tmAkSUHJ6kNjXK5a4fi30HknEaqKjFTQNGvcybulJ3MXUzds0 MJoTpvQfLzYQbMYZ/XRGND4lgeEbs29nWLPae8D5XlDeZQMin8EukPko8u8+YGbU eWGOvDc+4/xrWrsq1i6R0uWq+Cyoql8oh0PNBlM04S7GAbu1pOD/tPcq/GNYcv/Q vJcIz9KA3BNepq7tC8D88ggEvFjTsHKeW/OnuCxKducSna4Mq+GebU52tKjkLjFC eLG4Vx0BY5xPH3gd7iyuAf7S+08BbinNZWjHLpdmR3vKK5YbLPiGSfcYQdClr6BK 9vNWH4TXmZMV+rWtfSeM/cbhCHwxT5Jx6N0OFAxOblQClWnUD79nGkEgn/GoY/Aj FPNj8u2U/mJHgFHH3ClidYL9jJUvhGpTixB8nGgMjJ0wvFcp+5OysG3TsjqYkwR6 RRNBmM+iLEUFTrMZYb+edHvGJsMEMZ0qvjmZDsfDz6ax5M9zH/ORFcGplgIec8kj I106+dqAVVrv1CrBf2N/pxV0OXVhgl6ECe/Ee1xYC2e2CiEgUnQtedu8ekgPgp73 tHcAiWMamLPTwXuL7jFtvWaQfkYBmrBdEx54+eZOfH/NgV3o8gbaWNHSxbfbwlXN MvyJidZGkXU0DJtUUnO5i2S7ftKCdOzrrSA8HDTvxFUhxretYpF3NzPYpYkM7WJX GM7bTMn37AWYqLZmdYYdjh1ZOH/wsM/3uxGBpyEyy4Urrr1ux7X1P0cL0O2P/72h GRd499JLrRMrmmtQ4KrN7GCHdctvujhDP8zvmnaEyGVzg88XmDg50ZF3+8DmOOgX EMZEYHO2Wi2uyFotFtZCuqoOJmGPPeGV8QrsRs82hnL1bcd6REUTWk0KsTt13lvF WwMJugHFk5NQuse3P4Hh9smQrRrv1dvnpt7s4yKStKolXUaFWcXJvXVaDfR5266Y p7cuYY1cAyI7gFfl5A== -----END ENCRYPTED PRIVATE KEY-----""" UNENCRYPTED_PKCS8_CRT_KEY = b"""-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCy9Z5mwz1A6bPO ItbDN64R63Pg5btDf8CkFimRXollLRf6aLdfVd6NMlw6bIO1uUZCzExZGRHJ0Kch naQBo0485jVzGcPfRM8bJq0d5eoVBCEon1W7xLn7WGU+oz8TOTC+lgIxTWgRGT1r WYvAoF+cXY3awzZ3cMh6G+PZ+zXP4dZFaqgajCC2m06gushHMUvwfg7oFbrmGxHW GGBJUFDHzPplHVC60qiFZdp+QL70hY1s1s34D8qeECp/n+x1UU+tm4f140PPJ9bT GDQBKosUWUz4KmNSxZfVSVYEafX3S+rbTLqzkdwtEkyeagnSgS+y66W2OLvHRK2o EQumAMiPAgMBAAECggEALcNfoVm19T6OC12fW8aGTfDO7ijxLT6jDZLZSGJLckMI ybEziz6TLbo+GXaca8EYNYa202X0xY4E3zTHtt+C51HbQzbIfvTCnWfThzy/Ema+ 7/tm2iBhiuO86Dqoq4rJa0NDBkaMISs6KJR6h2S7bmoCF3oqGHsP2SH+r0RtK15g VeyomI2tji6XX5xE4xmkfHrfFjoDM+aS9JSBquSBjivD0TvW3uK3aVpaREpWx36Q SNrKuK10QSWI0xUUW2sYSCmViSYvEZ/QcuJE4VUwItFb8/XYmleWUU+sFvyuU8vj DVz+z7JK2iFPQr6jgUteGx/+KfGZ9z1edld0mbST8QKBgQDjsWlXVxoyT5M09+7f XxUIZGaPCld2aQ1pSJfZS+zfEgpDoomamjeyPRiNASZBXPyzqB5K29TUt0/GCxZl 9sO3fUzpI3DKWHzYAFAXVtnKtwxHEzxseF5uEMhMzV0CfppE6RLVwGl9H00k4UPz nSWYm8EZH05FUEbK5xMofm5fGQKBgQDJNTTNApbZpTcnuZMIL1yn55IPd+5smlrc wvld9Dxg0gPg+zFaCU0o9w5MLjJpJ/FWbKRLRYhBRrMKy2zNCy/DL1jIe2mQyr7S 
XII++tieZFXTPHnNPXnlXbNwSH+iPrh6h55bmS8lKz/Hq9P6UQ3RMMQnaAoRom4G 3okbo/Th5wKBgFI3DUJsQRlJgm3t2GLvgji1iBK+OTaYWC9b9viPYchqSyqA1g7t AI9rYCIJ+DZBzT1Rxx5gokjsp/Fqr3vBzonCxLKqCWAG841LD6Bdy+ZWkihX2cmQ 7QfxJ2IUBTGRZULcpeBoya5UTxayAEdGxvyDwQXBOBHvGqDJLzG/9TqBAoGBAJ1S v91ImADHoQ+rl16AAde7RkZYBRHfwwn4XZjr/kXKEIRahGUc/rr4KbLBMGNuetKT kgbU8Evx5s6a7pCInajlHWtsq8ct7/R1ptVtt/BpDX5kO+yRk/0WvdYeqcWHeyE7 Z0PBy84pOe562P1RYRdfJRCW1cCiG5u9ZsE4+qGjAoGAHe3ahRIUkjx9I1CdA2Rx 6yklN12vSgsTCCwiXhDut0Ucuv79zjqy+MmQ8y2l/ENC+SArZpalLg4/QTa9JT8h 8wQOaXKl8p7ZJpO0V6FlJTBHhHX9mFIveJKIzAliCb3w/buzhMQx2GU81UmuPUWs P7hWuCk6ygzbQSRmdO96X2w= -----END PRIVATE KEY-----""" EXPECTED_IMD_TEST_SUBJS = ["IMD3", "IMD2", "IMD1"] TEST_X509_IMDS = b"""Junk -----BEGIN CERTIFICATE----- MIIBhDCCAS6gAwIBAgIGAUo7hO/eMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT BElNRDIwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD EwRJTUQzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKHIPXo2pfD5dpnpVDVz4n43 zn3VYsjz/mgOZU0WIWjPA97mvulb7mwb4/LB4ijOMzHj9XfwP75GiOFxYFs8O80C AwEAAaNwMG4wDwYDVR0TAQH/BAUwAwEB/zA8BgNVHSMENTAzgBS6rfnABCO3oHEz NUUtov2hfXzfVaETpBEwDzENMAsGA1UEAxMESU1EMYIGAUo7hO/DMB0GA1UdDgQW BBRiLW10LVJiFO/JOLsQFev0ToAcpzANBgkqhkiG9w0BAQsFAANBABtdF+89WuDi TC0FqCocb7PWdTucaItD9Zn55G8KMd93eXrOE/FQDf1ScC+7j0jIHXjhnyu6k3NV 8el/x5gUHlc= -----END CERTIFICATE----- Junk should be ignored by x509 splitter -----BEGIN CERTIFICATE----- MIIBhDCCAS6gAwIBAgIGAUo7hO/DMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT BElNRDEwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD EwRJTUQyMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAJYHqnsisVKTlwVaCSa2wdrv CeJJzqpEVV0RVgAAF6FXjX2Tioii+HkXMR9zFgpE1w4yD7iu9JDb8yTdNh+NxysC AwEAAaNwMG4wDwYDVR0TAQH/BAUwAwEB/zA8BgNVHSMENTAzgBQt3KvN8ncGj4/s if1+wdvIMCoiE6ETpBEwDzENMAsGA1UEAxMEcm9vdIIGAUo7hO+mMB0GA1UdDgQW BBS6rfnABCO3oHEzNUUtov2hfXzfVTANBgkqhkiG9w0BAQsFAANBAIlJODvtmpok eoRPOb81MFwPTTGaIqafebVWfBlR0lmW8IwLhsOUdsQqSzoeypS3SJUBpYT1Uu2v zEDOmgdMsBY= -----END CERTIFICATE----- Junk should be thrown out like junk -----BEGIN CERTIFICATE----- MIIBfzCCASmgAwIBAgIGAUo7hO+mMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT BHJvb3QwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD EwRJTUQxMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAI+tSJxr60ogwXFmgqbLMW7K 3fkQnh9sZBi7Qo6AzUnfe/AhXoisib651fOxKXCbp57IgzLTv7O9ygq3I+5fQqsC AwEAAaNrMGkwDwYDVR0TAQH/BAUwAwEB/zA3BgNVHSMEMDAugBR73ZKSpjbsz9tZ URkvFwpIO7gB4KETpBEwDzENMAsGA1UEAxMEcm9vdIIBATAdBgNVHQ4EFgQULdyr zfJ3Bo+P7In9fsHbyDAqIhMwDQYJKoZIhvcNAQELBQADQQBenkZ2k7RgZqgj+dxA D7BF8MN1oUAOpyYqAjkGddSEuMyNmwtHKZI1dyQ0gBIQdiU9yAG2oTbUIK4msbBV uJIQ -----END CERTIFICATE-----""" PKCS12_BUNDLE = pkg_resources.resource_string( 'octavia.tests.unit.common.sample_configs', 'sample_pkcs12.p12') X509_CA_CERT_CN = 'ca.example.org' X509_CA_CERT_SHA1 = '3d52837151662dbe7c01a97fad0aab5f61f78280' X509_CA_CERT = b"""-----BEGIN CERTIFICATE----- MIIFoDCCA4igAwIBAgIJAPBfmRtfTNF2MA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV BAYTAlVTMQ8wDQYDVQQIDAZPcmVnb24xEjAQBgNVBAoMCU9wZW5TdGFjazEQMA4G A1UECwwHT2N0YXZpYTEXMBUGA1UEAwwOY2EuZXhhbXBsZS5vcmcwHhcNMTkwMjE0 MDQ1MjQwWhcNMjkwMjExMDQ1MjQwWjBdMQswCQYDVQQGEwJVUzEPMA0GA1UECAwG T3JlZ29uMRIwEAYDVQQKDAlPcGVuU3RhY2sxEDAOBgNVBAsMB09jdGF2aWExFzAV BgNVBAMMDmNhLmV4YW1wbGUub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC CgKCAgEAshn5CRt949+edmECCpaQtrCnjiA8KFsNCb9Dv70LkK9XbHtFkJuUgJR1 VE1OhGK057k/z1gEYUIFxw8s9wKMaAxta7CwxkpJR8oMa60nx4hbNLF1Q5xO0P40 YW/fSxuBmztI8EtYGUCGDLpktUTrewWu68nnWV2Wyx5B69Z14qrDGk7b6VH2atWD qJwDGrPkekNSUiE2Z/cCcTDH2t1jqtlGsiS8tDDH4h35ywm6fY3V/11hHT76dxDz LhrLa2aVXeVtqGMTOHkXOFEwcQNfh78z7qBOZy9O8bCCepCmJ56ff9E3kXd1jam2 
6TiZikOVWhDOv668IosYzCU2gllKYG++7PITb+12VaVqJwWf8G9rFQ0xptZuXmHE BTFCzxWxK8vSs85aBYWFd8eLmWrEZyEk1JfD7jU4OZm9BK3qoRvfwDwzPnmZIpCt YPhYVi5F1W/w3Iw1mTqxkEMuy6mlMn14nKmA2seSAkPSJ+b5C92dqhwN1cvgUVhL bIl3Yurj3ayvT+vRCYadQZJif+e/dxUrcRZ7oPpV23QxVgEZ+Yd+++3XA09LSdhQ lLl/3/I+MNvCxHEKx4imCGLAmMOFL7u9Af/delFRVKDXferYb/HIxkiJGJco96J5 RvYsXGr2wTCQcCRZjv1+LlAlKUAgJMeVkszKQ56pCI7tvyB2gG8CAwEAAaNjMGEw HQYDVR0OBBYEFN/4bLQKWNMwoLzQ2du9NT33x7+DMB8GA1UdIwQYMBaAFN/4bLQK WNMwoLzQ2du9NT33x7+DMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGG MA0GCSqGSIb3DQEBCwUAA4ICAQB2nU0y43nVDKgL1PPIdVDnYa2vjH+DBkSAVaTv 73OKdimh4Kzy0YYlrKzeNiE2k4Q/nUjTbAN13DvQbjRFQZx17L2Gckv+cMFyB7yb vlsBeySarJKhYeKhlLrd20Qn7GiyHGkXUshnSVQm9/HFlegoMMjQyExGsA1PYU6W mycNYv5yWTLgbaFNfIYjL6AcIVtxMMZoD4XgpVpETwNIoble+B3sYQ05dTYxMyT0 aHjafUPedasqXFoo5TJCJ7Wcq92dBwUXpgkHsf3PPKy8VVukWUaCP9ECAxHLmEPj 0tyElkvy55lauzVing7F/uRF6DIlRz6fH0y92qFJ5/t46L9C3V23+zIF80CJeZ21 /goal0NlAyjhI4zfpwwAUeqnAElncNhFcmTWHLyTGQyA4rYHDl5fZIhk6MFYdLwi ml96m+T1z8iPqmrTtd6P3SVmEkRvSt8L7ItL82VcDELUCXJoSKEm5im84yEiPdUs emQtJbioTM4+Vze32U6MSznelKiK3dkNPnNiKA6xsjxNC+Hp2LzcANg3/SUUC9ea pDEMmP7TJMJ3dG63RtAzQiGfRO18BIVOrRUfQpR32FkrYd9wCE02cnv0QZzY9NYt 6hAlAa6Motve8UFewoO4pNknj3MBEN+64wDzHaP6VPysNJwrAlgaHfGDU6xJffAd uCWDmw== -----END CERTIFICATE-----""" X509_CA_CRL = b"""-----BEGIN X509 CRL----- MIIC7zCB2AIBATANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJVUzEPMA0GA1UE CAwGT3JlZ29uMRIwEAYDVQQKDAlPcGVuU3RhY2sxEDAOBgNVBAsMB09jdGF2aWEx FzAVBgNVBAMMDmNhLmV4YW1wbGUub3JnFw0xOTAyMTkwMjAxNTlaFw0xOTAzMjEw MjAxNTlaMBUwEwICEAAXDTE5MDIxOTAyMDAyMlqgMDAuMB8GA1UdIwQYMBaAFN/4 bLQKWNMwoLzQ2du9NT33x7+DMAsGA1UdFAQEAgIQADANBgkqhkiG9w0BAQsFAAOC AgEAcPtYSLEkJwvqaAfMGXwI2uTTKWURqtwfcBMYdVF1u2xsBsrKR6ogpBjzc1sX A5WN9Tz5TXPVd38DTEGlCGLQ7wZ8wwYAR2sArHjw/zcsOJcFVTWtpX+2UAbpqis9 rBq7K6TF2m1fYb0RJg0AUbja/wfpghoEjfFx8FjIa8WAqqazyWR9vslm7kSoEgr+ MDV7agVK+h1n68hdLA9osUyPaAobus5FcVlXePPp5Ab8/vx1b2/Y+VXHaJXTZCin FLQaxaH0PsMCKN/T52GPMRKa2Cc6IEaDFgE1ZlA8nP5t2tA7MFORI8dix6jIzBJD W2CRf1Oxkrd3iqs1IljtlKHKMUTS67lfA9EwKlt8dR+KwH/WT23LSIoC9NnS3DP+ aT3t52soCpjXbfl8fgs62bome1/88BoNIa2T1Mj6F0aPvepLsFB/UrXWhADFj+DX 7WclP62BNBCTlUNvMF0eC9o7r5xeazo53KH1KI62qlFrz5MbRCG8g0JtTFqsMJld phYuPfZekoNbsOIPDTiPFniuP2saOF4TSRCW4KnpgblRkds6c8X+1ExdlSo5GjNa PftOKlYtE7T7Kw4CI9+O2H38IUOYjDt/c2twy954K4pKe4x9Ud8mImpS/oEzOsoz /Mn++bjO55LdaAUKQ3wa8LZ5WFB+Gs6b2kmBfzGarWEiX64= -----END X509 CRL-----""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/common/sample_data_models.py0000664000175000017500000007512500000000000023453 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
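# Illustrative usage sketch (an editorial addition, not part of the original
# header): the fixtures defined below are typically consumed by the driver
# conversion unit tests roughly like
#
#     from octavia.tests.common import sample_data_models
#     sample = sample_data_models.SampleDriverDataModels()
#     # sample.db_* attributes hold octavia data models; the matching
#     # sample.provider_* attributes hold the octavia-lib driver
#     # representations they are expected to convert to.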
import copy import datetime from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.common import constants as lib_consts from oslo_utils import uuidutils from octavia.common import constants from octavia.common import data_models class SampleDriverDataModels(object): def __init__(self): self.project_id = uuidutils.generate_uuid() self.lb_id = uuidutils.generate_uuid() self.ip_address = '192.0.2.30' self.port_id = uuidutils.generate_uuid() self.network_id = uuidutils.generate_uuid() self.subnet_id = uuidutils.generate_uuid() self.qos_policy_id = uuidutils.generate_uuid() self.lb_name = uuidutils.generate_uuid() self.lb_description = uuidutils.generate_uuid() self.flavor_id = uuidutils.generate_uuid() self.flavor_profile_id = uuidutils.generate_uuid() self.listener1_id = uuidutils.generate_uuid() self.listener2_id = uuidutils.generate_uuid() self.default_tls_container_ref = uuidutils.generate_uuid() self.sni_container_ref_1 = uuidutils.generate_uuid() self.sni_container_ref_2 = uuidutils.generate_uuid() self.client_ca_tls_certificate_ref = uuidutils.generate_uuid() self.client_crl_container_ref = uuidutils.generate_uuid() self.pool_sni_container_ref = uuidutils.generate_uuid() self.pool_ca_container_ref = uuidutils.generate_uuid() self.pool_crl_container_ref = uuidutils.generate_uuid() self.pool1_id = uuidutils.generate_uuid() self.pool2_id = uuidutils.generate_uuid() self.hm1_id = uuidutils.generate_uuid() self.hm2_id = uuidutils.generate_uuid() self.member1_id = uuidutils.generate_uuid() self.member2_id = uuidutils.generate_uuid() self.member3_id = uuidutils.generate_uuid() self.member4_id = uuidutils.generate_uuid() self.l7policy1_id = uuidutils.generate_uuid() self.l7policy2_id = uuidutils.generate_uuid() self.l7rule1_id = uuidutils.generate_uuid() self.l7rule2_id = uuidutils.generate_uuid() self.created_at = datetime.datetime.now() self.updated_at = (datetime.datetime.now() + datetime.timedelta(minutes=1)) self._common_test_dict = { lib_consts.PROVISIONING_STATUS: constants.ACTIVE, lib_consts.OPERATING_STATUS: constants.ONLINE, lib_consts.PROJECT_ID: self.project_id, constants.CREATED_AT: self.created_at, constants.UPDATED_AT: self.updated_at, constants.ENABLED: True} # Setup Health Monitors self.test_hm1_dict = { lib_consts.ID: self.hm1_id, lib_consts.TYPE: constants.HEALTH_MONITOR_PING, lib_consts.DELAY: 1, lib_consts.TIMEOUT: 3, lib_consts.FALL_THRESHOLD: 1, lib_consts.RISE_THRESHOLD: 2, lib_consts.HTTP_METHOD: lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET, lib_consts.URL_PATH: '/', lib_consts.EXPECTED_CODES: '200', lib_consts.NAME: 'hm1', lib_consts.POOL_ID: self.pool1_id, lib_consts.HTTP_VERSION: 1.0, lib_consts.DOMAIN_NAME: None, lib_consts.PROJECT_ID: self.project_id} self.test_hm1_dict.update(self._common_test_dict) self.test_hm2_dict = copy.deepcopy(self.test_hm1_dict) self.test_hm2_dict[lib_consts.ID] = self.hm2_id self.test_hm2_dict[lib_consts.NAME] = 'hm2' self.test_hm2_dict.update( {lib_consts.HTTP_VERSION: 1.1, lib_consts.DOMAIN_NAME: 'testdomainname.com'}) self.db_hm1 = data_models.HealthMonitor(**self.test_hm1_dict) self.db_hm2 = data_models.HealthMonitor(**self.test_hm2_dict) self.provider_hm1_dict = { lib_consts.ADMIN_STATE_UP: True, lib_consts.DELAY: 1, lib_consts.EXPECTED_CODES: '200', lib_consts.HEALTHMONITOR_ID: self.hm1_id, lib_consts.HTTP_METHOD: lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET, lib_consts.MAX_RETRIES: 2, lib_consts.MAX_RETRIES_DOWN: 1, lib_consts.NAME: 'hm1', lib_consts.POOL_ID: self.pool1_id, lib_consts.PROJECT_ID: self.project_id, 
lib_consts.TIMEOUT: 3, lib_consts.TYPE: constants.HEALTH_MONITOR_PING, lib_consts.URL_PATH: '/', lib_consts.HTTP_VERSION: 1.0, lib_consts.DOMAIN_NAME: None} self.provider_hm2_dict = copy.deepcopy(self.provider_hm1_dict) self.provider_hm2_dict[lib_consts.HEALTHMONITOR_ID] = self.hm2_id self.provider_hm2_dict[lib_consts.NAME] = 'hm2' self.provider_hm2_dict.update( {lib_consts.HTTP_VERSION: 1.1, lib_consts.DOMAIN_NAME: 'testdomainname.com'}) self.provider_hm1 = driver_dm.HealthMonitor(**self.provider_hm1_dict) self.provider_hm2 = driver_dm.HealthMonitor(**self.provider_hm2_dict) # Setup Members self.test_member1_dict = { lib_consts.ID: self.member1_id, lib_consts.POOL_ID: self.pool1_id, constants.IP_ADDRESS: '192.0.2.16', lib_consts.PROTOCOL_PORT: 80, lib_consts.WEIGHT: 0, lib_consts.BACKUP: False, lib_consts.SUBNET_ID: self.subnet_id, lib_consts.PROJECT_ID: self.project_id, lib_consts.NAME: 'member1', lib_consts.OPERATING_STATUS: lib_consts.ONLINE, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, constants.ENABLED: True, constants.CREATED_AT: self.created_at, constants.UPDATED_AT: self.updated_at, lib_consts.MONITOR_ADDRESS: '192.0.2.26', lib_consts.MONITOR_PORT: 81} self.test_member1_dict.update(self._common_test_dict) self.test_member2_dict = copy.deepcopy(self.test_member1_dict) self.test_member2_dict[lib_consts.ID] = self.member2_id self.test_member2_dict[constants.IP_ADDRESS] = '192.0.2.17' self.test_member2_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.27' self.test_member2_dict[lib_consts.NAME] = 'member2' self.test_member3_dict = copy.deepcopy(self.test_member1_dict) self.test_member3_dict[lib_consts.ID] = self.member3_id self.test_member3_dict[constants.IP_ADDRESS] = '192.0.2.18' self.test_member3_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.28' self.test_member3_dict[lib_consts.NAME] = 'member3' self.test_member3_dict[lib_consts.POOL_ID] = self.pool2_id self.test_member4_dict = copy.deepcopy(self.test_member1_dict) self.test_member4_dict[lib_consts.ID] = self.member4_id self.test_member4_dict[constants.IP_ADDRESS] = '192.0.2.19' self.test_member4_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.29' self.test_member4_dict[lib_consts.NAME] = 'member4' self.test_member4_dict[lib_consts.POOL_ID] = self.pool2_id self.test_pool1_members_dict = [self.test_member1_dict, self.test_member2_dict] self.test_pool2_members_dict = [self.test_member3_dict, self.test_member4_dict] self.db_member1 = data_models.Member(**self.test_member1_dict) self.db_member2 = data_models.Member(**self.test_member2_dict) self.db_member3 = data_models.Member(**self.test_member3_dict) self.db_member4 = data_models.Member(**self.test_member4_dict) self.db_pool1_members = [self.db_member1, self.db_member2] self.db_pool2_members = [self.db_member3, self.db_member4] self.provider_member1_dict = {lib_consts.ADDRESS: '192.0.2.16', lib_consts.ADMIN_STATE_UP: True, lib_consts.MEMBER_ID: self.member1_id, lib_consts.MONITOR_ADDRESS: '192.0.2.26', lib_consts.MONITOR_PORT: 81, lib_consts.NAME: 'member1', lib_consts.POOL_ID: self.pool1_id, lib_consts.PROJECT_ID: self.project_id, lib_consts.PROTOCOL_PORT: 80, lib_consts.SUBNET_ID: self.subnet_id, lib_consts.WEIGHT: 0, lib_consts.BACKUP: False} self.provider_member2_dict = copy.deepcopy(self.provider_member1_dict) self.provider_member2_dict[lib_consts.MEMBER_ID] = self.member2_id self.provider_member2_dict[lib_consts.ADDRESS] = '192.0.2.17' self.provider_member2_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.27' self.provider_member2_dict[lib_consts.NAME] = 'member2' 
self.provider_member3_dict = copy.deepcopy(self.provider_member1_dict) self.provider_member3_dict[lib_consts.MEMBER_ID] = self.member3_id self.provider_member3_dict[lib_consts.ADDRESS] = '192.0.2.18' self.provider_member3_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.28' self.provider_member3_dict[lib_consts.NAME] = 'member3' self.provider_member3_dict[lib_consts.POOL_ID] = self.pool2_id self.provider_member4_dict = copy.deepcopy(self.provider_member1_dict) self.provider_member4_dict[lib_consts.MEMBER_ID] = self.member4_id self.provider_member4_dict[lib_consts.ADDRESS] = '192.0.2.19' self.provider_member4_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.29' self.provider_member4_dict[lib_consts.NAME] = 'member4' self.provider_member4_dict[lib_consts.POOL_ID] = self.pool2_id self.provider_pool1_members_dict = [self.provider_member1_dict, self.provider_member2_dict] self.provider_pool2_members_dict = [self.provider_member3_dict, self.provider_member4_dict] self.provider_member1 = driver_dm.Member(**self.provider_member1_dict) self.provider_member2 = driver_dm.Member(**self.provider_member2_dict) self.provider_member3 = driver_dm.Member(**self.provider_member3_dict) self.provider_member4 = driver_dm.Member(**self.provider_member4_dict) self.provider_pool1_members = [self.provider_member1, self.provider_member2] self.provider_pool2_members = [self.provider_member3, self.provider_member4] # Setup test pools self.test_pool1_dict = { lib_consts.ID: self.pool1_id, lib_consts.NAME: 'pool1', lib_consts.DESCRIPTION: 'Pool 1', constants.LOAD_BALANCER_ID: self.lb_id, lib_consts.PROJECT_ID: self.project_id, lib_consts.PROTOCOL: lib_consts.PROTOCOL_TCP, lib_consts.LB_ALGORITHM: lib_consts.LB_ALGORITHM_ROUND_ROBIN, lib_consts.MEMBERS: self.test_pool1_members_dict, constants.HEALTH_MONITOR: self.test_hm1_dict, lib_consts.SESSION_PERSISTENCE: { lib_consts.TYPE: lib_consts.LB_ALGORITHM_SOURCE_IP}, lib_consts.LISTENERS: [], lib_consts.L7POLICIES: [], constants.TLS_CERTIFICATE_ID: self.pool_sni_container_ref, constants.CA_TLS_CERTIFICATE_ID: self.pool_ca_container_ref, constants.CRL_CONTAINER_ID: self.pool_crl_container_ref, lib_consts.TLS_ENABLED: True, lib_consts.TLS_CIPHERS: None} self.test_pool1_dict.update(self._common_test_dict) self.test_pool2_dict = copy.deepcopy(self.test_pool1_dict) self.test_pool2_dict[lib_consts.ID] = self.pool2_id self.test_pool2_dict[lib_consts.NAME] = 'pool2' self.test_pool2_dict[lib_consts.DESCRIPTION] = 'Pool 2' self.test_pool2_dict[ lib_consts.MEMBERS] = self.test_pool2_members_dict del self.test_pool2_dict[constants.TLS_CERTIFICATE_ID] del self.test_pool2_dict[constants.CA_TLS_CERTIFICATE_ID] del self.test_pool2_dict[constants.CRL_CONTAINER_ID] self.test_pools = [self.test_pool1_dict, self.test_pool2_dict] self.db_pool1 = data_models.Pool(**self.test_pool1_dict) self.db_pool1.health_monitor = self.db_hm1 self.db_pool1.members = self.db_pool1_members self.db_pool2 = data_models.Pool(**self.test_pool2_dict) self.db_pool2.health_monitor = self.db_hm2 self.db_pool2.members = self.db_pool2_members self.test_db_pools = [self.db_pool1, self.db_pool2] pool_cert = data_models.TLSContainer(certificate='pool cert') pool_ca_file_content = 'X509 POOL CA CERT FILE' pool_crl_file_content = 'X509 POOL CRL FILE' self.provider_pool1_dict = { lib_consts.ADMIN_STATE_UP: True, lib_consts.DESCRIPTION: 'Pool 1', lib_consts.HEALTHMONITOR: self.provider_hm1_dict, lib_consts.LB_ALGORITHM: lib_consts.LB_ALGORITHM_ROUND_ROBIN, lib_consts.LOADBALANCER_ID: self.lb_id, lib_consts.MEMBERS: 
self.provider_pool1_members_dict, lib_consts.NAME: 'pool1', lib_consts.POOL_ID: self.pool1_id, lib_consts.PROJECT_ID: self.project_id, lib_consts.PROTOCOL: lib_consts.PROTOCOL_TCP, lib_consts.SESSION_PERSISTENCE: { lib_consts.TYPE: lib_consts.LB_ALGORITHM_SOURCE_IP}, lib_consts.TLS_CONTAINER_REF: self.pool_sni_container_ref, lib_consts.TLS_CONTAINER_DATA: pool_cert.to_dict(), lib_consts.CA_TLS_CONTAINER_REF: self.pool_ca_container_ref, lib_consts.CA_TLS_CONTAINER_DATA: pool_ca_file_content, lib_consts.CRL_CONTAINER_REF: self.pool_crl_container_ref, lib_consts.CRL_CONTAINER_DATA: pool_crl_file_content, lib_consts.TLS_ENABLED: True, lib_consts.TLS_CIPHERS: None, lib_consts.TLS_VERSIONS: None, } self.provider_pool2_dict = copy.deepcopy(self.provider_pool1_dict) self.provider_pool2_dict[lib_consts.POOL_ID] = self.pool2_id self.provider_pool2_dict[lib_consts.NAME] = 'pool2' self.provider_pool2_dict[lib_consts.DESCRIPTION] = 'Pool 2' self.provider_pool2_dict[ lib_consts.MEMBERS] = self.provider_pool2_members_dict self.provider_pool2_dict[ lib_consts.HEALTHMONITOR] = self.provider_hm2_dict self.provider_pool2_dict[lib_consts.TLS_CONTAINER_REF] = None del self.provider_pool2_dict[lib_consts.TLS_CONTAINER_DATA] self.provider_pool2_dict[lib_consts.CA_TLS_CONTAINER_REF] = None del self.provider_pool2_dict[lib_consts.CA_TLS_CONTAINER_DATA] self.provider_pool2_dict[lib_consts.CRL_CONTAINER_REF] = None del self.provider_pool2_dict[lib_consts.CRL_CONTAINER_DATA] self.provider_pool1 = driver_dm.Pool(**self.provider_pool1_dict) self.provider_pool1.members = self.provider_pool1_members self.provider_pool1.healthmonitor = self.provider_hm1 self.provider_pool2 = driver_dm.Pool(**self.provider_pool2_dict) self.provider_pool2.members = self.provider_pool2_members self.provider_pool2.healthmonitor = self.provider_hm2 self.provider_pools = [self.provider_pool1, self.provider_pool2] # Setup L7Rules self.test_l7rule1_dict = { lib_consts.ID: self.l7rule1_id, lib_consts.L7POLICY_ID: self.l7policy1_id, lib_consts.TYPE: lib_consts.L7RULE_TYPE_PATH, lib_consts.COMPARE_TYPE: lib_consts.L7RULE_COMPARE_TYPE_EQUAL_TO, lib_consts.KEY: 'fake_key', lib_consts.VALUE: 'fake_value', lib_consts.PROJECT_ID: self.project_id, constants.L7POLICY: None, lib_consts.INVERT: False} self.test_l7rule1_dict.update(self._common_test_dict) self.test_l7rule2_dict = copy.deepcopy(self.test_l7rule1_dict) self.test_l7rule2_dict[lib_consts.ID] = self.l7rule2_id self.test_l7rules = [self.test_l7rule1_dict, self.test_l7rule2_dict] self.db_l7Rule1 = data_models.L7Rule(**self.test_l7rule1_dict) self.db_l7Rule2 = data_models.L7Rule(**self.test_l7rule2_dict) self.db_l7Rules = [self.db_l7Rule1, self.db_l7Rule2] self.provider_l7rule1_dict = { lib_consts.ADMIN_STATE_UP: True, lib_consts.COMPARE_TYPE: lib_consts.L7RULE_COMPARE_TYPE_EQUAL_TO, lib_consts.INVERT: False, lib_consts.KEY: 'fake_key', lib_consts.L7POLICY_ID: self.l7policy1_id, lib_consts.L7RULE_ID: self.l7rule1_id, lib_consts.TYPE: lib_consts.L7RULE_TYPE_PATH, lib_consts.PROJECT_ID: self.project_id, lib_consts.VALUE: 'fake_value'} self.provider_l7rule2_dict = copy.deepcopy(self.provider_l7rule1_dict) self.provider_l7rule2_dict[lib_consts.L7RULE_ID] = self.l7rule2_id self.provider_l7rules_dicts = [self.provider_l7rule1_dict, self.provider_l7rule2_dict] self.provider_l7rule1 = driver_dm.L7Rule(**self.provider_l7rule1_dict) self.provider_l7rule2 = driver_dm.L7Rule(**self.provider_l7rule2_dict) self.provider_rules = [self.provider_l7rule1, self.provider_l7rule2] # Setup L7Policies 
self.test_l7policy1_dict = { lib_consts.ID: self.l7policy1_id, lib_consts.NAME: 'l7policy_1', lib_consts.DESCRIPTION: 'L7policy 1', lib_consts.LISTENER_ID: self.listener1_id, lib_consts.ACTION: lib_consts.L7POLICY_ACTION_REDIRECT_TO_URL, lib_consts.REDIRECT_POOL_ID: None, lib_consts.REDIRECT_URL: 'http://example.com/index.html', lib_consts.REDIRECT_PREFIX: None, lib_consts.PROJECT_ID: self.project_id, lib_consts.POSITION: 1, constants.LISTENER: None, constants.REDIRECT_POOL: None, lib_consts.L7RULES: self.test_l7rules, lib_consts.REDIRECT_HTTP_CODE: 302} self.test_l7policy1_dict.update(self._common_test_dict) self.test_l7policy2_dict = copy.deepcopy(self.test_l7policy1_dict) self.test_l7policy2_dict[lib_consts.ID] = self.l7policy2_id self.test_l7policy2_dict[lib_consts.NAME] = 'l7policy_2' self.test_l7policy2_dict[lib_consts.DESCRIPTION] = 'L7policy 2' self.test_l7policies = [self.test_l7policy1_dict, self.test_l7policy2_dict] self.db_l7policy1 = data_models.L7Policy(**self.test_l7policy1_dict) self.db_l7policy2 = data_models.L7Policy(**self.test_l7policy2_dict) self.db_l7policy1.l7rules = self.db_l7Rules self.db_l7policy2.l7rules = self.db_l7Rules self.db_l7policies = [self.db_l7policy1, self.db_l7policy2] self.provider_l7policy1_dict = { lib_consts.ACTION: lib_consts.L7POLICY_ACTION_REDIRECT_TO_URL, lib_consts.ADMIN_STATE_UP: True, lib_consts.DESCRIPTION: 'L7policy 1', lib_consts.L7POLICY_ID: self.l7policy1_id, lib_consts.LISTENER_ID: self.listener1_id, lib_consts.NAME: 'l7policy_1', lib_consts.POSITION: 1, lib_consts.PROJECT_ID: self.project_id, lib_consts.REDIRECT_POOL_ID: None, lib_consts.REDIRECT_URL: 'http://example.com/index.html', lib_consts.REDIRECT_PREFIX: None, lib_consts.RULES: self.provider_l7rules_dicts, lib_consts.REDIRECT_HTTP_CODE: 302 } self.provider_l7policy2_dict = copy.deepcopy( self.provider_l7policy1_dict) self.provider_l7policy2_dict[ lib_consts.L7POLICY_ID] = self.l7policy2_id self.provider_l7policy2_dict[lib_consts.NAME] = 'l7policy_2' self.provider_l7policy2_dict[lib_consts.DESCRIPTION] = 'L7policy 2' self.provider_l7policies_dict = [self.provider_l7policy1_dict, self.provider_l7policy2_dict] self.provider_l7policy1 = driver_dm.L7Policy( **self.provider_l7policy1_dict) self.provider_l7policy1.rules = self.provider_rules self.provider_l7policy2 = driver_dm.L7Policy( **self.provider_l7policy2_dict) self.provider_l7policy2.rules = self.provider_rules self.provider_l7policies = [self.provider_l7policy1, self.provider_l7policy2] # Setup Listeners self.test_listener1_dict = { lib_consts.ID: self.listener1_id, lib_consts.NAME: 'listener_1', lib_consts.DESCRIPTION: 'Listener 1', lib_consts.DEFAULT_POOL_ID: self.pool1_id, constants.LOAD_BALANCER_ID: self.lb_id, lib_consts.PROJECT_ID: self.project_id, lib_consts.PROTOCOL: lib_consts.PROTOCOL_TCP, lib_consts.PROTOCOL_PORT: 90, lib_consts.CONNECTION_LIMIT: 10000, constants.TLS_CERTIFICATE_ID: self.default_tls_container_ref, lib_consts.DEFAULT_POOL: self.test_pool1_dict, constants.SNI_CONTAINERS: [self.sni_container_ref_1, self.sni_container_ref_2], constants.PEER_PORT: 55, lib_consts.L7POLICIES: self.test_l7policies, lib_consts.INSERT_HEADERS: {}, lib_consts.TIMEOUT_CLIENT_DATA: 1000, lib_consts.TIMEOUT_MEMBER_CONNECT: 2000, lib_consts.TIMEOUT_MEMBER_DATA: 3000, lib_consts.TIMEOUT_TCP_INSPECT: 4000, constants.CLIENT_CA_TLS_CERTIFICATE_ID: self.client_ca_tls_certificate_ref, lib_consts.CLIENT_AUTHENTICATION: constants.CLIENT_AUTH_NONE, constants.CLIENT_CRL_CONTAINER_ID: self.client_crl_container_ref, 
lib_consts.ALLOWED_CIDRS: ['192.0.2.0/24', '198.51.100.0/24'], lib_consts.TLS_CIPHERS: constants.CIPHERS_OWASP_SUITE_B } self.test_listener1_dict.update(self._common_test_dict) self.test_listener2_dict = copy.deepcopy(self.test_listener1_dict) self.test_listener2_dict[lib_consts.ID] = self.listener2_id self.test_listener2_dict[lib_consts.NAME] = 'listener_2' self.test_listener2_dict[lib_consts.DESCRIPTION] = 'Listener 2' self.test_listener2_dict[lib_consts.DEFAULT_POOL_ID] = self.pool2_id self.test_listener2_dict[ lib_consts.DEFAULT_POOL] = self.test_pool2_dict del self.test_listener2_dict[lib_consts.L7POLICIES] del self.test_listener2_dict[constants.SNI_CONTAINERS] del self.test_listener2_dict[constants.CLIENT_CA_TLS_CERTIFICATE_ID] del self.test_listener2_dict[constants.CLIENT_CRL_CONTAINER_ID] self.test_listeners = [self.test_listener1_dict, self.test_listener2_dict] self.db_listener1 = data_models.Listener(**self.test_listener1_dict) self.db_listener2 = data_models.Listener(**self.test_listener2_dict) self.db_listener1.default_pool = self.db_pool1 self.db_listener2.default_pool = self.db_pool2 self.db_listener1.l7policies = self.db_l7policies self.db_listener1.sni_containers = [ data_models.SNI(tls_container_id='2'), data_models.SNI(tls_container_id='3')] self.test_db_listeners = [self.db_listener1, self.db_listener2] cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') ca_cert = 'ca cert' crl_file_content = 'X509 CRL FILE' self.provider_listener1_dict = { lib_consts.ADMIN_STATE_UP: True, lib_consts.ALLOWED_CIDRS: ['192.0.2.0/24', '198.51.100.0/24'], lib_consts.CONNECTION_LIMIT: 10000, lib_consts.DEFAULT_POOL: self.provider_pool1_dict, lib_consts.DEFAULT_POOL_ID: self.pool1_id, lib_consts.DEFAULT_TLS_CONTAINER_DATA: cert1.to_dict(), lib_consts.DEFAULT_TLS_CONTAINER_REF: self.default_tls_container_ref, lib_consts.DESCRIPTION: 'Listener 1', lib_consts.INSERT_HEADERS: {}, lib_consts.L7POLICIES: self.provider_l7policies_dict, lib_consts.LISTENER_ID: self.listener1_id, lib_consts.LOADBALANCER_ID: self.lb_id, lib_consts.NAME: 'listener_1', lib_consts.PROJECT_ID: self.project_id, lib_consts.PROTOCOL: lib_consts.PROTOCOL_TCP, lib_consts.PROTOCOL_PORT: 90, lib_consts.SNI_CONTAINER_DATA: [cert2.to_dict(), cert3.to_dict()], lib_consts.SNI_CONTAINER_REFS: [self.sni_container_ref_1, self.sni_container_ref_2], lib_consts.TIMEOUT_CLIENT_DATA: 1000, lib_consts.TIMEOUT_MEMBER_CONNECT: 2000, lib_consts.TIMEOUT_MEMBER_DATA: 3000, lib_consts.TIMEOUT_TCP_INSPECT: 4000, lib_consts.CLIENT_CA_TLS_CONTAINER_REF: self.client_ca_tls_certificate_ref, lib_consts.CLIENT_CA_TLS_CONTAINER_DATA: ca_cert, lib_consts.CLIENT_AUTHENTICATION: constants.CLIENT_AUTH_NONE, lib_consts.CLIENT_CRL_CONTAINER_REF: self.client_crl_container_ref, lib_consts.CLIENT_CRL_CONTAINER_DATA: crl_file_content, lib_consts.TLS_CIPHERS: constants.CIPHERS_OWASP_SUITE_B, lib_consts.TLS_VERSIONS: None } self.provider_listener2_dict = copy.deepcopy( self.provider_listener1_dict) self.provider_listener2_dict[ lib_consts.LISTENER_ID] = self.listener2_id self.provider_listener2_dict[lib_consts.NAME] = 'listener_2' self.provider_listener2_dict[lib_consts.DESCRIPTION] = 'Listener 2' self.provider_listener2_dict[ lib_consts.DEFAULT_POOL_ID] = self.pool2_id self.provider_listener2_dict[ lib_consts.DEFAULT_POOL] = self.provider_pool2_dict del self.provider_listener2_dict[lib_consts.L7POLICIES] self.provider_listener2_dict[
lib_consts.CLIENT_CA_TLS_CONTAINER_REF] = None del self.provider_listener2_dict[ lib_consts.CLIENT_CA_TLS_CONTAINER_DATA] self.provider_listener2_dict[lib_consts.CLIENT_AUTHENTICATION] = ( constants.CLIENT_AUTH_NONE) self.provider_listener2_dict[ lib_consts.CLIENT_CRL_CONTAINER_REF] = None del self.provider_listener2_dict[lib_consts.CLIENT_CRL_CONTAINER_DATA] self.provider_listener1 = driver_dm.Listener( **self.provider_listener1_dict) self.provider_listener2 = driver_dm.Listener( **self.provider_listener2_dict) self.provider_listener1.default_pool = self.provider_pool1 self.provider_listener2.default_pool = self.provider_pool2 self.provider_listener1.l7policies = self.provider_l7policies self.provider_listeners = [self.provider_listener1, self.provider_listener2] self.test_vip_dict = {constants.IP_ADDRESS: self.ip_address, constants.NETWORK_ID: self.network_id, constants.PORT_ID: self.port_id, lib_consts.SUBNET_ID: self.subnet_id, constants.QOS_POLICY_ID: self.qos_policy_id, constants.OCTAVIA_OWNED: None} self.provider_vip_dict = { lib_consts.VIP_ADDRESS: self.ip_address, lib_consts.VIP_NETWORK_ID: self.network_id, lib_consts.VIP_PORT_ID: self.port_id, lib_consts.VIP_SUBNET_ID: self.subnet_id, lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id, constants.OCTAVIA_OWNED: None} self.db_vip = data_models.Vip( ip_address=self.ip_address, network_id=self.network_id, port_id=self.port_id, subnet_id=self.subnet_id, qos_policy_id=self.qos_policy_id) self.test_loadbalancer1_dict = { lib_consts.NAME: self.lb_name, lib_consts.DESCRIPTION: self.lb_description, constants.ENABLED: True, lib_consts.PROVISIONING_STATUS: lib_consts.PENDING_UPDATE, lib_consts.OPERATING_STATUS: lib_consts.OFFLINE, constants.TOPOLOGY: constants.TOPOLOGY_ACTIVE_STANDBY, constants.VRRP_GROUP: None, constants.PROVIDER: constants.AMPHORA, constants.SERVER_GROUP_ID: uuidutils.generate_uuid(), lib_consts.PROJECT_ID: self.project_id, lib_consts.ID: self.lb_id, constants.FLAVOR_ID: self.flavor_id, constants.TAGS: ['test_tag']} self.provider_loadbalancer_dict = { lib_consts.ADDITIONAL_VIPS: None, lib_consts.ADMIN_STATE_UP: True, lib_consts.DESCRIPTION: self.lb_description, lib_consts.FLAVOR: {"something": "else"}, lib_consts.LISTENERS: None, lib_consts.LOADBALANCER_ID: self.lb_id, lib_consts.NAME: self.lb_name, lib_consts.POOLS: None, lib_consts.PROJECT_ID: self.project_id, lib_consts.VIP_ADDRESS: self.ip_address, lib_consts.VIP_NETWORK_ID: self.network_id, lib_consts.VIP_PORT_ID: self.port_id, lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id, lib_consts.VIP_SUBNET_ID: self.subnet_id} self.provider_loadbalancer_tree_dict = { lib_consts.ADDITIONAL_VIPS: None, lib_consts.ADMIN_STATE_UP: True, lib_consts.AVAILABILITY_ZONE: None, lib_consts.DESCRIPTION: self.lb_description, lib_consts.FLAVOR: {"something": "else"}, lib_consts.LISTENERS: None, lib_consts.LOADBALANCER_ID: self.lb_id, lib_consts.NAME: self.lb_name, lib_consts.POOLS: None, lib_consts.PROJECT_ID: self.project_id, lib_consts.VIP_ADDRESS: self.ip_address, lib_consts.VIP_NETWORK_ID: self.network_id, lib_consts.VIP_PORT_ID: self.port_id, lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id, lib_consts.VIP_SUBNET_ID: self.subnet_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/common/sample_network_data.py0000664000175000017500000002736000000000000023657 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections def create_iproute_ipv4_address(ip_address, broadcast_address, interface_name): """Returns a netlink/iproute (pyroute2) IPv4 address.""" Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) return ( {'family': 2, 'prefixlen': 24, 'flags': 0, 'scope': 0, 'index': 2, 'attrs': [('IFA_ADDRESS', ip_address), ('IFA_LOCAL', ip_address), ('IFA_BROADCAST', broadcast_address), ('IFA_LABEL', interface_name), ('IFA_FLAGS', 0), ('IFA_CACHEINFO', {'ifa_preferred': 49256, 'ifa_valid': 49256, 'cstamp': 1961, 'tstamp': 73441020})], 'header': {'length': 88, 'type': 20, 'flags': 2, 'sequence_number': 258, 'pid': 7590, 'error': None, 'stats': Stats(qsize=0, delta=0, delay=0)}, 'event': 'RTM_NEWADDR'},) def create_iproute_ipv6_address(ip_address, interface_name): """Returns a netlink/iproute (pyroute2) IPv6 address.""" Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) return ( {'family': 10, 'prefixlen': 64, 'flags': 0, 'scope': 0, 'index': 2, 'attrs': [('IFA_CACHEINFO', {'ifa_preferred': 604503, 'ifa_valid': 2591703, 'cstamp': 2038, 'tstamp': 77073215}), ('IFA_ADDRESS', '2001:db8:ffff:ffff:ffff:ffff:ffff:ffff'), ('IFA_FLAGS', 768)], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 257, 'pid': 7590, 'error': None, 'stats': Stats(qsize=0, delta=0, delay=0)}, 'event': 'RTM_NEWADDR'}, {'family': 10, 'prefixlen': 64, 'flags': 0, 'scope': 0, 'index': 2, 'attrs': [('IFA_CACHEINFO', {'ifa_preferred': 604503, 'ifa_valid': 2591703, 'cstamp': 2038, 'tstamp': 77073215}), ('IFA_ADDRESS', ip_address), ('IFA_FLAGS', 768)], 'header': {'length': 72, 'type': 20, 'flags': 2, 'sequence_number': 257, 'pid': 7590, 'error': None, 'stats': Stats(qsize=0, delta=0, delay=0)}, 'event': 'RTM_NEWADDR'},) def create_iproute_interface(interface_name): """Returns a netlink/iproute (pyroute2) interface.""" Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) return [{ 'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, 'change': 0, 'attrs': [('IFLA_TXQLEN', 1000), ('IFLA_IFNAME', interface_name), ('IFLA_OPERSTATE', 'UP'), ('IFLA_LINKMODE', 0), ('IFLA_MTU', 1500), ('IFLA_GROUP', 0), ('IFLA_PROMISCUITY', 0), ('IFLA_NUM_TX_QUEUES', 1), ('IFLA_GSO_MAX_SEGS', 65535), ('IFLA_GSO_MAX_SIZE', 65536), ('IFLA_NUM_RX_QUEUES', 1), ('IFLA_CARRIER', 1), ('IFLA_QDISC', 'fq_codel'), ('IFLA_CARRIER_CHANGES', 2), ('IFLA_PROTO_DOWN', 0), ('IFLA_CARRIER_UP_COUNT', 1), ('IFLA_CARRIER_DOWN_COUNT', 1), ('IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}), ('IFLA_ADDRESS', '52:54:00:cf:37:9e'), ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'), ('IFLA_STATS64', { 'rx_packets': 756091, 'tx_packets': 780292, 'rx_bytes': 234846748, 'tx_bytes': 208583687, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 
'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}), ('IFLA_STATS', { 'rx_packets': 756091, 'tx_packets': 780292, 'rx_bytes': 234846748, 'tx_bytes': 208583687, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}), ('IFLA_XDP', '05:00:02:00:00:00:00:00'), ('IFLA_AF_SPEC', { 'attrs': [ ('AF_INET', { 'dummy': 65664, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 1, 'secure_redirects': 1, 'send_redirects': 1, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 1, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 0, 'nopolicy': 0, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 0, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}), ('AF_INET6', { 'attrs': [('IFLA_INET6_FLAGS', 2147483648), ('IFLA_INET6_CACHEINFO', { 'max_reasm_len': 65535, 'tstamp': 1859, 'reachable_time': 30708, 'retrans_time': 1000}), ('IFLA_INET6_CONF', { 'forwarding': 1, 'hop_limit': 64, 'mtu': 1500, 'accept_ra': 2, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 0, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 1, 'force_tllao': 0, 'ndisc_notify': 0}), ('IFLA_INET6_STATS', { 'num': 37, 'inpkts': 57817, 'inoctets': 144065857, 'indelivers': 36758, 'outforwdatagrams': 0, 'outpkts': 35062, 'outoctets': 4796485, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 23214, 'outmcastpkts': 6546, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 2255059, 'outmcastoctets': 589090, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 57860, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}), ('IFLA_INET6_ICMP6STATS', { 'num': 6, 'inmsgs': 2337, 'inerrors': 0, 'outmsgs': 176, 'outerrors': 0, 'csumerrors': 0}), ('IFLA_INET6_TOKEN', '::'), ('IFLA_INET6_ADDR_GEN_MODE', 0)]})]})], 'header': {'length': 1304, 'type': 16, 'flags': 0, 'sequence_number': 261, 'pid': 7590, 'error': None, 'stats': Stats(qsize=0, delta=0, delay=0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/common/utils.py0000664000175000017500000000313500000000000020766 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hewlett Packard Enterprise 
Development Company LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import fixtures # Borrowed from neutron # https://review.opendev.org/#/c/232716/ class OpenFixture(fixtures.Fixture): """Mock access to a specific file while preserving open for others.""" def __init__(self, filepath, contents=''): self.path = filepath self.contents = contents def _setUp(self): self.mock_open = mock.mock_open(read_data=self.contents) # work around for https://bugs.python.org/issue21258 self.mock_open.return_value.__iter__ = ( lambda self: iter(self.readline, '')) self._orig_open = open def replacement_open(name, *args, **kwargs): if name == self.path: return self.mock_open(name, *args, **kwargs) return self._orig_open(name, *args, **kwargs) self._patch = mock.patch('builtins.open', new=replacement_open) self._patch.start() self.addCleanup(self._patch.stop) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4062166 octavia-6.2.2/octavia/tests/functional/0000775000175000017500000000000000000000000020124 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/__init__.py0000664000175000017500000000107400000000000022237 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4062166 octavia-6.2.2/octavia/tests/functional/amphorae/0000775000175000017500000000000000000000000021720 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/amphorae/__init__.py0000664000175000017500000000107400000000000024033 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
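# The OpenFixture helper above (octavia/tests/common/utils.py) is used
# throughout the functional tests below. An illustrative sketch of a
# typical use, with a hypothetical path and contents:
#
#     self.useFixture(test_utils.OpenFixture('/fake/path.cfg', 'fake data'))
#     with open('/fake/path.cfg') as f:  # served by the patched open()
#         assert f.read() == 'fake data'
#     open('/etc/hosts')  # any other path falls through to the real open()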
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4062166 octavia-6.2.2/octavia/tests/functional/amphorae/backend/0000775000175000017500000000000000000000000023307 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/amphorae/backend/__init__.py0000664000175000017500000000107400000000000025422 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4062166 octavia-6.2.2/octavia/tests/functional/amphorae/backend/agent/0000775000175000017500000000000000000000000024405 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/amphorae/backend/agent/__init__.py0000664000175000017500000000107400000000000026520 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4062166 octavia-6.2.2/octavia/tests/functional/amphorae/backend/agent/api_server/0000775000175000017500000000000000000000000026544 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/amphorae/backend/agent/api_server/__init__.py0000664000175000017500000000107400000000000030657 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/amphorae/backend/agent/api_server/test_keepalivedlvs.py0000664000175000017500000004643500000000000033027 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import stat import subprocess from unittest import mock import flask from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.amphorae.backends.agent.api_server import keepalivedlvs from octavia.amphorae.backends.agent.api_server import server from octavia.amphorae.backends.agent.api_server import util from octavia.common import constants as consts from octavia.tests.common import utils as test_utils from octavia.tests.unit import base class KeepalivedLvsTestCase(base.TestCase): FAKE_ID = uuidutils.generate_uuid() LISTENER_ID = 'listener-1111-1111-1111-listenerid00' POOL_ID = 'poolpool-1111-1111-1111-poolid000000' MEMBER_ID1 = 'memberid-1111-1111-1111-memberid1111' MEMBER_ID2 = 'memberid-2222-2222-2222-memberid2222' HEALTHMONITOR_ID = 'hmidhmid-1111-1111-1111-healthmonito' NORMAL_CFG_CONTENT = ( "# Configuration for Listener %(listener_id)s\n\n" "net_namespace haproxy-amphora\n\n" "virtual_server 10.0.0.2 80 {\n" " lb_algo wrr\n" " lb_kind NAT\n" " protocol udp\n" " delay_loop 30\n" " delay_before_retry 31\n" " retry 3\n\n\n" " # Configuration for Pool %(pool_id)s\n" " # Configuration for HealthMonitor %(hm_id)s\n" " # Configuration for Member %(member1_id)s\n" " real_server 10.0.0.99 82 {\n" " weight 13\n" " inhibit_on_failure\n" " uthreshold 98\n" " persistence_timeout 33\n" " persistence_granularity 255.255.0.0\n" " delay_before_retry 31\n" " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.99 82\"\n" " misc_timeout 30\n" " misc_dynamic\n" " }\n" " }\n\n" " # Configuration for Member %(member2_id)s\n" " real_server 10.0.0.98 82 {\n" " weight 13\n" " inhibit_on_failure\n" " uthreshold 98\n" " persistence_timeout 33\n" " persistence_granularity 255.255.0.0\n" " delay_before_retry 31\n" " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.98 82\"\n" " misc_timeout 30\n" " misc_dynamic\n" " }\n" " }\n\n" "}\n\n") % {'listener_id': LISTENER_ID, 'pool_id': POOL_ID, 'hm_id': HEALTHMONITOR_ID, 'member1_id': MEMBER_ID1, 'member2_id': MEMBER_ID2} PROC_CONTENT = ( "IP Virtual Server version 1.2.1 (size=4096)\n" "Prot LocalAddress:Port Scheduler Flags\n" " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n" "UDP 0A000002:0050 sh\n" " -> 0A000063:0052 Masq 13 1 0\n" " -> 0A000062:0052 Masq 13 1 0\n" ) NORMAL_PID_CONTENT = "1988" TEST_URL = server.PATH_PREFIX + '/listeners/%s/%s/udp_listener' def setUp(self): super(KeepalivedLvsTestCase, self).setUp() self.app = flask.Flask(__name__) self.client = self.app.test_client() self._ctx = 
self.app.test_request_context() self._ctx.push() self.test_keepalivedlvs = keepalivedlvs.KeepalivedLvs() self.app.add_url_rule( rule=self.TEST_URL % ('', ''), view_func=(lambda amphora_id, listener_id: self.test_keepalivedlvs.upload_udp_listener_config( listener_id)), methods=['PUT']) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'run_systemctl_command') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'install_netns_systemd_service') @mock.patch('pyroute2.NetNS', create=True) @mock.patch('shutil.copy2') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSTEMD) @mock.patch('os.chmod') @mock.patch('os.path.exists') @mock.patch('os.makedirs') @mock.patch('os.remove') @mock.patch('subprocess.check_output') def test_upload_udp_listener_config_no_vrrp_check_dir( self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod, m_os_sysinit, m_copy2, mock_netns, mock_install_netns, mock_systemctl): m_exists.side_effect = [False, False, True, True, False, False] cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID) m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open with mock.patch('os.open') as m_open, mock.patch.object(os, 'fdopen', m) as m_fdopen: m_open.side_effect = ['TEST-WRITE-CFG', 'TEST-WRITE-SYSINIT'] res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID), data=self.NORMAL_CFG_CONTENT) mock_install_netns.assert_called_once() systemctl_calls = [ mock.call(consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX), mock.call(consts.ENABLE, 'octavia-keepalivedlvs-%s' % str(self.FAKE_ID)), ] mock_systemctl.assert_has_calls(systemctl_calls) os_mkdir_calls = [ mock.call(util.keepalived_lvs_dir()), mock.call(util.keepalived_backend_check_script_dir()) ] m_os_mkdir.assert_has_calls(os_mkdir_calls) m_os_chmod.assert_called_with( util.keepalived_backend_check_script_path(), stat.S_IEXEC) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH systemd_cfg_path = util.keepalived_lvs_init_path( consts.INIT_SYSTEMD, self.FAKE_ID) m_open_calls = [ mock.call(cfg_path, flags, mode), mock.call(systemd_cfg_path, flags, mode) ] m_open.assert_has_calls(m_open_calls) m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb') m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w') self.assertEqual(200, res.status_code) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_udp_listeners') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_loadbalancers') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'run_systemctl_command') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'install_netns_systemd_service') @mock.patch('pyroute2.NetNS', create=True) @mock.patch('shutil.copy2') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
'get_os_init_system', return_value=consts.INIT_SYSTEMD) @mock.patch('os.chmod') @mock.patch('os.path.exists') @mock.patch('os.makedirs') @mock.patch('os.remove') @mock.patch('subprocess.check_output') def test_upload_udp_listener_config_with_vrrp_check_dir( self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod, m_os_sysinit, m_copy2, mock_netns, mock_install_netns, mock_systemctl, mock_get_lbs, mock_get_udp_listeners): m_exists.side_effect = [False, False, True, True, False, False, False] mock_get_lbs.return_value = [] mock_get_udp_listeners.return_value = [self.FAKE_ID] cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID) m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='controller_worker', loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY) with mock.patch('os.open') as m_open, mock.patch.object(os, 'fdopen', m) as m_fdopen: m_open.side_effect = ['TEST-WRITE-CFG', 'TEST-WRITE-SYSINIT', 'TEST-WRITE-UDP-VRRP-CHECK'] res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID), data=self.NORMAL_CFG_CONTENT) os_mkdir_calls = [ mock.call(util.keepalived_lvs_dir()), mock.call(util.keepalived_backend_check_script_dir()) ] m_os_mkdir.assert_has_calls(os_mkdir_calls) mock_install_netns.assert_called_once() systemctl_calls = [ mock.call(consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX), mock.call(consts.ENABLE, 'octavia-keepalivedlvs-%s' % str(self.FAKE_ID)), ] mock_systemctl.assert_has_calls(systemctl_calls) m_os_chmod.assert_called_with( util.keepalived_backend_check_script_path(), stat.S_IEXEC) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH systemd_cfg_path = util.keepalived_lvs_init_path( consts.INIT_SYSTEMD, self.FAKE_ID) script_path = os.path.join( util.keepalived_check_scripts_dir(), keepalivedlvs.KEEPALIVED_CHECK_SCRIPT_NAME) m_open_calls = [ mock.call(cfg_path, flags, mode), mock.call(systemd_cfg_path, flags, mode), mock.call(script_path, flags, stat.S_IEXEC) ] m_open.assert_has_calls(m_open_calls) m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb') m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w') m_fdopen.assert_any_call('TEST-WRITE-UDP-VRRP-CHECK', 'w') m_os_rm.assert_called_once_with(util.haproxy_check_script_path()) self.assertEqual(200, res.status_code) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'run_systemctl_command') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'install_netns_systemd_service') @mock.patch('shutil.copy2') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
'get_os_init_system', return_value=consts.INIT_SYSTEMD) @mock.patch('os.chmod') @mock.patch('os.path.exists') @mock.patch('os.makedirs') @mock.patch('os.remove') @mock.patch('subprocess.check_output') def test_upload_udp_listener_config_start_service_failure( self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod, m_os_sysinit, m_copy2, mock_install_netns, mock_systemctl): m_exists.side_effect = [False, False, True, True, False] cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID) m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open mock_systemctl.side_effect = [mock.DEFAULT, Exception('boom')] with mock.patch('os.open') as m_open, mock.patch.object(os, 'fdopen', m) as m_fdopen: m_open.side_effect = ['TEST-WRITE-CFG', 'TEST-WRITE-SYSINIT'] res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID), data=self.NORMAL_CFG_CONTENT) os_mkdir_calls = [ mock.call(util.keepalived_lvs_dir()), mock.call(util.keepalived_backend_check_script_dir()) ] m_os_mkdir.assert_has_calls(os_mkdir_calls) mock_install_netns.assert_called_once() systemctl_calls = [ mock.call(consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX), mock.call(consts.ENABLE, 'octavia-keepalivedlvs-%s' % str(self.FAKE_ID)), ] mock_systemctl.assert_has_calls(systemctl_calls) m_os_chmod.assert_called_with( util.keepalived_backend_check_script_path(), stat.S_IEXEC) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH systemd_cfg_path = util.keepalived_lvs_init_path( consts.INIT_SYSTEMD, self.FAKE_ID) m_open_calls = [ mock.call(cfg_path, flags, mode), mock.call(systemd_cfg_path, flags, mode) ] m_open.assert_has_calls(m_open_calls) m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb') m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w') self.assertEqual(500, res.status_code) @mock.patch('subprocess.check_output') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'keepalivedlvs.KeepalivedLvs.' '_check_udp_listener_exists') def test_manage_udp_listener(self, mock_udp_exist, mock_check_output): res = self.test_keepalivedlvs.manage_udp_listener(self.FAKE_ID, 'start') cmd = ("/usr/sbin/service octavia-keepalivedlvs-{listener_id}" " {action}".format(listener_id=self.FAKE_ID, action='start')) mock_check_output.assert_called_once_with(cmd.split(), stderr=subprocess.STDOUT) self.assertEqual(202, res.status_code) res = self.test_keepalivedlvs.manage_udp_listener(self.FAKE_ID, 'restart') self.assertEqual(400, res.status_code) mock_check_output.side_effect = subprocess.CalledProcessError(1, 'blah!') res = self.test_keepalivedlvs.manage_udp_listener(self.FAKE_ID, 'start') self.assertEqual(500, res.status_code) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_udp_listeners', return_value=[LISTENER_ID]) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSTEMD) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
'get_keepalivedlvs_pid', return_value="12345") @mock.patch('subprocess.check_output') @mock.patch('os.remove') @mock.patch('os.path.exists') def test_delete_udp_listener(self, m_exist, m_remove, m_check_output, mget_pid, m_init_sys, mget_udp_listeners): m_exist.return_value = True res = self.test_keepalivedlvs.delete_udp_listener(self.FAKE_ID) cmd1 = ("/usr/sbin/service " "octavia-keepalivedlvs-{0} stop".format(self.FAKE_ID)) cmd2 = ("systemctl disable " "octavia-keepalivedlvs-{list}".format(list=self.FAKE_ID)) calls = [ mock.call(cmd1.split(), stderr=subprocess.STDOUT), mock.call(cmd2.split(), stderr=subprocess.STDOUT) ] m_check_output.assert_has_calls(calls) self.assertEqual(200, res.status_code) @mock.patch.object(keepalivedlvs, "webob") @mock.patch('os.path.exists') def test_delete_udp_listener_not_exist(self, m_exist, m_webob): m_exist.return_value = False self.test_keepalivedlvs.delete_udp_listener(self.FAKE_ID) calls = [ mock.call( json=dict(message='UDP Listener Not Found', details="No UDP listener with UUID: " "{0}".format(self.FAKE_ID)), status=404), mock.call(json={'message': 'OK'}) ] m_webob.Response.assert_has_calls(calls) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_keepalivedlvs_pid', return_value="12345") @mock.patch('subprocess.check_output') @mock.patch('os.path.exists') def test_delete_udp_listener_stop_service_fail(self, m_exist, m_check_output, mget_pid): m_exist.return_value = True m_check_output.side_effect = subprocess.CalledProcessError(1, 'Woops!') res = self.test_keepalivedlvs.delete_udp_listener(self.FAKE_ID) self.assertEqual(500, res.status_code) self.assertEqual({'message': 'Error stopping keepalivedlvs', 'details': None}, res.json) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSVINIT) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_keepalivedlvs_pid', return_value="12345") @mock.patch('subprocess.check_output') @mock.patch('os.remove') @mock.patch('os.path.exists') def test_delete_udp_listener_disable_service_fail(self, m_exist, m_remove, m_check_output, mget_pid, m_init_sys): m_exist.return_value = True m_check_output.side_effect = [True, subprocess.CalledProcessError( 1, 'Woops!')] res = self.test_keepalivedlvs.delete_udp_listener(self.FAKE_ID) self.assertEqual(500, res.status_code) self.assertEqual({ 'message': 'Error disabling ' 'octavia-keepalivedlvs-%s service' % self.FAKE_ID, 'details': None}, res.json) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_keepalivedlvs_pid', return_value="12345") @mock.patch('subprocess.check_output') @mock.patch('os.remove') @mock.patch('os.path.exists') def test_delete_udp_listener_unsupported_sysinit(self, m_exist, m_remove, m_check_output, mget_pid, m_init_sys): m_exist.return_value = True self.assertRaises( util.UnknownInitError, self.test_keepalivedlvs.delete_udp_listener, self.FAKE_ID) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py0000664000175000017500000037770600000000000031507 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import os import random import socket import stat import subprocess from unittest import mock import fixtures from oslo_config import fixture as oslo_fixture from oslo_serialization import jsonutils from oslo_utils import uuidutils from octavia.amphorae.backends.agent import api_server from octavia.amphorae.backends.agent.api_server import certificate_update from octavia.amphorae.backends.agent.api_server import server from octavia.amphorae.backends.agent.api_server import util from octavia.common import config from octavia.common import constants as consts from octavia.common import utils as octavia_utils from octavia.tests.common import utils as test_utils import octavia.tests.unit.base as base AMP_AGENT_CONF_PATH = '/etc/octavia/amphora-agent.conf' RANDOM_ERROR = b'random error' OK = dict(message='OK') FAKE_INTERFACE = 'eth33' class TestServerTestCase(base.TestCase): app = None def setUp(self): super(TestServerTestCase, self).setUp() self.conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia') self.conf.config(group="controller_worker", loadbalancer_topology=consts.TOPOLOGY_SINGLE) self.conf.load_raw_values(project='fake_project') self.conf.load_raw_values(prog='fake_prog') self.useFixture(fixtures.MockPatch( 'oslo_config.cfg.find_config_files', return_value=[AMP_AGENT_CONF_PATH])) with mock.patch('distro.id', return_value='ubuntu'): self.ubuntu_test_server = server.Server() self.ubuntu_app = self.ubuntu_test_server.app.test_client() with mock.patch('distro.id', return_value='centos'): self.centos_test_server = server.Server() self.centos_app = self.centos_test_server.app.test_client() @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSTEMD) def test_ubuntu_haproxy_systemd(self, mock_init_system): self._test_haproxy(consts.INIT_SYSTEMD, consts.UBUNTU, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSTEMD) def test_centos_haproxy_systemd(self, mock_init_system): self._test_haproxy(consts.INIT_SYSTEMD, consts.CENTOS, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSVINIT) def test_ubuntu_haproxy_sysvinit(self, mock_init_system): self._test_haproxy(consts.INIT_SYSVINIT, consts.UBUNTU, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_UPSTART) def test_ubuntu_haproxy_upstart(self, mock_init_system): self._test_haproxy(consts.INIT_UPSTART, consts.UBUNTU, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.' 
'haproxy_compatibility.get_haproxy_versions') @mock.patch('os.path.exists') @mock.patch('os.makedirs') @mock.patch('os.rename') @mock.patch('subprocess.check_output') def _test_haproxy(self, init_system, distro, mock_init_system, mock_subprocess, mock_rename, mock_makedirs, mock_exists, mock_get_version): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) mock_get_version.return_value = [1, 6] flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mock_exists.return_value = True file_name = '/var/lib/octavia/123/haproxy.cfg.new' m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open # happy case upstart file exists with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen, mock.patch( 'distro.id') as mock_distro_id: mock_open.return_value = 123 mock_distro_id.return_value = distro if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') mode = stat.S_IRUSR | stat.S_IWUSR mock_open.assert_called_with(file_name, flags, mode) mock_fdopen.assert_called_with(123, 'w') self.assertEqual(202, rv.status_code) m().write.assert_called_once_with('test') mock_subprocess.assert_any_call( "haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format( config_file=file_name, haproxy_ug=consts.HAPROXY_USER_GROUP_CFG, peer=(octavia_utils. base64_sha1_string('amp_123').rstrip('='))).split(), stderr=-2) mock_rename.assert_called_with( '/var/lib/octavia/123/haproxy.cfg.new', '/var/lib/octavia/123/haproxy.cfg') if init_system == consts.INIT_SYSTEMD: mock_subprocess.assert_any_call( "systemctl enable haproxy-123".split(), stderr=subprocess.STDOUT) elif init_system == consts.INIT_SYSVINIT: mock_subprocess.assert_any_call( "insserv /etc/init.d/haproxy-123".split(), stderr=subprocess.STDOUT) else: self.assertIn(init_system, consts.VALID_INIT_SYSTEMS) # exception writing m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open m.side_effect = IOError() # open crashes with mock.patch('os.open'), mock.patch.object( os, 'fdopen', m), mock.patch('distro.id') as mock_distro_id: mock_distro_id.return_value = distro if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') self.assertEqual(500, rv.status_code) # check if files get created mock_exists.return_value = False if init_system == consts.INIT_SYSTEMD: init_path = consts.SYSTEMD_DIR + '/haproxy-123.service' elif init_system == consts.INIT_UPSTART: init_path = consts.UPSTART_DIR + '/haproxy-123.conf' elif init_system == consts.INIT_SYSVINIT: init_path = consts.SYSVINIT_DIR + '/haproxy-123' else: self.assertIn(init_system, consts.VALID_INIT_SYSTEMS) m = self.useFixture(test_utils.OpenFixture(init_path)).mock_open # happy case upstart file exists with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen, mock.patch( 'distro.id') as mock_distro_id: mock_open.return_value = 123 mock_distro_id.return_value = distro if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') 
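# The assertions below pin down how the agent creates a missing init
# script: a systemd unit is written world-readable (0644) while
# sysvinit/upstart scripts also get the execute bits (0755), and in both
# cases the rendered content is written through the mocked fdopen handle.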
self.assertEqual(202, rv.status_code) if init_system == consts.INIT_SYSTEMD: mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) else: mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) mock_open.assert_called_with(init_path, flags, mode) mock_fdopen.assert_called_with(123, 'w') handle = mock_fdopen() handle.write.assert_any_call('test') # skip the template stuff mock_makedirs.assert_called_with('/var/lib/octavia/123') # unhappy case haproxy check fails mock_exists.return_value = True mock_subprocess.side_effect = [subprocess.CalledProcessError( 7, 'test', RANDOM_ERROR)] with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen, mock.patch( 'distro.id') as mock_distro_id: mock_open.return_value = 123 mock_distro_id.return_value = distro if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') self.assertEqual(400, rv.status_code) self.assertEqual( {'message': 'Invalid request', u'details': u'random error'}, jsonutils.loads(rv.data.decode('utf-8'))) mode = stat.S_IRUSR | stat.S_IWUSR mock_open.assert_called_with(file_name, flags, mode) mock_fdopen.assert_called_with(123, 'w') handle = mock_fdopen() handle.write.assert_called_with('test') mock_subprocess.assert_called_with( "haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format( config_file=file_name, haproxy_ug=consts.HAPROXY_USER_GROUP_CFG, peer=(octavia_utils. base64_sha1_string('amp_123').rstrip('='))).split(), stderr=-2) mock_rename.assert_called_with( '/var/lib/octavia/123/haproxy.cfg.new', '/var/lib/octavia/123/haproxy.cfg.new-failed') # unhappy path with bogus init system mock_init_system.return_value = 'bogus' with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen, mock.patch( 'distro.id') as mock_distro_id: mock_open.return_value = 123 mock_distro_id.return_value = distro if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') elif distro == consts.CENTOS: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/amp_123/123/haproxy', data='test') self.assertEqual(500, rv.status_code) def test_ubuntu_start(self): self._test_start(consts.UBUNTU) def test_centos_start(self): self._test_start(consts.CENTOS) @mock.patch('os.listdir') @mock.patch('os.path.exists') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
'vrrp_check_script_update') @mock.patch('subprocess.check_output') def _test_start(self, distro, mock_subprocess, mock_vrrp, mock_exists, mock_listdir): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/123/error') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/123/error') self.assertEqual(400, rv.status_code) self.assertEqual( {'message': 'Invalid Request', 'details': 'Unknown action: error', }, jsonutils.loads(rv.data.decode('utf-8'))) mock_exists.reset_mock() mock_exists.return_value = False if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/123/start') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/123/start') self.assertEqual(404, rv.status_code) self.assertEqual( {'message': 'Loadbalancer Not Found', 'details': 'No loadbalancer with UUID: 123'}, jsonutils.loads(rv.data.decode('utf-8'))) mock_exists.assert_called_with('/var/lib/octavia') mock_exists.return_value = True mock_listdir.return_value = ['123'] if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/123/start') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/123/start') self.assertEqual(202, rv.status_code) self.assertEqual( {'message': 'OK', 'details': 'Configuration file is valid\nhaproxy daemon for' ' 123 started'}, jsonutils.loads(rv.data.decode('utf-8'))) mock_subprocess.assert_called_with( ['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2) mock_exists.return_value = True mock_subprocess.side_effect = subprocess.CalledProcessError( 7, 'test', RANDOM_ERROR) if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/123/start') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/123/start') self.assertEqual(500, rv.status_code) self.assertEqual( { 'message': 'Error starting haproxy', 'details': RANDOM_ERROR.decode('utf-8'), }, jsonutils.loads(rv.data.decode('utf-8'))) mock_subprocess.assert_called_with( ['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2) def test_ubuntu_reload(self): self._test_reload(consts.UBUNTU) def test_centos_reload(self): self._test_reload(consts.CENTOS) @mock.patch('os.listdir') @mock.patch('os.path.exists') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'vrrp_check_script_update') @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' 
'Loadbalancer._check_haproxy_status') @mock.patch('subprocess.check_output') def _test_reload(self, distro, mock_subprocess, mock_haproxy_status, mock_vrrp, mock_exists, mock_listdir): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) # Process running so reload mock_exists.return_value = True mock_listdir.return_value = ['123'] mock_haproxy_status.return_value = consts.ACTIVE if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/123/reload') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/123/reload') self.assertEqual(202, rv.status_code) self.assertEqual( {'message': 'OK', 'details': 'Listener 123 reloaded'}, jsonutils.loads(rv.data.decode('utf-8'))) mock_subprocess.assert_called_with( ['/usr/sbin/service', 'haproxy-123', 'reload'], stderr=-2) # Process not running so start mock_exists.return_value = True mock_haproxy_status.return_value = consts.OFFLINE if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/loadbalancer/123/reload') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/loadbalancer/123/reload') self.assertEqual(202, rv.status_code) self.assertEqual( {'message': 'OK', 'details': 'Configuration file is valid\nhaproxy daemon for' ' 123 started'}, jsonutils.loads(rv.data.decode('utf-8'))) mock_subprocess.assert_called_with( ['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2) def test_ubuntu_info(self): self._test_info(consts.UBUNTU) def test_centos_info(self): self._test_info(consts.CENTOS) @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' 'AmphoraInfo._get_extend_body_from_udp_driver', return_value={}) @mock.patch('socket.gethostname') @mock.patch('subprocess.check_output') def _test_info(self, distro, mock_subprocess, mock_hostname, mock_get_extend_body): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) mock_hostname.side_effect = ['test-host'] mock_subprocess.side_effect = ['9.9.99-9'] if distro == consts.UBUNTU: rv = self.ubuntu_app.get('/' + api_server.VERSION + '/info') elif distro == consts.CENTOS: rv = self.centos_app.get('/' + api_server.VERSION + '/info') self.assertEqual(200, rv.status_code) self.assertEqual(dict( api_version='1.0', haproxy_version='9.9.99-9', hostname='test-host'), jsonutils.loads(rv.data.decode('utf-8'))) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_protocol_for_lb_object', return_value='TCP') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSTEMD) def test_delete_ubuntu_listener_systemd(self, mock_init_system, mock_get_proto): self._test_delete_listener(consts.INIT_SYSTEMD, consts.UBUNTU, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_protocol_for_lb_object', return_value='TCP') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSTEMD) def test_delete_centos_listener_systemd(self, mock_init_system, mock_get_proto): self._test_delete_listener(consts.INIT_SYSTEMD, consts.CENTOS, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_protocol_for_lb_object', return_value='TCP') @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSVINIT) def test_delete_ubuntu_listener_sysvinit(self, mock_init_system, mock_get_proto): self._test_delete_listener(consts.INIT_SYSVINIT, consts.UBUNTU, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_protocol_for_lb_object', return_value='TCP') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_UPSTART) def test_delete_ubuntu_listener_upstart(self, mock_init_system, mock_get_proto): self._test_delete_listener(consts.INIT_UPSTART, consts.UBUNTU, mock_init_system) @mock.patch('os.listdir') @mock.patch('os.path.exists') @mock.patch('subprocess.check_output') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'vrrp_check_script_update') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_haproxy_pid') @mock.patch('shutil.rmtree') @mock.patch('os.remove') def _test_delete_listener(self, init_system, distro, mock_init_system, mock_remove, mock_rmtree, mock_pid, mock_vrrp, mock_check_output, mock_exists, mock_listdir): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) # no listener mock_exists.return_value = False mock_listdir.return_value = ['123'] if distro == consts.UBUNTU: rv = self.ubuntu_app.delete('/' + api_server.VERSION + '/listeners/123') elif distro == consts.CENTOS: rv = self.centos_app.delete('/' + api_server.VERSION + '/listeners/123') self.assertEqual(200, rv.status_code) self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) mock_exists.assert_called_once_with('/var/lib/octavia') # service is stopped + no upstart script + no vrrp mock_exists.side_effect = [True, True, False, False, False] if distro == consts.UBUNTU: rv = self.ubuntu_app.delete('/' + api_server.VERSION + '/listeners/123') elif distro == consts.CENTOS: rv = self.centos_app.delete('/' + api_server.VERSION + '/listeners/123') self.assertEqual(200, rv.status_code) self.assertEqual({u'message': u'OK'}, jsonutils.loads(rv.data.decode('utf-8'))) mock_rmtree.assert_called_with('/var/lib/octavia/123') if init_system == consts.INIT_SYSTEMD: mock_exists.assert_called_with(consts.SYSTEMD_DIR + '/haproxy-123.service') elif init_system == consts.INIT_UPSTART: mock_exists.assert_called_with(consts.UPSTART_DIR + '/haproxy-123.conf') elif init_system == consts.INIT_SYSVINIT: mock_exists.assert_called_with(consts.SYSVINIT_DIR + '/haproxy-123') else: self.assertIn(init_system, consts.VALID_INIT_SYSTEMS) mock_exists.assert_any_call('/var/lib/octavia/123/123.pid') # service is stopped + no upstart script + vrrp mock_exists.side_effect = [True, True, False, True, False] if distro == consts.UBUNTU: rv = self.ubuntu_app.delete('/' + api_server.VERSION + '/listeners/123') elif distro == consts.CENTOS: rv = self.centos_app.delete('/' + api_server.VERSION + '/listeners/123') self.assertEqual(200, rv.status_code) self.assertEqual({u'message': u'OK'}, jsonutils.loads(rv.data.decode('utf-8'))) mock_rmtree.assert_called_with('/var/lib/octavia/123') if init_system == consts.INIT_SYSTEMD: mock_exists.assert_called_with(consts.SYSTEMD_DIR + '/haproxy-123.service') elif init_system == consts.INIT_UPSTART: mock_exists.assert_called_with(consts.UPSTART_DIR + '/haproxy-123.conf') elif init_system == consts.INIT_SYSVINIT: mock_exists.assert_called_with(consts.SYSVINIT_DIR + '/haproxy-123') else: self.assertIn(init_system, consts.VALID_INIT_SYSTEMS) mock_exists.assert_any_call('/var/lib/octavia/123/123.pid') # service is stopped + upstart script + no vrrp 
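# As in the scenarios above, each boolean in mock_exists.side_effect
# answers one os.path.exists() probe made by the delete path, in call
# order, so these lists encode which artifacts (pid file, vrrp check
# script, init script) are present in the scenario named in each comment.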
mock_exists.side_effect = [True, True, False, False, True] if distro == consts.UBUNTU: rv = self.ubuntu_app.delete('/' + api_server.VERSION + '/listeners/123') elif distro == consts.CENTOS: rv = self.centos_app.delete('/' + api_server.VERSION + '/listeners/123') self.assertEqual(200, rv.status_code) self.assertEqual({u'message': u'OK'}, jsonutils.loads(rv.data.decode('utf-8'))) if init_system == consts.INIT_SYSTEMD: mock_remove.assert_called_with(consts.SYSTEMD_DIR + '/haproxy-123.service') elif init_system == consts.INIT_UPSTART: mock_remove.assert_called_with(consts.UPSTART_DIR + '/haproxy-123.conf') elif init_system == consts.INIT_SYSVINIT: mock_remove.assert_called_with(consts.SYSVINIT_DIR + '/haproxy-123') else: self.assertIn(init_system, consts.VALID_INIT_SYSTEMS) # service is stopped + upstart script + vrrp mock_exists.side_effect = [True, True, False, True, True] if distro == consts.UBUNTU: rv = self.ubuntu_app.delete('/' + api_server.VERSION + '/listeners/123') elif distro == consts.CENTOS: rv = self.centos_app.delete('/' + api_server.VERSION + '/listeners/123') self.assertEqual(200, rv.status_code) self.assertEqual({u'message': u'OK'}, jsonutils.loads(rv.data.decode('utf-8'))) if init_system == consts.INIT_SYSTEMD: mock_remove.assert_called_with(consts.SYSTEMD_DIR + '/haproxy-123.service') elif init_system == consts.INIT_UPSTART: mock_remove.assert_called_with(consts.UPSTART_DIR + '/haproxy-123.conf') elif init_system == consts.INIT_SYSVINIT: mock_remove.assert_called_with(consts.SYSVINIT_DIR + '/haproxy-123') else: self.assertIn(init_system, consts.VALID_INIT_SYSTEMS) # service is running + upstart script + no vrrp mock_exists.side_effect = [True, True, True, True, False, True] mock_pid.return_value = '456' if distro == consts.UBUNTU: rv = self.ubuntu_app.delete('/' + api_server.VERSION + '/listeners/123') elif distro == consts.CENTOS: rv = self.centos_app.delete('/' + api_server.VERSION + '/listeners/123') self.assertEqual(200, rv.status_code) self.assertEqual({u'message': u'OK'}, jsonutils.loads(rv.data.decode('utf-8'))) mock_pid.assert_called_once_with('123') mock_check_output.assert_any_call( ['/usr/sbin/service', 'haproxy-123', 'stop'], stderr=-2) if init_system == consts.INIT_SYSTEMD: mock_check_output.assert_any_call( "systemctl disable haproxy-123".split(), stderr=subprocess.STDOUT) elif init_system == consts.INIT_UPSTART: mock_remove.assert_any_call(consts.UPSTART_DIR + '/haproxy-123.conf') elif init_system == consts.INIT_SYSVINIT: mock_check_output.assert_any_call( "insserv -r /etc/init.d/haproxy-123".split(), stderr=subprocess.STDOUT) else: self.assertIn(init_system, consts.VALID_INIT_SYSTEMS) # service is running + upstart script + vrrp mock_exists.side_effect = [True, True, True, True, True, True] mock_pid.return_value = '456' if distro == consts.UBUNTU: rv = self.ubuntu_app.delete('/' + api_server.VERSION + '/listeners/123') elif distro == consts.CENTOS: rv = self.centos_app.delete('/' + api_server.VERSION + '/listeners/123') self.assertEqual(200, rv.status_code) self.assertEqual({u'message': u'OK'}, jsonutils.loads(rv.data.decode('utf-8'))) mock_pid.assert_called_with('123') mock_check_output.assert_any_call( ['/usr/sbin/service', 'haproxy-123', 'stop'], stderr=-2) if init_system == consts.INIT_SYSTEMD: mock_check_output.assert_any_call( "systemctl disable haproxy-123".split(), stderr=subprocess.STDOUT) elif init_system == consts.INIT_UPSTART: mock_remove.assert_any_call(consts.UPSTART_DIR + '/haproxy-123.conf') elif init_system == consts.INIT_SYSVINIT: 
            mock_check_output.assert_any_call(
                "insserv -r /etc/init.d/haproxy-123".split(),
                stderr=subprocess.STDOUT)
        else:
            self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)

        # service is running + stopping fails
        mock_exists.side_effect = [True, True, True, True]
        mock_check_output.side_effect = subprocess.CalledProcessError(
            7, 'test', RANDOM_ERROR)
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.delete('/' + api_server.VERSION +
                                        '/listeners/123')
        elif distro == consts.CENTOS:
            rv = self.centos_app.delete('/' + api_server.VERSION +
                                        '/listeners/123')
        self.assertEqual(500, rv.status_code)
        self.assertEqual(
            {'details': 'random error', 'message': 'Error stopping haproxy'},
            jsonutils.loads(rv.data.decode('utf-8')))
        # that's the last call before exception
        mock_exists.assert_called_with('/proc/456')

    def test_ubuntu_get_haproxy(self):
        self._test_get_haproxy(consts.UBUNTU)

    def test_centos_get_haproxy(self):
        self._test_get_haproxy(consts.CENTOS)

    @mock.patch('os.listdir')
    @mock.patch('os.path.exists')
    def _test_get_haproxy(self, distro, mock_exists, mock_listdir):
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        CONTENT = "bibble\nbibble"

        # no haproxy config present yet -> 404
        mock_exists.side_effect = [False]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                     '/loadbalancer/123/haproxy')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION +
                                     '/loadbalancer/123/haproxy')
        self.assertEqual(404, rv.status_code)

        mock_exists.side_effect = [True, True]
        path = util.config_path('123')
        self.useFixture(test_utils.OpenFixture(path, CONTENT))
        mock_listdir.return_value = ['123']
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                     '/loadbalancer/123/haproxy')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION +
                                     '/loadbalancer/123/haproxy')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(octavia_utils.b(CONTENT), rv.data)
        self.assertEqual('text/plain; charset=utf-8',
                         rv.headers['Content-Type'].lower())

    def test_ubuntu_get_all_listeners(self):
        self._test_get_all_listeners(consts.UBUNTU)

    def test_centos_get_all_listeners(self):
        self._test_get_all_listeners(consts.CENTOS)

    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'get_loadbalancers')
    @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
                'Loadbalancer._check_haproxy_status')
    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'parse_haproxy_file')
    def _test_get_all_listeners(self, distro, mock_parse, mock_status,
                                mock_lbs):
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        # no listeners
        mock_lbs.side_effect = [[]]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION + '/listeners')
        self.assertEqual(200, rv.status_code)
        self.assertFalse(jsonutils.loads(rv.data.decode('utf-8')))

        # one listener ACTIVE
        mock_lbs.side_effect = [['123']]
        mock_parse.side_effect = [['fake_socket', {'123': {'mode': 'test'}}]]
        mock_status.side_effect = [consts.ACTIVE]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION + '/listeners')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(
            [{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'}],
            jsonutils.loads(rv.data.decode('utf-8')))

        # two listeners, two modes
        mock_lbs.side_effect = [['123', '456']]
        mock_parse.side_effect = [['fake_socket', {'123': {'mode': 'test'}}],
                                  ['fake_socket', {'456': {'mode': 'http'}}]]
        mock_status.return_value = consts.ACTIVE
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION + '/listeners')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(
            [{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'},
             {'status': consts.ACTIVE, 'type': 'http', 'uuid': '456'}],
            jsonutils.loads(rv.data.decode('utf-8')))

    def test_ubuntu_delete_cert(self):
        self._test_delete_cert(consts.UBUNTU)

    def test_centos_delete_cert(self):
        self._test_delete_cert(consts.CENTOS)

    @mock.patch('os.path.exists')
    @mock.patch('os.remove')
    def _test_delete_cert(self, distro, mock_remove, mock_exists):
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        mock_exists.side_effect = [False]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.delete(
                '/' + api_server.VERSION +
                '/loadbalancer/123/certificates/test.pem')
        elif distro == consts.CENTOS:
            rv = self.centos_app.delete(
                '/' + api_server.VERSION +
                '/loadbalancer/123/certificates/test.pem')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
        mock_exists.assert_called_once_with(
            '/var/lib/octavia/certs/123/test.pem')

        # wrong file name
        mock_exists.side_effect = [True]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.delete(
                '/' + api_server.VERSION +
                '/loadbalancer/123/certificates/test.bla')
        elif distro == consts.CENTOS:
            rv = self.centos_app.delete(
                '/' + api_server.VERSION +
                '/loadbalancer/123/certificates/test.bla')
        self.assertEqual(400, rv.status_code)

        mock_exists.side_effect = [True]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.delete(
                '/' + api_server.VERSION +
                '/loadbalancer/123/certificates/test.pem')
        elif distro == consts.CENTOS:
            rv = self.centos_app.delete(
                '/' + api_server.VERSION +
                '/loadbalancer/123/certificates/test.pem')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
        mock_remove.assert_called_once_with(
            '/var/lib/octavia/certs/123/test.pem')

    def test_ubuntu_get_certificate_md5(self):
        self._test_get_certificate_md5(consts.UBUNTU)

    def test_centos_get_certificate_md5(self):
        self._test_get_certificate_md5(consts.CENTOS)

    @mock.patch('os.path.exists')
    def _test_get_certificate_md5(self, distro, mock_exists):
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        CONTENT = "TestTest"

        mock_exists.side_effect = [False]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                     '/loadbalancer/123/certificates/'
                                     'test.pem')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION +
                                     '/loadbalancer/123/certificates/'
                                     'test.pem')
        self.assertEqual(404, rv.status_code)
        self.assertEqual(dict(
            details='No certificate with filename: test.pem',
            message='Certificate Not Found'),
            jsonutils.loads(rv.data.decode('utf-8')))
        mock_exists.assert_called_with('/var/lib/octavia/certs/123/test.pem')

        # wrong file name
        mock_exists.side_effect = [True]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/loadbalancer/123/certificates/'
                                     'test.bla',
                                     data='TestTest')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/loadbalancer/123/certificates/'
                                     'test.bla',
                                     data='TestTest')
        self.assertEqual(400, rv.status_code)

        mock_exists.return_value = True
        mock_exists.side_effect = None
        if distro == consts.UBUNTU:
            path = self.ubuntu_test_server._loadbalancer._cert_file_path(
                '123', 'test.pem')
        elif distro == consts.CENTOS:
            path = self.centos_test_server._loadbalancer._cert_file_path(
                '123', 'test.pem')
        self.useFixture(test_utils.OpenFixture(path, CONTENT))
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                     '/loadbalancer/123/certificates/'
                                     'test.pem')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION +
                                     '/loadbalancer/123/certificates/'
                                     'test.pem')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(dict(md5sum=hashlib.md5(
            octavia_utils.b(CONTENT)).hexdigest()),
            jsonutils.loads(rv.data.decode('utf-8')))

    def test_ubuntu_upload_certificate_md5(self):
        self._test_upload_certificate_md5(consts.UBUNTU)

    def test_centos_upload_certificate_md5(self):
        self._test_upload_certificate_md5(consts.CENTOS)

    @mock.patch('os.path.exists')
    @mock.patch('os.makedirs')
    def _test_upload_certificate_md5(self, distro, mock_makedir,
                                     mock_exists):
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        # wrong file name
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/loadbalancer/123/certificates/'
                                     'test.bla',
                                     data='TestTest')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/loadbalancer/123/certificates/'
                                     'test.bla',
                                     data='TestTest')
        self.assertEqual(400, rv.status_code)

        mock_exists.return_value = True
        if distro == consts.UBUNTU:
            path = self.ubuntu_test_server._loadbalancer._cert_file_path(
                '123', 'test.pem')
        elif distro == consts.CENTOS:
            path = self.centos_test_server._loadbalancer._cert_file_path(
                '123', 'test.pem')
        m = self.useFixture(test_utils.OpenFixture(path)).mock_open
        with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                         '/loadbalancer/123/certificates/'
                                         'test.pem', data='TestTest')
            elif distro == consts.CENTOS:
                rv = self.centos_app.put('/' + api_server.VERSION +
                                         '/loadbalancer/123/certificates/'
                                         'test.pem', data='TestTest')
            self.assertEqual(200, rv.status_code)
            self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
            handle = m()
            handle.write.assert_called_once_with(octavia_utils.b('TestTest'))

        mock_exists.return_value = False
        m = self.useFixture(test_utils.OpenFixture(path)).mock_open
        with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                         '/loadbalancer/123/certificates/'
                                         'test.pem', data='TestTest')
            elif distro == consts.CENTOS:
                rv = self.centos_app.put('/' + api_server.VERSION +
                                         '/loadbalancer/123/certificates/'
                                         'test.pem', data='TestTest')
            self.assertEqual(200, rv.status_code)
            self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
            handle = m()
            handle.write.assert_called_once_with(octavia_utils.b('TestTest'))
            mock_makedir.assert_called_once_with('/var/lib/octavia/certs/123')

    def test_ubuntu_upload_server_certificate(self):
        self._test_upload_server_certificate(consts.UBUNTU)

    def test_centos_upload_server_certificate(self):
        self._test_upload_server_certificate(consts.CENTOS)

    def _test_upload_server_certificate(self, distro):
        certificate_update.BUFFER = 5  # test the while loop
        path = '/etc/octavia/certs/server.pem'
        m = self.useFixture(test_utils.OpenFixture(path)).mock_open
        with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                         '/certificate', data='TestTest')
            elif distro == consts.CENTOS:
                rv = self.centos_app.put('/' + api_server.VERSION +
                                         '/certificate', data='TestTest')
            self.assertEqual(202, rv.status_code)
            self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
            handle = m()
            # BUFFER = 5 forces two chunked writes for the 8-byte payload
            handle.write.assert_any_call(octavia_utils.b('TestT'))
            handle.write.assert_any_call(octavia_utils.b('est'))

    def test_ubuntu_plug_network(self):
        self._test_plug_network(consts.UBUNTU)
        self.conf.config(group="amphora_agent",
                         agent_server_network_file="/path/to/interfaces_file")
        self._test_plug_network(consts.UBUNTU)

    def test_centos_plug_network(self):
        self._test_plug_network(consts.CENTOS)
        self.conf.config(group="amphora_agent",
                         agent_server_network_file="/path/to/interfaces_file")
        self._test_plug_network(consts.CENTOS)

    @mock.patch('os.chmod')
    @mock.patch('pyroute2.IPRoute', create=True)
    @mock.patch('pyroute2.NetNS', create=True)
    @mock.patch('subprocess.check_output')
    @mock.patch('octavia.amphorae.backends.agent.api_server.'
'plug.Plug._netns_interface_exists') @mock.patch('os.path.isfile') def _test_plug_network(self, distro, mock_isfile, mock_int_exists, mock_check_output, mock_netns, mock_pyroute2, mock_os_chmod): mock_ipr = mock.MagicMock() mock_ipr_instance = mock.MagicMock() mock_ipr_instance.link_lookup.side_effect = [ [], [], [33], [33], [33], [33], [33], [33], [33], [33]] mock_ipr_instance.get_links.return_value = ({ 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) mock_ipr.__enter__.return_value = mock_ipr_instance mock_pyroute2.return_value = mock_ipr self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) port_info = {'mac_address': '123'} test_int_num = random.randint(0, 9999) mock_int_exists.return_value = False netns_handle = mock_netns.return_value.__enter__.return_value netns_handle.get_links.return_value = [0] * test_int_num mock_isfile.return_value = True test_int_num = str(test_int_num) # Interface already plugged mock_int_exists.return_value = True if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(409, rv.status_code) self.assertEqual(dict(message="Interface already exists"), jsonutils.loads(rv.data.decode('utf-8'))) mock_int_exists.return_value = False # No interface at all file_name = '/sys/bus/pci/rescan' m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) mock_open.assert_called_with(file_name, os.O_WRONLY) mock_fdopen.assert_called_with(123, 'w') m().write.assert_called_once_with('1') self.assertEqual(404, rv.status_code) self.assertEqual(dict(details="No suitable network interface found"), jsonutils.loads(rv.data.decode('utf-8'))) # No interface down m().reset_mock() with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) mock_open.assert_called_with(file_name, os.O_WRONLY) mock_fdopen.assert_called_with(123, 'w') m().write.assert_called_once_with('1') self.assertEqual(404, rv.status_code) self.assertEqual(dict(details="No suitable network interface found"), jsonutils.loads(rv.data.decode('utf-8'))) # One Interface down, Happy Path mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH if self.conf.conf.amphora_agent.agent_server_network_file: file_name = self.conf.conf.amphora_agent.agent_server_network_file flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND elif distro == consts.UBUNTU: file_name = ('/etc/netns/{0}/network/interfaces.d/' 'eth{1}.cfg'.format(consts.AMPHORA_NAMESPACE, test_int_num)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC 
elif distro == consts.CENTOS: file_name = ('/etc/netns/{0}/sysconfig/network-scripts/' 'ifcfg-eth{1}'.format(consts.AMPHORA_NAMESPACE, test_int_num)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(202, rv.status_code) mock_open.assert_any_call(file_name, flags, mode) mock_fdopen.assert_any_call(123, 'w') plug_inf_file = '/var/lib/octavia/plugged_interfaces' flags = os.O_RDWR | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_any_call(plug_inf_file, flags, mode) mock_fdopen.assert_any_call(123, 'r+') handle = m() if distro == consts.UBUNTU: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'auto eth{int}\n' 'iface eth{int} inet dhcp\n' 'auto eth{int}:0\n' 'iface eth{int}:0 inet6 auto\n'.format(int=test_int_num)) elif distro == consts.CENTOS: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'NM_CONTROLLED="no"\n' 'DEVICE="eth{int}"\n' 'ONBOOT="yes"\n' 'TYPE="Ethernet"\n' 'USERCTL="yes"\n' 'IPV6INIT="no"\n' 'BOOTPROTO="dhcp"\n' 'PERSISTENT_DHCLIENT="1"\n'.format(int=test_int_num)) mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', 'eth' + test_int_num], stderr=-2) # fixed IPs happy path port_info = {'mac_address': '123', 'mtu': 1450, 'fixed_ips': [ {'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]} mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH if self.conf.conf.amphora_agent.agent_server_network_file: file_name = self.conf.conf.amphora_agent.agent_server_network_file flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND elif distro == consts.UBUNTU: file_name = ('/etc/netns/{0}/network/interfaces.d/' 'eth{1}.cfg'.format(consts.AMPHORA_NAMESPACE, test_int_num)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC elif distro == consts.CENTOS: file_name = ('/etc/netns/{0}/sysconfig/network-scripts/' 'ifcfg-eth{1}'.format(consts.AMPHORA_NAMESPACE, test_int_num)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(202, rv.status_code) mock_open.assert_any_call(file_name, flags, mode) mock_fdopen.assert_any_call(123, 'w') plug_inf_file = '/var/lib/octavia/plugged_interfaces' flags = os.O_RDWR | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_any_call(plug_inf_file, flags, mode) mock_fdopen.assert_any_call(123, 'r+') handle = m() if distro == consts.UBUNTU: handle.write.assert_any_call( '\n\n# Generated by Octavia agent\n' 'auto eth{int}\n' 'iface eth{int} inet static\n' 'address 
10.0.0.5\nbroadcast 10.0.0.255\n' 'netmask 255.255.255.0\n' 'mtu 1450\n' 'post-up /sbin/iptables -t nat -A POSTROUTING -p udp ' '-o eth{int} -j MASQUERADE\n' 'post-down /sbin/iptables -t nat -D POSTROUTING -p udp ' '-o eth{int} -j MASQUERADE\n'.format(int=test_int_num)) elif distro == consts.CENTOS: handle.write.assert_any_call( '\n\n# Generated by Octavia agent\n' 'NM_CONTROLLED="no"\n' 'DEVICE="eth{int}"\n' 'ONBOOT="yes"\n' 'TYPE="Ethernet"\n' 'USERCTL="yes"\n' 'IPV6INIT="no"\n' 'MTU="1450"\n' 'BOOTPROTO="static"\n' 'IPADDR="10.0.0.5"\n' 'NETMASK="255.255.255.0"\n'.format(int=test_int_num)) mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', 'eth' + test_int_num], stderr=-2) # fixed IPs happy path IPv6 port_info = {'mac_address': '123', 'mtu': 1450, 'fixed_ips': [ {'ip_address': '2001:db8::2', 'subnet_cidr': '2001:db8::/32'}]} mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH if self.conf.conf.amphora_agent.agent_server_network_file: file_name = self.conf.conf.amphora_agent.agent_server_network_file flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND elif distro == consts.UBUNTU: file_name = ('/etc/netns/{0}/network/interfaces.d/' 'eth{1}.cfg'.format(consts.AMPHORA_NAMESPACE, test_int_num)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC elif distro == consts.CENTOS: file_name = ('/etc/netns/{0}/sysconfig/network-scripts/' 'ifcfg-eth{1}'.format(consts.AMPHORA_NAMESPACE, test_int_num)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(202, rv.status_code) mock_open.assert_any_call(file_name, flags, mode) mock_fdopen.assert_any_call(123, 'w') plug_inf_file = '/var/lib/octavia/plugged_interfaces' flags = os.O_RDWR | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_any_call(plug_inf_file, flags, mode) mock_fdopen.assert_any_call(123, 'r+') handle = m() if distro == consts.UBUNTU: handle.write.assert_any_call( '\n\n# Generated by Octavia agent\n' 'auto eth{int}\n' 'iface eth{int} inet6 static\n' 'address 2001:0db8:0000:0000:0000:0000:0000:0002\n' 'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n' 'netmask 32\nmtu 1450\n' 'post-up /sbin/ip6tables -t nat -A POSTROUTING -p udp ' '-o eth{int} -j MASQUERADE\n' 'post-down /sbin/ip6tables -t nat -D POSTROUTING -p udp ' '-o eth{int} -j MASQUERADE\n'.format(int=test_int_num)) elif distro == consts.CENTOS: handle.write.assert_any_call( '\n\n# Generated by Octavia agent\n' 'NM_CONTROLLED="no"\nDEVICE="eth{int}"\n' 'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes"\n' 'IPV6INIT="yes"\nIPV6_MTU="1450"\n' 'IPV6_AUTOCONF="no"\n' 'IPV6ADDR="2001:0db8:0000:0000:0000:0000:' '0000:0002"\n'.format(int=test_int_num)) mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', 'eth' + test_int_num], stderr=-2) # fixed IPs, bogus IP port_info = {'mac_address': '123', 'fixed_ips': [ {'ip_address': '10005', 'subnet_cidr': '10.0.0.0/24'}]} flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mode = stat.S_IRUSR | stat.S_IWUSR | 
stat.S_IRGRP | stat.S_IROTH file_name = '/etc/netns/{0}/network/interfaces.d/eth{1}.cfg'.format( consts.AMPHORA_NAMESPACE, test_int_num) m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(400, rv.status_code) # same as above but ifup fails port_info = {'mac_address': '123', 'fixed_ips': [ {'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]} mock_check_output.side_effect = [subprocess.CalledProcessError( 7, 'test', RANDOM_ERROR), subprocess.CalledProcessError( 7, 'test', RANDOM_ERROR)] m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(500, rv.status_code) self.assertEqual( {'details': RANDOM_ERROR.decode('utf-8'), 'message': 'Error plugging network'}, jsonutils.loads(rv.data.decode('utf-8'))) # Bad port_info tests port_info = 'Bad data' if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(400, rv.status_code) port_info = {'fixed_ips': [{'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]} if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(400, rv.status_code) def test_ubuntu_plug_network_host_routes(self): self._test_plug_network_host_routes(consts.UBUNTU) self.conf.config(group="amphora_agent", agent_server_network_file="/path/to/interfaces_file") def test_centos_plug_network_host_routes(self): self._test_plug_network_host_routes(consts.CENTOS) @mock.patch('os.chmod') @mock.patch('pyroute2.IPRoute', create=True) @mock.patch('pyroute2.NetNS', create=True) @mock.patch('subprocess.check_output') def _test_plug_network_host_routes(self, distro, mock_check_output, mock_netns, mock_pyroute2, mock_os_chmod): mock_ipr = mock.MagicMock() mock_ipr_instance = mock.MagicMock() mock_ipr_instance.link_lookup.return_value = [33] mock_ipr_instance.get_links.return_value = ({ 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) mock_ipr.__enter__.return_value = mock_ipr_instance mock_pyroute2.return_value = mock_ipr self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) SUBNET_CIDR = '192.0.2.0/24' BROADCAST = '192.0.2.255' NETMASK = '255.255.255.0' IP = '192.0.1.5' MAC = '123' DEST1 = '198.51.100.0/24' DEST2 = '203.0.113.1/32' 
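    # NOTE: SUBNET_CIDR, DEST1 and DEST2 above are drawn from the RFC 5737
    # documentation ranges, so these fixtures can never touch a real network.
    # IP ('192.0.1.5') sits outside SUBNET_CIDR; that appears harmless here
    # since every system interaction in this test is mocked.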
NEXTHOP = '192.0.2.1' netns_handle = mock_netns.return_value.__enter__.return_value netns_handle.get_links.return_value = [{ 'attrs': [['IFLA_IFNAME', consts.NETNS_PRIMARY_INTERFACE]]}] port_info = {'mac_address': MAC, 'mtu': 1450, 'fixed_ips': [ {'ip_address': IP, 'subnet_cidr': SUBNET_CIDR, 'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP}, {'destination': DEST2, 'nexthop': NEXTHOP}]}]} flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH if distro == consts.UBUNTU: file_name = '/etc/netns/{0}/network/interfaces.d/{1}.cfg'.format( consts.AMPHORA_NAMESPACE, consts.NETNS_PRIMARY_INTERFACE) elif distro == consts.CENTOS: file_name = ('/etc/netns/{0}/sysconfig/network-scripts/' 'ifcfg-{1}'.format(consts.AMPHORA_NAMESPACE, consts.NETNS_PRIMARY_INTERFACE)) m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/network", content_type='application/json', data=jsonutils.dumps(port_info)) self.assertEqual(202, rv.status_code) mock_open.assert_any_call(file_name, flags, mode) mock_fdopen.assert_any_call(123, 'w') plug_inf_file = '/var/lib/octavia/plugged_interfaces' flags = os.O_RDWR | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_any_call(plug_inf_file, flags, mode) mock_fdopen.assert_any_call(123, 'r+') handle = m() if distro == consts.UBUNTU: handle.write.assert_any_call( '\n\n# Generated by Octavia agent\n' 'auto ' + consts.NETNS_PRIMARY_INTERFACE + '\niface ' + consts.NETNS_PRIMARY_INTERFACE + ' inet static\n' + 'address ' + IP + '\nbroadcast ' + BROADCAST + '\n' + 'netmask ' + NETMASK + '\n' + 'mtu 1450\n' + 'up route add -net ' + DEST1 + ' gw ' + NEXTHOP + ' dev ' + consts.NETNS_PRIMARY_INTERFACE + '\n' 'down route del -net ' + DEST1 + ' gw ' + NEXTHOP + ' dev ' + consts.NETNS_PRIMARY_INTERFACE + '\n' 'up route add -host ' + DEST2 + ' gw ' + NEXTHOP + ' dev ' + consts.NETNS_PRIMARY_INTERFACE + '\n' 'down route del -host ' + DEST2 + ' gw ' + NEXTHOP + ' dev ' + consts.NETNS_PRIMARY_INTERFACE + '\n' + 'post-up /sbin/iptables -t nat -A POSTROUTING -p udp -o ' + consts.NETNS_PRIMARY_INTERFACE + ' -j MASQUERADE' + '\n' + 'post-down /sbin/iptables -t nat -D POSTROUTING -p udp ' '-o ' + consts.NETNS_PRIMARY_INTERFACE + ' -j MASQUERADE' + '\n') elif distro == consts.CENTOS: handle.write.assert_any_call( '\n\n# Generated by Octavia agent\n' 'NM_CONTROLLED="no"\nDEVICE="{int}"\n' 'ONBOOT="yes"\nTYPE="Ethernet"\n' 'USERCTL="yes"\nIPV6INIT="no"\nMTU="1450"\n' 'BOOTPROTO="static"\nIPADDR="{ip}"\n' 'NETMASK="{mask}"\n'.format( int=consts.NETNS_PRIMARY_INTERFACE, ip=IP, mask=NETMASK)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mock_open.assert_any_call('/sbin/ifup-local', flags, mode) mock_open.assert_any_call('/sbin/ifdown-local', flags, mode) calls = [mock.call('/sbin/ifup-local', stat.S_IEXEC), mock.call('/sbin/ifdown-local', stat.S_IEXEC)] mock_os_chmod.assert_has_calls(calls) mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', consts.NETNS_PRIMARY_INTERFACE], stderr=-2) def test_ubuntu_plug_VIP4(self): self._test_plug_VIP4(consts.UBUNTU) 
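        # Run once with the default per-interface config files, then again
        # with an operator-supplied agent_server_network_file to cover the
        # O_APPEND write path asserted in _test_plug_VIP4.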
        self.conf.config(group="amphora_agent",
                         agent_server_network_file="/path/to/interfaces_file")
        self._test_plug_VIP4(consts.UBUNTU)

    def test_centos_plug_VIP4(self):
        self._test_plug_VIP4(consts.CENTOS)
        self.conf.config(group="amphora_agent",
                         agent_server_network_file="/path/to/interfaces_file")
        self._test_plug_VIP4(consts.CENTOS)

    @mock.patch('os.chmod')
    @mock.patch('shutil.copy2')
    @mock.patch('pyroute2.NSPopen', create=True)
    @mock.patch('octavia.amphorae.backends.agent.api_server.'
                'plug.Plug._netns_interface_exists')
    @mock.patch('pyroute2.IPRoute', create=True)
    @mock.patch('pyroute2.netns.create', create=True)
    @mock.patch('pyroute2.NetNS', create=True)
    @mock.patch('subprocess.check_output')
    @mock.patch('shutil.copytree')
    @mock.patch('os.makedirs')
    @mock.patch('os.path.isfile')
    def _test_plug_VIP4(self, distro, mock_isfile, mock_makedirs,
                        mock_copytree, mock_check_output, mock_netns,
                        mock_netns_create, mock_pyroute2, mock_int_exists,
                        mock_nspopen, mock_copy2, mock_os_chmod):
        mock_ipr = mock.MagicMock()
        mock_ipr_instance = mock.MagicMock()
        mock_ipr_instance.link_lookup.side_effect = [[], [], [33], [33],
                                                     [33], [33], [33], [33]]
        mock_ipr_instance.get_links.return_value = ({
            'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},)
        mock_ipr.__enter__.return_value = mock_ipr_instance
        mock_pyroute2.return_value = mock_ipr
        mock_isfile.return_value = True

        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        subnet_info = {
            'subnet_cidr': '203.0.113.0/24',
            'gateway': '203.0.113.1',
            'mac_address': '123'
        }

        # malformed ip
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=jsonutils.dumps(subnet_info),
                                      content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=jsonutils.dumps(subnet_info),
                                      content_type='application/json')
        self.assertEqual(400, rv.status_code)

        # No subnet info
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error')
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error')
        self.assertEqual(400, rv.status_code)

        # Interface already plugged
        mock_int_exists.return_value = True
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/vip/203.0.113.2",
                                      content_type='application/json',
                                      data=jsonutils.dumps(subnet_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/vip/203.0.113.2",
                                      content_type='application/json',
                                      data=jsonutils.dumps(subnet_info))
        self.assertEqual(409, rv.status_code)
        self.assertEqual(dict(message="Interface already exists"),
                         jsonutils.loads(rv.data.decode('utf-8')))
        mock_int_exists.return_value = False

        # No interface at all
        file_name = '/sys/bus/pci/rescan'
        m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/203.0.113.2",
                                          content_type='application/json',
                                          data=jsonutils.dumps(subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/203.0.113.2",
                                          content_type='application/json',
                                          data=jsonutils.dumps(subnet_info))
            mock_open.assert_called_with(file_name, os.O_WRONLY)
            mock_fdopen.assert_called_with(123, 'w')
            m().write.assert_called_once_with('1')
        self.assertEqual(404, rv.status_code)
        self.assertEqual(dict(details="No suitable network interface found"),
jsonutils.loads(rv.data.decode('utf-8'))) # Two interfaces down m().reset_mock() with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2", content_type='application/json', data=jsonutils.dumps(subnet_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2", content_type='application/json', data=jsonutils.dumps(subnet_info)) mock_open.assert_called_with(file_name, os.O_WRONLY) mock_fdopen.assert_called_with(123, 'w') m().write.assert_called_once_with('1') self.assertEqual(404, rv.status_code) self.assertEqual(dict(details="No suitable network interface found"), jsonutils.loads(rv.data.decode('utf-8'))) # Happy Path IPv4, with VRRP_IP and host route full_subnet_info = { 'subnet_cidr': '203.0.113.0/24', 'gateway': '203.0.113.1', 'mac_address': '123', 'vrrp_ip': '203.0.113.4', 'mtu': 1450, 'host_routes': [{'destination': '203.0.114.0/24', 'nexthop': '203.0.113.5'}, {'destination': '203.0.115.1/32', 'nexthop': '203.0.113.5'}] } mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH if self.conf.conf.amphora_agent.agent_server_network_file: file_name = self.conf.conf.amphora_agent.agent_server_network_file flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND elif distro == consts.UBUNTU: file_name = ('/etc/netns/{netns}/network/interfaces.d/' '{netns_int}.cfg'.format( netns=consts.AMPHORA_NAMESPACE, netns_int=consts.NETNS_PRIMARY_INTERFACE)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC elif distro == consts.CENTOS: file_name = ('/etc/netns/{netns}/sysconfig/network-scripts/' 'ifcfg-{netns_int}'.format( netns=consts.AMPHORA_NAMESPACE, netns_int=consts.NETNS_PRIMARY_INTERFACE)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2", content_type='application/json', data=jsonutils.dumps( full_subnet_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2", content_type='application/json', data=jsonutils.dumps( full_subnet_info)) self.assertEqual(202, rv.status_code) mock_open.assert_any_call(file_name, flags, mode) mock_fdopen.assert_any_call(123, 'w') plug_inf_file = '/var/lib/octavia/plugged_interfaces' flags = os.O_RDWR | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_any_call(plug_inf_file, flags, mode) mock_fdopen.assert_any_call(123, 'r+') handle = m() if distro == consts.UBUNTU: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'auto {netns_int} {netns_int}:0\n' 'iface {netns_int} inet static\n' 'address 203.0.113.4\n' 'broadcast 203.0.113.255\n' 'netmask 255.255.255.0\n' 'gateway 203.0.113.1\n' 'mtu 1450\n' 'up route add -net 203.0.114.0/24 gw 203.0.113.5 ' 'dev {netns_int}\n' 'down route del -net 203.0.114.0/24 gw 203.0.113.5 ' 'dev {netns_int}\n' 'up route add -host 203.0.115.1/32 gw 203.0.113.5 ' 'dev {netns_int}\n' 'down route del -host 203.0.115.1/32 gw 203.0.113.5 ' 'dev {netns_int}\n' 'iface {netns_int}:0 inet static\n' 'address 203.0.113.2\n' 'broadcast 203.0.113.255\n' 'netmask 255.255.255.0\n\n' '# Add a source routing table to allow members to ' 'access the 
VIP\n\n' 'post-up /sbin/ip route add default via 203.0.113.1 ' 'dev eth1 onlink table 1\n' 'post-down /sbin/ip route del default via 203.0.113.1 ' 'dev eth1 onlink table 1\n\n\n' 'post-up /sbin/ip route add 203.0.113.0/24 ' 'dev eth1 src 203.0.113.2 scope link table 1\n' 'post-down /sbin/ip route del 203.0.113.0/24 ' 'dev eth1 src 203.0.113.2 scope link table 1\n' 'post-up /sbin/ip route add 203.0.114.0/24 ' 'via 203.0.113.5 dev eth1 onlink table 1\n' 'post-down /sbin/ip route del 203.0.114.0/24 ' 'via 203.0.113.5 dev eth1 onlink table 1\n' 'post-up /sbin/ip route add 203.0.115.1/32 ' 'via 203.0.113.5 dev eth1 onlink table 1\n' 'post-down /sbin/ip route del 203.0.115.1/32 ' 'via 203.0.113.5 dev eth1 onlink table 1\n\n\n' 'post-up /sbin/ip rule add from 203.0.113.2/32 table 1 ' 'priority 100\n' 'post-down /sbin/ip rule del from 203.0.113.2/32 table 1 ' 'priority 100\n\n' 'post-up /sbin/iptables -t nat -A POSTROUTING -p udp ' '-o eth1 -j MASQUERADE\n' 'post-down /sbin/iptables -t nat -D POSTROUTING -p udp ' '-o eth1 -j MASQUERADE'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)) elif distro == consts.CENTOS: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'NM_CONTROLLED="no"\nDEVICE="{netns_int}"\n' 'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes" \n' 'BOOTPROTO="static"\nIPADDR="203.0.113.4"\n' 'NETMASK="255.255.255.0"\nGATEWAY="203.0.113.1"\n' 'MTU="1450" \n'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mock_open.assert_any_call('/sbin/ifup-local', flags, mode) mock_open.assert_any_call('/sbin/ifdown-local', flags, mode) calls = [mock.call('/sbin/ifup-local', stat.S_IEXEC), mock.call('/sbin/ifdown-local', stat.S_IEXEC)] mock_os_chmod.assert_has_calls(calls) mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', '{netns_int}:0'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2) # Verify sysctl was loaded calls = [mock.call('amphora-haproxy', ['/sbin/sysctl', '--system'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['modprobe', 'ip_vs'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['/sbin/sysctl', '-w', 'net.ipv4.ip_forward=1'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['/sbin/sysctl', '-w', 'net.ipv4.vs.conntrack=1'], stdout=subprocess.PIPE)] mock_nspopen.assert_has_calls(calls, any_order=True) # One Interface down, Happy Path IPv4 mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH if self.conf.conf.amphora_agent.agent_server_network_file: file_name = self.conf.conf.amphora_agent.agent_server_network_file flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND elif distro == consts.UBUNTU: file_name = ('/etc/netns/{netns}/network/interfaces.d/' '{netns_int}.cfg'.format( netns=consts.AMPHORA_NAMESPACE, netns_int=consts.NETNS_PRIMARY_INTERFACE)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC elif distro == consts.CENTOS: file_name = ('/etc/netns/{netns}/sysconfig/network-scripts/' 'ifcfg-{netns_int}'.format( netns=consts.AMPHORA_NAMESPACE, netns_int=consts.NETNS_PRIMARY_INTERFACE)) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2", content_type='application/json', data=jsonutils.dumps(subnet_info)) elif distro == consts.CENTOS: rv = 
self.centos_app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2", content_type='application/json', data=jsonutils.dumps(subnet_info)) self.assertEqual(202, rv.status_code) mock_open.assert_any_call(file_name, flags, mode) mock_fdopen.assert_any_call(123, 'w') plug_inf_file = '/var/lib/octavia/plugged_interfaces' flags = os.O_RDWR | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_any_call(plug_inf_file, flags, mode) mock_fdopen.assert_any_call(123, 'r+') handle = m() if distro == consts.UBUNTU: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'auto {netns_int} {netns_int}:0\n\n' 'iface {netns_int} inet dhcp\n' 'iface {netns_int}:0 inet static\n' 'address 203.0.113.2\n' 'broadcast 203.0.113.255\n' 'netmask 255.255.255.0\n\n' '# Add a source routing table to allow members to ' 'access the VIP\n\n' 'post-up /sbin/ip route add default via 203.0.113.1 ' 'dev eth1 onlink table 1\n' 'post-down /sbin/ip route del default via 203.0.113.1 ' 'dev eth1 onlink table 1\n\n\n' 'post-up /sbin/ip route add 203.0.113.0/24 ' 'dev eth1 src 203.0.113.2 scope link table 1\n' 'post-down /sbin/ip route del 203.0.113.0/24 ' 'dev eth1 src 203.0.113.2 scope link table 1\n\n\n' 'post-up /sbin/ip rule add from 203.0.113.2/32 table 1 ' 'priority 100\n' 'post-down /sbin/ip rule del from 203.0.113.2/32 table 1 ' 'priority 100\n\n' 'post-up /sbin/iptables -t nat -A POSTROUTING -p udp ' '-o eth1 -j MASQUERADE\n' 'post-down /sbin/iptables -t nat -D POSTROUTING -p udp ' '-o eth1 -j MASQUERADE'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)) elif distro == consts.CENTOS: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'NM_CONTROLLED="no"\nDEVICE="{netns_int}"\n' 'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes" \n' 'BOOTPROTO="dhcp"\nPERSISTENT_DHCLIENT="1" \n'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)) mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', '{netns_int}:0'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2) mock_check_output.side_effect = [ 'unplug1', subprocess.CalledProcessError( 7, 'test', RANDOM_ERROR), subprocess.CalledProcessError( 7, 'test', RANDOM_ERROR)] m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2", content_type='application/json', data=jsonutils.dumps(subnet_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/vip/203.0.113.2", content_type='application/json', data=jsonutils.dumps(subnet_info)) self.assertEqual(500, rv.status_code) self.assertEqual( {'details': RANDOM_ERROR.decode('utf-8'), 'message': 'Error plugging VIP'}, jsonutils.loads(rv.data.decode('utf-8'))) def test_ubuntu_plug_VIP6(self): self._test_plug_vip6(consts.UBUNTU) def test_centos_plug_VIP6(self): self._test_plug_vip6(consts.CENTOS) @mock.patch('os.chmod') @mock.patch('shutil.copy2') @mock.patch('pyroute2.NSPopen', create=True) @mock.patch('pyroute2.IPRoute', create=True) @mock.patch('pyroute2.netns.create', create=True) @mock.patch('pyroute2.NetNS', create=True) @mock.patch('subprocess.check_output') @mock.patch('shutil.copytree') @mock.patch('os.makedirs') @mock.patch('os.path.isfile') def _test_plug_vip6(self, distro, mock_isfile, mock_makedirs, mock_copytree, mock_check_output, mock_netns, mock_netns_create, mock_pyroute2, mock_nspopen, mock_copy2, 
                        mock_os_chmod):
        mock_ipr = mock.MagicMock()
        mock_ipr_instance = mock.MagicMock()
        mock_ipr_instance.link_lookup.side_effect = [[], [], [33], [33],
                                                     [33], [33], [33], [33]]
        mock_ipr_instance.get_links.return_value = ({
            'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},)
        mock_ipr.__enter__.return_value = mock_ipr_instance
        mock_pyroute2.return_value = mock_ipr
        mock_isfile.return_value = True

        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        subnet_info = {
            'subnet_cidr': '2001:db8::/32',
            'gateway': '2001:db8::1',
            'mac_address': '123'
        }

        # malformed ip
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=jsonutils.dumps(subnet_info),
                                      content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=jsonutils.dumps(subnet_info),
                                      content_type='application/json')
        self.assertEqual(400, rv.status_code)

        # No subnet info (post without a body, mirroring _test_plug_VIP4;
        # the original request here duplicated the malformed-ip case)
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error')
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error')
        self.assertEqual(400, rv.status_code)

        # No interface at all
        file_name = '/sys/bus/pci/rescan'
        m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=jsonutils.dumps(subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=jsonutils.dumps(subnet_info))
            mock_open.assert_called_with(file_name, os.O_WRONLY)
            mock_fdopen.assert_called_with(123, 'w')
            m().write.assert_called_once_with('1')
        self.assertEqual(404, rv.status_code)
        self.assertEqual(dict(details="No suitable network interface found"),
                         jsonutils.loads(rv.data.decode('utf-8')))

        # Two interfaces down
        m().reset_mock()
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=jsonutils.dumps(subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=jsonutils.dumps(subnet_info))
            mock_open.assert_called_with(file_name, os.O_WRONLY)
            mock_fdopen.assert_called_with(123, 'w')
            m().write.assert_called_once_with('1')
        self.assertEqual(404, rv.status_code)
        self.assertEqual(dict(details="No suitable network interface found"),
                         jsonutils.loads(rv.data.decode('utf-8')))

        # Happy Path IPv6, with VRRP_IP and host route
        full_subnet_info = {
            'subnet_cidr': '2001:db8::/32',
            'gateway': '2001:db8::1',
            'mac_address': '123',
            'vrrp_ip': '2001:db8::4',
            'mtu': 1450,
            'host_routes': [{'destination': '2001:db9::/32',
                             'nexthop': '2001:db8::5'},
                            {'destination': '2001:db9::1/128',
                             'nexthop': '2001:db8::5'}]
        }
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        if distro == consts.UBUNTU:
            file_name = ('/etc/netns/{netns}/network/interfaces.d/'
                         '{netns_int}.cfg'.format(
netns=consts.AMPHORA_NAMESPACE, netns_int=consts.NETNS_PRIMARY_INTERFACE)) elif distro == consts.CENTOS: file_name = ('/etc/netns/{netns}/sysconfig/network-scripts/' 'ifcfg-{netns_int}'.format( netns=consts.AMPHORA_NAMESPACE, netns_int=consts.NETNS_PRIMARY_INTERFACE)) m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/vip/2001:db8::2", content_type='application/json', data=jsonutils.dumps( full_subnet_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/vip/2001:db8::2", content_type='application/json', data=jsonutils.dumps( full_subnet_info)) self.assertEqual(202, rv.status_code) mock_open.assert_any_call(file_name, flags, mode) mock_fdopen.assert_any_call(123, 'w') plug_inf_file = '/var/lib/octavia/plugged_interfaces' flags = os.O_RDWR | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_any_call(plug_inf_file, flags, mode) mock_fdopen.assert_any_call(123, 'r+') handle = m() if distro == consts.UBUNTU: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'auto {netns_int} {netns_int}:0\n' 'iface {netns_int} inet6 static\n' 'address 2001:db8::4\n' 'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n' 'netmask 32\n' 'gateway 2001:db8::1\n' 'mtu 1450\n' 'up route add -net 2001:db9::/32 gw 2001:db8::5 ' 'dev {netns_int}\n' 'down route del -net 2001:db9::/32 gw 2001:db8::5 ' 'dev {netns_int}\n' 'up route add -host 2001:db9::1/128 gw 2001:db8::5 ' 'dev {netns_int}\n' 'down route del -host 2001:db9::1/128 gw 2001:db8::5 ' 'dev {netns_int}\n' 'iface {netns_int}:0 inet6 static\n' 'address 2001:0db8:0000:0000:0000:0000:0000:0002\n' 'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n' 'netmask 32\n\n' '# Add a source routing table to allow members to access ' 'the VIP\n\n' 'post-up /sbin/ip -6 route add default via 2001:db8::1 ' 'dev eth1 onlink table 1\n' 'post-down /sbin/ip -6 route del default via 2001:db8::1 ' 'dev eth1 onlink table 1\n\n\n' 'post-up /sbin/ip -6 route add 2001:db8::/32 ' 'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 ' 'scope link table 1\n' 'post-down /sbin/ip -6 route del 2001:db8::/32 ' 'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 ' 'scope link table 1\n' 'post-up /sbin/ip -6 route add 2001:db9::/32 via ' '2001:db8::5 dev eth1 onlink table 1\n' 'post-down /sbin/ip -6 route del 2001:db9::/32 ' 'via 2001:db8::5 dev eth1 onlink table 1\n' 'post-up /sbin/ip -6 route add 2001:db9::1/128 via ' '2001:db8::5 dev eth1 onlink table 1\n' 'post-down /sbin/ip -6 route del 2001:db9::1/128 ' 'via 2001:db8::5 dev eth1 onlink table 1\n\n\n' 'post-up /sbin/ip -6 rule add from ' '2001:0db8:0000:0000:0000:0000:0000:0002/128 table 1 ' 'priority 100\n' 'post-down /sbin/ip -6 rule del from ' '2001:0db8:0000:0000:0000:0000:0000:0002/128 table 1 ' 'priority 100\n\n' 'post-up /sbin/ip6tables -t nat -A POSTROUTING -p udp ' '-o eth1 -j MASQUERADE\n' 'post-down /sbin/ip6tables -t nat -D POSTROUTING -p udp ' '-o eth1 -j MASQUERADE'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)) elif distro == consts.CENTOS: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'NM_CONTROLLED="no"\nDEVICE="{netns_int}"\n' 'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes"\n' 'IPV6INIT="yes"\nIPV6_DEFROUTE="yes"\n' 'IPV6_AUTOCONF="no"\nIPV6ADDR="2001:db8::4/32"\n' 
'IPV6_DEFAULTGW="2001:db8::1"\nIPV6_MTU="1450" \n' 'IPV6ADDR_SECONDARIES="2001:0db8:0000:0000:0000:0000:' '0000:0002/32"\n'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)) if distro == consts.UBUNTU: mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', '{netns_int}:0'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2) elif distro == consts.CENTOS: mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', '{netns_int}'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2) # Verify sysctl was loaded calls = [mock.call('amphora-haproxy', ['/sbin/sysctl', '--system'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['modprobe', 'ip_vs'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['/sbin/sysctl', '-w', 'net.ipv6.conf.all.forwarding=1'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['/sbin/sysctl', '-w', 'net.ipv4.vs.conntrack=1'], stdout=subprocess.PIPE)] mock_nspopen.assert_has_calls(calls, any_order=True) # One Interface down, Happy Path IPv6 flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH if distro == consts.UBUNTU: file_name = ('/etc/netns/{netns}/network/interfaces.d/' '{netns_int}.cfg'.format( netns=consts.AMPHORA_NAMESPACE, netns_int=consts.NETNS_PRIMARY_INTERFACE)) elif distro == consts.CENTOS: file_name = ('/etc/netns/{netns}/sysconfig/network-scripts/' 'ifcfg-{netns_int}'.format( netns=consts.AMPHORA_NAMESPACE, netns_int=consts.NETNS_PRIMARY_INTERFACE)) m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/vip/2001:db8::2", content_type='application/json', data=jsonutils.dumps(subnet_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/vip/2001:db8::2", content_type='application/json', data=jsonutils.dumps(subnet_info)) self.assertEqual(202, rv.status_code) mock_open.assert_any_call(file_name, flags, mode) mock_fdopen.assert_any_call(123, 'w') plug_inf_file = '/var/lib/octavia/plugged_interfaces' flags = os.O_RDWR | os.O_CREAT mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_any_call(plug_inf_file, flags, mode) mock_fdopen.assert_any_call(123, 'r+') handle = m() if distro == consts.UBUNTU: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'auto {netns_int} {netns_int}:0\n\n' 'iface {netns_int} inet6 auto\n' 'iface {netns_int}:0 inet6 static\n' 'address 2001:0db8:0000:0000:0000:0000:0000:0002\n' 'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n' 'netmask 32\n\n' '# Add a source routing table to allow members to access ' 'the VIP\n\n' 'post-up /sbin/ip -6 route add default via 2001:db8::1 ' 'dev eth1 onlink table 1\n' 'post-down /sbin/ip -6 route del default via 2001:db8::1 ' 'dev eth1 onlink table 1\n\n\n' 'post-up /sbin/ip -6 route add 2001:db8::/32 ' 'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 ' 'scope link table 1\n' 'post-down /sbin/ip -6 route del 2001:db8::/32 ' 'dev eth1 src 2001:0db8:0000:0000:0000:0000:0000:0002 ' 'scope link table 1\n\n\n' 'post-up /sbin/ip -6 rule add from ' '2001:0db8:0000:0000:0000:0000:0000:0002/128 table 1 ' 'priority 100\n' 'post-down /sbin/ip -6 rule del from ' '2001:0db8:0000:0000:0000:0000:0000:0002/128 table 1 ' 'priority 100\n\n' 
'post-up /sbin/ip6tables -t nat -A POSTROUTING -p udp ' '-o eth1 -j MASQUERADE\n' 'post-down /sbin/ip6tables -t nat -D POSTROUTING -p udp ' '-o eth1 -j MASQUERADE'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)) elif distro == consts.CENTOS: handle.write.assert_any_call( '\n# Generated by Octavia agent\n' 'NM_CONTROLLED="no"\nDEVICE="{netns_int}"\n' 'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes" \n' 'IPV6INIT="yes"\nIPV6_DEFROUTE="yes"\n' 'IPV6_AUTOCONF="yes" \n' 'IPV6ADDR_SECONDARIES="2001:0db8:0000:0000:0000:0000:' '0000:0002/32"\n'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)) if distro == consts.UBUNTU: mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', '{netns_int}:0'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2) elif distro == consts.CENTOS: mock_check_output.assert_called_with( ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, 'ifup', '{netns_int}'.format( netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2) mock_check_output.side_effect = [ 'unplug1', subprocess.CalledProcessError( 7, 'test', RANDOM_ERROR), subprocess.CalledProcessError( 7, 'test', RANDOM_ERROR)] m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): if distro == consts.UBUNTU: rv = self.ubuntu_app.post('/' + api_server.VERSION + "/plug/vip/2001:db8::2", content_type='application/json', data=jsonutils.dumps(subnet_info)) elif distro == consts.CENTOS: rv = self.centos_app.post('/' + api_server.VERSION + "/plug/vip/2001:db8::2", content_type='application/json', data=jsonutils.dumps(subnet_info)) self.assertEqual(500, rv.status_code) self.assertEqual( {'details': RANDOM_ERROR.decode('utf-8'), 'message': 'Error plugging VIP'}, jsonutils.loads(rv.data.decode('utf-8'))) def test_ubuntu_get_interface(self): self._test_get_interface(consts.UBUNTU) def test_centos_get_interface(self): self._test_get_interface(consts.CENTOS) @mock.patch('pyroute2.NetNS', create=True) def _test_get_interface(self, distro, mock_netns): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) netns_handle = mock_netns.return_value.__enter__.return_value interface_res = {'interface': 'eth0'} # Happy path netns_handle.get_addr.return_value = [{ 'index': 3, 'family': socket.AF_INET, 'attrs': [['IFA_ADDRESS', '203.0.113.2']]}] netns_handle.get_links.return_value = [{ 'attrs': [['IFLA_IFNAME', 'eth0']]}] if distro == consts.UBUNTU: rv = self.ubuntu_app.get('/' + api_server.VERSION + '/interface/203.0.113.2', data=jsonutils.dumps(interface_res), content_type='application/json') elif distro == consts.CENTOS: rv = self.centos_app.get('/' + api_server.VERSION + '/interface/203.0.113.2', data=jsonutils.dumps(interface_res), content_type='application/json') self.assertEqual(200, rv.status_code) # Happy path with IPv6 address normalization netns_handle.get_addr.return_value = [{ 'index': 3, 'family': socket.AF_INET6, 'attrs': [['IFA_ADDRESS', '0000:0000:0000:0000:0000:0000:0000:0001']]}] netns_handle.get_links.return_value = [{ 'attrs': [['IFLA_IFNAME', 'eth0']]}] if distro == consts.UBUNTU: rv = self.ubuntu_app.get('/' + api_server.VERSION + '/interface/::1', data=jsonutils.dumps(interface_res), content_type='application/json') elif distro == consts.CENTOS: rv = self.centos_app.get('/' + api_server.VERSION + '/interface/::1', data=jsonutils.dumps(interface_res), content_type='application/json') self.assertEqual(200, rv.status_code) # Nonexistent interface if distro == consts.UBUNTU: rv = self.ubuntu_app.get('/' + 
api_server.VERSION + '/interface/10.0.0.1', data=jsonutils.dumps(interface_res), content_type='application/json') elif distro == consts.CENTOS: rv = self.centos_app.get('/' + api_server.VERSION + '/interface/10.0.0.1', data=jsonutils.dumps(interface_res), content_type='application/json') self.assertEqual(404, rv.status_code) # Invalid IP address if distro == consts.UBUNTU: rv = self.ubuntu_app.get('/' + api_server.VERSION + '/interface/00:00:00:00:00:00', data=jsonutils.dumps(interface_res), content_type='application/json') elif distro == consts.CENTOS: rv = self.centos_app.get('/' + api_server.VERSION + '/interface/00:00:00:00:00:00', data=jsonutils.dumps(interface_res), content_type='application/json') self.assertEqual(400, rv.status_code) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSTEMD) def test_ubuntu_upload_keepalived_config_systemd(self, mock_init_system): with mock.patch('distro.id', return_value='ubuntu'): self._test_upload_keepalived_config( consts.INIT_SYSTEMD, consts.UBUNTU, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSTEMD) def test_centos_upload_keepalived_config_systemd(self, mock_init_system): with mock.patch('distro.id', return_value='centos'): self._test_upload_keepalived_config( consts.INIT_SYSTEMD, consts.CENTOS, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_UPSTART) def test_ubuntu_upload_keepalived_config_upstart(self, mock_init_system): self._test_upload_keepalived_config(consts.INIT_UPSTART, consts.UBUNTU, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_os_init_system', return_value=consts.INIT_SYSVINIT) def test_ubuntu_upload_keepalived_config_sysvinit(self, mock_init_system): self._test_upload_keepalived_config(consts.INIT_SYSVINIT, consts.UBUNTU, mock_init_system) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
'vrrp_check_script_update') @mock.patch('os.path.exists') @mock.patch('os.makedirs') @mock.patch('os.rename') @mock.patch('subprocess.check_output') @mock.patch('os.remove') def _test_upload_keepalived_config(self, init_system, distro, mock_init_system, mock_remove, mock_subprocess, mock_rename, mock_makedirs, mock_exists, mock_vrrp_check): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC mock_exists.return_value = True cfg_path = util.keepalived_cfg_path() m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/upload', data='test') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/upload', data='test') mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH mock_open.assert_called_with(cfg_path, flags, mode) mock_fdopen.assert_called_with(123, 'wb') self.assertEqual(200, rv.status_code) mock_vrrp_check.assert_called_once_with(None, consts.AMP_ACTION_START) mock_exists.return_value = False mock_vrrp_check.reset_mock() script_path = util.keepalived_check_script_path() m = self.useFixture(test_utils.OpenFixture(script_path)).mock_open with mock.patch('os.open') as mock_open, mock.patch.object( os, 'fdopen', m) as mock_fdopen: mock_open.return_value = 123 if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/upload', data='test') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/upload', data='test') mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) mock_open.assert_called_with(script_path, flags, mode) mock_fdopen.assert_called_with(123, 'w') self.assertEqual(200, rv.status_code) mock_vrrp_check.assert_called_once_with(None, consts.AMP_ACTION_START) def test_ubuntu_manage_service_vrrp(self): self._test_manage_service_vrrp(consts.UBUNTU) def test_centos_manage_service_vrrp(self): self._test_manage_service_vrrp(consts.CENTOS) @mock.patch('subprocess.check_output') def _test_manage_service_vrrp(self, distro, mock_check_output): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/start') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/start') self.assertEqual(202, rv.status_code) if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/restart') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/restart') self.assertEqual(400, rv.status_code) mock_check_output.side_effect = subprocess.CalledProcessError(1, 'blah!') if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/start') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/start') self.assertEqual(500, rv.status_code) def test_ubuntu_details(self): self._test_details(consts.UBUNTU) def test_centos_details(self): self._test_details(consts.CENTOS) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_udp_listeners', return_value=[]) @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo.' 
'_get_extend_body_from_udp_driver', return_value={ "keepalived_version": '1.1.11-1', "ipvsadm_version": '2.2.22-2' }) @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo.' '_count_udp_listener_processes', return_value=0) @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' 'AmphoraInfo._count_haproxy_processes') @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' 'AmphoraInfo._get_networks') @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' 'AmphoraInfo._load') @mock.patch('os.statvfs') @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' 'AmphoraInfo._cpu') @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' 'AmphoraInfo._get_meminfo') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'util.get_listeners') @mock.patch('socket.gethostname') @mock.patch('subprocess.check_output') def _test_details(self, distro, mock_subbprocess, mock_hostname, mock_get_listeners, mock_get_mem, mock_cpu, mock_statvfs, mock_load, mock_get_nets, mock_count_haproxy, mock_count_udp_listeners, mock_get_ext_from_udp_driver, mock_get_udp_listeners): self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) listener_id = uuidutils.generate_uuid() mock_get_listeners.return_value = [listener_id] mock_hostname.side_effect = ['test-host'] mock_subbprocess.side_effect = ['9.9.99-9'] MemTotal = random.randrange(0, 1000) MemFree = random.randrange(0, 1000) Buffers = random.randrange(0, 1000) Cached = random.randrange(0, 1000) SwapCached = random.randrange(0, 1000) Shmem = random.randrange(0, 1000) Slab = random.randrange(0, 1000) memory_dict = {'CmaFree': 0, 'Mapped': 38244, 'CommitLimit': 508048, 'MemFree': MemFree, 'AnonPages': 92384, 'DirectMap2M': 997376, 'SwapTotal': 0, 'NFS_Unstable': 0, 'SReclaimable': 34168, 'Writeback': 0, 'PageTables': 3760, 'Shmem': Shmem, 'Hugepagesize': 2048, 'MemAvailable': 738356, 'HardwareCorrupted': 0, 'SwapCached': SwapCached, 'Dirty': 80, 'Active': 237060, 'VmallocUsed': 0, 'Inactive(anon)': 2752, 'Slab': Slab, 'Cached': Cached, 'Inactive(file)': 149588, 'SUnreclaim': 17796, 'Mlocked': 3656, 'AnonHugePages': 6144, 'SwapFree': 0, 'Active(file)': 145512, 'CmaTotal': 0, 'Unevictable': 3656, 'KernelStack': 2368, 'Inactive': 152340, 'MemTotal': MemTotal, 'Bounce': 0, 'Committed_AS': 401884, 'Active(anon)': 91548, 'VmallocTotal': 34359738367, 'VmallocChunk': 0, 'DirectMap4k': 51072, 'WritebackTmp': 0, 'Buffers': Buffers} mock_get_mem.return_value = memory_dict cpu_total = random.randrange(0, 1000) cpu_user = random.randrange(0, 1000) cpu_system = random.randrange(0, 1000) cpu_softirq = random.randrange(0, 1000) cpu_dict = {'idle': '7168848', 'system': cpu_system, 'total': cpu_total, 'softirq': cpu_softirq, 'nice': '31', 'iowait': '902', 'user': cpu_user, 'irq': '0'} mock_cpu.return_value = cpu_dict f_blocks = random.randrange(0, 1000) f_bfree = random.randrange(0, 1000) f_frsize = random.randrange(0, 1000) f_bavail = random.randrange(0, 1000) stats = mock.MagicMock() stats.f_blocks = f_blocks stats.f_bfree = f_bfree stats.f_frsize = f_frsize stats.f_bavail = f_bavail disk_used = (f_blocks - f_bfree) * f_frsize disk_available = f_bavail * f_frsize mock_statvfs.return_value = stats load_1min = random.randrange(0, 10) load_5min = random.randrange(0, 10) load_15min = random.randrange(0, 10) mock_load.return_value = [load_1min, load_5min, load_15min] eth1_rx = random.randrange(0, 1000) eth1_tx = random.randrange(0, 1000) eth2_rx = random.randrange(0, 1000) eth2_tx = 
random.randrange(0, 1000) eth3_rx = random.randrange(0, 1000) eth3_tx = random.randrange(0, 1000) net_dict = {'eth2': {'network_rx': eth2_rx, 'network_tx': eth2_tx}, 'eth1': {'network_rx': eth1_rx, 'network_tx': eth1_tx}, 'eth3': {'network_rx': eth3_rx, 'network_tx': eth3_tx}} mock_get_nets.return_value = net_dict haproxy_count = random.randrange(0, 100) mock_count_haproxy.return_value = haproxy_count expected_dict = {'active': True, 'api_version': '1.0', 'cpu': {'soft_irq': cpu_softirq, 'system': cpu_system, 'total': cpu_total, 'user': cpu_user}, 'disk': {'available': disk_available, 'used': disk_used}, 'haproxy_count': haproxy_count, 'haproxy_version': '9.9.99-9', 'hostname': 'test-host', 'ipvsadm_version': u'2.2.22-2', 'keepalived_version': u'1.1.11-1', 'listeners': [listener_id], 'load': [load_1min, load_5min, load_15min], 'memory': {'buffers': Buffers, 'cached': Cached, 'free': MemFree, 'shared': Shmem, 'slab': Slab, 'swap_used': SwapCached, 'total': MemTotal}, 'networks': {'eth1': {'network_rx': eth1_rx, 'network_tx': eth1_tx}, 'eth2': {'network_rx': eth2_rx, 'network_tx': eth2_tx}, 'eth3': {'network_rx': eth3_rx, 'network_tx': eth3_tx}}, 'packages': {}, 'topology': consts.TOPOLOGY_SINGLE, 'topology_status': consts.TOPOLOGY_STATUS_OK, 'udp_listener_process_count': 0} if distro == consts.UBUNTU: rv = self.ubuntu_app.get('/' + api_server.VERSION + '/details') elif distro == consts.CENTOS: rv = self.centos_app.get('/' + api_server.VERSION + '/details') self.assertEqual(200, rv.status_code) self.assertEqual(expected_dict, jsonutils.loads(rv.data.decode('utf-8'))) def test_ubuntu_upload_config(self): self._test_upload_config(consts.UBUNTU) def test_centos_upload_config(self): self._test_upload_config(consts.CENTOS) @mock.patch('oslo_config.cfg.CONF.mutate_config_files') def _test_upload_config(self, distro, mock_mutate): server.BUFFER = 5 # test the while loop m = self.useFixture( test_utils.OpenFixture(AMP_AGENT_CONF_PATH)).mock_open with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/config', data='TestTest') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/config', data='TestTest') self.assertEqual(202, rv.status_code) self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) handle = m() handle.write.assert_any_call(octavia_utils.b('TestT')) handle.write.assert_any_call(octavia_utils.b('est')) mock_mutate.assert_called_once_with() # Test the exception handling mock_mutate.side_effect = Exception('boom') if distro == consts.UBUNTU: rv = self.ubuntu_app.put('/' + api_server.VERSION + '/config', data='TestTest') elif distro == consts.CENTOS: rv = self.centos_app.put('/' + api_server.VERSION + '/config', data='TestTest') self.assertEqual(500, rv.status_code) def test_version_discovery(self): with mock.patch('distro.id', return_value='ubuntu'): self.test_client = server.Server().app.test_client() expected_dict = {'api_version': api_server.VERSION} rv = self.test_client.get('/') self.assertEqual(200, rv.status_code) self.assertEqual(expected_dict, jsonutils.loads(rv.data.decode('utf-8')))
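# A minimal, self-contained sketch (an editor's illustration, not part of the
# upstream test suite): _test_details above expects the agent to report
# 'disk': {'used': ..., 'available': ...} derived from os.statvfs() exactly
# as the test computes disk_used and disk_available from the statvfs mock.
# The helper name disk_usage is hypothetical.
import os

def disk_usage(path='/'):
    stats = os.statvfs(path)
    # used = (total blocks - free blocks) * fragment size
    used = (stats.f_blocks - stats.f_bfree) * stats.f_frsize
    # available = blocks free to unprivileged users * fragment size
    available = stats.f_bavail * stats.f_frsize
    return {'used': used, 'available': available}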
octavia-6.2.2/octavia/tests/functional/api/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
octavia-6.2.2/octavia/tests/functional/api/drivers/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
octavia-6.2.2/octavia/tests/functional/api/drivers/driver_agent/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
octavia-6.2.2/octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py
# Copyright 2019 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import copy import multiprocessing from octavia_lib.api.drivers import driver_lib as octavia_driver_lib from octavia_lib.common import constants as lib_consts from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from stevedore import driver as stevedore_driver from octavia.api.drivers.driver_agent import driver_listener from octavia.common import config from octavia.common import constants from octavia.db import repositories from octavia.tests.common import sample_certs from octavia.tests.common import sample_data_models from octavia.tests.functional.db import base CONF = cfg.CONF class DriverAgentTest(base.OctaviaDBTestBase): def _process_cleanup(self): self.exit_event.set() self.status_listener_proc.join(5) self.stats_listener_proc.join(5) self.get_listener_proc.join(5) def setUp(self): status_socket_file = '/tmp/octavia-{}.status.sock'.format( uuidutils.generate_uuid()) stats_socket_file = '/tmp/octavia-{}.stats.sock'.format( uuidutils.generate_uuid()) get_socket_file = '/tmp/octavia-{}.get.sock'.format( uuidutils.generate_uuid()) sqlite_db_file = '/tmp/octavia-{}.sqlite.db'.format( uuidutils.generate_uuid()) sqlite_db_connection = 'sqlite:///{}'.format(sqlite_db_file) # Note that because the driver agent is a multi-process # agent we must use a sqlite file rather than an # in-memory instance. super(DriverAgentTest, self).setUp( connection_string=sqlite_db_connection) conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) conf.config(group="driver_agent", status_socket_path=status_socket_file) conf.config(group="driver_agent", stats_socket_path=stats_socket_file) conf.config(group="driver_agent", status_request_timeout=1) conf.config(group="driver_agent", get_socket_path=get_socket_file) conf.config(group="certificates", cert_manager='local_cert_manager') conf.config(group="certificates", storage_path='/tmp') # Set up the certificate cert_manager = stevedore_driver.DriverManager( namespace='octavia.cert_manager', name=CONF.certificates.cert_manager, invoke_on_load=True, ).driver self.cert_ref = cert_manager.store_cert( None, sample_certs.X509_CERT, sample_certs.X509_CERT_KEY_ENCRYPTED, sample_certs.X509_IMDS, private_key_passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE) self.addCleanup(cert_manager.delete_cert, None, self.cert_ref) self.exit_event = multiprocessing.Event() self.status_listener_proc = multiprocessing.Process( name='status_listener', target=driver_listener.status_listener, args=(self.exit_event,)) # TODO(johnsom) Remove once https://bugs.python.org/issue6721 # is resolved. self.status_listener_proc.daemon = True self.status_listener_proc.start() self.stats_listener_proc = multiprocessing.Process( name='stats_listener', target=driver_listener.stats_listener, args=(self.exit_event,)) # TODO(johnsom) Remove once https://bugs.python.org/issue6721 # is resolved. self.stats_listener_proc.daemon = True self.stats_listener_proc.start() self.get_listener_proc = multiprocessing.Process( name='get_listener', target=driver_listener.get_listener, args=(self.exit_event,)) # TODO(johnsom) Remove once https://bugs.python.org/issue6721 # is resolved. 
self.get_listener_proc.daemon = True self.get_listener_proc.start() self.addCleanup(self._process_cleanup) self.driver_lib = octavia_driver_lib.DriverLibrary( status_socket=status_socket_file, stats_socket=stats_socket_file, get_socket=get_socket_file) self.sample_data = sample_data_models.SampleDriverDataModels() self.repos = repositories.Repositories() # Create the full load balancer in the database self.tls_container_dict = { lib_consts.CERTIFICATE: sample_certs.X509_CERT.decode('utf-8'), lib_consts.ID: sample_certs.X509_CERT_SHA1, lib_consts.INTERMEDIATES: [ i.decode('utf-8') for i in sample_certs.X509_IMDS_LIST], lib_consts.PASSPHRASE: None, lib_consts.PRIMARY_CN: sample_certs.X509_CERT_CN, lib_consts.PRIVATE_KEY: sample_certs.X509_CERT_KEY.decode('utf-8')} # ### Create load balancer self.repos.flavor_profile.create( self.session, id=self.sample_data.flavor_profile_id, provider_name=constants.AMPHORA, flavor_data='{"something": "else"}') self.repos.flavor.create( self.session, id=self.sample_data.flavor_id, enabled=True, flavor_profile_id=self.sample_data.flavor_profile_id) self.repos.create_load_balancer_and_vip( self.session, self.sample_data.test_loadbalancer1_dict, self.sample_data.test_vip_dict) # ### Create Pool pool_dict = copy.deepcopy(self.sample_data.test_pool1_dict) pool_dict[constants.LOAD_BALANCER_ID] = self.sample_data.lb_id # Use a live certificate pool_dict[constants.TLS_CERTIFICATE_ID] = self.cert_ref pool_dict[constants.CA_TLS_CERTIFICATE_ID] = self.cert_ref pool_dict[constants.CRL_CONTAINER_ID] = self.cert_ref # Remove items that are linked in the DB del pool_dict[lib_consts.MEMBERS] del pool_dict[constants.HEALTH_MONITOR] del pool_dict[lib_consts.SESSION_PERSISTENCE] del pool_dict[lib_consts.LISTENERS] del pool_dict[lib_consts.L7POLICIES] self.repos.pool.create(self.session, **pool_dict) self.repos.session_persistence.create( self.session, pool_id=self.sample_data.pool1_id, type=lib_consts.SESSION_PERSISTENCE_SOURCE_IP) self.provider_pool_dict = copy.deepcopy( self.sample_data.provider_pool1_dict) self.provider_pool_dict[ constants.LISTENER_ID] = self.sample_data.listener1_id # Fix for render_unsets = True self.provider_pool_dict[ lib_consts.SESSION_PERSISTENCE][lib_consts.COOKIE_NAME] = None self.provider_pool_dict[lib_consts.SESSION_PERSISTENCE][ lib_consts.PERSISTENCE_GRANULARITY] = None self.provider_pool_dict[lib_consts.SESSION_PERSISTENCE][ lib_consts.PERSISTENCE_TIMEOUT] = None # Use a live certificate self.provider_pool_dict[ lib_consts.TLS_CONTAINER_DATA] = self.tls_container_dict self.provider_pool_dict[lib_consts.TLS_CONTAINER_REF] = self.cert_ref self.provider_pool_dict[ lib_consts.CA_TLS_CONTAINER_DATA] = ( sample_certs.X509_CERT.decode('utf-8')) self.provider_pool_dict[ lib_consts.CA_TLS_CONTAINER_REF] = self.cert_ref self.provider_pool_dict[ lib_consts.CRL_CONTAINER_DATA] = ( sample_certs.X509_CERT.decode('utf-8')) self.provider_pool_dict[lib_consts.CRL_CONTAINER_REF] = self.cert_ref # ### Create Member member_dict = copy.deepcopy(self.sample_data.test_member1_dict) self.repos.member.create(self.session, **member_dict) self.provider_pool_dict[lib_consts.MEMBERS] = [ self.sample_data.provider_member1_dict] # ### Create Health Monitor hm_dict = copy.deepcopy(self.sample_data.test_hm1_dict) self.repos.health_monitor.create(self.session, **hm_dict) self.provider_pool_dict[ lib_consts.HEALTHMONITOR] = self.sample_data.provider_hm1_dict # ### Create Listener listener_dict = copy.deepcopy(self.sample_data.test_listener1_dict) 
listener_dict[lib_consts.DEFAULT_POOL_ID] = self.sample_data.pool1_id # Remove items that are linked in the DB del listener_dict[lib_consts.L7POLICIES] del listener_dict[lib_consts.DEFAULT_POOL] del listener_dict[constants.SNI_CONTAINERS] # Use a live certificate listener_dict[constants.TLS_CERTIFICATE_ID] = self.cert_ref listener_dict[constants.CLIENT_CA_TLS_CERTIFICATE_ID] = self.cert_ref listener_dict[constants.CLIENT_CRL_CONTAINER_ID] = self.cert_ref self.repos.listener.create(self.session, **listener_dict) self.repos.sni.create(self.session, listener_id=self.sample_data.listener1_id, tls_container_id=self.cert_ref, position=1) # Add our live certs in that differ from the fake certs in sample_data self.provider_listener_dict = copy.deepcopy( self.sample_data.provider_listener1_dict) self.provider_listener_dict[ lib_consts.DEFAULT_TLS_CONTAINER_REF] = self.cert_ref self.provider_listener_dict[ lib_consts.DEFAULT_TLS_CONTAINER_DATA] = self.tls_container_dict self.provider_listener_dict[ lib_consts.CLIENT_CA_TLS_CONTAINER_REF] = self.cert_ref self.provider_listener_dict[ lib_consts.CLIENT_CA_TLS_CONTAINER_DATA] = ( sample_certs.X509_CERT.decode('utf-8')) self.provider_listener_dict[ lib_consts.CLIENT_CRL_CONTAINER_REF] = self.cert_ref self.provider_listener_dict[ lib_consts.CLIENT_CRL_CONTAINER_DATA] = ( sample_certs.X509_CERT.decode('utf-8')) self.provider_listener_dict[ lib_consts.SNI_CONTAINER_DATA] = [self.tls_container_dict] self.provider_listener_dict[ lib_consts.SNI_CONTAINER_REFS] = [self.cert_ref] self.provider_listener_dict[ lib_consts.DEFAULT_POOL] = self.provider_pool_dict self.provider_listener_dict[ lib_consts.DEFAULT_POOL_ID] = self.sample_data.pool1_id self.provider_listener_dict[lib_consts.L7POLICIES] = [ self.sample_data.provider_l7policy1_dict] # ### Create L7 Policy l7policy_dict = copy.deepcopy(self.sample_data.test_l7policy1_dict) del l7policy_dict[lib_consts.L7RULES] self.repos.l7policy.create(self.session, **l7policy_dict) # ### Create L7 Rules l7rule_dict = copy.deepcopy(self.sample_data.test_l7rule1_dict) self.repos.l7rule.create(self.session, **l7rule_dict) l7rule2_dict = copy.deepcopy(self.sample_data.test_l7rule2_dict) self.repos.l7rule.create(self.session, **l7rule2_dict) self.provider_lb_dict = copy.deepcopy( self.sample_data.provider_loadbalancer_tree_dict) self.provider_lb_dict[lib_consts.POOLS] = [self.provider_pool_dict] self.provider_lb_dict[ lib_consts.LISTENERS] = [self.provider_listener_dict] def test_get_loadbalancer(self): result = self.driver_lib.get_loadbalancer(self.sample_data.lb_id) self.assertEqual(self.provider_lb_dict, result.to_dict(render_unsets=True, recurse=True)) # Test non-existent load balancer result = self.driver_lib.get_loadbalancer('bogus') self.assertIsNone(result) def test_get_listener(self): result = self.driver_lib.get_listener(self.sample_data.listener1_id) # We need to recurse here to pick up the SNI data self.assertEqual(self.provider_listener_dict, result.to_dict(render_unsets=True, recurse=True)) # Test non-existent listener result = self.driver_lib.get_listener('bogus') self.assertIsNone(result) def test_get_pool(self): result = self.driver_lib.get_pool(self.sample_data.pool1_id) self.assertEqual(self.provider_pool_dict, result.to_dict(render_unsets=True, recurse=True)) # Test non-existent pool result = self.driver_lib.get_pool('bogus') self.assertIsNone(result) def test_get_member(self): result = self.driver_lib.get_member(self.sample_data.member1_id) self.assertEqual(self.sample_data.provider_member1_dict, 
result.to_dict(render_unsets=True)) # Test non-existent member result = self.driver_lib.get_member('bogus') self.assertIsNone(result) def test_get_healthmonitor(self): result = self.driver_lib.get_healthmonitor(self.sample_data.hm1_id) self.assertEqual(self.sample_data.provider_hm1_dict, result.to_dict(render_unsets=True)) # Test non-existent health monitor result = self.driver_lib.get_healthmonitor('bogus') self.assertIsNone(result) def test_get_l7policy(self): result = self.driver_lib.get_l7policy(self.sample_data.l7policy1_id) self.assertEqual(self.sample_data.provider_l7policy1_dict, result.to_dict(render_unsets=True, recurse=True)) # Test non-existent L7 policy result = self.driver_lib.get_l7policy('bogus') self.assertIsNone(result) def test_get_l7rule(self): result = self.driver_lib.get_l7rule(self.sample_data.l7rule1_id) self.assertEqual(self.sample_data.provider_l7rule1_dict, result.to_dict(render_unsets=True)) # Test non-existent L7 rule result = self.driver_lib.get_l7rule('bogus') self.assertIsNone(result)
octavia-6.2.2/octavia/tests/functional/api/test_healthcheck.py
# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture import pecan from octavia.api import config as pconfig from octavia.api.healthcheck import healthcheck_plugins from octavia.tests.functional.db import base as base_db_test class TestHealthCheck(base_db_test.OctaviaDBTestBase): def setUp(self): super(TestHealthCheck, self).setUp() # We need to define these early as they are late loaded in oslo # middleware and our configuration overrides would not apply. # Note: These must match exactly the option definitions in # oslo.middleware healthcheck! If not you will get duplicate option # errors. healthcheck_opts = [ cfg.BoolOpt( 'detailed', default=False, help='Show more detailed information as part of the response. ' 'Security note: Enabling this option may expose ' 'sensitive details about the service being monitored. ' 'Be sure to verify that it will not violate your ' 'security policies.'), cfg.ListOpt( 'backends', default=[], help='Additional backends that can perform health checks and ' 'report that information back as part of a request.'), ] cfg.CONF.register_opts(healthcheck_opts, group='healthcheck') self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group='healthcheck', backends=['octavia_db_check']) self.conf.config(group='api_settings', healthcheck_refresh_interval=5) self.UNAVAILABLE = (healthcheck_plugins.OctaviaDBHealthcheck.
UNAVAILABLE_REASON) def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) def _make_app(self): # Note: we need to set argv=() to stop the wsgi setup_app from # pulling in the testing tool sys.argv return pecan.testing.load_test_app({'app': pconfig.app, 'wsme': pconfig.wsme}, argv=()) def _get_enabled_app(self): self.conf.config(group='api_settings', healthcheck_enabled=True) return self._make_app() def _get_disabled_app(self): self.conf.config(group='api_settings', healthcheck_enabled=False) return self._make_app() def _get(self, app, path, params=None, headers=None, status=200, expect_errors=False): response = app.get(path, params=params, headers=headers, status=status, expect_errors=expect_errors) return response def _head(self, app, path, headers=None, status=204, expect_errors=False): response = app.head(path, headers=headers, status=status, expect_errors=expect_errors) return response def _post(self, app, path, body, headers=None, status=201, expect_errors=False): response = app.post_json(path, params=body, headers=headers, status=status, expect_errors=expect_errors) return response def _put(self, app, path, body, headers=None, status=200, expect_errors=False): response = app.put_json(path, params=body, headers=headers, status=status, expect_errors=expect_errors) return response def _delete(self, app, path, params=None, headers=None, status=204, expect_errors=False): response = app.delete(path, headers=headers, status=status, expect_errors=expect_errors) return response def test_healthcheck_get_text(self): self.conf.config(group='healthcheck', detailed=False) response = self._get(self._get_enabled_app(), '/healthcheck') self.assertEqual(200, response.status_code) self.assertEqual('OK', response.text) # Note: For whatever reason, detailed=True text has no additional info def test_healthcheck_get_text_detailed(self): self.conf.config(group='healthcheck', detailed=True) response = self._get(self._get_enabled_app(), '/healthcheck') self.assertEqual(200, response.status_code) self.assertEqual('OK', response.text) def test_healthcheck_get_json(self): self.conf.config(group='healthcheck', detailed=False) response = self._get(self._get_enabled_app(), '/healthcheck', headers={'Accept': 'application/json'}) self.assertEqual(200, response.status_code) self.assertFalse(response.json['detailed']) self.assertEqual(['OK'], response.json['reasons']) def test_healthcheck_get_json_detailed(self): self.conf.config(group='healthcheck', detailed=True) response = self._get(self._get_enabled_app(), '/healthcheck', headers={'Accept': 'application/json'}) self.assertEqual(200, response.status_code) self.assertTrue(response.json['detailed']) self.assertEqual('OK', response.json['reasons'][0]['reason']) self.assertTrue(response.json['gc']) def test_healthcheck_get_html(self): self.conf.config(group='healthcheck', detailed=False) response = self._get(self._get_enabled_app(), '/healthcheck', headers={'Accept': 'text/html'}) self.assertEqual(200, response.status_code) self.assertIn('OK', response.text) def test_healthcheck_get_html_detailed(self): self.conf.config(group='healthcheck', detailed=True) response = self._get(self._get_enabled_app(), '/healthcheck', headers={'Accept': 'text/html'}) self.assertEqual(200, response.status_code) self.assertIn('OK', response.text) self.assertIn('Garbage collector', response.text) def test_healthcheck_get_text_cached(self): self.conf.config(group='healthcheck', detailed=False) app = self._get_enabled_app() for i in range(10): response =
self._get(app, '/healthcheck') self.assertEqual(200, response.status_code) self.assertEqual('OK', response.text) def test_healthcheck_disabled_get(self): self._get(self._get_disabled_app(), '/healthcheck', status=404) def test_healthcheck_head(self): response = self._head(self._get_enabled_app(), '/healthcheck') self.assertEqual(204, response.status_code) def test_healthcheck_disabled_head(self): self._head(self._get_disabled_app(), '/healthcheck', status=404) # These should be denied by the API def test_healthcheck_post(self): self._post(self._get_enabled_app(), '/healthcheck', {'foo': 'bar'}, status=405) def test_healthcheck_put(self): self._put(self._get_enabled_app(), '/healthcheck', {'foo': 'bar'}, status=405) def test_healthcheck_delete(self): self._delete(self._get_enabled_app(), '/healthcheck', status=405) @mock.patch('octavia.db.api.get_session') def test_healthcheck_get_failed(self, mock_get_session): mock_session = mock.MagicMock() mock_session.execute.side_effect = [Exception('boom')] mock_get_session.return_value = mock_session response = self._get(self._get_enabled_app(), '/healthcheck', status=503) self.assertEqual(503, response.status_code) self.assertEqual(self.UNAVAILABLE, response.text) @mock.patch('octavia.db.api.get_session') def test_healthcheck_head_failed(self, mock_get_session): mock_session = mock.MagicMock() mock_session.execute.side_effect = [Exception('boom')] mock_get_session.return_value = mock_session response = self._head(self._get_enabled_app(), '/healthcheck', status=503) self.assertEqual(503, response.status_code) @mock.patch('octavia.db.healthcheck.check_database_connection', side_effect=Exception('boom')) def test_healthcheck_get_failed_check(self, mock_db_check): response = self._get(self._get_enabled_app(), '/healthcheck', status=503) self.assertEqual(503, response.status_code) self.assertEqual(self.UNAVAILABLE, response.text) @mock.patch('octavia.db.api.get_session') def test_healthcheck_get_json_failed(self, mock_get_session): self.conf.config(group='healthcheck', detailed=False) mock_session = mock.MagicMock() mock_session.execute.side_effect = [Exception('boom')] mock_get_session.return_value = mock_session response = self._get(self._get_enabled_app(), '/healthcheck', headers={'Accept': 'application/json'}, status=503) self.assertEqual(503, response.status_code) self.assertFalse(response.json['detailed']) self.assertEqual([self.UNAVAILABLE], response.json['reasons']) @mock.patch('octavia.db.api.get_session') def test_healthcheck_get_json_detailed_failed(self, mock_get_session): self.conf.config(group='healthcheck', detailed=True) mock_session = mock.MagicMock() mock_session.execute.side_effect = [Exception('boom')] mock_get_session.return_value = mock_session response = self._get(self._get_enabled_app(), '/healthcheck', headers={'Accept': 'application/json'}, status=503) self.assertEqual(503, response.status_code) self.assertTrue(response.json['detailed']) self.assertEqual(self.UNAVAILABLE, response.json['reasons'][0]['reason']) self.assertIn('boom', response.json['reasons'][0]['details']) @mock.patch('octavia.db.api.get_session') def test_healthcheck_get_html_failed(self, mock_get_session): self.conf.config(group='healthcheck', detailed=False) mock_session = mock.MagicMock() mock_session.execute.side_effect = [Exception('boom')] mock_get_session.return_value = mock_session response = self._get(self._get_enabled_app(), '/healthcheck', headers={'Accept': 'text/html'}, status=503) self.assertEqual(503, response.status_code) 
self.assertIn(self.UNAVAILABLE, response.text) @mock.patch('octavia.db.api.get_session') def test_healthcheck_get_html_detailed_failed(self, mock_get_session): self.conf.config(group='healthcheck', detailed=True) mock_session = mock.MagicMock() mock_session.execute.side_effect = [Exception('boom')] mock_get_session.return_value = mock_session response = self._get(self._get_enabled_app(), '/healthcheck', headers={'Accept': 'text/html'}, status=503) self.assertEqual(503, response.status_code) self.assertIn(self.UNAVAILABLE, response.text) self.assertIn('boom', response.text) self.assertIn('Garbage collector', response.text) # Note: For whatever reason, detailed=True text has no additional info @mock.patch('octavia.db.api.get_session') def test_healthcheck_get_text_detailed_failed(self, mock_get_session): self.conf.config(group='healthcheck', detailed=True) mock_session = mock.MagicMock() mock_session.execute.side_effect = [Exception('boom')] mock_get_session.return_value = mock_session response = self._get(self._get_enabled_app(), '/healthcheck', status=503) self.assertEqual(503, response.status_code) self.assertEqual(self.UNAVAILABLE, response.text)
octavia-6.2.2/octavia/tests/functional/api/test_root_controller.py
# Copyright 2017 GoDaddy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
from oslo_config import cfg from oslo_config import fixture as oslo_fixture import pecan.testing from octavia.api import config as pconfig from octavia.common import constants from octavia.tests.functional.db import base as base_db_test class TestRootController(base_db_test.OctaviaDBTestBase): def get(self, app, path, params=None, headers=None, status=200, expect_errors=False): response = app.get( path, params=params, headers=headers, status=status, expect_errors=expect_errors) return response def setUp(self): super(TestRootController, self).setUp() self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH) def _get_versions_with_config(self): # Note: we need to set argv=() to stop the wsgi setup_app from # pulling in the testing tool sys.argv app = pecan.testing.load_test_app({'app': pconfig.app, 'wsme': pconfig.wsme}, argv=()) return self.get(app=app, path='/').json.get('versions', None) def test_api_versions(self): versions = self._get_versions_with_config() version_ids = tuple(v.get('id') for v in versions) self.assertEqual(17, len(version_ids)) self.assertIn('v2.0', version_ids) self.assertIn('v2.1', version_ids) self.assertIn('v2.2', version_ids) self.assertIn('v2.3', version_ids) self.assertIn('v2.4', version_ids) self.assertIn('v2.5', version_ids) self.assertIn('v2.6', version_ids) self.assertIn('v2.7', version_ids) self.assertIn('v2.8', version_ids) self.assertIn('v2.9', version_ids) self.assertIn('v2.10', version_ids) self.assertIn('v2.11', version_ids) self.assertIn('v2.12', version_ids) self.assertIn('v2.13', version_ids) self.assertIn('v2.14', version_ids) self.assertIn('v2.15', version_ids) self.assertIn('v2.16', version_ids) # Each version should have a 'self' 'href' to the API version URL # [{u'rel': u'self', u'href': u'http://localhost/v2'}] # Validate that the URL exists in the response version_url = 'http://localhost/v2' for version in versions: links = version['links'] # Note, there may be other links present, this test is for 'self' version_link = [link for link in links if link['rel'] == 'self'] self.assertEqual(version_url, version_link[0]['href'])
octavia-6.2.2/octavia/tests/functional/api/v2/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
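# A hedged illustration (editor's sketch, not an upstream helper): the
# link-filtering pattern test_api_versions uses above to validate each API
# version's 'self' href; the name self_href is hypothetical.
def self_href(version):
    # Keep only the 'self' link and return its href, as the test's
    # list comprehension does.
    links = [link for link in version['links'] if link['rel'] == 'self']
    return links[0]['href']

# Example: self_href({'links': [{'rel': 'self',
#                                'href': 'http://localhost/v2'}]})
# returns 'http://localhost/v2'.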
octavia-6.2.2/octavia/tests/functional/api/v2/base.py
# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils import pecan import pecan.testing from octavia.api import config as pconfig from octavia.common import constants from octavia.common import exceptions from octavia.db import api as db_api from octavia.db import repositories from octavia.tests.functional.db import base as base_db_test class BaseAPITest(base_db_test.OctaviaDBTestBase): BASE_PATH = '/v2' BASE_PATH_v2_0 = '/v2.0' # /lbaas/flavors FLAVORS_PATH = '/flavors' FLAVOR_PATH = FLAVORS_PATH + '/{flavor_id}' # /lbaas/flavorprofiles FPS_PATH = '/flavorprofiles' FP_PATH = FPS_PATH + '/{fp_id}' # /lbaas/availabilityzones AZS_PATH = '/availabilityzones' AZ_PATH = AZS_PATH + '/{az_name}' # /lbaas/availabilityzoneprofiles AZPS_PATH = '/availabilityzoneprofiles' AZP_PATH = AZPS_PATH + '/{azp_id}' # /lbaas/loadbalancers LBS_PATH = '/lbaas/loadbalancers' LB_PATH = LBS_PATH + '/{lb_id}' LB_STATUS_PATH = LB_PATH + '/statuses' LB_STATS_PATH = LB_PATH + '/stats' # /lbaas/listeners/ LISTENERS_PATH = '/lbaas/listeners' LISTENER_PATH = LISTENERS_PATH + '/{listener_id}' LISTENER_STATS_PATH = LISTENER_PATH + '/stats' # /lbaas/pools POOLS_PATH = '/lbaas/pools' POOL_PATH = POOLS_PATH + '/{pool_id}' # /lbaas/pools/{pool_id}/members MEMBERS_PATH = POOL_PATH + '/members' MEMBER_PATH = MEMBERS_PATH + '/{member_id}' # /lbaas/healthmonitors HMS_PATH = '/lbaas/healthmonitors' HM_PATH = HMS_PATH + '/{healthmonitor_id}' # /lbaas/l7policies L7POLICIES_PATH = '/lbaas/l7policies' L7POLICY_PATH = L7POLICIES_PATH + '/{l7policy_id}' L7RULES_PATH = L7POLICY_PATH + '/rules' L7RULE_PATH = L7RULES_PATH + '/{l7rule_id}' QUOTAS_PATH = '/lbaas/quotas' QUOTA_PATH = QUOTAS_PATH + '/{project_id}' QUOTA_DEFAULT_PATH = QUOTAS_PATH + '/{project_id}/default' AMPHORAE_PATH = '/octavia/amphorae' AMPHORA_PATH = AMPHORAE_PATH + '/{amphora_id}' AMPHORA_FAILOVER_PATH = AMPHORA_PATH + '/failover' AMPHORA_STATS_PATH = AMPHORA_PATH + '/stats' AMPHORA_CONFIG_PATH = AMPHORA_PATH + '/config' PROVIDERS_PATH = '/lbaas/providers' FLAVOR_CAPABILITIES_PATH = ( PROVIDERS_PATH + '/{provider}/flavor_capabilities') AVAILABILITY_ZONE_CAPABILITIES_PATH = ( PROVIDERS_PATH + '/{provider}/availability_zone_capabilities') NOT_AUTHORIZED_BODY = { 'debuginfo': None, 'faultcode': 'Client', 'faultstring': 'Policy does not allow this request to be performed.'} def setUp(self): super(BaseAPITest, self).setUp() self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group="controller_worker", network_driver='network_noop_driver') self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH) self.conf.config(group='api_settings',
default_provider_driver='noop_driver') # We still need to test with the "octavia" alias self.conf.config(group='api_settings', enabled_provider_drivers={ 'amphora': 'Amp driver.', 'noop_driver': 'NoOp driver.', 'noop_driver-alt': 'NoOp driver alt alias.', 'octavia': 'Octavia driver.'}) self.lb_repo = repositories.LoadBalancerRepository() self.listener_repo = repositories.ListenerRepository() self.listener_stats_repo = repositories.ListenerStatisticsRepository() self.pool_repo = repositories.PoolRepository() self.member_repo = repositories.MemberRepository() self.l7policy_repo = repositories.L7PolicyRepository() self.l7rule_repo = repositories.L7RuleRepository() self.health_monitor_repo = repositories.HealthMonitorRepository() self.amphora_repo = repositories.AmphoraRepository() self.flavor_repo = repositories.FlavorRepository() self.flavor_profile_repo = repositories.FlavorProfileRepository() patcher2 = mock.patch('octavia.certificates.manager.barbican.' 'BarbicanCertManager') self.cert_manager_mock = patcher2.start() self.app = self._make_app() self.project_id = uuidutils.generate_uuid() def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) def start_quota_mock(self, object_type): def mock_quota(session, lock_session, _class, project_id, count=1): return _class == object_type check_quota_met_true_mock = mock.patch( 'octavia.db.repositories.Repositories.check_quota_met', side_effect=mock_quota) check_quota_met_true_mock.start() self.addCleanup(check_quota_met_true_mock.stop) def _make_app(self): # Note: we need to set argv=() to stop the wsgi setup_app from # pulling in the testing tool sys.argv return pecan.testing.load_test_app({'app': pconfig.app, 'wsme': pconfig.wsme}, argv=()) def _get_full_path(self, path): return ''.join([self.BASE_PATH, path]) def _get_full_path_v2_0(self, path): return ''.join([self.BASE_PATH_v2_0, path]) def _build_body(self, json): return {self.root_tag: json} def delete(self, path, headers=None, params=None, status=204, expect_errors=False): headers = headers or {} params = params or {} full_path = self._get_full_path(path) param_string = "" for k, v in params.items(): param_string += "{key}={value}&".format(key=k, value=v) if param_string: full_path = "{path}?{params}".format( path=full_path, params=param_string.rstrip("&")) response = self.app.delete(full_path, headers=headers, status=status, expect_errors=expect_errors) return response def post(self, path, body, headers=None, status=201, expect_errors=False, use_v2_0=False): headers = headers or {} if use_v2_0: full_path = self._get_full_path_v2_0(path) else: full_path = self._get_full_path(path) response = self.app.post_json(full_path, params=body, headers=headers, status=status, expect_errors=expect_errors) return response def put(self, path, body, headers=None, status=200, expect_errors=False): headers = headers or {} full_path = self._get_full_path(path) response = self.app.put_json(full_path, params=body, headers=headers, status=status, expect_errors=expect_errors) return response def get(self, path, params=None, headers=None, status=200, expect_errors=False): full_path = self._get_full_path(path) response = self.app.get(full_path, params=params, headers=headers, status=status, expect_errors=expect_errors) return response def create_flavor(self, name, description, flavor_profile_id, enabled): req_dict = {'name': name, 'description': description, 'flavor_profile_id': flavor_profile_id, 'enabled': enabled} body = {'flavor': req_dict} response = self.post(self.FLAVORS_PATH,
body) return response.json.get('flavor') def create_flavor_profile(self, name, provider_name, flavor_data): req_dict = {'name': name, 'provider_name': provider_name, constants.FLAVOR_DATA: flavor_data} body = {'flavorprofile': req_dict} response = self.post(self.FPS_PATH, body) return response.json.get('flavorprofile') def create_availability_zone(self, name, description, availability_zone_profile_id, enabled): req_dict = { 'name': name, 'description': description, 'availability_zone_profile_id': availability_zone_profile_id, 'enabled': enabled} body = {'availability_zone': req_dict} response = self.post(self.AZS_PATH, body) return response.json.get('availability_zone') def create_availability_zone_profile(self, name, provider_name, availability_zone_data): req_dict = {'name': name, 'provider_name': provider_name, constants.AVAILABILITY_ZONE_DATA: availability_zone_data} body = {'availability_zone_profile': req_dict} response = self.post(self.AZPS_PATH, body) return response.json.get('availability_zone_profile') def create_load_balancer(self, vip_subnet_id, **optionals): req_dict = {'vip_subnet_id': vip_subnet_id, 'project_id': self.project_id} req_dict.update(optionals) body = {'loadbalancer': req_dict} response = self.post(self.LBS_PATH, body) return response.json def create_listener(self, protocol, protocol_port, lb_id, status=None, **optionals): req_dict = {'protocol': protocol, 'protocol_port': protocol_port, 'loadbalancer_id': lb_id} req_dict.update(optionals) path = self.LISTENERS_PATH body = {'listener': req_dict} status = {'status': status} if status else {} response = self.post(path, body, **status) return response.json def create_listener_stats(self, listener_id, amphora_id): db_ls = self.listener_stats_repo.create( db_api.get_session(), listener_id=listener_id, amphora_id=amphora_id, bytes_in=0, bytes_out=0, active_connections=0, total_connections=0, request_errors=0) return db_ls.to_dict() def create_listener_stats_dynamic(self, listener_id, amphora_id, bytes_in=0, bytes_out=0, active_connections=0, total_connections=0, request_errors=0): db_ls = self.listener_stats_repo.create( db_api.get_session(), listener_id=listener_id, amphora_id=amphora_id, bytes_in=bytes_in, bytes_out=bytes_out, active_connections=active_connections, total_connections=total_connections, request_errors=request_errors) return db_ls.to_dict() def create_amphora(self, amphora_id, loadbalancer_id, **optionals): # We need to default these values in the request. 
opts = {'compute_id': uuidutils.generate_uuid(), 'status': constants.ACTIVE} opts.update(optionals) amphora = self.amphora_repo.create( self.session, id=amphora_id, load_balancer_id=loadbalancer_id, **opts) return amphora def get_listener(self, listener_id): path = self.LISTENER_PATH.format(listener_id=listener_id) response = self.get(path) return response.json def create_pool_with_listener(self, lb_id, listener_id, protocol, lb_algorithm, **optionals): req_dict = {'loadbalancer_id': lb_id, 'listener_id': listener_id, 'protocol': protocol, 'lb_algorithm': lb_algorithm} req_dict.update(optionals) body = {'pool': req_dict} path = self.POOLS_PATH response = self.post(path, body) return response.json def create_pool(self, lb_id, protocol, lb_algorithm, status=None, **optionals): req_dict = {'loadbalancer_id': lb_id, 'protocol': protocol, 'lb_algorithm': lb_algorithm} req_dict.update(optionals) body = {'pool': req_dict} path = self.POOLS_PATH status = {'status': status} if status else {} response = self.post(path, body, **status) return response.json def create_member(self, pool_id, address, protocol_port, status=None, **optionals): req_dict = {'address': address, 'protocol_port': protocol_port} req_dict.update(optionals) body = {'member': req_dict} path = self.MEMBERS_PATH.format(pool_id=pool_id) status = {'status': status} if status else {} response = self.post(path, body, **status) return response.json def create_health_monitor(self, pool_id, type, delay, timeout, max_retries_down, max_retries, status=None, **optionals): req_dict = {'pool_id': pool_id, 'type': type, 'delay': delay, 'timeout': timeout, 'max_retries_down': max_retries_down, 'max_retries': max_retries} req_dict.update(optionals) body = {'healthmonitor': req_dict} path = self.HMS_PATH status = {'status': status} if status else {} response = self.post(path, body, **status) return response.json def create_l7policy(self, listener_id, action, status=None, **optionals): req_dict = {'listener_id': listener_id, 'action': action} req_dict.update(optionals) body = {'l7policy': req_dict} path = self.L7POLICIES_PATH status = {'status': status} if status else {} response = self.post(path, body, **status) return response.json def create_l7rule(self, l7policy_id, type, compare_type, value, status=None, **optionals): req_dict = {'type': type, 'compare_type': compare_type, 'value': value} req_dict.update(optionals) body = {'rule': req_dict} path = self.L7RULES_PATH.format(l7policy_id=l7policy_id) status = {'status': status} if status else {} response = self.post(path, body, **status) return response.json def create_quota(self, project_id=-1, lb_quota=None, listener_quota=None, pool_quota=None, hm_quota=None, member_quota=None): if project_id == -1: project_id = self.project_id req_dict = {'load_balancer': lb_quota, 'listener': listener_quota, 'pool': pool_quota, 'health_monitor': hm_quota, 'member': member_quota} req_dict = {k: v for k, v in req_dict.items() if v is not None} body = {'quota': req_dict} path = self.QUOTA_PATH.format(project_id=project_id) response = self.put(path, body, status=202) return response.json # NOTE: This method should be used cautiously. On load balancers with a # significant amount of children resources, it will update the status for # each and every resource and thus taking a lot of DB time. 
def _set_lb_and_children_statuses(self, lb_id, prov_status, op_status, autodetect=True): self.set_object_status(self.lb_repo, lb_id, provisioning_status=prov_status, operating_status=op_status) lb_listeners, _ = self.listener_repo.get_all( db_api.get_session(), load_balancer_id=lb_id) for listener in lb_listeners: if autodetect and (listener.provisioning_status == constants.PENDING_DELETE): listener_prov = constants.DELETED else: listener_prov = prov_status self.set_object_status(self.listener_repo, listener.id, provisioning_status=listener_prov, operating_status=op_status) lb_l7policies, _ = self.l7policy_repo.get_all( db_api.get_session(), listener_id=listener.id) for l7policy in lb_l7policies: if autodetect and (l7policy.provisioning_status == constants.PENDING_DELETE): l7policy_prov = constants.DELETED else: l7policy_prov = prov_status self.set_object_status(self.l7policy_repo, l7policy.id, provisioning_status=l7policy_prov, operating_status=op_status) l7rules, _ = self.l7rule_repo.get_all( db_api.get_session(), l7policy_id=l7policy.id) for l7rule in l7rules: if autodetect and (l7rule.provisioning_status == constants.PENDING_DELETE): l7rule_prov = constants.DELETED else: l7rule_prov = prov_status self.set_object_status(self.l7rule_repo, l7rule.id, provisioning_status=l7rule_prov, operating_status=op_status) lb_pools, _ = self.pool_repo.get_all(db_api.get_session(), load_balancer_id=lb_id) for pool in lb_pools: if autodetect and (pool.provisioning_status == constants.PENDING_DELETE): pool_prov = constants.DELETED else: pool_prov = prov_status self.set_object_status(self.pool_repo, pool.id, provisioning_status=pool_prov, operating_status=op_status) for member in pool.members: if autodetect and (member.provisioning_status == constants.PENDING_DELETE): member_prov = constants.DELETED else: member_prov = prov_status self.set_object_status(self.member_repo, member.id, provisioning_status=member_prov, operating_status=op_status) if pool.health_monitor: if autodetect and (pool.health_monitor.provisioning_status == constants.PENDING_DELETE): hm_prov = constants.DELETED else: hm_prov = prov_status self.set_object_status(self.health_monitor_repo, pool.health_monitor.id, provisioning_status=hm_prov, operating_status=op_status) # NOTE: This method should be used cautiously. On load balancers with a # significant amount of children resources, it will update the status for # each and every resource and thus taking a lot of DB time. 
    # NOTE: This method should be used cautiously. On load balancers with a
    # significant number of child resources, it will update the status of
    # each and every resource and can therefore take a lot of DB time.
    def set_lb_status(self, lb_id, status=None):
        explicit_status = True if status is not None else False
        if not explicit_status:
            status = constants.ACTIVE

        if status == constants.DELETED:
            op_status = constants.OFFLINE
        elif status == constants.ACTIVE:
            op_status = constants.ONLINE
        else:
            db_lb = self.lb_repo.get(db_api.get_session(), id=lb_id)
            op_status = db_lb.operating_status
        self._set_lb_and_children_statuses(lb_id, status, op_status,
                                           autodetect=not explicit_status)
        if status != constants.DELETED:
            return self.get(self.LB_PATH.format(lb_id=lb_id)).json

    @staticmethod
    def set_object_status(repo, id_,
                          provisioning_status=constants.ACTIVE,
                          operating_status=constants.ONLINE):
        repo.update(db_api.get_session(), id_,
                    provisioning_status=provisioning_status,
                    operating_status=operating_status)

    def assert_final_listener_statuses(self, lb_id, listener_id,
                                       delete=False):
        expected_prov_status = constants.ACTIVE
        expected_op_status = constants.ONLINE
        self.set_lb_status(lb_id, status=expected_prov_status)
        try:
            self.assert_correct_listener_status(expected_prov_status,
                                                expected_op_status,
                                                listener_id)
        except exceptions.NotFound:
            if not delete:
                raise

    def assert_correct_lb_status(self, lb_id, operating_status,
                                 provisioning_status):
        api_lb = self.get(
            self.LB_PATH.format(lb_id=lb_id)).json.get('loadbalancer')
        self.assertEqual(provisioning_status,
                         api_lb.get('provisioning_status'))
        self.assertEqual(operating_status,
                         api_lb.get('operating_status'))

    def assert_correct_listener_status(self, provisioning_status,
                                       operating_status, listener_id):
        api_listener = self.get(self.LISTENER_PATH.format(
            listener_id=listener_id)).json.get('listener')
        self.assertEqual(provisioning_status,
                         api_listener.get('provisioning_status'))
        self.assertEqual(operating_status,
                         api_listener.get('operating_status'))

    def assert_correct_pool_status(self, provisioning_status,
                                   operating_status, pool_id):
        api_pool = self.get(self.POOL_PATH.format(
            pool_id=pool_id)).json.get('pool')
        self.assertEqual(provisioning_status,
                         api_pool.get('provisioning_status'))
        self.assertEqual(operating_status,
                         api_pool.get('operating_status'))

    def assert_correct_member_status(self, provisioning_status,
                                     operating_status, pool_id, member_id):
        api_member = self.get(self.MEMBER_PATH.format(
            pool_id=pool_id, member_id=member_id)).json.get('member')
        self.assertEqual(provisioning_status,
                         api_member.get('provisioning_status'))
        self.assertEqual(operating_status,
                         api_member.get('operating_status'))

    def assert_correct_l7policy_status(self, provisioning_status,
                                       operating_status, l7policy_id):
        api_l7policy = self.get(self.L7POLICY_PATH.format(
            l7policy_id=l7policy_id)).json.get('l7policy')
        self.assertEqual(provisioning_status,
                         api_l7policy.get('provisioning_status'))
        self.assertEqual(operating_status,
                         api_l7policy.get('operating_status'))

    def assert_correct_l7rule_status(self, provisioning_status,
                                     operating_status, l7policy_id,
                                     l7rule_id):
        api_l7rule = self.get(self.L7RULE_PATH.format(
            l7policy_id=l7policy_id, l7rule_id=l7rule_id)).json.get('rule')
        self.assertEqual(provisioning_status,
                         api_l7rule.get('provisioning_status'))
        self.assertEqual(operating_status,
                         api_l7rule.get('operating_status'))

    def assert_correct_hm_status(self, provisioning_status,
                                 operating_status, hm_id):
        api_hm = self.get(self.HM_PATH.format(
            healthmonitor_id=hm_id)).json.get('healthmonitor')
        self.assertEqual(provisioning_status,
                         api_hm.get('provisioning_status'))
        self.assertEqual(operating_status,
                         api_hm.get('operating_status'))
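    # Illustrative sketch only (a hypothetical test body, not in the
    # original file): the usual pattern is to mutate a resource via the
    # API, settle the whole tree with set_lb_status(), then assert on every
    # object at once with assert_correct_status() below.
    def _example_assert_flow(self, lb_id, listener_id):
        self.set_lb_status(lb_id)
        self.assert_correct_status(
            lb_id=lb_id, listener_id=listener_id,
            lb_prov_status=constants.ACTIVE,
            listener_op_status=constants.ONLINE)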
    def assert_correct_status(self, lb_id=None, listener_id=None,
                              pool_id=None, member_id=None,
                              l7policy_id=None, l7rule_id=None, hm_id=None,
                              lb_prov_status=constants.ACTIVE,
                              listener_prov_status=constants.ACTIVE,
                              pool_prov_status=constants.ACTIVE,
                              member_prov_status=constants.ACTIVE,
                              l7policy_prov_status=constants.ACTIVE,
                              l7rule_prov_status=constants.ACTIVE,
                              hm_prov_status=constants.ACTIVE,
                              lb_op_status=constants.ONLINE,
                              listener_op_status=constants.ONLINE,
                              pool_op_status=constants.ONLINE,
                              member_op_status=constants.ONLINE,
                              l7policy_op_status=constants.ONLINE,
                              l7rule_op_status=constants.ONLINE,
                              hm_op_status=constants.ONLINE):
        if lb_id:
            self.assert_correct_lb_status(lb_id, lb_op_status,
                                          lb_prov_status)
        if listener_id:
            self.assert_correct_listener_status(
                listener_prov_status, listener_op_status, listener_id)
        if pool_id:
            self.assert_correct_pool_status(
                pool_prov_status, pool_op_status, pool_id)
        if member_id:
            self.assert_correct_member_status(
                member_prov_status, member_op_status, pool_id, member_id)
        if l7policy_id:
            self.assert_correct_l7policy_status(
                l7policy_prov_status, l7policy_op_status, l7policy_id)
        if l7rule_id:
            self.assert_correct_l7rule_status(
                l7rule_prov_status, l7rule_op_status, l7policy_id, l7rule_id)
        if hm_id:
            self.assert_correct_hm_status(
                hm_prov_status, hm_op_status, hm_id)

octavia-6.2.2/octavia/tests/functional/api/v2/test_amphora.py

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context from octavia.common import exceptions from octavia.tests.functional.api.v2 import base class TestAmphora(base.BaseAPITest): root_tag = 'amphora' root_tag_list = 'amphorae' root_tag_links = 'amphorae_links' root_tag_stats = 'amphora_stats' def setUp(self): super(TestAmphora, self).setUp() self.lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.lb_id = self.lb.get('id') self.project_id = self.lb.get('project_id') self.set_lb_status(self.lb_id) self.amp_args = { 'load_balancer_id': self.lb_id, 'compute_id': uuidutils.generate_uuid(), 'lb_network_ip': '192.168.1.2', 'vrrp_ip': '192.168.1.5', 'ha_ip': '192.168.1.10', 'vrrp_port_id': uuidutils.generate_uuid(), 'ha_port_id': uuidutils.generate_uuid(), 'cert_expiration': datetime.datetime.now(), 'cert_busy': False, 'role': constants.ROLE_STANDALONE, 'status': constants.AMPHORA_ALLOCATED, 'vrrp_interface': 'eth1', 'vrrp_id': 1, 'vrrp_priority': 100, 'cached_zone': None, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now(), 'image_id': uuidutils.generate_uuid(), 'compute_flavor': uuidutils.generate_uuid(), } self.amp = self.amphora_repo.create(self.session, **self.amp_args) self.amp_id = self.amp.id self.amp_args['id'] = self.amp_id self.listener1_id = uuidutils.generate_uuid() self.create_listener_stats_dynamic(self.listener1_id, self.amp_id, bytes_in=1, bytes_out=2, active_connections=3, total_connections=4, request_errors=5) self.listener2_id = uuidutils.generate_uuid() self.create_listener_stats_dynamic(self.listener2_id, self.amp_id, bytes_in=6, bytes_out=7, active_connections=8, total_connections=9, request_errors=10) self.listener1_amp_stats = {'active_connections': 3, 'bytes_in': 1, 'bytes_out': 2, 'id': self.amp_id, 'listener_id': self.listener1_id, 'loadbalancer_id': self.lb_id, 'request_errors': 5, 'total_connections': 4} self.listener2_amp_stats = {'active_connections': 8, 'bytes_in': 6, 'bytes_out': 7, 'id': self.amp_id, 'listener_id': self.listener2_id, 'loadbalancer_id': self.lb_id, 'request_errors': 10, 'total_connections': 9} self.ref_amp_stats = [self.listener1_amp_stats, self.listener2_amp_stats] def _create_additional_amp(self): amp_args = { 'load_balancer_id': None, 'compute_id': uuidutils.generate_uuid(), 'lb_network_ip': '192.168.1.2', 'vrrp_ip': '192.168.1.5', 'ha_ip': '192.168.1.10', 'vrrp_port_id': uuidutils.generate_uuid(), 'ha_port_id': uuidutils.generate_uuid(), 'cert_expiration': None, 'cert_busy': False, 'role': constants.ROLE_MASTER, 'status': constants.AMPHORA_READY, 'vrrp_interface': 'eth1', 'vrrp_id': 1, 'vrrp_priority': 100, } return self.amphora_repo.create(self.session, **amp_args) def _assert_amp_equal(self, source, response): self.assertEqual(source.pop('load_balancer_id'), response.pop('loadbalancer_id')) self.assertEqual(source.pop('cert_expiration').isoformat(), response.pop('cert_expiration')) self.assertEqual(source.pop('created_at').isoformat(), response.pop('created_at')) self.assertEqual(source.pop('updated_at').isoformat(), response.pop('updated_at')) self.assertEqual(source, response) def test_get(self): response = self.get(self.AMPHORA_PATH.format( amphora_id=self.amp_id)).json.get(self.root_tag) self._assert_amp_equal(self.amp_args, response) @mock.patch('oslo_messaging.RPCClient.cast') def test_failover(self, mock_cast): 
self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id=self.amp_id), body={}, status=202) payload = {constants.AMPHORA_ID: self.amp_id} mock_cast.assert_called_with({}, 'failover_amphora', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_failover_spare(self, mock_cast): amp_args = { 'compute_id': uuidutils.generate_uuid(), 'status': constants.AMPHORA_READY, 'lb_network_ip': '192.168.1.2', 'cert_expiration': datetime.datetime.now(), 'cert_busy': False, 'cached_zone': 'zone1', 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now(), 'image_id': uuidutils.generate_uuid(), } amp = self.amphora_repo.create(self.session, **amp_args) self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id=amp.id), body={}, status=202) payload = {constants.AMPHORA_ID: amp.id} mock_cast.assert_called_once_with({}, 'failover_amphora', **payload) def test_failover_deleted(self): new_amp = self._create_additional_amp() self.amphora_repo.update(self.session, new_amp.id, status=constants.DELETED) self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id=new_amp.id), body={}, status=404) @mock.patch('oslo_messaging.RPCClient.cast') def test_failover_bad_amp_id(self, mock_cast): self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id='asdf'), body={}, status=404) self.assertFalse(mock_cast.called) def test_get_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.AMPHORA_PATH.format( amphora_id=self.amp_id)).json.get(self.root_tag) # Reset api auth setting self.conf.config(group='api_settings', auth_strategy=auth_strategy) self._assert_amp_equal(self.amp_args, response) def test_get_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.get(self.AMPHORA_PATH.format( amphora_id=self.amp_id), status=403) # Reset api auth setting self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) @mock.patch('oslo_messaging.RPCClient.cast') def test_failover_authorized(self, mock_cast): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 
'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id=self.amp_id), body={}, status=202) # Reset api auth setting self.conf.config(group='api_settings', auth_strategy=auth_strategy) payload = {constants.AMPHORA_ID: self.amp_id} mock_cast.assert_called_once_with({}, 'failover_amphora', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_failover_not_authorized(self, mock_cast): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id=self.amp_id), body={}, status=403) # Reset api auth setting self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) mock_cast.assert_not_called() def test_get_deleted_gives_404(self): new_amp = self._create_additional_amp() self.amphora_repo.update(self.session, new_amp.id, status=constants.DELETED) self.get(self.AMPHORA_PATH.format(amphora_id=new_amp.id), status=404) def test_bad_get(self): self.get(self.AMPHORA_PATH.format( amphora_id=uuidutils.generate_uuid()), status=404) def test_get_all(self): amps = self.get(self.AMPHORAE_PATH).json.get(self.root_tag_list) self.assertIsInstance(amps, list) self.assertEqual(1, len(amps)) self.assertEqual(self.amp_id, amps[0].get('id')) def test_get_all_authorized(self): auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): amps = self.get(self.AMPHORAE_PATH).json.get( self.root_tag_list) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertIsInstance(amps, list) self.assertEqual(1, len(amps)) self.assertEqual(self.amp_id, amps[0].get('id')) def test_get_all_not_authorized(self): auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): amps = self.get(self.AMPHORAE_PATH, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, amps) def test_get_all_hides_deleted(self): new_amp = self._create_additional_amp() response = self.get(self.AMPHORAE_PATH) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 2) self.amphora_repo.update(self.session, new_amp.id, status=constants.DELETED) response = self.get(self.AMPHORAE_PATH) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 1) def test_get_by_loadbalancer_id(self): amps = self.get( 
self.AMPHORAE_PATH, params={'loadbalancer_id': self.lb_id} ).json.get(self.root_tag_list) self.assertEqual(1, len(amps)) amps = self.get( self.AMPHORAE_PATH, params={'loadbalancer_id': uuidutils.generate_uuid()} ).json.get(self.root_tag_list) self.assertEqual(0, len(amps)) def test_get_by_project_id(self): amps = self.get( self.AMPHORAE_PATH, params={'project_id': self.project_id} ).json.get(self.root_tag_list) self.assertEqual(1, len(amps)) false_project_id = uuidutils.generate_uuid() amps = self.get( self.AMPHORAE_PATH, params={'project_id': false_project_id} ).json.get(self.root_tag_list) self.assertEqual(int(false_project_id == self.project_id), len(amps)) def test_get_all_sorted(self): self._create_additional_amp() response = self.get(self.AMPHORAE_PATH, params={'sort': 'role:desc'}) amps_desc = response.json.get(self.root_tag_list) response = self.get(self.AMPHORAE_PATH, params={'sort': 'role:asc'}) amps_asc = response.json.get(self.root_tag_list) self.assertEqual(2, len(amps_desc)) self.assertEqual(2, len(amps_asc)) amp_id_roles_desc = [(amp.get('id'), amp.get('role')) for amp in amps_desc] amp_id_roles_asc = [(amp.get('id'), amp.get('role')) for amp in amps_asc] self.assertEqual(amp_id_roles_asc, list(reversed(amp_id_roles_desc))) def test_get_all_limited(self): self._create_additional_amp() self._create_additional_amp() # First two -- should have 'next' link first_two = self.get(self.AMPHORAE_PATH, params={'limit': 2}).json objs = first_two[self.root_tag_list] links = first_two[self.root_tag_links] self.assertEqual(2, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('next', links[0]['rel']) # Third + off the end -- should have previous link third = self.get(self.AMPHORAE_PATH, params={ 'limit': 2, 'marker': first_two[self.root_tag_list][1]['id']}).json objs = third[self.root_tag_list] links = third[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('previous', links[0]['rel']) # Middle -- should have both links middle = self.get(self.AMPHORAE_PATH, params={ 'limit': 1, 'marker': first_two[self.root_tag_list][0]['id']}).json objs = middle[self.root_tag_list] links = middle[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(2, len(links)) self.assertItemsEqual(['previous', 'next'], [link['rel'] for link in links]) def test_get_all_fields_filter(self): amps = self.get(self.AMPHORAE_PATH, params={ 'fields': ['id', 'role']}).json for amp in amps['amphorae']: self.assertIn(u'id', amp) self.assertIn(u'role', amp) self.assertNotIn(u'ha_port_id', amp) def test_get_one_fields_filter(self): amp = self.get( self.AMPHORA_PATH.format(amphora_id=self.amp_id), params={'fields': ['id', 'role']}).json.get(self.root_tag) self.assertIn(u'id', amp) self.assertIn(u'role', amp) self.assertNotIn(u'ha_port_id', amp) def test_get_all_filter(self): self._create_additional_amp() amps = self.get(self.AMPHORAE_PATH, params={ 'id': self.amp_id}).json.get(self.root_tag_list) self.assertEqual(1, len(amps)) self.assertEqual(self.amp_id, amps[0]['id']) def test_empty_get_all(self): self.amphora_repo.delete(self.session, id=self.amp_id) response = self.get(self.AMPHORAE_PATH).json.get(self.root_tag_list) self.assertIsInstance(response, list) self.assertEqual(0, len(response)) def test_get_stats_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with 
mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.AMPHORA_STATS_PATH.format( amphora_id=self.amp_id)).json.get(self.root_tag_stats) # Reset api auth setting self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.ref_amp_stats, response) def test_get_stats_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.get(self.AMPHORA_STATS_PATH.format( amphora_id=self.amp_id), status=403) # Reset api auth setting self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_get_stats_bad_amp_id(self): self.get(self.AMPHORA_STATS_PATH.format( amphora_id='bogus_id'), status=404) def test_get_stats_no_listeners(self): self.lb2 = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.lb2_id = self.lb2.get('id') self.set_lb_status(self.lb2_id) self.amp2_args = { 'load_balancer_id': self.lb2_id, 'compute_id': uuidutils.generate_uuid(), 'lb_network_ip': '192.168.1.20', 'vrrp_ip': '192.168.1.5', 'ha_ip': '192.168.1.100', 'vrrp_port_id': uuidutils.generate_uuid(), 'ha_port_id': uuidutils.generate_uuid(), 'cert_expiration': datetime.datetime.now(), 'cert_busy': False, 'role': constants.ROLE_STANDALONE, 'status': constants.AMPHORA_ALLOCATED, 'vrrp_interface': 'eth1', 'vrrp_id': 1, 'vrrp_priority': 100, 'cached_zone': None, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now(), 'image_id': uuidutils.generate_uuid(), } self.amp2 = self.amphora_repo.create(self.session, **self.amp2_args) self.amp2_id = self.amp2.id self.get(self.AMPHORA_STATS_PATH.format( amphora_id=self.amp2_id), status=404) @mock.patch('oslo_messaging.RPCClient.cast') def test_config(self, mock_cast): self.put(self.AMPHORA_CONFIG_PATH.format( amphora_id=self.amp_id), body={}, status=202) payload = {constants.AMPHORA_ID: self.amp_id} mock_cast.assert_called_with({}, 'update_amphora_agent_config', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_config_deleted(self, mock_cast): new_amp = self._create_additional_amp() self.amphora_repo.update(self.session, new_amp.id, status=constants.DELETED) self.put(self.AMPHORA_CONFIG_PATH.format( amphora_id=new_amp.id), body={}, status=404) self.assertFalse(mock_cast.called) @mock.patch('oslo_messaging.RPCClient.cast') def test_config_bad_amp_id(self, mock_cast): self.put(self.AMPHORA_CONFIG_PATH.format( amphora_id='bogus'), body={}, status=404) self.assertFalse(mock_cast.called) @mock.patch('oslo_messaging.RPCClient.cast') def test_config_exception(self, mock_cast): mock_cast.side_effect = exceptions.OctaviaException('boom') self.put(self.AMPHORA_CONFIG_PATH.format( amphora_id=self.amp_id), body={}, status=500) @mock.patch('oslo_messaging.RPCClient.cast') def 
test_config_spare_amp(self, mock_cast):
        amp_args = {
            'compute_id': uuidutils.generate_uuid(),
            'status': constants.AMPHORA_READY,
            'lb_network_ip': '192.168.1.2',
            'cert_expiration': datetime.datetime.now(),
            'cert_busy': False,
            'cached_zone': 'zone1',
            'created_at': datetime.datetime.now(),
            'updated_at': datetime.datetime.now(),
            'image_id': uuidutils.generate_uuid(),
        }
        amp = self.amphora_repo.create(self.session, **amp_args)
        self.put(self.AMPHORA_CONFIG_PATH.format(
            amphora_id=amp.id), body={}, status=202)
        payload = {constants.AMPHORA_ID: amp.id}
        mock_cast.assert_called_with({}, 'update_amphora_agent_config',
                                     **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_config_authorized(self, mock_cast):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.put(self.AMPHORA_CONFIG_PATH.format(
                    amphora_id=self.amp_id), body={}, status=202)
        # Reset api auth setting
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        payload = {constants.AMPHORA_ID: self.amp_id}
        mock_cast.assert_called_with({}, 'update_amphora_agent_config',
                                     **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_config_not_authorized(self, mock_cast):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            self.put(self.AMPHORA_CONFIG_PATH.format(
                amphora_id=self.amp_id), body={}, status=403)
        # Reset api auth setting
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertFalse(mock_cast.called)

    def test_bogus_path(self):
        self.put(self.AMPHORA_PATH.format(amphora_id=self.amp_id) + '/bogus',
                 body={}, status=405)

octavia-6.2.2/octavia/tests/functional/api/v2/test_availability_zone_profiles.py

# Copyright 2019 Verizon Media
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_db import exception as odb_exceptions from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context from octavia.tests.functional.api.v2 import base class TestAvailabilityZoneProfiles(base.BaseAPITest): root_tag = 'availability_zone_profile' root_tag_list = 'availability_zone_profiles' root_tag_links = 'availability_zone_profile_links' def _assert_request_matches_response(self, req, resp, **optionals): self.assertTrue(uuidutils.is_uuid_like(resp.get('id'))) self.assertEqual(req.get('name'), resp.get('name')) self.assertEqual(req.get(constants.PROVIDER_NAME), resp.get(constants.PROVIDER_NAME)) self.assertEqual(req.get(constants.AVAILABILITY_ZONE_DATA), resp.get(constants.AVAILABILITY_ZONE_DATA)) def test_empty_list(self): response = self.get(self.AZPS_PATH) api_list = response.json.get(self.root_tag_list) self.assertEqual([], api_list) def test_create(self): az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(az_json) response = self.post(self.AZPS_PATH, body) api_azp = response.json.get(self.root_tag) self._assert_request_matches_response(az_json, api_azp) def test_create_with_missing_name(self): az_json = {constants.PROVIDER_NAME: 'pr1', constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'} body = self._build_body(az_json) response = self.post(self.AZPS_PATH, body, status=400) err_msg = ("Invalid input for field/attribute name. Value: " "'None'. Mandatory field missing.") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_missing_provider(self): az_json = {'name': 'xyz', constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'} body = self._build_body(az_json) response = self.post(self.AZPS_PATH, body, status=400) err_msg = ("Invalid input for field/attribute provider_name. " "Value: 'None'. Mandatory field missing.") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_missing_availability_zone_data(self): az_json = {'name': 'xyz', constants.PROVIDER_NAME: 'pr1'} body = self._build_body(az_json) response = self.post(self.AZPS_PATH, body, status=400) err_msg = ("Invalid input for field/attribute availability_zone_data. " "Value: 'None'. 
Mandatory field missing.") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_empty_availability_zone_data(self): az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', constants.AVAILABILITY_ZONE_DATA: '{}'} body = self._build_body(az_json) response = self.post(self.AZPS_PATH, body) api_azp = response.json.get(self.root_tag) self._assert_request_matches_response(az_json, api_azp) def test_create_with_long_name(self): az_json = {'name': 'n' * 256, constants.PROVIDER_NAME: 'test1', constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(az_json) self.post(self.AZPS_PATH, body, status=400) def test_create_with_long_provider(self): az_json = {'name': 'name1', constants.PROVIDER_NAME: 'n' * 256, constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(az_json) self.post(self.AZPS_PATH, body, status=400) def test_create_with_long_availability_zone_data(self): az_json = {'name': 'name1', constants.PROVIDER_NAME: 'amp', constants.AVAILABILITY_ZONE_DATA: 'n' * 4097} body = self._build_body(az_json) self.post(self.AZPS_PATH, body, status=400) def test_create_authorized(self): az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(az_json) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.post(self.AZPS_PATH, body) self.conf.config(group='api_settings', auth_strategy=auth_strategy) api_azp = response.json.get(self.root_tag) self._assert_request_matches_response(az_json, api_azp) def test_create_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) az_json = {'name': 'name', constants.PROVIDER_NAME: 'xyz', constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'} body = self._build_body(az_json) response = self.post(self.AZPS_PATH, body, status=403) api_azp = response.json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_azp) def test_create_db_failure(self): az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(az_json) with mock.patch( "octavia.db.repositories.AvailabilityZoneProfileRepository." 
"create") as mock_create: mock_create.side_effect = Exception self.post(self.AZPS_PATH, body, status=500) mock_create.side_effect = odb_exceptions.DBDuplicateEntry self.post(self.AZPS_PATH, body, status=409) def test_create_with_invalid_json(self): az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', constants.AVAILABILITY_ZONE_DATA: '{hello: "world"}'} body = self._build_body(az_json) self.post(self.AZPS_PATH, body, status=400) def test_get(self): azp = self.create_availability_zone_profile( 'name', 'noop_driver', '{"x": "y"}') self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) response = self.get( self.AZP_PATH.format( azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('name', response.get('name')) self.assertEqual(azp.get('id'), response.get('id')) def test_get_one_deleted_id(self): response = self.get(self.AZP_PATH.format(azp_id=constants.NIL_UUID), status=404) self.assertEqual('Availability Zone Profile {} not found.'.format( constants.NIL_UUID), response.json.get('faultstring')) def test_get_one_fields_filter(self): azp = self.create_availability_zone_profile( 'name', 'noop_driver', '{"x": "y"}') self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id')), params={ 'fields': ['id', constants.PROVIDER_NAME]} ).json.get(self.root_tag) self.assertEqual(azp.get('id'), response.get('id')) self.assertIn(u'id', response) self.assertIn(constants.PROVIDER_NAME, response) self.assertNotIn(u'name', response) self.assertNotIn(constants.AVAILABILITY_ZONE_DATA, response) def test_get_authorized(self): azp = self.create_availability_zone_profile( 'name', 'noop_driver', '{"x": "y"}') self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get( self.AZP_PATH.format( azp_id=azp.get('id'))).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual('name', response.get('name')) self.assertEqual(azp.get('id'), response.get('id')) def test_get_not_authorized(self): azp = self.create_availability_zone_profile( 'name', 'noop_driver', '{"x": "y"}') self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) self.get(self.AZP_PATH.format(azp_id=azp.get('id')), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_get_all(self): fp1 = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') ref_fp_1 = {u'availability_zone_data': u'{"compute_zone": "my_az_1"}', u'id': fp1.get('id'), u'name': u'test1', constants.PROVIDER_NAME: u'noop_driver'} 
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) fp2 = self.create_availability_zone_profile( 'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}') ref_fp_2 = {u'availability_zone_data': u'{"compute_zone": "my_az_1"}', u'id': fp2.get('id'), u'name': u'test2', constants.PROVIDER_NAME: u'noop_driver-alt'} self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) response = self.get(self.AZPS_PATH) api_list = response.json.get(self.root_tag_list) self.assertEqual(2, len(api_list)) self.assertIn(ref_fp_1, api_list) self.assertIn(ref_fp_2, api_list) def test_get_all_fields_filter(self): fp1 = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) fp2 = self.create_availability_zone_profile( 'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}') self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) response = self.get(self.AZPS_PATH, params={ 'fields': ['id', 'name']}) api_list = response.json.get(self.root_tag_list) self.assertEqual(2, len(api_list)) for profile in api_list: self.assertIn(u'id', profile) self.assertIn(u'name', profile) self.assertNotIn(constants.PROVIDER_NAME, profile) self.assertNotIn(constants.AVAILABILITY_ZONE_DATA, profile) def test_get_all_authorized(self): fp1 = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) fp2 = self.create_availability_zone_profile( 'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}') self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.AZPS_PATH) self.conf.config(group='api_settings', auth_strategy=auth_strategy) api_list = response.json.get(self.root_tag_list) self.assertEqual(2, len(api_list)) def test_get_all_not_authorized(self): fp1 = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) fp2 = self.create_availability_zone_profile( 'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}') self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) self.get(self.AZPS_PATH, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_update(self): azp = self.create_availability_zone_profile( 'test_profile', 'noop_driver', '{"x": "y"}') update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'noop_driver-alt', constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(update_data) 
self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('the_profile', response.get('name')) self.assertEqual('noop_driver-alt', response.get(constants.PROVIDER_NAME)) self.assertEqual('{"hello": "world"}', response.get(constants.AVAILABILITY_ZONE_DATA)) def test_update_deleted_id(self): update_data = {'name': 'fake_profile'} body = self._build_body(update_data) response = self.put(self.AZP_PATH.format(azp_id=constants.NIL_UUID), body, status=404) self.assertEqual('Availability Zone Profile {} not found.'.format( constants.NIL_UUID), response.json.get('faultstring')) def test_update_nothing(self): azp = self.create_availability_zone_profile( 'test_profile', 'noop_driver', '{"x": "y"}') body = self._build_body({}) self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('test_profile', response.get('name')) self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) self.assertEqual('{"x": "y"}', response.get(constants.AVAILABILITY_ZONE_DATA)) def test_update_name_none(self): self._test_update_param_none(constants.NAME) def test_update_provider_name_none(self): self._test_update_param_none(constants.PROVIDER_NAME) def test_update_availability_zone_data_none(self): self._test_update_param_none(constants.AVAILABILITY_ZONE_DATA) def _test_update_param_none(self, param_name): azp = self.create_availability_zone_profile( 'test_profile', 'noop_driver', '{"x": "y"}') expect_error_msg = ("None is not a valid option for %s" % param_name) body = self._build_body({param_name: None}) response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body, status=400) self.assertEqual(expect_error_msg, response.json['faultstring']) def test_update_no_availability_zone_data(self): azp = self.create_availability_zone_profile( 'test_profile', 'noop_driver', '{"x": "y"}') update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'noop_driver-alt'} body = self._build_body(update_data) response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('the_profile', response.get('name')) self.assertEqual('noop_driver-alt', response.get(constants.PROVIDER_NAME)) self.assertEqual('{"x": "y"}', response.get(constants.AVAILABILITY_ZONE_DATA)) def test_update_authorized(self): azp = self.create_availability_zone_profile( 'test_profile', 'noop_driver', '{"x": "y"}') update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'noop_driver-alt', constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(update_data) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", 
return_value=override_credentials): response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) self.conf.config(group='api_settings', auth_strategy=auth_strategy) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('the_profile', response.get('name')) self.assertEqual('noop_driver-alt', response.get(constants.PROVIDER_NAME)) self.assertEqual('{"hello": "world"}', response.get(constants.AVAILABILITY_ZONE_DATA)) def test_update_not_authorized(self): azp = self.create_availability_zone_profile( 'test_profile', 'noop_driver', '{"x": "y"}') update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'amp', constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(update_data) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('test_profile', response.get('name')) self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) self.assertEqual('{"x": "y"}', response.get(constants.AVAILABILITY_ZONE_DATA)) def test_update_in_use(self): azp = self.create_availability_zone_profile( 'test_profile', 'noop_driver', '{"x": "y"}') self.create_availability_zone( 'name1', 'description', azp.get('id'), True) # Test updating provider while in use is not allowed update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'noop_driver-alt'} body = self._build_body(update_data) response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body, status=409) err_msg = ("Availability Zone Profile {} is in use and cannot be " "modified.".format(azp.get('id'))) self.assertEqual(err_msg, response.json.get('faultstring')) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('test_profile', response.get('name')) self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) self.assertEqual('{"x": "y"}', response.get(constants.AVAILABILITY_ZONE_DATA)) # Test updating availability zone data while in use is not allowed update_data = {'name': 'the_profile', constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} body = self._build_body(update_data) response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body, status=409) err_msg = ("Availability Zone Profile {} is in use and cannot be " "modified.".format(azp.get('id'))) self.assertEqual(err_msg, response.json.get('faultstring')) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('test_profile', response.get('name')) self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) self.assertEqual('{"x": "y"}', response.get(constants.AVAILABILITY_ZONE_DATA)) # Test that you can still update the name when in use update_data = {'name': 'the_profile'} body = self._build_body(update_data) response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) response = self.get( self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) self.assertEqual('the_profile', response.get('name')) self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) self.assertEqual('{"x": "y"}', response.get(constants.AVAILABILITY_ZONE_DATA)) def 
test_delete(self): azp = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) self.delete(self.AZP_PATH.format(azp_id=azp.get('id'))) response = self.get(self.AZP_PATH.format( azp_id=azp.get('id')), status=404) err_msg = "Availability Zone Profile %s not found." % azp.get('id') self.assertEqual(err_msg, response.json.get('faultstring')) def test_delete_deleted_id(self): response = self.delete(self.AZP_PATH.format(azp_id=constants.NIL_UUID), status=404) self.assertEqual('Availability Zone Profile {} not found.'.format( constants.NIL_UUID), response.json.get('faultstring')) def test_delete_nonexistent_id(self): response = self.delete(self.AZP_PATH.format(azp_id='bogus_id'), status=404) self.assertEqual('Availability Zone Profile bogus_id not found.', response.json.get('faultstring')) def test_delete_authorized(self): azp = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.delete(self.AZP_PATH.format(azp_id=azp.get('id'))) self.conf.config(group='api_settings', auth_strategy=auth_strategy) response = self.get(self.AZP_PATH.format( azp_id=azp.get('id')), status=404) err_msg = "Availability Zone Profile %s not found." 
            % azp.get('id')
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_delete_not_authorized(self):
        azp = self.create_availability_zone_profile(
            'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
        self.assertTrue(uuidutils.is_uuid_like(azp.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        response = self.delete(self.AZP_PATH.format(
            azp_id=azp.get('id')), status=403)
        api_azp = response.json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_azp)
        response = self.get(
            self.AZP_PATH.format(
                azp_id=azp.get('id'))).json.get(self.root_tag)
        self.assertEqual('test1', response.get('name'))

    def test_delete_in_use(self):
        azp = self.create_availability_zone_profile(
            'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
        self.create_availability_zone(
            'name1', 'description', azp.get('id'), True)
        response = self.delete(self.AZP_PATH.format(azp_id=azp.get('id')),
                               status=409)
        err_msg = ("Availability Zone Profile {} is in use and cannot be "
                   "modified.".format(azp.get('id')))
        self.assertEqual(err_msg, response.json.get('faultstring'))
        response = self.get(
            self.AZP_PATH.format(
                azp_id=azp.get('id'))).json.get(self.root_tag)
        self.assertEqual('test1', response.get('name'))

octavia-6.2.2/octavia/tests/functional/api/v2/test_availability_zones.py

# Copyright 2019 Verizon Media
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock from oslo_utils import uuidutils from oslo_config import cfg from oslo_config import fixture as oslo_fixture from octavia.common import constants import octavia.common.context from octavia.common import exceptions from octavia.tests.functional.api.v2 import base class TestAvailabilityZones(base.BaseAPITest): root_tag = 'availability_zone' root_tag_list = 'availability_zones' root_tag_links = 'availability_zones_links' def setUp(self): super(TestAvailabilityZones, self).setUp() self.azp = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') def _assert_request_matches_response(self, req, resp, **optionals): self.assertNotIn('id', resp) # AZs do not expose an ID req_description = req.get('description') self.assertEqual(req.get('name'), resp.get('name')) if not req_description: self.assertEqual('', resp.get('description')) else: self.assertEqual(req.get('description'), resp.get('description')) self.assertEqual(req.get('availability_zone_profile_id'), resp.get('availability_zone_profile_id')) self.assertEqual(req.get('enabled', True), resp.get('enabled')) def test_empty_list(self): response = self.get(self.AZS_PATH) api_list = response.json.get(self.root_tag_list) self.assertEqual([], api_list) def test_create(self): az_json = {'name': 'test1', 'availability_zone_profile_id': self.azp.get('id')} body = self._build_body(az_json) response = self.post(self.AZS_PATH, body) api_az = response.json.get(self.root_tag) self._assert_request_matches_response(az_json, api_az) def test_create_with_missing_name(self): az_json = {'availability_zone_profile_id': self.azp.get('id')} body = self._build_body(az_json) response = self.post(self.AZS_PATH, body, status=400) err_msg = ("Invalid input for field/attribute name. Value: " "'None'. Mandatory field missing.") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_long_name(self): az_json = {'name': 'n' * 256, 'availability_zone_profile_id': self.azp.get('id')} body = self._build_body(az_json) self.post(self.AZS_PATH, body, status=400) def test_create_with_long_description(self): az_json = {'name': 'test-az', 'description': 'n' * 256, 'availability_zone_profile_id': self.azp.get('id')} body = self._build_body(az_json) self.post(self.AZS_PATH, body, status=400) def test_create_with_missing_availability_zone_profile(self): az_json = {'name': 'xyz'} body = self._build_body(az_json) response = self.post(self.AZS_PATH, body, status=400) err_msg = ( "Invalid input for field/attribute availability_zone_profile_id. " "Value: 'None'. Mandatory field missing.") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_bad_availability_zone_profile(self): az_json = {'name': 'xyz', 'availability_zone_profile_id': 'bogus'} body = self._build_body(az_json) response = self.post(self.AZS_PATH, body, status=400) err_msg = ( "Invalid input for field/attribute availability_zone_profile_id. " "Value: 'bogus'. Value should be UUID format") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_duplicate_names(self): self.create_availability_zone( 'name', 'description', self.azp.get('id'), True) az_json = {'name': 'name', 'availability_zone_profile_id': self.azp.get('id')} body = self._build_body(az_json) response = self.post(self.AZS_PATH, body, status=409) err_msg = "A availability zone of name already exists." 
self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_authorized(self): az_json = {'name': 'test1', 'availability_zone_profile_id': self.azp.get('id')} body = self._build_body(az_json) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.post(self.AZS_PATH, body) api_az = response.json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self._assert_request_matches_response(az_json, api_az) def test_create_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) az_json = {'name': 'name', 'availability_zone_profile_id': self.azp.get('id')} body = self._build_body(az_json) response = self.post(self.AZS_PATH, body, status=403) api_az = response.json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_az) def test_create_db_failure(self): az_json = {'name': 'test1', 'availability_zone_profile_id': self.azp.get('id')} body = self._build_body(az_json) with mock.patch("octavia.db.repositories.AvailabilityZoneRepository." 
"create") as mock_create: mock_create.side_effect = Exception self.post(self.AZS_PATH, body, status=500) def test_get(self): az = self.create_availability_zone( 'name', 'description', self.azp.get('id'), True) response = self.get( self.AZ_PATH.format( az_name=az.get('name'))).json.get(self.root_tag) self.assertEqual('name', response.get('name')) self.assertEqual('description', response.get('description')) self.assertEqual(az.get('name'), response.get('name')) self.assertEqual(self.azp.get('id'), response.get('availability_zone_profile_id')) self.assertTrue(response.get('enabled')) def test_get_one_fields_filter(self): az = self.create_availability_zone( 'name', 'description', self.azp.get('id'), True) response = self.get( self.AZ_PATH.format(az_name=az.get('name')), params={ 'fields': ['name', 'availability_zone_profile_id']} ).json.get(self.root_tag) self.assertEqual(az.get('name'), response.get('name')) self.assertEqual(self.azp.get('id'), response.get('availability_zone_profile_id')) self.assertIn(u'availability_zone_profile_id', response) self.assertNotIn(u'description', response) self.assertNotIn(u'enabled', response) def test_get_one_deleted_name(self): response = self.get( self.AZ_PATH.format(az_name=constants.NIL_UUID), status=404) self.assertEqual( 'Availability Zone {} not found.'.format(constants.NIL_UUID), response.json.get('faultstring')) def test_get_authorized(self): az = self.create_availability_zone( 'name', 'description', self.azp.get('id'), True) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get( self.AZ_PATH.format( az_name=az.get('name'))).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual('name', response.get('name')) self.assertEqual('description', response.get('description')) self.assertEqual(self.azp.get('id'), response.get('availability_zone_profile_id')) self.assertTrue(response.get('enabled')) def test_get_not_authorized(self): az = self.create_availability_zone( 'name', 'description', self.azp.get('id'), True) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) response = self.get(self.AZ_PATH.format( az_name=az.get('name')), status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response) def test_get_all(self): self.create_availability_zone( 'name1', 'description', self.azp.get('id'), True) ref_az_1 = { u'description': u'description', u'enabled': True, u'availability_zone_profile_id': self.azp.get('id'), u'name': u'name1'} self.create_availability_zone( 'name2', 'description', self.azp.get('id'), True) ref_az_2 = { u'description': u'description', u'enabled': True, 

    def test_get_all_fields_filter(self):
        self.create_availability_zone(
            'name1', 'description', self.azp.get('id'), True)
        self.create_availability_zone(
            'name2', 'description', self.azp.get('id'), True)
        response = self.get(self.AZS_PATH, params={
            'fields': ['id', 'name']})
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual(2, len(api_list))
        for az in api_list:
            self.assertIn(u'name', az)
            self.assertNotIn(u'availability_zone_profile_id', az)
            self.assertNotIn(u'description', az)
            self.assertNotIn(u'enabled', az)

    def test_get_all_authorized(self):
        self.create_availability_zone(
            'name1', 'description', self.azp.get('id'), True)
        self.create_availability_zone(
            'name2', 'description', self.azp.get('id'), True)
        response = self.get(self.AZS_PATH)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                api_list = response.json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(2, len(api_list))

    def test_get_all_not_authorized(self):
        self.create_availability_zone(
            'name1', 'description', self.azp.get('id'), True)
        self.create_availability_zone(
            'name2', 'description', self.azp.get('id'), True)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        response = self.get(self.AZS_PATH, status=403).json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response)

    def test_update(self):
        az_json = {'name': 'Fancy_Availability_Zone',
                   'description': 'A great az. Pick me!',
                   'availability_zone_profile_id': self.azp.get('id')}
        body = self._build_body(az_json)
        response = self.post(self.AZS_PATH, body)
        api_az = response.json.get(self.root_tag)
        availability_zone_name = api_az.get('name')

        az_json = {'description': 'An even better az. Pick me!',
                   'enabled': False}
        body = self._build_body(az_json)
        self.put(self.AZ_PATH.format(az_name=availability_zone_name), body)

        updated_az = self.get(self.AZ_PATH.format(
            az_name=availability_zone_name)).json.get(self.root_tag)
        self.assertEqual('An even better az. Pick me!',
                         updated_az.get('description'))
        self.assertEqual(availability_zone_name, updated_az.get('name'))
        self.assertEqual(self.azp.get('id'),
                         updated_az.get('availability_zone_profile_id'))
        self.assertFalse(updated_az.get('enabled'))
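
    # NOTE: Availability zones are addressed by name in the URL, so a PUT
    # can only change mutable fields such as description and enabled; the
    # name itself is never rewritten by an update.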

    def test_update_deleted_name(self):
        update_json = {'description': 'fake_desc'}
        body = self._build_body(update_json)
        response = self.put(
            self.AZ_PATH.format(az_name=constants.NIL_UUID), body,
            status=404)
        self.assertEqual(
            'Availability Zone {} not found.'.format(constants.NIL_UUID),
            response.json.get('faultstring'))

    def test_update_none(self):
        az_json = {'name': 'Fancy_Availability_Zone',
                   'description': 'A great az. Pick me!',
                   'availability_zone_profile_id': self.azp.get('id')}
        body = self._build_body(az_json)
        response = self.post(self.AZS_PATH, body)
        api_az = response.json.get(self.root_tag)
        availability_zone_name = api_az.get('name')

        az_json = {}
        body = self._build_body(az_json)
        self.put(self.AZ_PATH.format(az_name=availability_zone_name), body)

        updated_az = self.get(self.AZ_PATH.format(
            az_name=availability_zone_name)).json.get(self.root_tag)
        self.assertEqual('Fancy_Availability_Zone', updated_az.get('name'))
        self.assertEqual('A great az. Pick me!',
                         updated_az.get('description'))
        self.assertEqual(availability_zone_name, updated_az.get('name'))
        self.assertEqual(self.azp.get('id'),
                         updated_az.get('availability_zone_profile_id'))
        self.assertTrue(updated_az.get('enabled'))

    def test_update_availability_zone_profile_id(self):
        az_json = {'name': 'Fancy_Availability_Zone',
                   'description': 'A great az. Pick me!',
                   'availability_zone_profile_id': self.azp.get('id')}
        body = self._build_body(az_json)
        response = self.post(self.AZS_PATH, body)
        api_az = response.json.get(self.root_tag)
        availability_zone_name = api_az.get('name')

        az_json = {'availability_zone_profile_id': uuidutils.generate_uuid()}
        body = self._build_body(az_json)
        self.put(self.AZ_PATH.format(az_name=availability_zone_name), body,
                 status=400)

        updated_az = self.get(self.AZ_PATH.format(
            az_name=availability_zone_name)).json.get(self.root_tag)
        self.assertEqual(self.azp.get('id'),
                         updated_az.get('availability_zone_profile_id'))

    def test_update_authorized(self):
        az_json = {'name': 'Fancy_Availability_Zone',
                   'description': 'A great az. Pick me!',
                   'availability_zone_profile_id': self.azp.get('id')}
        body = self._build_body(az_json)
        response = self.post(self.AZS_PATH, body)
        api_az = response.json.get(self.root_tag)
        availability_zone_name = api_az.get('name')

        az_json = {'description': 'An even better az. Pick me!',
                   'enabled': False}
        body = self._build_body(az_json)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.put(self.AZ_PATH.format(az_name=availability_zone_name),
                         body)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        updated_az = self.get(self.AZ_PATH.format(
            az_name=availability_zone_name)).json.get(self.root_tag)
        self.assertEqual('An even better az. Pick me!',
                         updated_az.get('description'))
        self.assertEqual(availability_zone_name, updated_az.get('name'))
        self.assertEqual(self.azp.get('id'),
                         updated_az.get('availability_zone_profile_id'))
        self.assertFalse(updated_az.get('enabled'))

    def test_update_not_authorized(self):
        az_json = {'name': 'Fancy_Availability_Zone',
                   'description': 'A great az. Pick me!',
                   'availability_zone_profile_id': self.azp.get('id')}
        body = self._build_body(az_json)
        response = self.post(self.AZS_PATH, body)
        api_az = response.json.get(self.root_tag)
        availability_zone_name = api_az.get('name')

        az_json = {'description': 'An even better az. Pick me!',
                   'enabled': False}
        body = self._build_body(az_json)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        self.put(self.AZ_PATH.format(az_name=availability_zone_name), body,
                 status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        updated_az = self.get(self.AZ_PATH.format(
            az_name=availability_zone_name)).json.get(self.root_tag)
        self.assertEqual('A great az. Pick me!',
                         updated_az.get('description'))
        self.assertEqual(availability_zone_name, updated_az.get('name'))
        self.assertEqual(self.azp.get('id'),
                         updated_az.get('availability_zone_profile_id'))
        self.assertTrue(updated_az.get('enabled'))

    @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.update')
    def test_update_exception(self, mock_update):
        mock_update.side_effect = [exceptions.OctaviaException()]
        update_json = {'description': 'Some availability zone.'}
        body = self._build_body(update_json)
        response = self.put(self.AZ_PATH.format(az_name='bogus'), body,
                            status=500)
        self.assertEqual('An unknown exception occurred.',
                         response.json.get('faultstring'))
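
    # NOTE: The delete tests below cover the 404 faultstring for missing
    # and already-deleted names, plus the 409 conflict returned when a
    # zone is still referenced by a load balancer.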

    def test_delete(self):
        az = self.create_availability_zone(
            'name1', 'description', self.azp.get('id'), True)
        self.delete(self.AZ_PATH.format(az_name=az.get('name')))
        response = self.get(self.AZ_PATH.format(az_name=az.get('name')),
                            status=404)
        err_msg = "Availability Zone %s not found." % az.get('name')
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_delete_nonexistent_name(self):
        response = self.delete(
            self.AZ_PATH.format(az_name='bogus_name'), status=404)
        self.assertEqual('Availability Zone bogus_name not found.',
                         response.json.get('faultstring'))

    def test_delete_deleted_name(self):
        response = self.delete(
            self.AZ_PATH.format(az_name=constants.NIL_UUID), status=404)
        self.assertEqual(
            'Availability Zone {} not found.'.format(constants.NIL_UUID),
            response.json.get('faultstring'))

    def test_delete_authorized(self):
        az = self.create_availability_zone(
            'name1', 'description', self.azp.get('id'), True)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.delete(
                    self.AZ_PATH.format(az_name=az.get('name')))
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        response = self.get(self.AZ_PATH.format(az_name=az.get('name')),
                            status=404)
        err_msg = "Availability Zone %s not found." % az.get('name')
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_delete_not_authorized(self):
        az = self.create_availability_zone(
            'name1', 'description', self.azp.get('id'), True)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        response = self.delete(self.AZ_PATH.format(az_name=az.get('name')),
                               status=403)
        api_az = response.json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_az)
        response = self.get(self.AZ_PATH.format(
            az_name=az.get('name'))).json.get(self.root_tag)
        self.assertEqual('name1', response.get('name'))

    def test_delete_in_use(self):
        az = self.create_availability_zone(
            'name1', 'description', self.azp.get('id'), True)
        project_id = uuidutils.generate_uuid()
        lb_id = uuidutils.generate_uuid()
        self.create_load_balancer(lb_id, name='lb1',
                                  project_id=project_id,
                                  description='desc1',
                                  availability_zone=az.get('name'),
                                  admin_state_up=False)
        self.delete(self.AZ_PATH.format(az_name=az.get('name')),
                    status=409)
        response = self.get(self.AZ_PATH.format(
            az_name=az.get('name'))).json.get(self.root_tag)
        self.assertEqual('name1', response.get('name'))

    @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.delete')
    def test_delete_exception(self, mock_delete):
        mock_delete.side_effect = [exceptions.OctaviaException()]
        response = self.delete(self.AZ_PATH.format(az_name='bogus'),
                               status=500)
        self.assertEqual('An unknown exception occurred.',
                         response.json.get('faultstring'))

octavia-6.2.2/octavia/tests/functional/api/v2/test_flavor_profiles.py

# Copyright 2017 Walmart Stores Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_db import exception as odb_exceptions
from oslo_utils import uuidutils

from octavia.common import constants
import octavia.common.context
from octavia.tests.functional.api.v2 import base


class TestFlavorProfiles(base.BaseAPITest):
    root_tag = 'flavorprofile'
    root_tag_list = 'flavorprofiles'
    root_tag_links = 'flavorprofile_links'

    def _assert_request_matches_response(self, req, resp, **optionals):
        self.assertTrue(uuidutils.is_uuid_like(resp.get('id')))
        self.assertEqual(req.get('name'), resp.get('name'))
        self.assertEqual(req.get(constants.PROVIDER_NAME),
                         resp.get(constants.PROVIDER_NAME))
        self.assertEqual(req.get(constants.FLAVOR_DATA),
                         resp.get(constants.FLAVOR_DATA))

    def test_empty_list(self):
        response = self.get(self.FPS_PATH)
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual([], api_list)

    def test_create(self):
        fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
                   constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(fp_json)
        response = self.post(self.FPS_PATH, body)
        api_fp = response.json.get(self.root_tag)
        self._assert_request_matches_response(fp_json, api_fp)

    def test_create_with_missing_name(self):
        fp_json = {constants.PROVIDER_NAME: 'pr1',
                   constants.FLAVOR_DATA: '{"x": "y"}'}
        body = self._build_body(fp_json)
        response = self.post(self.FPS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute name. Value: "
                   "'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_create_with_missing_provider(self):
        fp_json = {'name': 'xyz', constants.FLAVOR_DATA: '{"x": "y"}'}
        body = self._build_body(fp_json)
        response = self.post(self.FPS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute provider_name. "
                   "Value: 'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_create_with_missing_flavor_data(self):
        fp_json = {'name': 'xyz', constants.PROVIDER_NAME: 'pr1'}
        body = self._build_body(fp_json)
        response = self.post(self.FPS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute flavor_data. "
                   "Value: 'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_create_with_empty_flavor_data(self):
        fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
                   constants.FLAVOR_DATA: '{}'}
        body = self._build_body(fp_json)
        response = self.post(self.FPS_PATH, body)
        api_fp = response.json.get(self.root_tag)
        self._assert_request_matches_response(fp_json, api_fp)

    def test_create_with_long_name(self):
        fp_json = {'name': 'n' * 256,
                   constants.PROVIDER_NAME: 'test1',
                   constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(fp_json)
        self.post(self.FPS_PATH, body, status=400)

    def test_create_with_long_provider(self):
        fp_json = {'name': 'name1',
                   constants.PROVIDER_NAME: 'n' * 256,
                   constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(fp_json)
        self.post(self.FPS_PATH, body, status=400)

    def test_create_with_long_flavor_data(self):
        fp_json = {'name': 'name1', constants.PROVIDER_NAME: 'amp',
                   constants.FLAVOR_DATA: 'n' * 4097}
        body = self._build_body(fp_json)
        self.post(self.FPS_PATH, body, status=400)

    def test_create_authorized(self):
        fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
                   constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(fp_json)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.post(self.FPS_PATH, body)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        api_fp = response.json.get(self.root_tag)
        self._assert_request_matches_response(fp_json, api_fp)

    def test_create_not_authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        fp_json = {'name': 'name', constants.PROVIDER_NAME: 'xyz',
                   constants.FLAVOR_DATA: '{"x": "y"}'}
        body = self._build_body(fp_json)
        response = self.post(self.FPS_PATH, body, status=403)
        api_fp = response.json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_fp)
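
    # NOTE: In the DB failure test below, an unexpected repository error
    # maps to HTTP 500, while a DBDuplicateEntry from the create call is
    # translated to HTTP 409.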
"create") as mock_create: mock_create.side_effect = Exception self.post(self.FPS_PATH, body, status=500) mock_create.side_effect = odb_exceptions.DBDuplicateEntry self.post(self.FPS_PATH, body, status=409) def test_create_with_invalid_json(self): fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', constants.FLAVOR_DATA: '{hello: "world"}'} body = self._build_body(fp_json) self.post(self.FPS_PATH, body, status=400) def test_get(self): fp = self.create_flavor_profile('name', 'noop_driver', '{"x": "y"}') self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) response = self.get( self.FP_PATH.format( fp_id=fp.get('id'))).json.get(self.root_tag) self.assertEqual('name', response.get('name')) self.assertEqual(fp.get('id'), response.get('id')) def test_get_one_deleted_id(self): response = self.get(self.FP_PATH.format(fp_id=constants.NIL_UUID), status=404) self.assertEqual('Flavor profile {} not found.'.format( constants.NIL_UUID), response.json.get('faultstring')) def test_get_one_fields_filter(self): fp = self.create_flavor_profile('name', 'noop_driver', '{"x": "y"}') self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) response = self.get( self.FP_PATH.format(fp_id=fp.get('id')), params={ 'fields': ['id', constants.PROVIDER_NAME]} ).json.get(self.root_tag) self.assertEqual(fp.get('id'), response.get('id')) self.assertIn(u'id', response) self.assertIn(constants.PROVIDER_NAME, response) self.assertNotIn(u'name', response) self.assertNotIn(constants.FLAVOR_DATA, response) def test_get_authorized(self): fp = self.create_flavor_profile('name', 'noop_driver', '{"x": "y"}') self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get( self.FP_PATH.format( fp_id=fp.get('id'))).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual('name', response.get('name')) self.assertEqual(fp.get('id'), response.get('id')) def test_get_not_authorized(self): fp = self.create_flavor_profile('name', 'noop_driver', '{"x": "y"}') self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) self.get(self.FP_PATH.format(fp_id=fp.get('id')), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_get_all(self): fp1 = self.create_flavor_profile('test1', 'noop_driver', '{"image": "ubuntu"}') ref_fp_1 = {u'flavor_data': u'{"image": "ubuntu"}', u'id': fp1.get('id'), u'name': u'test1', constants.PROVIDER_NAME: u'noop_driver'} self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) fp2 = self.create_flavor_profile('test2', 'noop_driver-alt', '{"image": "ubuntu"}') ref_fp_2 = 

    def test_get_all(self):
        fp1 = self.create_flavor_profile('test1', 'noop_driver',
                                         '{"image": "ubuntu"}')
        ref_fp_1 = {u'flavor_data': u'{"image": "ubuntu"}',
                    u'id': fp1.get('id'), u'name': u'test1',
                    constants.PROVIDER_NAME: u'noop_driver'}
        self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
        fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
                                         '{"image": "ubuntu"}')
        ref_fp_2 = {u'flavor_data': u'{"image": "ubuntu"}',
                    u'id': fp2.get('id'), u'name': u'test2',
                    constants.PROVIDER_NAME: u'noop_driver-alt'}
        self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
        response = self.get(self.FPS_PATH)
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual(2, len(api_list))
        self.assertIn(ref_fp_1, api_list)
        self.assertIn(ref_fp_2, api_list)

    def test_get_all_fields_filter(self):
        fp1 = self.create_flavor_profile('test1', 'noop_driver',
                                         '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
        fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
                                         '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
        response = self.get(self.FPS_PATH, params={
            'fields': ['id', 'name']})
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual(2, len(api_list))
        for profile in api_list:
            self.assertIn(u'id', profile)
            self.assertIn(u'name', profile)
            self.assertNotIn(constants.PROVIDER_NAME, profile)
            self.assertNotIn(constants.FLAVOR_DATA, profile)

    def test_get_all_authorized(self):
        fp1 = self.create_flavor_profile('test1', 'noop_driver',
                                         '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
        fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
                                         '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.get(self.FPS_PATH)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual(2, len(api_list))

    def test_get_all_not_authorized(self):
        fp1 = self.create_flavor_profile('test1', 'noop_driver',
                                         '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
        fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
                                         '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        self.get(self.FPS_PATH, status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

    def test_update(self):
        fp = self.create_flavor_profile('test_profile', 'noop_driver',
                                        '{"x": "y"}')
        update_data = {'name': 'the_profile',
                       constants.PROVIDER_NAME: 'noop_driver-alt',
                       constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(update_data)
        response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body)
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('the_profile', response.get('name'))
        self.assertEqual('noop_driver-alt',
                         response.get(constants.PROVIDER_NAME))
        self.assertEqual('{"hello": "world"}',
                         response.get(constants.FLAVOR_DATA))
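
    # NOTE: constants.NIL_UUID (the all-zero UUID) acts as a sentinel for
    # an already-deleted object; GET, PUT and DELETE against it are all
    # expected to return 404.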

    def test_update_deleted_id(self):
        update_data = {'name': 'fake_profile'}
        body = self._build_body(update_data)
        response = self.put(self.FP_PATH.format(fp_id=constants.NIL_UUID),
                            body, status=404)
        self.assertEqual('Flavor profile {} not found.'.format(
            constants.NIL_UUID), response.json.get('faultstring'))

    def test_update_nothing(self):
        fp = self.create_flavor_profile('test_profile', 'noop_driver',
                                        '{"x": "y"}')
        body = self._build_body({})
        response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body)
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('test_profile', response.get('name'))
        self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
        self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))

    def test_update_name_none(self):
        self._test_update_param_none(constants.NAME)

    def test_update_provider_name_none(self):
        self._test_update_param_none(constants.PROVIDER_NAME)

    def test_update_flavor_data_none(self):
        self._test_update_param_none(constants.FLAVOR_DATA)

    def _test_update_param_none(self, param_name):
        fp = self.create_flavor_profile('test_profile', 'noop_driver',
                                        '{"x": "y"}')
        expect_error_msg = ("None is not a valid option for %s" % param_name)
        body = self._build_body({param_name: None})
        response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body,
                            status=400)
        self.assertEqual(expect_error_msg, response.json['faultstring'])

    def test_update_no_flavor_data(self):
        fp = self.create_flavor_profile('test_profile', 'noop_driver',
                                        '{"x": "y"}')
        update_data = {'name': 'the_profile',
                       constants.PROVIDER_NAME: 'noop_driver-alt'}
        body = self._build_body(update_data)
        response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body)
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('the_profile', response.get('name'))
        self.assertEqual('noop_driver-alt',
                         response.get(constants.PROVIDER_NAME))
        self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))

    def test_update_authorized(self):
        fp = self.create_flavor_profile('test_profile', 'noop_driver',
                                        '{"x": "y"}')
        update_data = {'name': 'the_profile',
                       constants.PROVIDER_NAME: 'noop_driver-alt',
                       constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(update_data)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.put(self.FP_PATH.format(fp_id=fp.get('id')),
                                    body)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('the_profile', response.get('name'))
        self.assertEqual('noop_driver-alt',
                         response.get(constants.PROVIDER_NAME))
        self.assertEqual('{"hello": "world"}',
                         response.get(constants.FLAVOR_DATA))

    def test_update_not_authorized(self):
        fp = self.create_flavor_profile('test_profile', 'noop_driver',
                                        '{"x": "y"}')
        update_data = {'name': 'the_profile',
                       constants.PROVIDER_NAME: 'amp',
                       constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(update_data)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body,
                            status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('test_profile', response.get('name'))
        self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
        self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))

    def test_update_in_use(self):
        fp = self.create_flavor_profile('test_profile', 'noop_driver',
                                        '{"x": "y"}')
        self.create_flavor('name1', 'description', fp.get('id'), True)

        # Test updating provider while in use is not allowed
        update_data = {'name': 'the_profile',
                       constants.PROVIDER_NAME: 'noop_driver-alt'}
        body = self._build_body(update_data)
        response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body,
                            status=409)
        err_msg = ("Flavor profile {} is in use and cannot be "
                   "modified.".format(fp.get('id')))
        self.assertEqual(err_msg, response.json.get('faultstring'))
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('test_profile', response.get('name'))
        self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
        self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))

        # Test updating flavor data while in use is not allowed
        update_data = {'name': 'the_profile',
                       constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(update_data)
        response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body,
                            status=409)
        err_msg = ("Flavor profile {} is in use and cannot be "
                   "modified.".format(fp.get('id')))
        self.assertEqual(err_msg, response.json.get('faultstring'))
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('test_profile', response.get('name'))
        self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
        self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))

        # Test that you can still update the name when in use
        update_data = {'name': 'the_profile'}
        body = self._build_body(update_data)
        response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body)
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('the_profile', response.get('name'))
        self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
        self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))
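
    # NOTE: As test_update_in_use above shows, a profile referenced by a
    # flavor rejects provider_name and flavor_data changes with 409 while
    # still allowing a rename.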

    def test_delete(self):
        fp = self.create_flavor_profile('test1', 'noop_driver',
                                        '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
        self.delete(self.FP_PATH.format(fp_id=fp.get('id')))
        response = self.get(self.FP_PATH.format(
            fp_id=fp.get('id')), status=404)
        err_msg = "Flavor Profile %s not found." % fp.get('id')
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_delete_deleted_id(self):
        response = self.delete(self.FP_PATH.format(fp_id=constants.NIL_UUID),
                               status=404)
        self.assertEqual('Flavor profile {} not found.'.format(
            constants.NIL_UUID), response.json.get('faultstring'))

    def test_delete_nonexistent_id(self):
        response = self.delete(self.FP_PATH.format(fp_id='bogus_id'),
                               status=404)
        self.assertEqual('Flavor profile bogus_id not found.',
                         response.json.get('faultstring'))

    def test_delete_authorized(self):
        fp = self.create_flavor_profile('test1', 'noop_driver',
                                        '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.delete(self.FP_PATH.format(fp_id=fp.get('id')))
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        response = self.get(self.FP_PATH.format(
            fp_id=fp.get('id')), status=404)
        err_msg = "Flavor Profile %s not found." % fp.get('id')
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_delete_not_authorized(self):
        fp = self.create_flavor_profile('test1', 'noop_driver',
                                        '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        response = self.delete(self.FP_PATH.format(
            fp_id=fp.get('id')), status=403)
        api_fp = response.json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_fp)
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('test1', response.get('name'))

    def test_delete_in_use(self):
        fp = self.create_flavor_profile('test1', 'noop_driver',
                                        '{"image": "ubuntu"}')
        self.create_flavor('name1', 'description', fp.get('id'), True)
        response = self.delete(self.FP_PATH.format(fp_id=fp.get('id')),
                               status=409)
        err_msg = ("Flavor profile {} is in use and cannot be "
                   "modified.".format(fp.get('id')))
        self.assertEqual(err_msg, response.json.get('faultstring'))
        response = self.get(
            self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('test1', response.get('name'))

octavia-6.2.2/octavia/tests/functional/api/v2/test_flavors.py

# Copyright 2017 Walmart Stores Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from oslo_utils import uuidutils
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture

from octavia.common import constants
import octavia.common.context
from octavia.common import exceptions
from octavia.tests.functional.api.v2 import base


class TestFlavors(base.BaseAPITest):
    root_tag = 'flavor'
    root_tag_list = 'flavors'
    root_tag_links = 'flavors_links'

    def setUp(self):
        super(TestFlavors, self).setUp()
        self.fp = self.create_flavor_profile('test1', 'noop_driver',
                                             '{"image": "ubuntu"}')

    def _assert_request_matches_response(self, req, resp, **optionals):
        self.assertTrue(uuidutils.is_uuid_like(resp.get('id')))
        req_description = req.get('description')
        self.assertEqual(req.get('name'), resp.get('name'))
        if not req_description:
            self.assertEqual('', resp.get('description'))
        else:
            self.assertEqual(req.get('description'),
                             resp.get('description'))
        self.assertEqual(req.get('flavor_profile_id'),
                         resp.get('flavor_profile_id'))
        self.assertEqual(req.get('enabled', True),
                         resp.get('enabled'))

    def test_empty_list(self):
        response = self.get(self.FLAVORS_PATH)
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual([], api_list)

    def test_create(self):
        flavor_json = {'name': 'test1',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body)
        api_flavor = response.json.get(self.root_tag)
        self._assert_request_matches_response(flavor_json, api_flavor)

    def test_create_with_missing_name(self):
        flavor_json = {'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute name. Value: "
                   "'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_create_with_long_name(self):
        flavor_json = {'name': 'n' * 256,
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        self.post(self.FLAVORS_PATH, body, status=400)

    def test_create_with_long_description(self):
        flavor_json = {'name': 'test-flavor',
                       'description': 'n' * 256,
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        self.post(self.FLAVORS_PATH, body, status=400)

    def test_create_with_missing_flavor_profile(self):
        flavor_json = {'name': 'xyz'}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute flavor_profile_id. "
                   "Value: 'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_create_with_bad_flavor_profile(self):
        flavor_json = {'name': 'xyz', 'flavor_profile_id': 'bogus'}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute flavor_profile_id. "
                   "Value: 'bogus'. Value should be UUID format")
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_create_duplicate_names(self):
        flavor1 = self.create_flavor('name', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id')))
        flavor_json = {'name': 'name',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body, status=409)
        err_msg = "A flavor of name already exists."
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_create_authorized(self):
        flavor_json = {'name': 'test1',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.post(self.FLAVORS_PATH, body)
                api_flavor = response.json.get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self._assert_request_matches_response(flavor_json, api_flavor)

    def test_create_not_authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        flavor_json = {'name': 'name',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body, status=403)
        api_flavor = response.json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_flavor)
"create") as mock_create: mock_create.side_effect = Exception self.post(self.FLAVORS_PATH, body, status=500) def test_get(self): flavor = self.create_flavor('name', 'description', self.fp.get('id'), True) self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) response = self.get( self.FLAVOR_PATH.format( flavor_id=flavor.get('id'))).json.get(self.root_tag) self.assertEqual('name', response.get('name')) self.assertEqual('description', response.get('description')) self.assertEqual(flavor.get('id'), response.get('id')) self.assertEqual(self.fp.get('id'), response.get('flavor_profile_id')) self.assertTrue(response.get('enabled')) def test_get_one_fields_filter(self): flavor = self.create_flavor('name', 'description', self.fp.get('id'), True) self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) response = self.get( self.FLAVOR_PATH.format(flavor_id=flavor.get('id')), params={ 'fields': ['id', 'flavor_profile_id']}).json.get(self.root_tag) self.assertEqual(flavor.get('id'), response.get('id')) self.assertEqual(self.fp.get('id'), response.get('flavor_profile_id')) self.assertIn(u'id', response) self.assertIn(u'flavor_profile_id', response) self.assertNotIn(u'name', response) self.assertNotIn(u'description', response) self.assertNotIn(u'enabled', response) def test_get_one_deleted_id(self): response = self.get( self.FLAVOR_PATH.format(flavor_id=constants.NIL_UUID), status=404) self.assertEqual('Flavor {} not found.'.format(constants.NIL_UUID), response.json.get('faultstring')) def test_get_authorized(self): flavor = self.create_flavor('name', 'description', self.fp.get('id'), True) self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get( self.FLAVOR_PATH.format( flavor_id=flavor.get('id'))).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual('name', response.get('name')) self.assertEqual('description', response.get('description')) self.assertEqual(flavor.get('id'), response.get('id')) self.assertEqual(self.fp.get('id'), response.get('flavor_profile_id')) self.assertTrue(response.get('enabled')) def test_get_not_authorized(self): flavor = self.create_flavor('name', 'description', self.fp.get('id'), True) self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) response = self.get(self.FLAVOR_PATH.format( flavor_id=flavor.get('id')), status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response) def test_get_all(self): flavor1 = self.create_flavor('name1', 'description', self.fp.get('id'), True) 

    def test_get_all(self):
        flavor1 = self.create_flavor('name1', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id')))
        ref_flavor_1 = {
            u'description': u'description', u'enabled': True,
            u'flavor_profile_id': self.fp.get('id'),
            u'id': flavor1.get('id'), u'name': u'name1'}
        flavor2 = self.create_flavor('name2', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id')))
        ref_flavor_2 = {
            u'description': u'description', u'enabled': True,
            u'flavor_profile_id': self.fp.get('id'),
            u'id': flavor2.get('id'), u'name': u'name2'}
        response = self.get(self.FLAVORS_PATH)
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual(2, len(api_list))
        self.assertIn(ref_flavor_1, api_list)
        self.assertIn(ref_flavor_2, api_list)

    def test_get_all_fields_filter(self):
        flavor1 = self.create_flavor('name1', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id')))
        flavor2 = self.create_flavor('name2', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id')))
        response = self.get(self.FLAVORS_PATH, params={
            'fields': ['id', 'name']})
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual(2, len(api_list))
        for flavor in api_list:
            self.assertIn(u'id', flavor)
            self.assertIn(u'name', flavor)
            self.assertNotIn(u'flavor_profile_id', flavor)
            self.assertNotIn(u'description', flavor)
            self.assertNotIn(u'enabled', flavor)

    def test_get_all_authorized(self):
        flavor1 = self.create_flavor('name1', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id')))
        flavor2 = self.create_flavor('name2', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.get(self.FLAVORS_PATH)
                api_list = response.json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(2, len(api_list))

    def test_get_all_not_authorized(self):
        flavor1 = self.create_flavor('name1', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id')))
        flavor2 = self.create_flavor('name2', 'description',
                                     self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        response = self.get(self.FLAVORS_PATH, status=403).json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response)

    def test_update(self):
        flavor_json = {'name': 'Fancy_Flavor',
                       'description': 'A great flavor. Pick me!',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body)
        api_flavor = response.json.get(self.root_tag)
        flavor_id = api_flavor.get('id')

        flavor_json = {'name': 'Better_Flavor',
                       'description': 'An even better flavor. Pick me!',
                       'enabled': False}
        body = self._build_body(flavor_json)
        response = self.put(self.FLAVOR_PATH.format(flavor_id=flavor_id),
                            body)

        updated_flavor = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor_id)).json.get(self.root_tag)
        self.assertEqual('Better_Flavor', updated_flavor.get('name'))
        self.assertEqual('An even better flavor. Pick me!',
                         updated_flavor.get('description'))
        self.assertEqual(flavor_id, updated_flavor.get('id'))
        self.assertEqual(self.fp.get('id'),
                         updated_flavor.get('flavor_profile_id'))
        self.assertFalse(updated_flavor.get('enabled'))

    def test_update_deleted_id(self):
        update_json = {'name': 'fake_name'}
        body = self._build_body(update_json)
        response = self.put(
            self.FLAVOR_PATH.format(flavor_id=constants.NIL_UUID), body,
            status=404)
        self.assertEqual('Flavor {} not found.'.format(constants.NIL_UUID),
                         response.json.get('faultstring'))

    def test_update_none(self):
        flavor_json = {'name': 'Fancy_Flavor',
                       'description': 'A great flavor. Pick me!',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body)
        api_flavor = response.json.get(self.root_tag)
        flavor_id = api_flavor.get('id')

        flavor_json = {}
        body = self._build_body(flavor_json)
        response = self.put(self.FLAVOR_PATH.format(flavor_id=flavor_id),
                            body)

        updated_flavor = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor_id)).json.get(self.root_tag)
        self.assertEqual('Fancy_Flavor', updated_flavor.get('name'))
        self.assertEqual('A great flavor. Pick me!',
                         updated_flavor.get('description'))
        self.assertEqual(flavor_id, updated_flavor.get('id'))
        self.assertEqual(self.fp.get('id'),
                         updated_flavor.get('flavor_profile_id'))
        self.assertTrue(updated_flavor.get('enabled'))

    def test_update_flavor_profile_id(self):
        flavor_json = {'name': 'Fancy_Flavor',
                       'description': 'A great flavor. Pick me!',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body)
        api_flavor = response.json.get(self.root_tag)
        flavor_id = api_flavor.get('id')

        flavor_json = {'flavor_profile_id': uuidutils.generate_uuid()}
        body = self._build_body(flavor_json)
        response = self.put(self.FLAVOR_PATH.format(flavor_id=flavor_id),
                            body, status=400)

        updated_flavor = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor_id)).json.get(self.root_tag)
        self.assertEqual(self.fp.get('id'),
                         updated_flavor.get('flavor_profile_id'))

    def test_update_authorized(self):
        flavor_json = {'name': 'Fancy_Flavor',
                       'description': 'A great flavor. Pick me!',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body)
        api_flavor = response.json.get(self.root_tag)
        flavor_id = api_flavor.get('id')

        flavor_json = {'name': 'Better_Flavor',
                       'description': 'An even better flavor. Pick me!',
                       'enabled': False}
        body = self._build_body(flavor_json)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.put(self.FLAVOR_PATH.format(
                    flavor_id=flavor_id), body)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        updated_flavor = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor_id)).json.get(self.root_tag)
        self.assertEqual('Better_Flavor', updated_flavor.get('name'))
        self.assertEqual('An even better flavor. Pick me!',
                         updated_flavor.get('description'))
        self.assertEqual(flavor_id, updated_flavor.get('id'))
        self.assertEqual(self.fp.get('id'),
                         updated_flavor.get('flavor_profile_id'))
        self.assertFalse(updated_flavor.get('enabled'))

    def test_update_not_authorized(self):
        flavor_json = {'name': 'Fancy_Flavor',
                       'description': 'A great flavor. Pick me!',
                       'flavor_profile_id': self.fp.get('id')}
        body = self._build_body(flavor_json)
        response = self.post(self.FLAVORS_PATH, body)
        api_flavor = response.json.get(self.root_tag)
        flavor_id = api_flavor.get('id')

        flavor_json = {'name': 'Better_Flavor',
                       'description': 'An even better flavor. Pick me!',
                       'enabled': False}
        body = self._build_body(flavor_json)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        response = self.put(self.FLAVOR_PATH.format(flavor_id=flavor_id),
                            body, status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        updated_flavor = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor_id)).json.get(self.root_tag)
        self.assertEqual('Fancy_Flavor', updated_flavor.get('name'))
        self.assertEqual('A great flavor. Pick me!',
                         updated_flavor.get('description'))
        self.assertEqual(flavor_id, updated_flavor.get('id'))
        self.assertEqual(self.fp.get('id'),
                         updated_flavor.get('flavor_profile_id'))
        self.assertTrue(updated_flavor.get('enabled'))

    @mock.patch('octavia.db.repositories.FlavorRepository.update')
    def test_update_exception(self, mock_update):
        mock_update.side_effect = [exceptions.OctaviaException()]
        update_json = {'name': 'A_Flavor'}
        body = self._build_body(update_json)
        response = self.put(self.FLAVOR_PATH.format(flavor_id='bogus'), body,
                            status=500)
        self.assertEqual('An unknown exception occurred.',
                         response.json.get('faultstring'))
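
    # NOTE: The delete paths mirror the flavor profile tests: missing and
    # deleted ids return 404, a flavor attached to a load balancer returns
    # 409, and repository exceptions map to 500.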

    def test_delete(self):
        flavor = self.create_flavor('name1', 'description',
                                    self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor.get('id')))
        self.delete(self.FLAVOR_PATH.format(flavor_id=flavor.get('id')))
        response = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor.get('id')), status=404)
        err_msg = "Flavor %s not found." % flavor.get('id')
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_delete_nonexistent_id(self):
        response = self.delete(
            self.FLAVOR_PATH.format(flavor_id='bogus_id'), status=404)
        self.assertEqual('Flavor bogus_id not found.',
                         response.json.get('faultstring'))

    def test_delete_deleted_id(self):
        response = self.delete(
            self.FLAVOR_PATH.format(flavor_id=constants.NIL_UUID),
            status=404)
        self.assertEqual('Flavor {} not found.'.format(constants.NIL_UUID),
                         response.json.get('faultstring'))

    def test_delete_authorized(self):
        flavor = self.create_flavor('name1', 'description',
                                    self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.delete(
                    self.FLAVOR_PATH.format(flavor_id=flavor.get('id')))
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        response = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor.get('id')), status=404)
        err_msg = "Flavor %s not found." % flavor.get('id')
        self.assertEqual(err_msg, response.json.get('faultstring'))

    def test_delete_not_authorized(self):
        flavor = self.create_flavor('name1', 'description',
                                    self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        response = self.delete(self.FLAVOR_PATH.format(
            flavor_id=flavor.get('id')), status=403)
        api_flavor = response.json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_flavor)
        response = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor.get('id'))).json.get(self.root_tag)
        self.assertEqual('name1', response.get('name'))

    def test_delete_in_use(self):
        flavor = self.create_flavor('name1', 'description',
                                    self.fp.get('id'), True)
        self.assertTrue(uuidutils.is_uuid_like(flavor.get('id')))
        project_id = uuidutils.generate_uuid()
        lb_id = uuidutils.generate_uuid()
        self.create_load_balancer(lb_id, name='lb1',
                                  project_id=project_id,
                                  description='desc1',
                                  flavor_id=flavor.get('id'),
                                  admin_state_up=False)
        self.delete(self.FLAVOR_PATH.format(flavor_id=flavor.get('id')),
                    status=409)
        response = self.get(self.FLAVOR_PATH.format(
            flavor_id=flavor.get('id'))).json.get(self.root_tag)
        self.assertEqual('name1', response.get('name'))

    @mock.patch('octavia.db.repositories.FlavorRepository.delete')
    def test_delete_exception(self, mock_delete):
        mock_delete.side_effect = [exceptions.OctaviaException()]
        response = self.delete(self.FLAVOR_PATH.format(flavor_id='bogus'),
                               status=500)
        self.assertEqual('An unknown exception occurred.',
                         response.json.get('faultstring'))
mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/api/v2/test_health_monitor.py0000664000175000017500000026767500000000000025700 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context from octavia.common import data_models from octavia.common import exceptions from octavia.db import repositories from octavia.tests.functional.api.v2 import base class TestHealthMonitor(base.BaseAPITest): root_tag = 'healthmonitor' root_tag_list = 'healthmonitors' root_tag_links = 'healthmonitors_links' def setUp(self): super(TestHealthMonitor, self).setUp() self.lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.lb_id = self.lb.get('id') self.project_id = self.lb.get('project_id') self.set_lb_status(self.lb_id) self.listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id).get('listener') self.listener_id = self.listener.get('id') self.set_lb_status(self.lb_id) self.pool = self.create_pool(self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN) self.pool_id = self.pool.get('pool').get('id') self.set_lb_status(self.lb_id) self.pool_with_listener = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id) self.pool_with_listener_id = ( self.pool_with_listener.get('pool').get('id')) self.set_lb_status(self.lb_id) self.pool_repo = repositories.PoolRepository() self._setup_udp_lb_resources() def _setup_udp_lb_resources(self): self.udp_lb = self.create_load_balancer(uuidutils.generate_uuid()).get( 'loadbalancer') self.udp_lb_id = self.udp_lb.get('id') self.set_lb_status(self.udp_lb_id) self.udp_listener = self.create_listener( constants.PROTOCOL_UDP, 8888, self.udp_lb_id).get('listener') self.udp_listener_id = self.udp_listener.get('id') self.set_lb_status(self.udp_lb_id) self.udp_pool_with_listener = self.create_pool( None, constants.PROTOCOL_UDP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.udp_listener_id) self.udp_pool_with_listener_id = ( self.udp_pool_with_listener.get('pool').get('id')) self.set_lb_status(self.udp_lb_id) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config( group='api_settings', udp_connect_min_interval_health_monitor='3') def test_get(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, tags=['test_tag']).get(self.root_tag) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_hm['provisioning_status'] = constants.ACTIVE api_hm['operating_status'] = constants.ONLINE api_hm.pop('updated_at') self.set_lb_status(self.lb_id) response = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) response.pop('updated_at') self.assertEqual(api_hm, response) def test_get_authorized(self): api_hm = 
self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_hm['provisioning_status'] = constants.ACTIVE api_hm['operating_status'] = constants.ONLINE api_hm.pop('updated_at') self.set_lb_status(self.lb_id) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) response.pop('updated_at') self.assertEqual(api_hm, response) def test_get_not_authorized(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_hm['provisioning_status'] = constants.ACTIVE api_hm['operating_status'] = constants.ONLINE api_hm.pop('updated_at') self.set_lb_status(self.lb_id) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id')), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_get_deleted_gives_404(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_object_status(self.health_monitor_repo, api_hm.get('id'), provisioning_status=constants.DELETED) self.get(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), status=404) def test_bad_get(self): self.get(self.HM_PATH.format( healthmonitor_id=uuidutils.generate_uuid()), status=404) def test_get_all(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, tags=['test_tag']).get(self.root_tag) self.set_lb_status(self.lb_id) hms = self.get(self.HMS_PATH).json.get(self.root_tag_list) self.assertIsInstance(hms, list) self.assertEqual(1, len(hms)) self.assertEqual(api_hm.get('id'), hms[0].get('id')) self.assertEqual(api_hm['tags'], hms[0]['tags']) def test_get_all_not_authorized(self): self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): hms = self.get(self.HMS_PATH, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) 
self.assertEqual(self.NOT_AUTHORIZED_BODY, hms) def test_get_all_admin(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) pool1 = self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) pool2 = self.create_pool( lb1_id, constants.PROTOCOL_HTTPS, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) pool3 = self.create_pool( lb1_id, constants.PROTOCOL_TCP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) hm1 = self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) hm2 = self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) hm3 = self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TLS_HELLO, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) hms = self.get(self.HMS_PATH).json.get(self.root_tag_list) self.assertEqual(3, len(hms)) hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] self.assertIn((hm1.get('id'), hm1.get('type')), hm_id_protocols) self.assertIn((hm2.get('id'), hm2.get('type')), hm_id_protocols) self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) def test_get_all_non_admin(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) pool1 = self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) pool2 = self.create_pool( lb1_id, constants.PROTOCOL_HTTPS, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) hm3 = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) with mock.patch.object(octavia.common.context.Context, 'project_id', hm3['project_id']): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): hms = self.get(self.HMS_PATH).json.get(self.root_tag_list) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(1, len(hms)) hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) def test_get_all_unscoped_token(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') 
self.set_lb_status(lb1_id) pool1 = self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) pool2 = self.create_pool( lb1_id, constants.PROTOCOL_HTTPS, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) with mock.patch.object(octavia.common.context.Context, 'project_id', None): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': None} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): result = self.get(self.HMS_PATH, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, result) def test_get_all_non_admin_global_observer(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) pool1 = self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) pool2 = self.create_pool( lb1_id, constants.PROTOCOL_HTTPS, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) pool3 = self.create_pool( lb1_id, constants.PROTOCOL_TCP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) hm1 = self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) hm2 = self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) hm3 = self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) with mock.patch.object(octavia.common.context.Context, 'project_id', hm3['project_id']): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_global_observer'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): hms = self.get(self.HMS_PATH).json.get(self.root_tag_list) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(3, len(hms)) hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] self.assertIn((hm1.get('id'), 
hm1.get('type')), hm_id_protocols) self.assertIn((hm2.get('id'), hm2.get('type')), hm_id_protocols) self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) def test_get_all_hides_deleted(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) response = self.get(self.HMS_PATH) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 1) self.set_object_status(self.health_monitor_repo, api_hm.get('id'), provisioning_status=constants.DELETED) response = self.get(self.HMS_PATH) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 0) def test_get_by_project_id(self): project1_id = uuidutils.generate_uuid() project2_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project1_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project2_id) lb2_id = lb2.get('loadbalancer').get('id') self.set_lb_status(lb2_id) pool1 = self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) pool2 = self.create_pool( lb1_id, constants.PROTOCOL_HTTPS, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb1_id) pool3 = self.create_pool( lb2_id, constants.PROTOCOL_TCP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb2_id) hm1 = self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) hm2 = self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb1_id) hm3 = self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(lb2_id) hms = self.get( self.HMS_PATH, params={'project_id': project1_id}).json.get(self.root_tag_list) self.assertEqual(2, len(hms)) hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] self.assertIn((hm1.get('id'), hm1.get('type')), hm_id_protocols) self.assertIn((hm2.get('id'), hm2.get('type')), hm_id_protocols) hms = self.get( self.HMS_PATH, params={'project_id': project2_id}).json.get(self.root_tag_list) self.assertEqual(1, len(hms)) hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) def test_get_all_sorted(self): pool1 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool1').get('pool') self.set_lb_status(self.lb_id) pool2 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool2').get('pool') self.set_lb_status(self.lb_id) pool3 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool3').get('pool') self.set_lb_status(self.lb_id) self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, name='hm1').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1, name='hm2').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1, name='hm3').get(self.root_tag) self.set_lb_status(self.lb_id) response = self.get(self.HMS_PATH, params={'sort': 'name:desc'}) hms_desc = 
response.json.get(self.root_tag_list) response = self.get(self.HMS_PATH, params={'sort': 'name:asc'}) hms_asc = response.json.get(self.root_tag_list) self.assertEqual(3, len(hms_desc)) self.assertEqual(3, len(hms_asc)) hm_id_names_desc = [(hm.get('id'), hm.get('name')) for hm in hms_desc] hm_id_names_asc = [(hm.get('id'), hm.get('name')) for hm in hms_asc] self.assertEqual(hm_id_names_asc, list(reversed(hm_id_names_desc))) def test_get_all_sorted_by_max_retries(self): pool1 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool1').get('pool') self.set_lb_status(self.lb_id) pool2 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool2').get('pool') self.set_lb_status(self.lb_id) pool3 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool3').get('pool') self.set_lb_status(self.lb_id) hm1 = self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 2, name='hm1').get(self.root_tag) self.set_lb_status(self.lb_id) hm2 = self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1, name='hm2').get(self.root_tag) self.set_lb_status(self.lb_id) hm3 = self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TCP, 1, 1, 1, 3, name='hm3').get(self.root_tag) self.set_lb_status(self.lb_id) response = self.get(self.HMS_PATH, params={'sort': 'max_retries:desc'}) hms_desc = response.json.get(self.root_tag_list) response = self.get(self.HMS_PATH, params={'sort': 'max_retries:asc'}) hms_asc = response.json.get(self.root_tag_list) self.assertEqual(3, len(hms_desc)) self.assertEqual(3, len(hms_asc)) hm_id_names_desc = [(hm.get('id'), hm.get('name')) for hm in hms_desc] hm_id_names_asc = [(hm.get('id'), hm.get('name')) for hm in hms_asc] self.assertEqual(hm_id_names_asc, list(reversed(hm_id_names_desc))) self.assertEqual(hm2[constants.MAX_RETRIES], hms_asc[0][constants.MAX_RETRIES]) self.assertEqual(hm1[constants.MAX_RETRIES], hms_asc[1][constants.MAX_RETRIES]) self.assertEqual(hm3[constants.MAX_RETRIES], hms_asc[2][constants.MAX_RETRIES]) self.assertEqual(hm3[constants.MAX_RETRIES], hms_desc[0][constants.MAX_RETRIES]) self.assertEqual(hm1[constants.MAX_RETRIES], hms_desc[1][constants.MAX_RETRIES]) self.assertEqual(hm2[constants.MAX_RETRIES], hms_desc[2][constants.MAX_RETRIES]) def test_get_all_limited(self): pool1 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool1').get('pool') self.set_lb_status(self.lb_id) pool2 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool2').get('pool') self.set_lb_status(self.lb_id) pool3 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool3').get('pool') self.set_lb_status(self.lb_id) self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, name='hm1').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1, name='hm2').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1, name='hm3').get(self.root_tag) self.set_lb_status(self.lb_id) # First two -- should have 'next' link first_two = self.get(self.HMS_PATH, params={'limit': 2}).json objs = first_two[self.root_tag_list] links = first_two[self.root_tag_links] self.assertEqual(2, 
len(objs)) self.assertEqual(1, len(links)) self.assertEqual('next', links[0]['rel']) # Third + off the end -- should have previous link third = self.get(self.HMS_PATH, params={ 'limit': 2, 'marker': first_two[self.root_tag_list][1]['id']}).json objs = third[self.root_tag_list] links = third[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('previous', links[0]['rel']) # Middle -- should have both links middle = self.get(self.HMS_PATH, params={ 'limit': 1, 'marker': first_two[self.root_tag_list][0]['id']}).json objs = middle[self.root_tag_list] links = middle[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(2, len(links)) self.assertItemsEqual(['previous', 'next'], [link['rel'] for link in links]) def test_get_all_fields_filter(self): pool1 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool1').get('pool') self.set_lb_status(self.lb_id) pool2 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool2').get('pool') self.set_lb_status(self.lb_id) pool3 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool3').get('pool') self.set_lb_status(self.lb_id) self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, name='hm1').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1, name='hm2').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1, name='hm3').get(self.root_tag) self.set_lb_status(self.lb_id) hms = self.get(self.HMS_PATH, params={ 'fields': ['id', 'project_id']}).json for hm in hms['healthmonitors']: self.assertIn(u'id', hm) self.assertIn(u'project_id', hm) self.assertNotIn(u'description', hm) def test_get_one_fields_filter(self): pool1 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool1').get('pool') self.set_lb_status(self.lb_id) self.set_lb_status(self.lb_id) hm1 = self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, name='hm1').get(self.root_tag) self.set_lb_status(self.lb_id) hm = self.get( self.HM_PATH.format(healthmonitor_id=hm1.get('id')), params={'fields': ['id', 'project_id']}).json.get(self.root_tag) self.assertIn(u'id', hm) self.assertIn(u'project_id', hm) self.assertNotIn(u'description', hm) def test_get_all_filter(self): pool1 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool1').get('pool') self.set_lb_status(self.lb_id) pool2 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool2').get('pool') self.set_lb_status(self.lb_id) pool3 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool3').get('pool') self.set_lb_status(self.lb_id) hm1 = self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, name='hm1').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1, name='hm2').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1, name='hm3').get(self.root_tag) self.set_lb_status(self.lb_id) hms = self.get(self.HMS_PATH, params={ 'id': 
hm1['id']}).json self.assertEqual(1, len(hms['healthmonitors'])) self.assertEqual(hm1['id'], hms['healthmonitors'][0]['id']) def test_get_all_tags_filter(self): pool1 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool1').get('pool') self.set_lb_status(self.lb_id) pool2 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool2').get('pool') self.set_lb_status(self.lb_id) pool3 = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, name='pool3').get('pool') self.set_lb_status(self.lb_id) hm1 = self.create_health_monitor( pool1.get('id'), constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, tags=['test_tag1', 'test_tag2']).get(self.root_tag) self.set_lb_status(self.lb_id) hm2 = self.create_health_monitor( pool2.get('id'), constants.HEALTH_MONITOR_PING, 1, 1, 1, 1, tags=['test_tag2', 'test_tag3']).get(self.root_tag) self.set_lb_status(self.lb_id) hm3 = self.create_health_monitor( pool3.get('id'), constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1, tags=['test_tag4', 'test_tag5']).get(self.root_tag) self.set_lb_status(self.lb_id) hms = self.get( self.HMS_PATH, params={'tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(hms, list) self.assertEqual(2, len(hms)) self.assertEqual( [hm1.get('id'), hm2.get('id')], [hm.get('id') for hm in hms] ) hms = self.get( self.HMS_PATH, params={'tags': ['test_tag2', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(hms, list) self.assertEqual(1, len(hms)) self.assertEqual( [hm2.get('id')], [hm.get('id') for hm in hms] ) hms = self.get( self.HMS_PATH, params={'tags-any': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(hms, list) self.assertEqual(2, len(hms)) self.assertEqual( [hm1.get('id'), hm2.get('id')], [hm.get('id') for hm in hms] ) hms = self.get( self.HMS_PATH, params={'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(hms, list) self.assertEqual(1, len(hms)) self.assertEqual( [hm3.get('id')], [hm.get('id') for hm in hms] ) hms = self.get( self.HMS_PATH, params={'not-tags-any': ['test_tag2', 'test_tag4']} ).json.get(self.root_tag_list) self.assertIsInstance(hms, list) self.assertEqual(0, len(hms)) hms = self.get( self.HMS_PATH, params={'tags': 'test_tag2', 'tags-any': ['test_tag1', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(hms, list) self.assertEqual(2, len(hms)) self.assertEqual( [hm1.get('id'), hm2.get('id')], [hm.get('id') for hm in hms] ) hms = self.get( self.HMS_PATH, params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(hms, list) self.assertEqual(0, len(hms)) def test_empty_get_all(self): response = self.get(self.HMS_PATH).json.get(self.root_tag_list) self.assertIsInstance(response, list) self.assertEqual(0, len(response)) def test_create_http_monitor_with_relative_path(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, url_path="/").get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) def test_create_http_monitor_with_url_path(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, 
url_path="/v2/api/index").get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) def test_create_sans_listener(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) self.set_lb_status(self.lb_id) self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) self.assertEqual(1, api_hm.get('delay')) self.assertEqual(1, api_hm.get('timeout')) self.assertEqual(1, api_hm.get('max_retries_down')) self.assertEqual(1, api_hm.get('max_retries')) # Verify optional field defaults self.assertEqual('GET', api_hm.get('http_method')) self.assertEqual('/', api_hm.get('url_path')) self.assertEqual('200', api_hm.get('expected_codes')) def test_create_http_full(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, admin_state_up=False, expected_codes='200', http_method='GET', name='Test HM', url_path='/').get(self.root_tag) self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) self.assertEqual(1, api_hm.get('delay')) self.assertEqual(1, api_hm.get('timeout')) self.assertEqual(1, api_hm.get('max_retries_down')) self.assertEqual(1, api_hm.get('max_retries')) self.assertFalse(api_hm.get('admin_state_up')) self.assertEqual('Test HM', api_hm.get('name')) self.assertEqual('GET', api_hm.get('http_method')) self.assertEqual('/', api_hm.get('url_path')) self.assertEqual('200', api_hm.get('expected_codes')) def test_create_https_full(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTPS, 1, 1, 1, 1, admin_state_up=False, expected_codes='200', http_method='GET', name='Test HM', url_path='/').get(self.root_tag) self.assertEqual(constants.HEALTH_MONITOR_HTTPS, api_hm.get('type')) self.assertEqual(1, api_hm.get('delay')) self.assertEqual(1, api_hm.get('timeout')) self.assertEqual(1, api_hm.get('max_retries_down')) self.assertEqual(1, api_hm.get('max_retries')) self.assertFalse(api_hm.get('admin_state_up')) self.assertEqual('Test HM', api_hm.get('name')) self.assertEqual('GET', api_hm.get('http_method')) self.assertEqual('/', api_hm.get('url_path')) self.assertEqual('200', api_hm.get('expected_codes')) def test_create_udp_case_with_udp_connect_type(self): # create with UDP-CONNECT type api_hm = self.create_health_monitor( self.udp_pool_with_listener_id, constants.HEALTH_MONITOR_UDP_CONNECT, 3, 1, 1, 1).get(self.root_tag) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) self.set_lb_status(self.udp_lb_id) self.assertEqual(constants.HEALTH_MONITOR_UDP_CONNECT, api_hm.get('type')) self.assertEqual(3, api_hm.get('delay')) self.assertEqual(1, api_hm.get('timeout')) 
self.assertEqual(1, api_hm.get('max_retries_down')) self.assertEqual(1, api_hm.get('max_retries')) # Verify the L7 fields is None self.assertIsNone(api_hm.get('http_method')) self.assertIsNone(api_hm.get('url_path')) self.assertIsNone(api_hm.get('expected_codes')) def test_create_udp_case_with_tcp_type(self): # create with TCP type api_hm = self.create_health_monitor( self.udp_pool_with_listener_id, constants.HEALTH_MONITOR_TCP, 3, 1, 1, 1).get(self.root_tag) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) self.set_lb_status(self.udp_lb_id) self.assertEqual(constants.HEALTH_MONITOR_TCP, api_hm.get('type')) self.assertEqual(3, api_hm.get('delay')) self.assertEqual(1, api_hm.get('timeout')) self.assertEqual(1, api_hm.get('max_retries_down')) self.assertEqual(1, api_hm.get('max_retries')) self.assertIsNone(api_hm.get('http_method')) self.assertIsNone(api_hm.get('url_path')) self.assertIsNone(api_hm.get('expected_codes')) def test_create_udp_case_with_http_type(self): # create with HTTP type api_hm = self.create_health_monitor( self.udp_pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 3, 1, 1, 1, url_path='/test.html', http_method=constants.HEALTH_MONITOR_HTTP_METHOD_GET, expected_codes='200-201').get(self.root_tag) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) self.set_lb_status(self.udp_lb_id) self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) self.assertEqual(3, api_hm.get('delay')) self.assertEqual(1, api_hm.get('timeout')) self.assertEqual(1, api_hm.get('max_retries_down')) self.assertEqual(1, api_hm.get('max_retries')) self.assertEqual(3, api_hm.get('delay')) self.assertEqual(constants.HEALTH_MONITOR_HTTP_METHOD_GET, api_hm.get('http_method')) self.assertEqual('/test.html', api_hm.get('url_path')) self.assertEqual('200-201', api_hm.get('expected_codes')) def test_udp_case_when_udp_connect_min_interval_health_monitor_set(self): # negative case first req_dict = {'pool_id': self.udp_pool_with_listener_id, 'type': constants.HEALTH_MONITOR_UDP_CONNECT, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1} res = self.post(self.HMS_PATH, self._build_body(req_dict), status=400, expect_errors=True) expect_error_msg = ("Validation failure: The request delay value 1 " "should be larger than 3 for %s health monitor " "type.") % constants.HEALTH_MONITOR_UDP_CONNECT self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, pool_id=self.udp_pool_with_listener_id) # pass cases self.conf.config( group='api_settings', udp_connect_min_interval_health_monitor='-3') res = self.post(self.HMS_PATH, self._build_body(req_dict)) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, pool_id=self.udp_pool_with_listener_id, hm_id=res.json['healthmonitor']['id'], lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, 
pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) def test_negative_create_udp_case(self): req_dict = {'pool_id': self.udp_pool_with_listener_id, 'delay': 3, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1} expect_error_msg = ("Validation failure: The associated pool protocol " "is %(pool_protocol)s, so only a %(types)s health " "monitor is supported.") % { 'pool_protocol': constants.PROTOCOL_UDP, 'types': '/'.join([constants.HEALTH_MONITOR_UDP_CONNECT, constants.HEALTH_MONITOR_TCP, constants.HEALTH_MONITOR_HTTP])} # Not allowed types specified. update_req = {'type': constants.HEALTH_MONITOR_TLS_HELLO} req_dict.update(update_req) res = self.post(self.HMS_PATH, self._build_body(req_dict), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, pool_id=self.udp_pool_with_listener_id) # Hit error during create with a non-UDP pool req_dict = {'pool_id': self.pool_with_listener_id, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'type': constants.HEALTH_MONITOR_UDP_CONNECT} expect_error_msg = ("Validation failure: The %(type)s type is only " "supported for pools of type " "%(protocol)s.") % { 'type': constants.HEALTH_MONITOR_UDP_CONNECT, 'protocol': constants.PROTOCOL_UDP} res = self.post(self.HMS_PATH, self._build_body(req_dict), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, pool_id=self.udp_pool_with_listener_id) def test_ensure_L7_fields_filled_during_create(self): # Create a health monitor with a load balancer pool api_hm = self.create_health_monitor( self.pool_id, constants.PROTOCOL_HTTP, 1, 1, 1, 1).get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) self.set_lb_status(self.lb_id) self.assertEqual(constants.HEALTH_MONITOR_HTTP_DEFAULT_METHOD, api_hm.get('http_method')) self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_URL_PATH, api_hm.get('url_path')) self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES, api_hm.get('expected_codes')) def test_create_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, hm_id=api_hm.get('id'), 
lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) self.set_lb_status(self.lb_id) self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) self.assertEqual(1, api_hm.get('delay')) self.assertEqual(1, api_hm.get('timeout')) self.assertEqual(1, api_hm.get('max_retries_down')) self.assertEqual(1, api_hm.get('max_retries')) # Verify optional field defaults self.assertEqual('GET', api_hm.get('http_method')) self.assertEqual('/', api_hm.get('url_path')) self.assertEqual('200', api_hm.get('expected_codes')) def test_create_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_hm) def test_create_pool_in_error(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) pool1 = self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') pool1_id = pool1.get('id') self.set_lb_status(lb1_id) self.set_object_status(self.pool_repo, pool1_id, provisioning_status=constants.ERROR) api_hm = self.create_health_monitor( pool1_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, status=409) ref_msg = 'Pool %s is immutable and cannot be updated.' 
% pool1_id self.assertEqual(ref_msg, api_hm.get('faultstring')) def test_create_with_listener(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_CREATE, hm_op_status=constants.OFFLINE) self.set_lb_status(self.lb_id) self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) self.assertEqual(1, api_hm.get('delay')) self.assertEqual(1, api_hm.get('timeout')) self.assertEqual(1, api_hm.get('max_retries_down')) self.assertEqual(1, api_hm.get('max_retries')) # Verify optional field defaults self.assertEqual('GET', api_hm.get('http_method')) self.assertEqual('/', api_hm.get('url_path')) self.assertEqual('200', api_hm.get('expected_codes')) def test_pool_returns_hm_id(self): api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) pool = self.get(self.POOL_PATH.format( pool_id=self.pool_id)).json.get("pool") self.assertEqual(pool.get('healthmonitor_id'), api_hm.get('id')) # TODO(rm_work) Remove after deprecation of project_id in POST (R series) def test_create_with_project_id_is_ignored(self): pid = uuidutils.generate_uuid() api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, project_id=pid).get(self.root_tag) self.assertEqual(self.project_id, api_hm.get('project_id')) def test_create_with_default_http_version(self): # Use the default HTTP/1.0 api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, admin_state_up=False, expected_codes='200', http_method='GET', name='Test HM', url_path='/', http_version='1.0').get(self.root_tag) self.assertEqual(1.0, api_hm.get('http_version')) def test_create_without_http_version(self): # Check the default http_version is 1.0 api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, admin_state_up=False, expected_codes='200', http_method='GET', name='Test HM', url_path='/').get(self.root_tag) self.assertIsNone(api_hm.get('http_version')) def test_create_with_http_version_11_and_domain_name(self): # Create with http_version 1.1 and domain_name api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTPS, 1, 1, 1, 1, admin_state_up=False, expected_codes='200', http_method='GET', name='Test HM', url_path='/', http_version=1.1, domain_name='testlab.com').get(self.root_tag) self.assertEqual(1.1, api_hm.get('http_version')) self.assertEqual('testlab.com', api_hm.get('domain_name')) def test_create_with_http_version_11(self): # Create with http_version 1.1 api_hm = self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTPS, 1, 1, 1, 1, admin_state_up=False, expected_codes='200', http_method='GET', name='Test HM', url_path='/', http_version=1.1).get(self.root_tag) self.assertEqual(1.1, api_hm.get('http_version')) def test_bad_create(self): hm_json = {'name': 'test1', 'pool_id': self.pool_id} self.post(self.HMS_PATH, self._build_body(hm_json), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_bad_create_with_invalid_url_path(self): req_dict = {'pool_id': self.pool_id, 'type': 
constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'url_path': 'https://openstack.org'} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_create_ping_when_ping_disabled(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group='api_settings', allow_ping_health_monitors=False) req_dict = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_PING, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_with_bad_provider(self, mock_provider): mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') req_dict = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'url_path': '/foo'} response = self.post(self.HMS_PATH, self._build_body(req_dict), status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) def test_create_with_type_none(self): req_dict = {'pool_id': self.pool_id, 'type': None, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'url_path': '/'} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_create_with_delay_none(self): req_dict = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_HTTP, 'delay': None, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'url_path': '/'} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_create_with_max_retries_none(self): req_dict = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': None, 'url_path': '/'} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_create_with_timeout_none(self): req_dict = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': None, 'max_retries_down': 1, 'max_retries': 1, 'url_path': '/'} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_create_with_pool_id_none(self): req_dict = {'pool_id': None, 'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'url_path': '/'} self.post(self.HMS_PATH, self._build_body(req_dict), status=404) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_create_TCP_with_http_method(self): req_dict = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_TCP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'http_method': constants.HEALTH_MONITOR_HTTP_METHOD_GET} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def 
test_create_TCP_with_url_path(self): req_dict = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_TCP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'url_path': '/'} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_create_TCP_with_expected_codes(self): req_dict = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_TCP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'expected_codes': '200'} self.post(self.HMS_PATH, self._build_body(req_dict), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id) def test_duplicate_create(self): self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1) self.set_lb_status(self.lb_id) self.create_health_monitor( self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, status=409) def test_create_over_quota(self): self.start_quota_mock(data_models.HealthMonitor) hm = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1} self.post(self.HMS_PATH, self._build_body(hm), status=403) def test_bad_create_with_http_version_and_domain_name_cases(self): hm_json = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'expected_codes': '200', 'http_version': 1.00, 'domain_name': 'testlab.com'} api_hm = self.post( self.HMS_PATH, self._build_body(hm_json), status=400).json expect_error_msg = ("http_version 1.0 is not a valid option for " "health monitors HTTP 1.1 domain name health " "check") self.assertEqual(expect_error_msg, api_hm['faultstring']) for bad_case in [{'http_version': 1.0, 'domain_name': '^testla&b.com'}, {'http_version': 1.1, 'domain_name': 'testla\nb.com'}]: hm_json = {'pool_id': self.pool_id, 'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1, 'expected_codes': '200'} hm_json.update(bad_case) api_hm = self.post( self.HMS_PATH, self._build_body(hm_json), status=400).json expect_error_msg = ( "Invalid input for field/attribute domain_name. Value: '%s'. 
" "Value should match the pattern %s") % (bad_case[ 'domain_name'], constants.DOMAIN_NAME_REGEX) self.assertEqual(expect_error_msg, api_hm['faultstring']) def test_update(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, tags=['old_tag']).get(self.root_tag) self.set_lb_status(self.lb_id) new_hm = {'max_retries': 2, 'tags': ['new_tag']} self.put( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), self._build_body(new_hm)) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_UPDATE) response = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) self.assertEqual(2, response[constants.MAX_RETRIES]) self.assertEqual(['new_tag'], response['tags']) def test_update_HTTPS(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTPS, 1, 1, 1, 1, admin_state_up=False, expected_codes='200', http_method='GET', name='Test HM', url_path='/').get(self.root_tag) self.set_lb_status(self.lb_id) self.assertEqual('/', api_hm[constants.URL_PATH]) new_hm = {constants.URL_PATH: '/health'} self.put( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), self._build_body(new_hm)) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_UPDATE) response = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) self.assertEqual('/health', response[constants.URL_PATH]) def test_update_http_version_and_domain_name(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, admin_state_up=False, expected_codes='200', http_method='GET', name='Test HM', url_path='/').get(self.root_tag) self.set_lb_status(self.lb_id) new_hm = {'http_version': 1.1, 'domain_name': 'testlab.com'} self.put( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), self._build_body(new_hm)) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_UPDATE) response = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) self.assertEqual(1.1, response['http_version']) self.assertEqual('testlab.com', response['domain_name']) def test_update_TCP(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) new_hm = {'max_retries': 2} self.put( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), self._build_body(new_hm)) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_UPDATE) response = 
self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) self.assertEqual(2, response[constants.MAX_RETRIES]) def test_update_authorized(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) new_hm = {'max_retries': 2} self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.put( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), self._build_body(new_hm)) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_UPDATE) def test_update_not_authorized(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) new_hm = {'max_retries': 2} self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.put( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), self._build_body(new_hm), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.ACTIVE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.ACTIVE, hm_prov_status=constants.ACTIVE) def test_update_udp_case(self): api_hm = self.create_health_monitor( self.udp_pool_with_listener_id, constants.HEALTH_MONITOR_TCP, 3, 1, 1, 1).get( self.root_tag) self.set_lb_status(self.udp_lb_id) new_hm = {'timeout': 2} self.put( self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), self._build_body(new_hm)) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_UPDATE) def test_negative_update_udp_case(self): api_hm = self.create_health_monitor( self.udp_pool_with_listener_id, constants.HEALTH_MONITOR_UDP_CONNECT, 3, 1, 1, 1).get( self.root_tag) self.set_lb_status(self.udp_lb_id) # Hit error during update with invalid parameter req_dict = {'delay': 3, 'timeout': 1, 
    def test_negative_update_udp_case(self):
        api_hm = self.create_health_monitor(
            self.udp_pool_with_listener_id,
            constants.HEALTH_MONITOR_UDP_CONNECT, 3, 1, 1, 1).get(
                self.root_tag)
        self.set_lb_status(self.udp_lb_id)
        # Hit error during update with invalid parameter
        req_dict = {'delay': 3,
                    'timeout': 1,
                    'max_retries_down': 1,
                    'max_retries': 1,
                    'http_method': constants.HEALTH_MONITOR_HTTP_METHOD_GET}
        expect_error_msg = (("http_method is not a valid option for health "
                             "monitors of type %s") %
                            constants.HEALTH_MONITOR_UDP_CONNECT)
        res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                       self._build_body(req_dict), status=400,
                       expect_errors=True)
        self.assertEqual(expect_error_msg, res.json['faultstring'])
        self.assert_correct_status(
            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id,
            pool_id=self.udp_pool_with_listener_id)
        # Hit error during update with smaller delay value
        req_dict = {'delay': 1}
        expect_error_msg = ("Validation failure: The request delay value 1 "
                            "should be larger than 3 for %s health monitor "
                            "type.") % constants.HEALTH_MONITOR_UDP_CONNECT
        res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                       self._build_body(req_dict), status=400,
                       expect_errors=True)
        self.assertEqual(expect_error_msg, res.json['faultstring'])
        self.assert_correct_status(
            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id,
            pool_id=self.udp_pool_with_listener_id)

    def test_bad_update(self):
        api_hm = self.create_health_monitor(self.pool_with_listener_id,
                                            constants.HEALTH_MONITOR_HTTP,
                                            1, 1, 1, 1).get(self.root_tag)
        new_hm = {'http_method': 'bad_method', 'delay': 2}
        self.set_lb_status(self.lb_id)
        self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                 self._build_body(new_hm), status=400)

    def test_update_delay_none(self):
        api_hm = self.create_health_monitor(self.pool_with_listener_id,
                                            constants.HEALTH_MONITOR_HTTP,
                                            1, 1, 1, 1).get(self.root_tag)
        new_hm = {constants.DELAY: None}
        self.set_lb_status(self.lb_id)
        expect_error_msg = ("None is not a valid option for %s" %
                            constants.DELAY)
        res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                       self._build_body(new_hm), status=400)
        self.assertEqual(expect_error_msg, res.json['faultstring'])

    def test_update_max_retries_none(self):
        api_hm = self.create_health_monitor(self.pool_with_listener_id,
                                            constants.HEALTH_MONITOR_HTTP,
                                            1, 1, 1, 1).get(self.root_tag)
        new_hm = {constants.MAX_RETRIES: None}
        self.set_lb_status(self.lb_id)
        expect_error_msg = ("None is not a valid option for %s" %
                            constants.MAX_RETRIES)
        res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                       self._build_body(new_hm), status=400)
        self.assertEqual(expect_error_msg, res.json['faultstring'])

    def test_update_timeout_none(self):
        api_hm = self.create_health_monitor(self.pool_with_listener_id,
                                            constants.HEALTH_MONITOR_HTTP,
                                            1, 1, 1, 1).get(self.root_tag)
        new_hm = {constants.TIMEOUT: None}
        self.set_lb_status(self.lb_id)
        expect_error_msg = ("None is not a valid option for %s" %
                            constants.TIMEOUT)
        res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                       self._build_body(new_hm), status=400)
        self.assertEqual(expect_error_msg, res.json['faultstring'])

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_update_with_bad_provider(self, mock_provider):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_hm = {'max_retries': 2}
        mock_provider.side_effect = exceptions.ProviderDriverError(
            prov='bad_driver', user_msg='broken')
        response = self.put(
            self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
            self._build_body(new_hm), status=500)
        self.assertIn('Provider \'bad_driver\' reports error: broken',
                      response.json.get('faultstring'))
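    # Driver failures raised through octavia.api.drivers.utils.call_provider
    # surface to the caller as HTTP 500, with the provider name and message
    # echoed in the response 'faultstring', as the test above pins down.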
    def test_update_TCP_setting_http_method(self):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_hm = {'http_method': constants.HEALTH_MONITOR_HTTP_METHOD_GET}
        self.put(
            self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
            self._build_body(new_hm), status=400)

    def test_update_TCP_setting_url_path(self):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_hm = {'url_path': '/'}
        self.put(
            self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
            self._build_body(new_hm), status=400)

    def test_update_TCP_setting_expected_codes(self):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_hm = {'expected_codes': '200'}
        self.put(
            self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
            self._build_body(new_hm), status=400)

    def test_update_HTTP_http_method_none(self):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_hm = {'http_method': None}
        self.put(
            self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
            self._build_body(new_hm))
        response = self.get(self.HM_PATH.format(
            healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag)
        self.assertEqual(constants.HEALTH_MONITOR_HTTP_METHOD_GET,
                         response['http_method'])

    def test_update_HTTP_url_path_none(self):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_hm = {'url_path': None}
        self.put(
            self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
            self._build_body(new_hm))
        response = self.get(self.HM_PATH.format(
            healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag)
        self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_URL_PATH,
                         response['url_path'])

    def test_update_HTTP_expected_codes_none(self):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_hm = {'expected_codes': None}
        self.put(
            self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
            self._build_body(new_hm))
        response = self.get(self.HM_PATH.format(
            healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag)
        self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES,
                         response['expected_codes'])
" "Value should match the pattern %s") % (new_hm[ 'domain_name'], constants.DOMAIN_NAME_REGEX) self.assertEqual(expect_error_msg, response.json['faultstring']) def test_update_unset_defaults(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, name='test', domain_name='test.example.com', expected_codes='400', http_method='HEAD', http_version='1.1', url_path='/test').get(self.root_tag) new_hm = {constants.DOMAIN_NAME: None, constants.EXPECTED_CODES: None, constants.HTTP_METHOD: None, constants.HTTP_VERSION: None, constants.MAX_RETRIES_DOWN: None, 'name': None, constants.URL_PATH: None} self.set_lb_status(self.lb_id) res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), self._build_body(new_hm)).json.get(self.root_tag) self.assertIsNone(res[constants.DOMAIN_NAME]) self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES, res[constants.EXPECTED_CODES]) self.assertEqual(constants.HEALTH_MONITOR_HTTP_DEFAULT_METHOD, res[constants.HTTP_METHOD]) self.assertIsNone(res[constants.HTTP_VERSION]) self.assertEqual(constants.DEFAULT_MAX_RETRIES_DOWN, res[constants.MAX_RETRIES_DOWN]) self.assertEqual('', res['name']) self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_URL_PATH, res[constants.URL_PATH]) def test_delete(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) hm = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) api_hm['provisioning_status'] = constants.ACTIVE api_hm['operating_status'] = constants.ONLINE self.assertIsNone(api_hm.pop('updated_at')) self.assertIsNotNone(hm.pop('updated_at')) self.assertEqual(api_hm, hm) self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id'))) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, hm_prov_status=constants.PENDING_DELETE) def test_delete_authorized(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) self.set_lb_status(self.lb_id) hm = self.get(self.HM_PATH.format( healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) api_hm['provisioning_status'] = constants.ACTIVE api_hm['operating_status'] = constants.ONLINE self.assertIsNone(api_hm.pop('updated_at')) self.assertIsNotNone(hm.pop('updated_at')) self.assertEqual(api_hm, hm) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.delete( self.HM_PATH.format(healthmonitor_id=api_hm.get('id'))) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( 
    def test_delete_authorized(self):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        hm = self.get(self.HM_PATH.format(
            healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag)
        api_hm['provisioning_status'] = constants.ACTIVE
        api_hm['operating_status'] = constants.ONLINE
        self.assertIsNone(api_hm.pop('updated_at'))
        self.assertIsNotNone(hm.pop('updated_at'))
        self.assertEqual(api_hm, hm)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.delete(
                    self.HM_PATH.format(healthmonitor_id=api_hm.get('id')))
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_UPDATE,
            hm_prov_status=constants.PENDING_DELETE)

    def test_delete_not_authorized(self):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        hm = self.get(self.HM_PATH.format(
            healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag)
        api_hm['provisioning_status'] = constants.ACTIVE
        api_hm['operating_status'] = constants.ONLINE
        self.assertIsNone(api_hm.pop('updated_at'))
        self.assertIsNotNone(hm.pop('updated_at'))
        self.assertEqual(api_hm, hm)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            self.delete(
                self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'),
            lb_prov_status=constants.ACTIVE,
            listener_prov_status=constants.ACTIVE,
            pool_prov_status=constants.ACTIVE,
            hm_prov_status=constants.ACTIVE)

    def test_bad_delete(self):
        self.delete(
            self.HM_PATH.format(healthmonitor_id=uuidutils.generate_uuid()),
            status=404)

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_delete_with_bad_provider(self, mock_provider):
        api_hm = self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        hm = self.get(self.HM_PATH.format(
            healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag)
        api_hm['provisioning_status'] = constants.ACTIVE
        api_hm['operating_status'] = constants.ONLINE
        self.assertIsNone(api_hm.pop('updated_at'))
        self.assertIsNotNone(hm.pop('updated_at'))
        self.assertEqual(api_hm, hm)
        mock_provider.side_effect = exceptions.ProviderDriverError(
            prov='bad_driver', user_msg='broken')
        self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                    status=500)

    def test_create_when_lb_pending_update(self):
        self.put(self.LB_PATH.format(lb_id=self.lb_id),
                 body={'loadbalancer': {'name': 'test_name_change'}})
        self.create_health_monitor(
            self.pool_with_listener_id,
            constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, status=409)

    def test_update_when_lb_pending_update(self):
        api_hm = self.create_health_monitor(
            self.pool_id, constants.HEALTH_MONITOR_HTTP,
            1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.put(self.LB_PATH.format(lb_id=self.lb_id),
                 body={'loadbalancer': {'name': 'test_name_change'}})
        new_hm = {'max_retries': 2}
        self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                 body=self._build_body(new_hm), status=409)

    def test_delete_when_lb_pending_update(self):
        api_hm = self.create_health_monitor(
            self.pool_id, constants.HEALTH_MONITOR_HTTP,
            1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.put(self.LB_PATH.format(lb_id=self.lb_id),
                 body={'loadbalancer': {'name': 'test_name_change'}})
        self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                    status=409)
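    # While a load balancer is immutable (PENDING_UPDATE or PENDING_DELETE),
    # create/update/delete calls against its child objects are rejected with
    # 409 Conflict, which the *_when_lb_pending_* tests around here verify.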
    def test_create_when_lb_pending_delete(self):
        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
                    params={'cascade': "true"})
        self.create_health_monitor(
            self.pool_id, constants.HEALTH_MONITOR_HTTP,
            1, 1, 1, 1, status=409)

    def test_update_when_lb_pending_delete(self):
        api_hm = self.create_health_monitor(
            self.pool_id, constants.HEALTH_MONITOR_HTTP,
            1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
                    params={'cascade': "true"})
        new_hm = {'max_retries': 2}
        self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                 body=self._build_body(new_hm), status=409)

    def test_update_already_deleted(self):
        api_hm = self.create_health_monitor(
            self.pool_id, constants.HEALTH_MONITOR_HTTP,
            1, 1, 1, 1).get(self.root_tag)
        # This updates the child objects
        self.set_lb_status(self.lb_id, status=constants.DELETED)
        self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                 body=self._build_body({'max_retries': 2}), status=404)

    def test_delete_when_lb_pending_delete(self):
        api_hm = self.create_health_monitor(
            self.pool_id, constants.HEALTH_MONITOR_HTTP,
            1, 1, 1, 1).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
                    params={'cascade': "true"})
        self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                    status=409)

    def test_delete_already_deleted(self):
        api_hm = self.create_health_monitor(
            self.pool_id, constants.HEALTH_MONITOR_HTTP,
            1, 1, 1, 1).get(self.root_tag)
        # This updates the child objects
        self.set_lb_status(self.lb_id, status=constants.DELETED)
        self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
                    status=404)


# octavia-6.2.2/octavia/tests/functional/api/v2/test_l7policy.py

# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
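# Functional tests for the v2 L7 policy API. They drive the WSGI app through
# the shared base.BaseAPITest helpers (create_l7policy, set_lb_status, etc.)
# and assert on both response bodies and cascaded provisioning statuses.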
from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

from octavia.common import constants
import octavia.common.context
from octavia.common import data_models
from octavia.common import exceptions
from octavia.tests.common import constants as c_const
from octavia.tests.functional.api.v2 import base


class TestL7Policy(base.BaseAPITest):

    root_tag = 'l7policy'
    root_tag_list = 'l7policies'
    root_tag_links = 'l7policies_links'

    def setUp(self):
        super(TestL7Policy, self).setUp()
        self.lb = self.create_load_balancer(uuidutils.generate_uuid())
        self.lb_id = self.lb.get('loadbalancer').get('id')
        self.project_id = self.lb.get('loadbalancer').get('project_id')
        self.set_lb_status(self.lb_id)
        self.listener = self.create_listener(
            constants.PROTOCOL_HTTP, 80, lb_id=self.lb_id)
        self.listener_id = self.listener.get('listener').get('id')
        self.set_lb_status(self.lb_id)
        self.pool = self.create_pool(
            self.lb_id,
            constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN)
        self.pool_id = self.pool.get('pool').get('id')
        self.set_lb_status(self.lb_id)

    def test_get(self):
        api_l7policy = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT,
            tags=['test_tag']).get(self.root_tag)
        response = self.get(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag)
        self.assertEqual(api_l7policy, response)

    def test_get_authorized(self):
        api_l7policy = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.get(self.L7POLICY_PATH.format(
                    l7policy_id=api_l7policy.get('id')))
                response = response.json.get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(api_l7policy, response)

    def test_get_not_authorized(self):
        api_l7policy = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            response = self.get(self.L7POLICY_PATH.format(
                l7policy_id=api_l7policy.get('id')), status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)

    def test_get_deleted_gives_404(self):
        api_l7policy = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.set_object_status(self.l7policy_repo, api_l7policy.get('id'),
                               provisioning_status=constants.DELETED)
        self.get(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')), status=404)
    def test_bad_get(self):
        self.get(self.L7POLICY_PATH.format(
            l7policy_id=uuidutils.generate_uuid()), status=404)

    def test_get_all(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            tags=['test_tag']
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        policies = self.get(self.L7POLICIES_PATH).json.get(self.root_tag_list)
        self.assertIsInstance(policies, list)
        self.assertEqual(1, len(policies))
        self.assertEqual(api_l7policy.get('id'), policies[0].get('id'))
        self.assertEqual(api_l7policy['tags'], policies[0]['tags'])

    def test_get_all_admin(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id)
        listener1_id = listener1.get('listener').get('id')
        self.set_lb_status(lb1_id)
        pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP,
                                 constants.LB_ALGORITHM_ROUND_ROBIN)
        pool1_id = pool1.get('pool').get('id')
        self.set_lb_status(lb1_id)
        api_l7p_a = self.create_l7policy(
            listener1_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.set_lb_status(lb1_id)
        api_l7p_b = self.create_l7policy(
            listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=pool1_id).get(self.root_tag)
        self.set_lb_status(lb1_id)
        api_l7p_c = self.create_l7policy(
            listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            position=3, redirect_url='http://localhost/').get(self.root_tag)
        self.set_lb_status(lb1_id)
        policies = self.get(self.L7POLICIES_PATH).json.get(self.root_tag_list)
        self.assertEqual(3, len(policies))
        policy_id_actions = [(p.get('id'), p.get('action'))
                             for p in policies]
        self.assertIn((api_l7p_a.get('id'), api_l7p_a.get('action')),
                      policy_id_actions)
        self.assertIn((api_l7p_b.get('id'), api_l7p_b.get('action')),
                      policy_id_actions)
        self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')),
                      policy_id_actions)
    def test_get_all_non_admin(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id)
        listener1_id = listener1.get('listener').get('id')
        self.set_lb_status(lb1_id)
        pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP,
                                 constants.LB_ALGORITHM_ROUND_ROBIN)
        pool1_id = pool1.get('pool').get('id')
        self.set_lb_status(lb1_id)
        self.create_l7policy(
            listener1_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.set_lb_status(lb1_id)
        self.create_l7policy(
            listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=pool1_id).get(self.root_tag)
        self.set_lb_status(lb1_id)
        api_l7p_c = self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/').get(self.root_tag)
        self.set_lb_status(lb1_id)

        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               api_l7p_c.get('project_id')):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                policies = self.get(
                    self.L7POLICIES_PATH).json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(1, len(policies))
        policy_id_actions = [(p.get('id'), p.get('action'))
                             for p in policies]
        self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')),
                      policy_id_actions)

    def test_get_all_unscoped_token(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id)
        listener1_id = listener1.get('listener').get('id')
        self.set_lb_status(lb1_id)
        pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP,
                                 constants.LB_ALGORITHM_ROUND_ROBIN)
        pool1_id = pool1.get('pool').get('id')
        self.set_lb_status(lb1_id)
        self.create_l7policy(
            listener1_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.set_lb_status(lb1_id)
        self.create_l7policy(
            listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=pool1_id).get(self.root_tag)
        self.set_lb_status(lb1_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/').get(self.root_tag)
        self.set_lb_status(lb1_id)

        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               None):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': None}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                result = self.get(self.L7POLICIES_PATH, status=403).json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, result)
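    # List scoping is role-driven: a load-balancer_member only sees its own
    # project's policies, an unscoped token is rejected with 403, and (below)
    # a load-balancer_global_observer can list every project's policies.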
    def test_get_all_non_admin_global_observer(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id)
        listener1_id = listener1.get('listener').get('id')
        self.set_lb_status(lb1_id)
        pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP,
                                 constants.LB_ALGORITHM_ROUND_ROBIN)
        pool1_id = pool1.get('pool').get('id')
        self.set_lb_status(lb1_id)
        api_l7p_a = self.create_l7policy(
            listener1_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.set_lb_status(lb1_id)
        api_l7p_b = self.create_l7policy(
            listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=pool1_id).get(self.root_tag)
        self.set_lb_status(lb1_id)
        api_l7p_c = self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/').get(self.root_tag)
        self.set_lb_status(lb1_id)

        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               api_l7p_c.get('project_id')):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_global_observer'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                policies = self.get(
                    self.L7POLICIES_PATH).json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(3, len(policies))
        policy_id_actions = [(p.get('id'), p.get('action'))
                             for p in policies]
        self.assertIn((api_l7p_a.get('id'), api_l7p_a.get('action')),
                      policy_id_actions)
        self.assertIn((api_l7p_b.get('id'), api_l7p_b.get('action')),
                      policy_id_actions)
        self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')),
                      policy_id_actions)

    def test_get_all_not_authorized(self):
        self.create_l7policy(self.listener_id,
                             constants.L7POLICY_ACTION_REJECT,
                             ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            policies = self.get(self.L7POLICIES_PATH, status=403).json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, policies)

    def test_get_all_hides_deleted(self):
        api_l7policy = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        response = self.get(self.L7POLICIES_PATH)
        objects = response.json.get(self.root_tag_list)
        self.assertEqual(len(objects), 1)
        self.set_object_status(self.l7policy_repo, api_l7policy.get('id'),
                               provisioning_status=constants.DELETED)
        response = self.get(self.L7POLICIES_PATH)
        objects = response.json.get(self.root_tag_list)
        self.assertEqual(len(objects), 0)
    def test_get_by_project_id(self):
        project1_id = uuidutils.generate_uuid()
        project2_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project1_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        lb2 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb2', project_id=project2_id)
        lb2_id = lb2.get('loadbalancer').get('id')
        self.set_lb_status(lb2_id)
        listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id)
        listener1_id = listener1.get('listener').get('id')
        self.set_lb_status(lb1_id)
        listener2 = self.create_listener(constants.PROTOCOL_HTTP, 80, lb2_id)
        listener2_id = listener2.get('listener').get('id')
        self.set_lb_status(lb2_id)
        pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP,
                                 constants.LB_ALGORITHM_ROUND_ROBIN)
        pool1_id = pool1.get('pool').get('id')
        self.set_lb_status(lb1_id)
        api_l7p_a = self.create_l7policy(
            listener1_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.set_lb_status(lb1_id)
        api_l7p_b = self.create_l7policy(
            listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=pool1_id).get(self.root_tag)
        self.set_lb_status(lb1_id)
        api_l7p_c = self.create_l7policy(
            listener2_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/').get(self.root_tag)
        self.set_lb_status(lb2_id)
        policies = self.get(
            self.L7POLICIES_PATH,
            params={'project_id': project1_id}).json.get(self.root_tag_list)

        self.assertEqual(2, len(policies))
        policy_id_actions = [(p.get('id'), p.get('action'))
                             for p in policies]
        self.assertIn((api_l7p_a.get('id'), api_l7p_a.get('action')),
                      policy_id_actions)
        self.assertIn((api_l7p_b.get('id'), api_l7p_b.get('action')),
                      policy_id_actions)
        policies = self.get(
            self.L7POLICIES_PATH,
            params={'project_id': project2_id}).json.get(self.root_tag_list)
        self.assertEqual(1, len(policies))
        policy_id_actions = [(p.get('id'), p.get('action'))
                             for p in policies]
        self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')),
                      policy_id_actions)

    def test_get_all_sorted(self):
        self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT,
            name='policy3').get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=self.pool_id,
            name='policy2').get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/',
            name='policy1').get(self.root_tag)
        self.set_lb_status(self.lb_id)

        response = self.get(self.L7POLICIES_PATH,
                            params={'sort': 'position:desc'})
        policies_desc = response.json.get(self.root_tag_list)
        response = self.get(self.L7POLICIES_PATH,
                            params={'sort': 'position:asc'})
        policies_asc = response.json.get(self.root_tag_list)

        self.assertEqual(3, len(policies_desc))
        self.assertEqual(3, len(policies_asc))

        policy_id_names_desc = [(policy.get('id'), policy.get('position'))
                                for policy in policies_desc]
        policy_id_names_asc = [(policy.get('id'), policy.get('position'))
                               for policy in policies_asc]
        self.assertEqual(policy_id_names_asc,
                         list(reversed(policy_id_names_desc)))

    def test_get_all_limited(self):
        self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT,
            name='policy1').get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=self.pool_id,
            name='policy2').get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/',
            name='policy3').get(self.root_tag)
        self.set_lb_status(self.lb_id)

        # First two -- should have 'next' link
        first_two = self.get(self.L7POLICIES_PATH, params={'limit': 2}).json
        objs = first_two[self.root_tag_list]
        links = first_two[self.root_tag_links]
        self.assertEqual(2, len(objs))
        self.assertEqual(1, len(links))
        self.assertEqual('next', links[0]['rel'])

        # Third + off the end -- should have previous link
        third = self.get(self.L7POLICIES_PATH, params={
            'limit': 2,
            'marker': first_two[self.root_tag_list][1]['id']}).json
        objs = third[self.root_tag_list]
        links = third[self.root_tag_links]
        self.assertEqual(1, len(objs))
        self.assertEqual(1, len(links))
        self.assertEqual('previous', links[0]['rel'])

        # Middle -- should have both links
        middle = self.get(self.L7POLICIES_PATH, params={
            'limit': 1,
            'marker': first_two[self.root_tag_list][0]['id']}).json
        objs = middle[self.root_tag_list]
        links = middle[self.root_tag_links]
        self.assertEqual(1, len(objs))
        self.assertEqual(2, len(links))
        self.assertItemsEqual(['previous', 'next'],
                              [link['rel'] for link in links])
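    # Pagination follows the usual limit/marker scheme; e.g. (path per this
    # test base, presumably /v2.0/lbaas/l7policies) querying with limit=2
    # yields a 'next' link, and re-querying with marker=<last id> walks the
    # collection, with 'previous' links appearing past the first page.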
    def test_get_all_fields_filter(self):
        self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT,
            name='policy1').get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=self.pool_id,
            name='policy2').get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/',
            name='policy3').get(self.root_tag)
        self.set_lb_status(self.lb_id)

        l7pos = self.get(self.L7POLICIES_PATH, params={
            'fields': ['id', 'project_id']}).json
        for l7po in l7pos['l7policies']:
            self.assertIn(u'id', l7po)
            self.assertIn(u'project_id', l7po)
            self.assertNotIn(u'description', l7po)

    def test_get_one_fields_filter(self):
        l7p1 = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT,
            name='policy1').get(self.root_tag)
        self.set_lb_status(self.lb_id)

        l7po = self.get(
            self.L7POLICY_PATH.format(l7policy_id=l7p1.get('id')),
            params={'fields': ['id', 'project_id']}).json.get(self.root_tag)
        self.assertIn(u'id', l7po)
        self.assertIn(u'project_id', l7po)
        self.assertNotIn(u'description', l7po)

    def test_get_all_filter(self):
        policy1 = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT,
            name='policy1').get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=self.pool_id,
            name='policy2').get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/',
            name='policy3').get(self.root_tag)
        self.set_lb_status(self.lb_id)

        l7pos = self.get(self.L7POLICIES_PATH, params={
            'id': policy1['id']}).json
        self.assertEqual(1, len(l7pos['l7policies']))
        self.assertEqual(policy1['id'], l7pos['l7policies'][0]['id'])
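    # Tag filters combine as follows: 'tags' requires every listed tag (AND),
    # 'tags-any' requires at least one (OR), and 'not-tags' / 'not-tags-any'
    # invert those checks, as the test below exercises case by case.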
    def test_get_all_tags_filter(self):
        policy1 = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT,
            tags=['test_tag1', 'test_tag2']).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        policy2 = self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            position=2, redirect_pool_id=self.pool_id,
            tags=['test_tag2', 'test_tag3']).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        policy3 = self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://localhost/',
            tags=['test_tag4', 'test_tag5']).get(self.root_tag)
        self.set_lb_status(self.lb_id)

        policies = self.get(
            self.L7POLICIES_PATH,
            params={'tags': 'test_tag2'}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(policies, list)
        self.assertEqual(2, len(policies))
        self.assertEqual(
            [policy1.get('id'), policy2.get('id')],
            [policy.get('id') for policy in policies]
        )

        policies = self.get(
            self.L7POLICIES_PATH,
            params={'tags': ['test_tag2', 'test_tag3']}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(policies, list)
        self.assertEqual(1, len(policies))
        self.assertEqual(
            [policy2.get('id')],
            [policy.get('id') for policy in policies]
        )

        policies = self.get(
            self.L7POLICIES_PATH,
            params={'tags-any': 'test_tag2'}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(policies, list)
        self.assertEqual(2, len(policies))
        self.assertEqual(
            [policy1.get('id'), policy2.get('id')],
            [policy.get('id') for policy in policies]
        )

        policies = self.get(
            self.L7POLICIES_PATH,
            params={'not-tags': 'test_tag2'}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(policies, list)
        self.assertEqual(1, len(policies))
        self.assertEqual(
            [policy3.get('id')],
            [policy.get('id') for policy in policies]
        )

        policies = self.get(
            self.L7POLICIES_PATH,
            params={'not-tags-any': ['test_tag2', 'test_tag4']}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(policies, list)
        self.assertEqual(0, len(policies))

        policies = self.get(
            self.L7POLICIES_PATH,
            params={'tags': 'test_tag2',
                    'tags-any': ['test_tag1', 'test_tag3']}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(policies, list)
        self.assertEqual(2, len(policies))
        self.assertEqual(
            [policy1.get('id'), policy2.get('id')],
            [policy.get('id') for policy in policies]
        )

        policies = self.get(
            self.L7POLICIES_PATH,
            params={'tags': 'test_tag2', 'not-tags': 'test_tag2'}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(policies, list)
        self.assertEqual(0, len(policies))

    def test_empty_get_all(self):
        response = self.get(self.L7POLICIES_PATH).json.get(self.root_tag_list)
        self.assertIsInstance(response, list)
        self.assertEqual(0, len(response))

    def test_create_reject_policy(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.assertEqual(constants.L7POLICY_ACTION_REJECT,
                         api_l7policy['action'])
        self.assertEqual(1, api_l7policy['position'])
        self.assertIsNone(api_l7policy['redirect_pool_id'])
        self.assertIsNone(api_l7policy['redirect_url'])
        self.assertTrue(api_l7policy['admin_state_up'])
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_CREATE,
            l7policy_op_status=constants.OFFLINE)

    def test_create_policy_authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                api_l7policy = self.create_l7policy(
                    self.listener_id,
                    constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(constants.L7POLICY_ACTION_REJECT,
                         api_l7policy['action'])
        self.assertEqual(1, api_l7policy['position'])
        self.assertIsNone(api_l7policy['redirect_pool_id'])
        self.assertIsNone(api_l7policy['redirect_url'])
        self.assertTrue(api_l7policy['admin_state_up'])
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_CREATE,
            l7policy_op_status=constants.OFFLINE)

    def test_create_policy_not_authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            api_l7policy = self.create_l7policy(
                self.listener_id,
                constants.L7POLICY_ACTION_REJECT, status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_l7policy)
    def test_create_redirect_to_pool(self):
        api_l7policy = self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            redirect_pool_id=self.pool_id).get(self.root_tag)
        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
                         api_l7policy['action'])
        self.assertEqual(1, api_l7policy['position'])
        self.assertEqual(self.pool_id, api_l7policy['redirect_pool_id'])
        self.assertIsNone(api_l7policy['redirect_url'])
        self.assertTrue(api_l7policy['admin_state_up'])
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_CREATE,
            l7policy_op_status=constants.OFFLINE)

    def test_create_redirect_to_url(self):
        api_l7policy = self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://www.example.com').get(self.root_tag)
        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                         api_l7policy['action'])
        self.assertEqual(1, api_l7policy['position'])
        self.assertIsNone(api_l7policy.get('redirect_pool_id'))
        self.assertEqual('http://www.example.com',
                         api_l7policy['redirect_url'])
        self.assertTrue(api_l7policy['admin_state_up'])
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_CREATE,
            l7policy_op_status=constants.OFFLINE)
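    # redirect_http_code only applies to the REDIRECT_TO_URL and
    # REDIRECT_PREFIX actions; for REDIRECT_TO_POOL the API ignores it and
    # returns it as None, which the next test demonstrates.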
    def test_create_with_redirect_http_code(self):
        action_key_values = {
            constants.L7POLICY_ACTION_REDIRECT_PREFIX: {
                'redirect_prefix': 'https://example.com',
                'redirect_http_code': 302},
            constants.L7POLICY_ACTION_REDIRECT_TO_URL: {
                'redirect_url': 'http://www.example.com',
                'redirect_http_code': 301}}
        count = 1
        # First, test with redirect actions
        for action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                       constants.L7POLICY_ACTION_REDIRECT_PREFIX]:
            api_l7policy = self.create_l7policy(
                self.listener_id, action,
                **action_key_values[action]).get(self.root_tag)
            self.assertEqual(action, api_l7policy['action'])
            self.assertEqual(count, api_l7policy['position'])
            self.assertIsNone(api_l7policy.get('redirect_pool_id'))
            if api_l7policy.get('redirect_url'):
                self.assertEqual(action_key_values[action]['redirect_url'],
                                 api_l7policy['redirect_url'])
            elif api_l7policy.get('redirect_prefix'):
                self.assertEqual(action_key_values[action]['redirect_prefix'],
                                 api_l7policy['redirect_prefix'])
            self.assertEqual(action_key_values[action]['redirect_http_code'],
                             api_l7policy['redirect_http_code'])
            self.assert_correct_status(
                lb_id=self.lb_id, listener_id=self.listener_id,
                l7policy_id=api_l7policy.get('id'),
                lb_prov_status=constants.PENDING_UPDATE,
                listener_prov_status=constants.PENDING_UPDATE,
                l7policy_prov_status=constants.PENDING_CREATE,
                l7policy_op_status=constants.OFFLINE)
            self.set_lb_status(self.lb_id)
            count += 1

        # test with redirect_pool action
        api_l7policy = self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            redirect_pool_id=self.pool_id,
            redirect_http_code=308).get(self.root_tag)
        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
                         api_l7policy['action'])
        self.assertEqual(self.pool_id, api_l7policy.get('redirect_pool_id'))
        self.assertIsNone(api_l7policy.get('redirect_url'))
        self.assertIsNone(api_l7policy.get('redirect_prefix'))
        self.assertIsNone(api_l7policy.get('redirect_http_code'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_CREATE,
            l7policy_op_status=constants.OFFLINE)

    def test_bad_create(self):
        l7policy = {'listener_id': self.listener_id,
                    'name': 'test1'}
        self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
                  status=400)

    def test_bad_create_redirect_to_pool(self):
        l7policy = {
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            'listener_id': self.listener_id,
            'redirect_pool_id': uuidutils.generate_uuid()}
        self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
                  status=404)

    def test_bad_create_redirect_to_url(self):
        l7policy = {'listener_id': self.listener_id,
                    'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                    'redirect_url': 'bad url'}
        self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
                  status=400)

    def test_bad_create_with_redirect_http_code(self):
        for test_code in [1, '', 'HTTPCODE']:
            l7policy = {'listener_id': self.listener_id,
                        'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                        'redirect_url': 'http://www.example.com',
                        'redirect_http_code': test_code}
            self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
                      status=400)

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_create_with_bad_provider(self, mock_provider):
        mock_provider.side_effect = exceptions.ProviderDriverError(
            prov='bad_driver', user_msg='broken')
        l7policy = {'listener_id': self.listener_id,
                    'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                    'redirect_url': 'http://a.com'}
        response = self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
                             status=500)
        self.assertIn('Provider \'bad_driver\' reports error: broken',
                      response.json.get('faultstring'))

    def test_create_over_quota(self):
        self.start_quota_mock(data_models.L7Policy)
        l7policy = {'listener_id': self.listener_id,
                    'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                    'redirect_url': 'http://a.com'}
        self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
                  status=403)

    def test_update(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            tags=['old_tag']
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            'redirect_url': 'http://www.example.com',
            'tags': ['new_tag']}
        response = self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy)).json.get(self.root_tag)
        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                         response.get('action'))
        self.assertEqual(['new_tag'], response['tags'])
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_UPDATE)
    def test_update_authorized(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            'redirect_url': 'http://www.example.com'}
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.put(self.L7POLICY_PATH.format(
                    l7policy_id=api_l7policy.get('id')),
                    self._build_body(new_l7policy)).json.get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                         response.get('action'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_UPDATE)

    def test_update_not_authorized(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            'redirect_url': 'http://www.example.com'}
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            response = self.put(self.L7POLICY_PATH.format(
                l7policy_id=api_l7policy.get('id')),
                self._build_body(new_l7policy), status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.ACTIVE,
            listener_prov_status=constants.ACTIVE,
            l7policy_prov_status=constants.ACTIVE)

    def test_bad_update(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        new_l7policy = {'listener_id': self.listener_id,
                        'action': 'bad action'}
        self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy), status=400)

    def test_bad_update_redirect_to_pool(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        new_l7policy = {
            'listener_id': self.listener_id,
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            'redirect_pool_id': uuidutils.generate_uuid()}
        self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy), status=400)

    def test_bad_update_redirect_to_url(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        new_l7policy = {
            'listener_id': self.listener_id,
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            'redirect_url': 'bad url'}
        self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy), status=400)
    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_update_with_bad_provider(self, mock_provider):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            'redirect_url': 'http://www.example.com'}
        mock_provider.side_effect = exceptions.ProviderDriverError(
            prov='bad_driver', user_msg='broken')
        response = self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy), status=500)
        self.assertIn('Provider \'bad_driver\' reports error: broken',
                      response.json.get('faultstring'))

    def test_update_redirect_to_pool_bad_pool_id(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {'redirect_pool_id': uuidutils.generate_uuid()}
        self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy), status=404)

    def test_update_redirect_to_pool_minimal(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {'redirect_pool_id': self.pool_id}
        self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy))

    def test_update_redirect_to_url_bad_url(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {'listener_id': self.listener_id,
                        'redirect_url': 'bad-url'}
        self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy), status=400)

    def test_update_redirect_to_url_minimal(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {'redirect_url': 'http://www.example.com/'}
        self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy))

    def test_update_with_redirect_http_code(self):
        # test from non exist
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            'redirect_url': 'http://www.example.com',
            'redirect_http_code': 308}
        response = self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy)).json.get(self.root_tag)
        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                         response.get('action'))
        self.assertEqual(308, response.get('redirect_http_code'))
        self.set_lb_status(self.lb_id)

        # test from exist to new
        api_l7policy = self.create_l7policy(
            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            redirect_url='http://www.example.com',
            redirect_http_code=302).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {
            'redirect_http_code': 308}
        response = self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy)).json.get(self.root_tag)
        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                         response.get('action'))
        self.assertEqual(308, response.get('redirect_http_code'))
        self.set_lb_status(self.lb_id)

        # test from exist to null
        new_l7policy = {
            'redirect_http_code': None}
        response = self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy)).json.get(self.root_tag)
        self.assertIsNone(response.get('redirect_http_code'))
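    # redirect_http_code is fully mutable on update: it can be set on a
    # policy that lacked it, changed on one that had it, and cleared again
    # with an explicit None, as test_update_with_redirect_http_code shows.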
    def test_bad_update_with_redirect_http_code(self):
        api_l7policy = self.create_l7policy(self.listener_id,
                                            constants.L7POLICY_ACTION_REJECT,
                                            ).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_l7policy = {
            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
            'redirect_url': 'http://www.example.com',
            'redirect_http_code': ''}
        self.put(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')),
            self._build_body(new_l7policy),
            status=400).json.get(self.root_tag)

    def test_delete(self):
        api_l7policy = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
        api_l7policy['provisioning_status'] = constants.ACTIVE
        api_l7policy['operating_status'] = constants.ONLINE
        api_l7policy.pop('updated_at')

        response = self.get(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag)
        response.pop('updated_at')
        self.assertEqual(api_l7policy, response)

        self.delete(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id')))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_DELETE)

    def test_delete_authorized(self):
        api_l7policy = self.create_l7policy(
            self.listener_id,
            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
        api_l7policy['provisioning_status'] = constants.ACTIVE
        api_l7policy['operating_status'] = constants.ONLINE
        api_l7policy.pop('updated_at')

        response = self.get(self.L7POLICY_PATH.format(
            l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag)
        response.pop('updated_at')
        self.assertEqual(api_l7policy, response)

        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.delete(self.L7POLICY_PATH.format(
                    l7policy_id=api_l7policy.get('id')))
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            l7policy_id=api_l7policy.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            l7policy_prov_status=constants.PENDING_DELETE)
status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=api_l7policy.get('id'), lb_prov_status=constants.ACTIVE, listener_prov_status=constants.ACTIVE, l7policy_prov_status=constants.ACTIVE) def test_bad_delete(self): self.delete(self.L7POLICY_PATH.format( l7policy_id=uuidutils.generate_uuid()), status=404) @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_with_bad_provider(self, mock_provider): api_l7policy = self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, ).get(self.root_tag) self.set_lb_status(self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_l7policy['provisioning_status'] = constants.ACTIVE api_l7policy['operating_status'] = constants.ONLINE response = self.get(self.L7POLICY_PATH.format( l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag) self.assertIsNone(api_l7policy.pop('updated_at')) self.assertIsNotNone(response.pop('updated_at')) self.assertEqual(api_l7policy, response) mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') self.delete(self.L7POLICY_PATH.format( l7policy_id=api_l7policy.get('id')), status=500) def test_create_when_lb_pending_update(self): self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, ) self.set_lb_status(self.lb_id) self.put(self.LB_PATH.format(lb_id=self.lb_id), body={'loadbalancer': {'name': 'test_name_change'}}) new_l7policy = { 'listener_id': self.listener_id, 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://www.example.com'} self.post(self.L7POLICIES_PATH, body=self._build_body(new_l7policy), status=409) def test_update_when_lb_pending_update(self): l7policy = self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, ).get(self.root_tag) self.set_lb_status(self.lb_id) self.put(self.LB_PATH.format(lb_id=self.lb_id), body={'loadbalancer': {'name': 'test_name_change'}}) new_l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://www.example.com'} self.put(self.L7POLICY_PATH.format( l7policy_id=l7policy.get('id')), body=self._build_body(new_l7policy), status=409) def test_delete_when_lb_pending_update(self): l7policy = self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, ).get(self.root_tag) self.set_lb_status(self.lb_id) self.put(self.LB_PATH.format(lb_id=self.lb_id), body={'loadbalancer': {'name': 'test_name_change'}}) self.delete(self.L7POLICY_PATH.format( l7policy_id=l7policy.get('id')), status=409) def test_create_when_lb_pending_delete(self): self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) new_l7policy = { 'listener_id': self.listener_id, 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://www.example.com'} self.post(self.L7POLICIES_PATH, body=self._build_body(new_l7policy), status=409) def test_update_when_lb_pending_delete(self): l7policy = self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, ).get(self.root_tag) self.set_lb_status(self.lb_id) self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) new_l7policy = { 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://www.example.com'} self.put(self.L7POLICY_PATH.format( l7policy_id=l7policy.get('id')), body=self._build_body(new_l7policy), status=409) def test_delete_when_lb_pending_delete(self): l7policy = 
self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, ).get(self.root_tag) self.set_lb_status(self.lb_id) self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) self.delete(self.L7POLICY_PATH.format( l7policy_id=l7policy.get('id')), status=409) def test_update_already_deleted(self): l7policy = self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, ).get(self.root_tag) # This updates the child objects self.set_lb_status(self.lb_id, status=constants.DELETED) new_l7policy = { 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://www.example.com'} self.put(self.L7POLICY_PATH.format(l7policy_id=l7policy.get('id')), body=self._build_body(new_l7policy), status=404) def test_delete_already_deleted(self): l7policy = self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, ).get(self.root_tag) # This updates the child objects self.set_lb_status(self.lb_id, status=constants.DELETED) self.delete(self.L7POLICY_PATH.format( l7policy_id=l7policy.get('id')), status=404) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_listener_pool_protocol_map_post(self, mock_cert_data): cert = data_models.TLSContainer(certificate='cert') mock_cert_data.return_value = {'sni_certs': [cert]} valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP port = 1 l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL} for listener_proto in valid_map: for pool_proto in valid_map[listener_proto]: port = port + 1 opts = {} if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: opts['sni_container_refs'] = [uuidutils.generate_uuid()] listener = self.create_listener( listener_proto, port, self.lb_id, **opts).get('listener') self.set_object_status(self.lb_repo, self.lb_id) pool = self.create_pool( self.lb_id, pool_proto, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') l7policy['listener_id'] = listener.get('id') l7policy['redirect_pool_id'] = pool.get('id') self.set_object_status(self.lb_repo, self.lb_id) self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=201) self.set_object_status(self.lb_repo, self.lb_id) invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP port = 100 for listener_proto in invalid_map: opts = {} if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: opts['sni_container_refs'] = [uuidutils.generate_uuid()] listener = self.create_listener( listener_proto, port, self.lb_id, **opts).get('listener') self.set_object_status(self.lb_repo, self.lb_id) port = port + 1 for pool_proto in invalid_map[listener_proto]: pool = self.create_pool( self.lb_id, pool_proto, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_object_status(self.lb_repo, self.lb_id) l7policy['listener_id'] = listener.get('id') l7policy['redirect_pool_id'] = pool.get('id') expect_error_msg = ("Validation failure: The pool protocol " "'%s' is invalid while the listener " "protocol is '%s'.") % (pool_proto, listener_proto) res = self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=400) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status(lb_id=self.lb_id) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_listener_pool_protocol_map_put(self, mock_cert_data): cert = data_models.TLSContainer(certificate='cert') mock_cert_data.return_value = {'sni_certs': [cert]} valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP port = 1 new_l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL} for 
listener_proto in valid_map: for pool_proto in valid_map[listener_proto]: port = port + 1 opts = {} if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: opts['sni_container_refs'] = [uuidutils.generate_uuid()] listener = self.create_listener( listener_proto, port, self.lb_id, **opts).get('listener') self.set_object_status(self.lb_repo, self.lb_id) pool = self.create_pool( self.lb_id, pool_proto, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_object_status(self.lb_repo, self.lb_id) l7policy = self.create_l7policy( listener.get('id'), constants.L7POLICY_ACTION_REJECT).get(self.root_tag) self.set_object_status(self.lb_repo, self.lb_id) new_l7policy['redirect_pool_id'] = pool.get('id') self.put( self.L7POLICY_PATH.format(l7policy_id=l7policy.get('id')), self._build_body(new_l7policy), status=200) self.set_object_status(self.lb_repo, self.lb_id) invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP port = 100 for listener_proto in invalid_map: opts = {} if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: opts['sni_container_refs'] = [uuidutils.generate_uuid()] listener = self.create_listener( listener_proto, port, self.lb_id, **opts).get('listener') self.set_object_status(self.lb_repo, self.lb_id) port = port + 1 for pool_proto in invalid_map[listener_proto]: pool = self.create_pool( self.lb_id, pool_proto, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_object_status(self.lb_repo, self.lb_id) l7policy = self.create_l7policy( listener.get('id'), constants.L7POLICY_ACTION_REJECT).get(self.root_tag) self.set_object_status(self.lb_repo, self.lb_id) new_l7policy['redirect_pool_id'] = pool.get('id') expect_error_msg = ("Validation failure: The pool protocol " "'%s' is invalid while the listener " "protocol is '%s'.") % (pool_proto, listener_proto) res = self.put(self.L7POLICY_PATH.format( l7policy_id=l7policy.get('id')), self._build_body(new_l7policy), status=400) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status(lb_id=self.lb_id)
[file: octavia-6.2.2/octavia/tests/functional/api/v2/test_l7rule.py]
# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
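# A minimal sketch, assuming the Octavia v2 L7 rules API shape that the tests
# below drive through self.post()/self.create_l7rule(). The constant name
# EXAMPLE_L7RULE_BODY is hypothetical and exists only for illustration; the
# field names mirror the ones asserted throughout this module.
EXAMPLE_L7RULE_BODY = {
    'rule': {                           # 'rule' is the root_tag this class uses
        'type': 'PATH',                 # constants.L7RULE_TYPE_PATH
        'compare_type': 'STARTS_WITH',  # constants.L7RULE_COMPARE_TYPE_STARTS_WITH
        'value': '/api',                # match request paths beginning with /api
        'invert': False,                # True would negate the match
    }
}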
from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context from octavia.common import exceptions from octavia.db import repositories from octavia.tests.functional.api.v2 import base class TestL7Rule(base.BaseAPITest): root_tag = 'rule' root_tag_list = 'rules' root_tag_links = 'rules_links' def setUp(self): super(TestL7Rule, self).setUp() self.lb = self.create_load_balancer(uuidutils.generate_uuid()) self.lb_id = self.lb.get('loadbalancer').get('id') self.project_id = self.lb.get('loadbalancer').get('project_id') self.set_lb_status(self.lb_id) self.listener = self.create_listener( constants.PROTOCOL_HTTP, 80, lb_id=self.lb_id) self.listener_id = self.listener.get('listener').get('id') self.set_lb_status(self.lb_id) self.l7policy = self.create_l7policy( self.listener_id, constants.L7POLICY_ACTION_REJECT) self.l7policy_id = self.l7policy.get('l7policy').get('id') self.set_lb_status(self.lb_id) self.l7rules_path = self.L7RULES_PATH.format( l7policy_id=self.l7policy_id) self.l7rule_path = self.l7rules_path + '/{l7rule_id}' self.l7policy_repo = repositories.L7PolicyRepository() def test_get(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api', tags=['test_tag']).get(self.root_tag) response = self.get(self.l7rule_path.format( l7rule_id=l7rule.get('id'))).json.get(self.root_tag) self.assertEqual(l7rule, response) def test_get_authorized(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.l7rule_path.format( l7rule_id=l7rule.get('id'))).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(l7rule, response) def test_get_not_authorized(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): response = self.get(self.l7rule_path.format( l7rule_id=l7rule.get('id')), status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response) def test_get_deleted_gives_404(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) 
self.set_object_status(self.l7rule_repo, api_l7rule.get('id'), provisioning_status=constants.DELETED) self.get(self.l7rule_path.format(l7rule_id=api_l7rule.get('id')), status=404) def test_get_bad_parent_policy(self): bad_path = (self.L7RULES_PATH.format( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=uuidutils.generate_uuid()) + '/' + uuidutils.generate_uuid()) self.get(bad_path, status=404) def test_bad_get(self): self.get(self.l7rule_path.format( l7rule_id=uuidutils.generate_uuid()), status=404) def test_get_all(self): api_l7r_a = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api', tags=['test_tag1']).get(self.root_tag) self.set_lb_status(self.lb_id) api_l7r_b = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie', tags=['test_tag2']).get(self.root_tag) self.set_lb_status(self.lb_id) rules = self.get(self.l7rules_path).json.get(self.root_tag_list) self.assertIsInstance(rules, list) self.assertEqual(2, len(rules)) rule_id_types = [(r.get('id'), r.get('type'), r['tags']) for r in rules] self.assertIn((api_l7r_a.get('id'), api_l7r_a.get('type'), api_l7r_a['tags']), rule_id_types) self.assertIn((api_l7r_b.get('id'), api_l7r_b.get('type'), api_l7r_b['tags']), rule_id_types) def test_get_all_authorized(self): api_l7r_a = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) api_l7r_b = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie').get(self.root_tag) self.set_lb_status(self.lb_id) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): rules = self.get( self.l7rules_path).json.get(self.root_tag_list) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertIsInstance(rules, list) self.assertEqual(2, len(rules)) rule_id_types = [(r.get('id'), r.get('type')) for r in rules] self.assertIn((api_l7r_a.get('id'), api_l7r_a.get('type')), rule_id_types) self.assertIn((api_l7r_b.get('id'), api_l7r_b.get('type')), rule_id_types) def test_get_all_unscoped_token(self): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie').get(self.root_tag) self.set_lb_status(self.lb_id) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with 
mock.patch.object(octavia.common.context.Context, 'project_id', None): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': None} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): result = self.get(self.l7rules_path, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, result) def test_get_all_not_authorized(self): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie').get(self.root_tag) self.set_lb_status(self.lb_id) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): rules = self.get(self.l7rules_path, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, rules.json) def test_get_all_sorted(self): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com').get(self.root_tag) self.set_lb_status(self.lb_id) response = self.get(self.l7rules_path, params={'sort': 'type:desc'}) rules_desc = response.json.get(self.root_tag_list) response = self.get(self.l7rules_path, params={'sort': 'type:asc'}) rules_asc = response.json.get(self.root_tag_list) self.assertEqual(3, len(rules_desc)) self.assertEqual(3, len(rules_asc)) rule_id_types_desc = [(rule.get('id'), rule.get('type')) for rule in rules_desc] rule_id_types_asc = [(rule.get('id'), rule.get('type')) for rule in rules_asc] self.assertEqual(rule_id_types_asc, list(reversed(rule_id_types_desc))) def test_get_all_limited(self): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com').get(self.root_tag) self.set_lb_status(self.lb_id) # First two -- should have 'next' link first_two = self.get(self.l7rules_path, params={'limit': 2}).json objs = first_two[self.root_tag_list] links = first_two[self.root_tag_links] self.assertEqual(2, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('next', links[0]['rel']) # Third + off the end -- should have previous 
link third = self.get(self.l7rules_path, params={ 'limit': 2, 'marker': first_two[self.root_tag_list][1]['id']}).json objs = third[self.root_tag_list] links = third[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('previous', links[0]['rel']) # Middle -- should have both links middle = self.get(self.l7rules_path, params={ 'limit': 1, 'marker': first_two[self.root_tag_list][0]['id']}).json objs = middle[self.root_tag_list] links = middle[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(2, len(links)) self.assertItemsEqual(['previous', 'next'], [link['rel'] for link in links]) def test_get_all_fields_filter(self): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com').get(self.root_tag) self.set_lb_status(self.lb_id) l7rus = self.get(self.l7rules_path, params={ 'fields': ['id', 'compare_type']}).json for l7ru in l7rus['rules']: self.assertIn(u'id', l7ru) self.assertIn(u'compare_type', l7ru) self.assertNotIn(u'project_id', l7ru) def test_get_one_fields_filter(self): l7r1 = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) l7ru = self.get( self.l7rule_path.format(l7rule_id=l7r1.get('id')), params={'fields': ['id', 'compare_type']}).json.get(self.root_tag) self.assertIn(u'id', l7ru) self.assertIn(u'compare_type', l7ru) self.assertNotIn(u'project_id', l7ru) def test_get_all_filter(self): ru1 = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com').get(self.root_tag) self.set_lb_status(self.lb_id) l7rus = self.get(self.l7rules_path, params={ 'id': ru1['id']}).json self.assertEqual(1, len(l7rus['rules'])) self.assertEqual(ru1['id'], l7rus['rules'][0]['id']) def test_get_all_tags_filter(self): rule1 = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api', tags=['test_tag1', 'test_tag2']).get(self.root_tag) self.set_lb_status(self.lb_id) rule2 = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie', tags=['test_tag2', 'test_tag3']).get(self.root_tag) self.set_lb_status(self.lb_id) rule3 = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com', tags=['test_tag4', 'test_tag5']).get(self.root_tag) self.set_lb_status(self.lb_id) rules = self.get( self.l7rules_path, params={'tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(rules, list) self.assertEqual(2, len(rules)) self.assertEqual( [rule1.get('id'), rule2.get('id')], 
[rule.get('id') for rule in rules] ) rules = self.get( self.l7rules_path, params={'tags': ['test_tag2', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(rules, list) self.assertEqual(1, len(rules)) self.assertEqual( [rule2.get('id')], [rule.get('id') for rule in rules] ) rules = self.get( self.l7rules_path, params={'tags-any': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(rules, list) self.assertEqual(2, len(rules)) self.assertEqual( [rule1.get('id'), rule2.get('id')], [rule.get('id') for rule in rules] ) rules = self.get( self.l7rules_path, params={'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(rules, list) self.assertEqual(1, len(rules)) self.assertEqual( [rule3.get('id')], [rule.get('id') for rule in rules] ) rules = self.get( self.l7rules_path, params={'not-tags-any': ['test_tag2', 'test_tag4']} ).json.get(self.root_tag_list) self.assertIsInstance(rules, list) self.assertEqual(0, len(rules)) rules = self.get( self.l7rules_path, params={'tags': 'test_tag2', 'tags-any': ['test_tag1', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(rules, list) self.assertEqual(2, len(rules)) self.assertEqual( [rule1.get('id'), rule2.get('id')], [rule.get('id') for rule in rules] ) rules = self.get( self.l7rules_path, params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(rules, list) self.assertEqual(0, len(rules)) def test_empty_get_all(self): response = self.get(self.l7rules_path).json.get(self.root_tag_list) self.assertIsInstance(response, list) self.assertEqual(0, len(response)) def test_get_all_hides_deleted(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) response = self.get(self.l7rules_path) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 1) self.set_object_status(self.l7rule_repo, api_l7rule.get('id'), provisioning_status=constants.DELETED) response = self.get(self.l7rules_path) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 0) def test_create_host_name_rule(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com').get(self.root_tag) self.assertEqual(constants.L7RULE_TYPE_HOST_NAME, api_l7rule.get('type')) self.assertEqual(constants.L7RULE_COMPARE_TYPE_EQUAL_TO, api_l7rule.get('compare_type')) self.assertEqual('www.example.com', api_l7rule.get('value')) self.assertIsNone(api_l7rule.get('key')) self.assertFalse(api_l7rule.get('invert')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_CREATE, l7rule_op_status=constants.OFFLINE) def test_create_rule_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 
'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com').get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(constants.L7RULE_TYPE_HOST_NAME, api_l7rule.get('type')) self.assertEqual(constants.L7RULE_COMPARE_TYPE_EQUAL_TO, api_l7rule.get('compare_type')) self.assertEqual('www.example.com', api_l7rule.get('value')) self.assertIsNone(api_l7rule.get('key')) self.assertFalse(api_l7rule.get('invert')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_CREATE, l7rule_op_status=constants.OFFLINE) def test_create_rule_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com', status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_l7rule) def test_create_l7policy_in_error(self): l7policy = self.create_l7policy( self.listener_id, constants.L7POLICY_ACTION_REJECT) l7policy_id = l7policy.get('l7policy').get('id') self.set_lb_status(self.lb_id) self.set_object_status(self.l7policy_repo, l7policy_id, provisioning_status=constants.ERROR) api_l7rule = self.create_l7rule( l7policy_id, constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com', status=409) ref_msg = ('L7Policy %s is immutable and cannot be updated.' 
% l7policy_id) self.assertEqual(ref_msg, api_l7rule.get('faultstring')) def test_create_path_rule(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api', invert=True).get(self.root_tag) self.assertEqual(constants.L7RULE_TYPE_PATH, api_l7rule.get('type')) self.assertEqual(constants.L7RULE_COMPARE_TYPE_STARTS_WITH, api_l7rule.get('compare_type')) self.assertEqual('/api', api_l7rule.get('value')) self.assertIsNone(api_l7rule.get('key')) self.assertTrue(api_l7rule.get('invert')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_CREATE, l7rule_op_status=constants.OFFLINE) def test_create_file_type_rule(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_FILE_TYPE, constants.L7RULE_COMPARE_TYPE_REGEX, 'jpg|png').get(self.root_tag) self.assertEqual(constants.L7RULE_TYPE_FILE_TYPE, api_l7rule.get('type')) self.assertEqual(constants.L7RULE_COMPARE_TYPE_REGEX, api_l7rule.get('compare_type')) self.assertEqual('jpg|png', api_l7rule.get('value')) self.assertIsNone(api_l7rule.get('key')) self.assertFalse(api_l7rule.get('invert')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_CREATE, l7rule_op_status=constants.OFFLINE) def test_create_header_rule(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_HEADER, constants.L7RULE_COMPARE_TYPE_ENDS_WITH, '"some string"', key='Some-header').get(self.root_tag) self.assertEqual(constants.L7RULE_TYPE_HEADER, api_l7rule.get('type')) self.assertEqual(constants.L7RULE_COMPARE_TYPE_ENDS_WITH, api_l7rule.get('compare_type')) self.assertEqual('"some string"', api_l7rule.get('value')) self.assertEqual('Some-header', api_l7rule.get('key')) self.assertFalse(api_l7rule.get('invert')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_CREATE, l7rule_op_status=constants.OFFLINE) def test_create_cookie_rule(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_COOKIE, constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', key='some-cookie').get(self.root_tag) self.assertEqual(constants.L7RULE_TYPE_COOKIE, api_l7rule.get('type')) self.assertEqual(constants.L7RULE_COMPARE_TYPE_CONTAINS, api_l7rule.get('compare_type')) self.assertEqual('some-value', api_l7rule.get('value')) self.assertEqual('some-cookie', api_l7rule.get('key')) self.assertFalse(api_l7rule.get('invert')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_CREATE, l7rule_op_status=constants.OFFLINE) 
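    # A minimal sketch of the per-policy rule quota that the next test
    # exercises, assuming the guard is a simple count against
    # MAX_L7RULES_PER_L7POLICY (patched down to 2 below so the limit is
    # cheap to hit); the exception name is illustrative, not necessarily
    # the one the API layer raises:
    #
    #     if len(l7policy.l7rules) >= constants.MAX_L7RULES_PER_L7POLICY:
    #         raise exceptions.TooManyL7RulesOnL7Policy(id=l7policy.id)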
@mock.patch('octavia.common.constants.MAX_L7RULES_PER_L7POLICY', new=2) def test_create_too_many_rules(self): for i in range(0, constants.MAX_L7RULES_PER_L7POLICY): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) body = {'type': constants.L7RULE_TYPE_PATH, 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, 'value': '/api'} self.post(self.l7rules_path, self._build_body(body), status=409) def test_bad_create(self): l7rule = {'name': 'test1'} self.post(self.l7rules_path, self._build_body(l7rule), status=400) def test_bad_create_host_name_rule(self): l7rule = {'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH} self.post(self.l7rules_path, self._build_body(l7rule), status=400) def test_bad_create_path_rule(self): l7rule = {'type': constants.L7RULE_TYPE_PATH, 'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, 'value': 'bad string\\'} self.post(self.l7rules_path, self._build_body(l7rule), status=400) def test_bad_create_file_type_rule(self): l7rule = {'type': constants.L7RULE_TYPE_FILE_TYPE, 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, 'value': 'png'} self.post(self.l7rules_path, self._build_body(l7rule), status=400) def test_bad_create_header_rule(self): l7rule = {'type': constants.L7RULE_TYPE_HEADER, 'compare_type': constants.L7RULE_COMPARE_TYPE_CONTAINS, 'value': 'some-string'} self.post(self.l7rules_path, self._build_body(l7rule), status=400) def test_bad_create_cookie_rule(self): l7rule = {'type': constants.L7RULE_TYPE_COOKIE, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'key': 'bad cookie name', 'value': 'some-string'} self.post(self.l7rules_path, self._build_body(l7rule), status=400) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_with_bad_provider(self, mock_provider): mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') l7rule = {'compare_type': 'REGEX', 'invert': False, 'type': 'PATH', 'value': '/images*', 'admin_state_up': True} response = self.post(self.l7rules_path, self._build_body(l7rule), status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) def test_create_with_ssl_rule_types(self): test_mapping = { constants.L7RULE_TYPE_SSL_CONN_HAS_CERT: { 'value': 'tRuE', 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO}, constants.L7RULE_TYPE_SSL_VERIFY_RESULT: { 'value': '0', 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO}, constants.L7RULE_TYPE_SSL_DN_FIELD: { 'key': 'st-1', 'value': 'ST-FIELD1-PREFIX', 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH} } for l7rule_type, test_body in test_mapping.items(): self.set_lb_status(self.lb_id) test_body.update({'type': l7rule_type}) api_l7rule = self.create_l7rule( self.l7policy_id, l7rule_type, test_body['compare_type'], test_body['value'], key=test_body.get('key')).get(self.root_tag) self.assertEqual(l7rule_type, api_l7rule.get('type')) self.assertEqual(test_body['compare_type'], api_l7rule.get('compare_type')) self.assertEqual(test_body['value'], api_l7rule.get('value')) if test_body.get('key'): self.assertEqual(test_body['key'], api_l7rule.get('key')) self.assertFalse(api_l7rule.get('invert')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, 
listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_CREATE, l7rule_op_status=constants.OFFLINE) def _test_bad_cases_with_ssl_rule_types(self, is_create=True, rule_id=None): if is_create: req_func = self.post first_req_arg = self.l7rules_path else: req_func = self.put first_req_arg = self.l7rule_path.format(l7rule_id=rule_id) # test bad cases of L7RULE_TYPE_SSL_CONN_HAS_CERT l7rule = {'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'invert': False, 'type': constants.L7RULE_TYPE_SSL_CONN_HAS_CERT, 'value': 'true', 'admin_state_up': True, 'key': 'no-need-key'} response = req_func(first_req_arg, self._build_body(l7rule), status=400).json self.assertIn('L7rule type {0} does not use the "key" field.'.format( constants.L7RULE_TYPE_SSL_CONN_HAS_CERT), response.get('faultstring')) l7rule.pop('key') l7rule['value'] = 'not-true-string' response = req_func(first_req_arg, self._build_body(l7rule), status=400).json self.assertIn( 'L7rule value {0} is not a boolean True string.'.format( l7rule['value']), response.get('faultstring')) l7rule['value'] = 'tRUe' l7rule['compare_type'] = constants.L7RULE_COMPARE_TYPE_STARTS_WITH response = req_func(first_req_arg, self._build_body(l7rule), status=400).json self.assertIn( 'L7rule type {0} only supports the {1} compare type.'.format( constants.L7RULE_TYPE_SSL_CONN_HAS_CERT, constants.L7RULE_COMPARE_TYPE_EQUAL_TO), response.get('faultstring')) # test bad cases of L7RULE_TYPE_SSL_VERIFY_RES l7rule = {'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'invert': False, 'type': constants.L7RULE_TYPE_SSL_VERIFY_RESULT, 'value': 'true', 'admin_state_up': True, 'key': 'no-need-key'} response = req_func(first_req_arg, self._build_body(l7rule), status=400).json self.assertIn( 'L7rule type {0} does not use the "key" field.'.format( l7rule['type']), response.get('faultstring')) l7rule.pop('key') response = req_func(first_req_arg, self._build_body(l7rule), status=400).json self.assertIn( 'L7rule type {0} needs a int value, which is >= 0'.format( l7rule['type']), response.get('faultstring')) l7rule['value'] = '0' l7rule['compare_type'] = constants.L7RULE_COMPARE_TYPE_STARTS_WITH response = req_func(first_req_arg, self._build_body(l7rule), status=400).json self.assertIn( 'L7rule type {0} only supports the {1} compare type.'.format( l7rule['type'], constants.L7RULE_COMPARE_TYPE_EQUAL_TO), response.get('faultstring')) # test bad cases of L7RULE_TYPE_SSL_DN_FIELD l7rule = {'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, 'invert': False, 'type': constants.L7RULE_TYPE_SSL_DN_FIELD, 'value': 'bad regex\\', 'admin_state_up': True} # This case just test that fail to parse the regex from the value req_func(first_req_arg, self._build_body(l7rule), status=400).json l7rule['value'] = '^.test*$' response = req_func(first_req_arg, self._build_body(l7rule), status=400).json self.assertIn( 'L7rule type {0} needs to specify a key and a value.'.format( l7rule['type']), response.get('faultstring')) l7rule['key'] = 'NOT_SUPPORTED_DN_FIELD' response = req_func(first_req_arg, self._build_body(l7rule), status=400).json self.assertIn('Invalid L7rule distinguished name field.', response.get('faultstring')) def test_create_bad_cases_with_ssl_rule_types(self): self._test_bad_cases_with_ssl_rule_types() def test_update(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api', tags=['old_tag']).get(self.root_tag) 
self.set_lb_status(self.lb_id) new_l7rule = {'value': '/images', 'tags': ['new_tag']} response = self.put(self.l7rule_path.format( l7rule_id=api_l7rule.get('id')), self._build_body(new_l7rule)).json.get(self.root_tag) self.assertEqual('/images', response.get('value')) self.assertEqual(['new_tag'], response['tags']) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_UPDATE) def test_update_authorized(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) new_l7rule = {'value': '/images'} self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.put(self.l7rule_path.format( l7rule_id=api_l7rule.get('id')), self._build_body(new_l7rule)).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual('/images', response.get('value')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_UPDATE) def test_update_not_authorized(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) new_l7rule = {'value': '/images'} self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): response = self.put(self.l7rule_path.format( l7rule_id=api_l7rule.get('id')), self._build_body(new_l7rule), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.ACTIVE, listener_prov_status=constants.ACTIVE, l7policy_prov_status=constants.ACTIVE, l7rule_prov_status=constants.ACTIVE) def test_bad_update(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) new_l7rule = {'type': 'bad type'} self.put(self.l7rule_path.format(l7rule_id=l7rule.get('id')), self._build_body(new_l7rule), 
status=400) @mock.patch('octavia.api.drivers.utils.call_provider') def test_update_with_bad_provider(self, mock_provider): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) new_l7rule = {'value': '/images'} mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') response = self.put( self.l7rule_path.format(l7rule_id=api_l7rule.get('id')), self._build_body(new_l7rule), status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) def test_update_with_invalid_rule(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) new_l7rule = {'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, 'value': 'bad string\\'} self.put(self.l7rule_path.format( l7rule_id=api_l7rule.get('id')), self._build_body(new_l7rule), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), l7rule_prov_status=constants.ACTIVE) def test_update_with_ssl_rule_types(self): test_mapping = { constants.L7RULE_TYPE_SSL_CONN_HAS_CERT: { 'value': 'tRuE', 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO}, constants.L7RULE_TYPE_SSL_VERIFY_RESULT: { 'value': '0', 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO}, constants.L7RULE_TYPE_SSL_DN_FIELD: { 'key': 'st-1', 'value': 'ST-FIELD1-PREFIX', 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH} } for l7rule_type, test_body in test_mapping.items(): self.set_lb_status(self.lb_id) api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) test_body.update({'type': l7rule_type}) response = self.put(self.l7rule_path.format( l7rule_id=api_l7rule.get('id')), self._build_body(test_body)).json.get(self.root_tag) self.assertEqual(l7rule_type, response.get('type')) self.assertEqual(test_body['compare_type'], response.get('compare_type')) self.assertEqual(test_body['value'], response.get('value')) if test_body.get('key'): self.assertEqual(test_body['key'], response.get('key')) self.assertFalse(response.get('invert')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=response.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_UPDATE) def test_update_bad_cases_with_ssl_rule_types(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self._test_bad_cases_with_ssl_rule_types( is_create=False, rule_id=api_l7rule.get('id')) def test_update_invert_none(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api', tags=['old_tag'], invert=True).get(self.root_tag) self.set_lb_status(self.lb_id) new_l7rule = {'invert': None} response = self.put(self.l7rule_path.format( l7rule_id=api_l7rule.get('id')), self._build_body(new_l7rule)).json.get(self.root_tag) self.assertFalse(response.get('invert')) def test_delete(self): api_l7rule = self.create_l7rule( self.l7policy_id, 
constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_l7rule['provisioning_status'] = constants.ACTIVE api_l7rule['operating_status'] = constants.ONLINE api_l7rule.pop('updated_at') response = self.get(self.l7rule_path.format( l7rule_id=api_l7rule.get('id'))).json.get(self.root_tag) response.pop('updated_at') self.assertEqual(api_l7rule, response) self.delete(self.l7rule_path.format(l7rule_id=api_l7rule.get('id'))) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_DELETE) self.set_lb_status(self.lb_id) def test_delete_authorized(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_l7rule['provisioning_status'] = constants.ACTIVE api_l7rule['operating_status'] = constants.ONLINE api_l7rule.pop('updated_at') response = self.get(self.l7rule_path.format( l7rule_id=api_l7rule.get('id'))).json.get(self.root_tag) response.pop('updated_at') self.assertEqual(api_l7rule, response) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.delete( self.l7rule_path.format(l7rule_id=api_l7rule.get('id'))) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, l7policy_prov_status=constants.PENDING_UPDATE, l7rule_prov_status=constants.PENDING_DELETE) self.set_lb_status(self.lb_id) def test_delete_not_authorized(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_l7rule['provisioning_status'] = constants.ACTIVE api_l7rule['operating_status'] = constants.ONLINE api_l7rule.pop('updated_at') response = self.get(self.l7rule_path.format( l7rule_id=api_l7rule.get('id'))).json.get(self.root_tag) response.pop('updated_at') self.assertEqual(api_l7rule, response) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with 
mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): self.delete( self.l7rule_path.format(l7rule_id=api_l7rule.get('id')), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), lb_prov_status=constants.ACTIVE, listener_prov_status=constants.ACTIVE, l7policy_prov_status=constants.ACTIVE, l7rule_prov_status=constants.ACTIVE) def test_bad_delete(self): self.delete(self.l7rule_path.format( l7rule_id=uuidutils.generate_uuid()), status=404) @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_with_bad_provider(self, mock_provider): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_l7rule['provisioning_status'] = constants.ACTIVE api_l7rule['operating_status'] = constants.ONLINE response = self.get(self.l7rule_path.format( l7rule_id=api_l7rule.get('id'))).json.get(self.root_tag) self.assertIsNone(api_l7rule.pop('updated_at')) self.assertIsNotNone(response.pop('updated_at')) self.assertEqual(api_l7rule, response) mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') self.delete(self.l7rule_path.format(l7rule_id=api_l7rule.get('id')), status=500) def test_create_when_lb_pending_update(self): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.put(self.LB_PATH.format(lb_id=self.lb_id), body={'loadbalancer': {'name': 'test_name_change'}}) new_l7rule = {'type': constants.L7RULE_TYPE_PATH, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'value': '/api'} self.post(self.l7rules_path, body=self._build_body(new_l7rule), status=409) def test_update_when_lb_pending_update(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.put(self.LB_PATH.format(lb_id=self.lb_id), body={'loadbalancer': {'name': 'test_name_change'}}) new_l7rule = {'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, 'value': '.*.example.com'} self.put(self.l7rule_path.format(l7rule_id=l7rule.get('id')), body=self._build_body(new_l7rule), status=409) def test_delete_when_lb_pending_update(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.put(self.LB_PATH.format(lb_id=self.lb_id), body={'loadbalancer': {'name': 'test_name_change'}}) self.delete(self.l7rule_path.format(l7rule_id=l7rule.get('id')), status=409) def test_create_when_lb_pending_delete(self): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) new_l7rule = {'type': constants.L7RULE_TYPE_HEADER, 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, 'value': 'some-string', 'key': 'Some-header'} self.post(self.l7rules_path, body=self._build_body(new_l7rule), status=409) def 
test_update_when_lb_pending_delete(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) new_l7rule = {'type': constants.L7RULE_TYPE_COOKIE, 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 'value': 'some-string', 'key': 'some-cookie'} self.put(self.l7rule_path.format(l7rule_id=l7rule.get('id')), body=self._build_body(new_l7rule), status=409) def test_delete_when_lb_pending_delete(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) self.set_lb_status(self.lb_id) self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) self.delete(self.l7rule_path.format(l7rule_id=l7rule.get('id')), status=409) def test_update_already_deleted(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) # This updates the child objects self.set_lb_status(self.lb_id, status=constants.DELETED) new_l7rule = {'type': constants.L7RULE_TYPE_COOKIE, 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 'value': 'some-string', 'key': 'some-cookie'} self.put(self.l7rule_path.format(l7rule_id=l7rule.get('id')), body=self._build_body(new_l7rule), status=404) def test_delete_already_deleted(self): l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api').get(self.root_tag) # This updates the child objects self.set_lb_status(self.lb_id, status=constants.DELETED) self.delete(self.l7rule_path.format(l7rule_id=l7rule.get('id')), status=404)
[file: octavia-6.2.2/octavia/tests/functional/api/v2/test_listener.py]
# Copyright 2014 Rackspace # Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
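# A minimal sketch, assuming the Octavia v2 listener API shape exercised by
# the tests below via self.create_listener(). EXAMPLE_LISTENER_BODY is a
# hypothetical, illustration-only name; '<load-balancer-uuid>' is a
# placeholder, not a real ID.
EXAMPLE_LISTENER_BODY = {
    'listener': {                       # 'listener' is this class's root_tag
        'protocol': 'HTTP',             # constants.PROTOCOL_HTTP
        'protocol_port': 80,            # TCP port the listener binds
        'loadbalancer_id': '<load-balancer-uuid>',  # parent LB (placeholder)
    }
}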
import copy
import random
from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

from octavia.common import constants
import octavia.common.context
from octavia.common import data_models
from octavia.common import exceptions
from octavia.db import api as db_api
from octavia.tests.common import constants as c_const
from octavia.tests.common import sample_certs
from octavia.tests.functional.api.v2 import base


class TestListener(base.BaseAPITest):

    root_tag = 'listener'
    root_tag_list = 'listeners'
    root_tag_links = 'listeners_links'

    def setUp(self):
        super(TestListener, self).setUp()
        self.lb = self.create_load_balancer(uuidutils.generate_uuid())
        self.lb_id = self.lb.get('loadbalancer').get('id')
        self.project_id = self.lb.get('loadbalancer').get('project_id')
        self.set_lb_status(self.lb_id)
        self.listener_path = self.LISTENERS_PATH + '/{listener_id}'
        self.pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN)
        self.pool_id = self.pool.get('pool').get('id')
        self.set_lb_status(self.lb_id)

    def test_get_all_admin(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        listener1 = self.create_listener(
            constants.PROTOCOL_HTTP, 80, lb1_id,
            tags=['test_tag1']).get(self.root_tag)
        self.set_lb_status(lb1_id)
        listener2 = self.create_listener(
            constants.PROTOCOL_HTTP, 81, lb1_id,
            tags=['test_tag2']).get(self.root_tag)
        self.set_lb_status(lb1_id)
        listener3 = self.create_listener(
            constants.PROTOCOL_HTTP, 82, lb1_id,
            tags=['test_tag3']).get(self.root_tag)
        self.set_lb_status(lb1_id)
        listeners = self.get(self.LISTENERS_PATH).json.get(self.root_tag_list)
        self.assertEqual(3, len(listeners))
        listener_id_ports = [(li.get('id'), li.get('protocol_port'),
                              li.get('tags')) for li in listeners]
        self.assertIn((listener1.get('id'), listener1.get('protocol_port'),
                       listener1.get('tags')), listener_id_ports)
        self.assertIn((listener2.get('id'), listener2.get('protocol_port'),
                       listener2.get('tags')), listener_id_ports)
        self.assertIn((listener3.get('id'), listener3.get('protocol_port'),
                       listener3.get('tags')), listener_id_ports)

    def test_get_all_non_admin(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id)
        self.set_lb_status(lb1_id)
        self.create_listener(constants.PROTOCOL_HTTP, 81, lb1_id)
        self.set_lb_status(lb1_id)
        listener3 = self.create_listener(constants.PROTOCOL_HTTP, 82,
                                         self.lb_id).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               listener3['project_id']):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                listeners = self.get(
                    self.LISTENERS_PATH).json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(1, len(listeners))
        listener_id_ports = [(li.get('id'), li.get('protocol_port'))
                             for li in listeners]
        self.assertIn((listener3.get('id'), listener3.get('protocol_port')),
                      listener_id_ports)

    def test_get_all_unscoped_token(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id)
        self.set_lb_status(lb1_id)
        self.create_listener(constants.PROTOCOL_HTTP, 81, lb1_id)
        self.set_lb_status(lb1_id)
        self.create_listener(constants.PROTOCOL_HTTP, 82,
                             self.lb_id).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               None):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': None}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                result = self.get(self.LISTENERS_PATH, status=403).json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, result)

    def test_get_all_non_admin_global_observer(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(),
                                        name='lb1', project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        listener1 = self.create_listener(
            constants.PROTOCOL_HTTP, 80, lb1_id).get(self.root_tag)
        self.set_lb_status(lb1_id)
        listener2 = self.create_listener(
            constants.PROTOCOL_HTTP, 81, lb1_id).get(self.root_tag)
        self.set_lb_status(lb1_id)
        listener3 = self.create_listener(
            constants.PROTOCOL_HTTP, 82, lb1_id).get(self.root_tag)
        self.set_lb_status(lb1_id)
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_global_observer'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                listeners = self.get(self.LISTENERS_PATH)
                listeners = listeners.json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(3, len(listeners))
        listener_id_ports = [(li.get('id'), li.get('protocol_port'))
                             for li in listeners]
        self.assertIn((listener1.get('id'), listener1.get('protocol_port')),
                      listener_id_ports)
        self.assertIn((listener2.get('id'), listener2.get('protocol_port')),
                      listener_id_ports)
        self.assertIn((listener3.get('id'), listener3.get('protocol_port')),
                      listener_id_ports)

    def test_get_all_not_authorized(self):
project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id) self.set_lb_status(lb1_id) self.create_listener(constants.PROTOCOL_HTTP, 81, lb1_id) self.set_lb_status(lb1_id) self.create_listener(constants.PROTOCOL_HTTP, 82, self.lb_id).get(self.root_tag) self.set_lb_status(self.lb_id) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): listeners = self.get(self.LISTENERS_PATH, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, listeners) def test_get_all_by_project_id(self): project1_id = uuidutils.generate_uuid() project2_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project1_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project2_id) lb2_id = lb2.get('loadbalancer').get('id') self.set_lb_status(lb2_id) listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id, name='listener1').get(self.root_tag) self.set_lb_status(lb1_id) listener2 = self.create_listener(constants.PROTOCOL_HTTP, 81, lb1_id, name='listener2').get(self.root_tag) self.set_lb_status(lb1_id) listener3 = self.create_listener(constants.PROTOCOL_HTTP, 82, lb2_id, name='listener3').get(self.root_tag) self.set_lb_status(lb2_id) listeners = self.get( self.LISTENERS_PATH, params={'project_id': project1_id}).json.get(self.root_tag_list) self.assertEqual(2, len(listeners)) listener_id_ports = [(li.get('id'), li.get('protocol_port')) for li in listeners] self.assertIn((listener1.get('id'), listener1.get('protocol_port')), listener_id_ports) self.assertIn((listener2.get('id'), listener2.get('protocol_port')), listener_id_ports) listeners = self.get( self.LISTENERS_PATH, params={'project_id': project2_id}).json.get(self.root_tag_list) listener_id_ports = [(li.get('id'), li.get('protocol_port')) for li in listeners] self.assertEqual(1, len(listeners)) self.assertIn((listener3.get('id'), listener3.get('protocol_port')), listener_id_ports) def test_get_all_sorted(self): self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id, name='listener1') self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 81, self.lb_id, name='listener2') self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 82, self.lb_id, name='listener3') self.set_lb_status(self.lb_id) response = self.get(self.LISTENERS_PATH, params={'sort': 'name:desc'}) listeners_desc = response.json.get(self.root_tag_list) response = self.get(self.LISTENERS_PATH, params={'sort': 'name:asc'}) listeners_asc = response.json.get(self.root_tag_list) self.assertEqual(3, len(listeners_desc)) self.assertEqual(3, len(listeners_asc)) listener_id_names_desc = [(listener.get('id'), listener.get('name')) for listener in listeners_desc] listener_id_names_asc = [(listener.get('id'), listener.get('name')) for listener in listeners_asc] self.assertEqual(listener_id_names_asc, list(reversed(listener_id_names_desc))) def test_get_all_limited(self): self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id, name='listener1') 
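        # NOTE: This exercises the pagination contract: 'limit' caps the
        # page size, 'marker' names the id of the last element of the
        # previous page, and the listeners_links list carries the
        # rel='next'/'previous' links checked below.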
self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 81, self.lb_id, name='listener2') self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 82, self.lb_id, name='listener3') self.set_lb_status(self.lb_id) # First two -- should have 'next' link first_two = self.get(self.LISTENERS_PATH, params={'limit': 2}).json objs = first_two[self.root_tag_list] links = first_two[self.root_tag_links] self.assertEqual(2, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('next', links[0]['rel']) # Third + off the end -- should have previous link third = self.get(self.LISTENERS_PATH, params={ 'limit': 2, 'marker': first_two[self.root_tag_list][1]['id']}).json objs = third[self.root_tag_list] links = third[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('previous', links[0]['rel']) # Middle -- should have both links middle = self.get(self.LISTENERS_PATH, params={ 'limit': 1, 'marker': first_two[self.root_tag_list][0]['id']}).json objs = middle[self.root_tag_list] links = middle[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(2, len(links)) self.assertItemsEqual(['previous', 'next'], [link['rel'] for link in links]) def test_get_all_fields_filter(self): self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id, name='listener1') self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 81, self.lb_id, name='listener2') self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 82, self.lb_id, name='listener3') self.set_lb_status(self.lb_id) lis = self.get(self.LISTENERS_PATH, params={ 'fields': ['id', 'project_id']}).json for li in lis['listeners']: self.assertIn(u'id', li) self.assertIn(u'project_id', li) self.assertNotIn(u'description', li) def test_get_one_fields_filter(self): listener1 = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id, name='listener1').get(self.root_tag) self.set_lb_status(self.lb_id) li = self.get( self.LISTENER_PATH.format(listener_id=listener1.get('id')), params={'fields': ['id', 'project_id']}).json.get(self.root_tag) self.assertIn(u'id', li) self.assertIn(u'project_id', li) self.assertNotIn(u'description', li) def test_get_all_filter(self): li1 = self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id, name='listener1').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 81, self.lb_id, name='listener2').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 82, self.lb_id, name='listener3').get(self.root_tag) self.set_lb_status(self.lb_id) lis = self.get(self.LISTENERS_PATH, params={ 'id': li1['id']}).json self.assertEqual(1, len(lis['listeners'])) self.assertEqual(li1['id'], lis['listeners'][0]['id']) def test_get_all_tags_filter(self): listener1 = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id, name='listener1', tags=['test_tag1', 'test_tag2'] ).get(self.root_tag) self.set_lb_status(self.lb_id) listener2 = self.create_listener( constants.PROTOCOL_HTTP, 81, self.lb_id, name='listener2', tags=['test_tag2', 'test_tag3'] ).get(self.root_tag) self.set_lb_status(self.lb_id) listener3 = self.create_listener( constants.PROTOCOL_HTTP, 82, self.lb_id, name='listener3', tags=['test_tag4', 'test_tag5'] ).get(self.root_tag) self.set_lb_status(self.lb_id) listeners = self.get( self.LISTENERS_PATH, params={'tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(listeners, list) 
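        # NOTE: 'tags' matches resources carrying *all* listed tags (AND),
        # 'tags-any' matches resources carrying any of them (OR), and the
        # 'not-tags' / 'not-tags-any' parameters negate those matches,
        # following the common OpenStack tag-filtering convention.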
self.assertEqual(2, len(listeners)) self.assertEqual( [listener1.get('id'), listener2.get('id')], [listener.get('id') for listener in listeners] ) listeners = self.get( self.LISTENERS_PATH, params={'tags': ['test_tag2', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(listeners, list) self.assertEqual(1, len(listeners)) self.assertEqual( [listener2.get('id')], [listener.get('id') for listener in listeners] ) listeners = self.get( self.LISTENERS_PATH, params={'tags-any': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(listeners, list) self.assertEqual(2, len(listeners)) self.assertEqual( [listener1.get('id'), listener2.get('id')], [listener.get('id') for listener in listeners] ) listeners = self.get( self.LISTENERS_PATH, params={'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(listeners, list) self.assertEqual(1, len(listeners)) self.assertEqual( [listener3.get('id')], [listener.get('id') for listener in listeners] ) listeners = self.get( self.LISTENERS_PATH, params={'not-tags-any': ['test_tag2', 'test_tag4']} ).json.get(self.root_tag_list) self.assertIsInstance(listeners, list) self.assertEqual(0, len(listeners)) listeners = self.get( self.LISTENERS_PATH, params={'tags': 'test_tag2', 'tags-any': ['test_tag1', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(listeners, list) self.assertEqual(2, len(listeners)) self.assertEqual( [listener1.get('id'), listener2.get('id')], [listener.get('id') for listener in listeners] ) listeners = self.get( self.LISTENERS_PATH, params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(listeners, list) self.assertEqual(0, len(listeners)) def test_get_all_hides_deleted(self): api_listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) response = self.get(self.LISTENERS_PATH) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 1) self.set_object_status(self.listener_repo, api_listener.get('id'), provisioning_status=constants.DELETED) response = self.get(self.LISTENERS_PATH) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 0) def test_get(self): listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) response = self.get(self.listener_path.format( listener_id=listener['id'])) api_listener = response.json.get(self.root_tag) self.assertEqual(listener, api_listener) self.assertEqual([], api_listener['tags']) def test_get_authorized(self): listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.listener_path.format( listener_id=listener['id'])) api_listener = response.json.get(self.root_tag) self.assertEqual(listener, 
api_listener) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_get_not_authorized(self): listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.get(self.listener_path.format( listener_id=listener['id']), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_get_deleted_gives_404(self): api_listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) self.set_object_status(self.listener_repo, api_listener.get('id'), provisioning_status=constants.DELETED) self.get(self.LISTENER_PATH.format(listener_id=api_listener.get('id')), status=404) def test_get_bad_listener_id(self): listener_path = self.listener_path self.get(listener_path.format(listener_id='SEAN-CONNERY'), status=404) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_create(self, mock_cert_data, response_status=201, **optionals): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() lb_listener = {'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 80, 'connection_limit': 10, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], 'insert_headers': {}, 'project_id': self.project_id, 'loadbalancer_id': self.lb_id, 'tags': ['test_tag']} lb_listener.update(optionals) body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body, status=response_status) if response_status >= 300: return response listener_api = response.json['listener'] extra_expects = {'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE} lb_listener.update(extra_expects) self.assertTrue(uuidutils.is_uuid_like(listener_api.get('id'))) for key, value in optionals.items(): self.assertEqual(value, lb_listener.get(key)) lb_listener['id'] = listener_api.get('id') lb_listener.pop('sni_container_refs') sni_ex = [sni1, sni2] sni_resp = listener_api.pop('sni_container_refs') self.assertEqual(2, len(sni_resp)) for sni in sni_resp: self.assertIn(sni, sni_ex) self.assertIsNotNone(listener_api.pop('created_at')) self.assertIsNone(listener_api.pop('updated_at')) self.assertEqual(['test_tag'], listener_api['tags']) self.assertNotEqual(lb_listener, listener_api) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.PENDING_UPDATE) self.assert_final_listener_statuses(self.lb_id, listener_api.get('id')) return listener_api def test_create_with_timeouts(self): optionals = { 'timeout_client_data': 1, 'timeout_member_connect': 2, 'timeout_member_data': constants.MIN_TIMEOUT, 'timeout_tcp_inspect': constants.MAX_TIMEOUT, } listener_api = self.test_create(**optionals) self.assertEqual(1, listener_api.get('timeout_client_data')) 
self.assertEqual(2, listener_api.get('timeout_member_connect')) self.assertEqual(constants.MIN_TIMEOUT, listener_api.get('timeout_member_data')) self.assertEqual(constants.MAX_TIMEOUT, listener_api.get('timeout_tcp_inspect')) def test_create_with_default_timeouts(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group='haproxy_amphora', timeout_client_data=20) self.conf.config(group='haproxy_amphora', timeout_member_connect=21) self.conf.config(group='haproxy_amphora', timeout_member_data=constants.MIN_TIMEOUT) self.conf.config(group='haproxy_amphora', timeout_tcp_inspect=constants.MAX_TIMEOUT) listener_api = self.test_create() self.assertEqual(20, listener_api.get('timeout_client_data')) self.assertEqual(21, listener_api.get('timeout_member_connect')) self.assertEqual(constants.MIN_TIMEOUT, listener_api.get('timeout_member_data')) self.assertEqual(constants.MAX_TIMEOUT, listener_api.get('timeout_tcp_inspect')) def test_create_with_timeouts_too_high(self): optionals = { 'timeout_client_data': 1, 'timeout_member_connect': 1, 'timeout_member_data': 1, 'timeout_tcp_inspect': 1, } for field in optionals.items(): optionals.update({field[0]: constants.MAX_TIMEOUT + 1}) resp = self.test_create(response_status=400, **optionals).json optionals.update({field[0]: 1}) fault = resp.get('faultstring') self.assertIn( 'Invalid input for field/attribute {0}'.format( field[0]), fault) self.assertIn( 'Value should be lower or equal to {0}'.format( constants.MAX_TIMEOUT), fault) def test_create_with_timeouts_too_low(self): optionals = { 'timeout_client_data': 1, 'timeout_member_connect': 2, 'timeout_member_data': 3, 'timeout_tcp_inspect': constants.MIN_TIMEOUT - 1, } resp = self.test_create(response_status=400, **optionals).json fault = resp.get('faultstring') self.assertIn( 'Invalid input for field/attribute timeout_tcp_inspect', fault) self.assertIn( 'Value should be greater or equal to {0}'.format( constants.MIN_TIMEOUT), fault) def test_create_udp_case(self): api_listener = self.create_listener(constants.PROTOCOL_UDP, 6666, self.lb_id).get(self.root_tag) self.assertEqual(constants.PROTOCOL_UDP, api_listener.get('protocol')) self.assertEqual(6666, api_listener.get('protocol_port')) self.assert_correct_status( lb_id=self.lb_id, listener_id=api_listener.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_CREATE, listener_op_status=constants.OFFLINE) def test_negative_create_udp_case(self): sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() req_dict = {'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_UDP, 'protocol_port': 6666, 'connection_limit': 10, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], 'insert_headers': {}, 'loadbalancer_id': self.lb_id} expect_error_msg = ("Validation failure: %s protocol listener does " "not support TLS.") % constants.PROTOCOL_UDP res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status(lb_id=self.lb_id) # Default pool protocol is udp which is different with listener # protocol. 
udp_pool_id = self.create_pool( self.lb_id, constants.PROTOCOL_UDP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id') self.set_lb_status(self.lb_id) lb_listener = {'name': 'listener1', 'default_pool_id': udp_pool_id, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id} expect_error_msg = ("Validation failure: The pool protocol '%s' is " "invalid while the listener protocol is '%s'.") % ( constants.PROTOCOL_UDP, lb_listener['protocol']) res = self.post(self.LISTENERS_PATH, self._build_body(lb_listener), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status(lb_id=self.lb_id) def test_create_duplicate_fails(self): self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id, status=409) def test_create_bad_tls_ref(self): sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() tls_ref = uuidutils.generate_uuid() lb_listener = {'name': 'listener1', 'default_pool_id': None, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 80, 'sni_container_refs': [sni1, sni2], 'default_tls_container_ref': tls_ref, 'loadbalancer_id': self.lb_id} body = self._build_body(lb_listener) self.cert_manager_mock().get_cert.side_effect = [ Exception("bad cert"), None, Exception("bad_cert")] response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertIn(sni1, response['faultstring']) self.assertNotIn(sni2, response['faultstring']) self.assertIn(tls_ref, response['faultstring']) def test_create_with_certs_not_terminated_https(self): optionals = { 'default_tls_container_ref': uuidutils.generate_uuid(), 'protocol': constants.PROTOCOL_TCP } resp = self.test_create(response_status=400, **optionals).json fault = resp.get('faultstring') self.assertIn( 'Certificate container references are only allowed on ', fault) self.assertIn( '{} protocol listeners.'.format( constants.PROTOCOL_TERMINATED_HTTPS), fault) def test_create_without_certs_if_terminated_https(self): optionals = { 'default_tls_container_ref': None, 'sni_container_refs': None, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS } resp = self.test_create(response_status=400, **optionals).json fault = resp.get('faultstring') self.assertIn( 'An SNI or default certificate container reference must ', fault) self.assertIn( 'be provided for {} protocol listeners.'.format( constants.PROTOCOL_TERMINATED_HTTPS), fault) def test_create_client_ca_cert_without_tls_cert(self): optionals = { 'default_tls_container_ref': None, 'sni_container_refs': None, 'client_ca_tls_container_ref': uuidutils.generate_uuid(), 'protocol': constants.PROTOCOL_TERMINATED_HTTPS } resp = self.test_create(response_status=400, **optionals).json fault = resp.get('faultstring') self.assertIn( 'An SNI or default certificate container reference must ', fault) self.assertIn( 'be provided with a client CA container reference.', fault) def test_create_crl_without_ca_cert(self): optionals = { 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'client_ca_tls_container_ref': None, 'client_crl_container_ref': uuidutils.generate_uuid() } resp = self.test_create(response_status=400, **optionals).json fault = resp.get('faultstring') self.assertIn( 'A client authentication CA reference is required to specify a ' 'client authentication revocation list.', fault) def test_create_with_default_pool_id(self): lb_listener = {'name': 'listener1', 
'default_pool_id': self.pool_id, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id} body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body) api_listener = response.json['listener'] self.assertEqual(api_listener.get('default_pool_id'), self.pool_id) def test_create_with_bad_default_pool_id(self): lb_listener = {'name': 'listener1', 'default_pool_id': uuidutils.generate_uuid(), 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id} body = self._build_body(lb_listener) self.post(self.LISTENERS_PATH, body, status=404) def test_create_with_shared_default_pool_id(self): lb_listener1 = {'name': 'listener1', 'default_pool_id': self.pool_id, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id} lb_listener2 = {'name': 'listener2', 'default_pool_id': self.pool_id, 'description': 'desc2', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 81, 'loadbalancer_id': self.lb_id} body1 = self._build_body(lb_listener1) body2 = self._build_body(lb_listener2) listener1 = self.post(self.LISTENERS_PATH, body1).json['listener'] self.set_lb_status(self.lb_id, constants.ACTIVE) listener2 = self.post(self.LISTENERS_PATH, body2).json['listener'] self.assertEqual(listener1['default_pool_id'], self.pool_id) self.assertEqual(listener1['default_pool_id'], listener2['default_pool_id']) def test_create_with_project_id(self): self.test_create(project_id=self.project_id) def test_create_defaults(self): defaults = {'name': None, 'default_pool_id': None, 'description': None, 'admin_state_up': True, 'connection_limit': None, 'default_tls_container_ref': None, 'sni_container_refs': [], 'project_id': None, 'insert_headers': {}} lb_listener = {'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id} body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body) listener_api = response.json['listener'] extra_expects = {'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE} lb_listener.update(extra_expects) lb_listener.update(defaults) self.assertTrue(uuidutils.is_uuid_like(listener_api.get('id'))) lb_listener['id'] = listener_api.get('id') self.assertIsNotNone(listener_api.pop('created_at')) self.assertIsNone(listener_api.pop('updated_at')) self.assertNotEqual(lb_listener, listener_api) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.PENDING_UPDATE) self.assert_final_listener_statuses(self.lb_id, listener_api['id']) def test_create_over_quota(self): self.start_quota_mock(data_models.Listener) lb_listener = {'name': 'listener1', 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id} body = self._build_body(lb_listener) self.post(self.LISTENERS_PATH, body, status=403) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_with_bad_provider(self, mock_provider): mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') lb_listener = {'name': 'listener1', 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id} body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body, status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) 
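
    # NOTE: The authorization tests below all follow the same pattern as
    # the test_get_all_* tests above: swap in a real auth_strategy, patch
    # octavia.common.context.Context.project_id to the identity being
    # impersonated, and override to_policy_values() so the policy engine
    # evaluates a fixed set of credentials. Roughly:
    #
    #     with mock.patch.object(octavia.common.context.Context,
    #                            'project_id', some_project_id):
    #         with mock.patch('oslo_context.context.RequestContext'
    #                         '.to_policy_values',
    #                         return_value=override_credentials):
    #             ...  # issue the request under test
    #
    # where some_project_id and override_credentials stand in for the
    # values each individual test builds.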
def test_create_authorized(self, **optionals): lb_listener = {'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'default_tls_container_ref': None, 'sni_container_refs': None, 'insert_headers': {}, 'project_id': self.project_id, 'loadbalancer_id': self.lb_id} lb_listener.update(optionals) body = self._build_body(lb_listener) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.post(self.LISTENERS_PATH, body) self.conf.config(group='api_settings', auth_strategy=auth_strategy) listener_api = response.json['listener'] extra_expects = {'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE} lb_listener.update(extra_expects) self.assertTrue(uuidutils.is_uuid_like(listener_api.get('id'))) for key, value in optionals.items(): self.assertEqual(value, lb_listener.get(key)) lb_listener['id'] = listener_api.get('id') self.assertIsNotNone(listener_api.pop('created_at')) self.assertIsNone(listener_api.pop('updated_at')) self.assertNotEqual(lb_listener, listener_api) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.PENDING_UPDATE) self.assert_final_listener_statuses(self.lb_id, listener_api.get('id')) def test_create_not_authorized(self, **optionals): lb_listener = {'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'default_tls_container_ref': None, 'sni_container_refs': None, 'insert_headers': {}, 'project_id': self.project_id, 'loadbalancer_id': self.lb_id} lb_listener.update(optionals) body = self._build_body(lb_listener) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.post(self.LISTENERS_PATH, body, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_create_with_ca_cert(self): self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) optionals = { 'client_ca_tls_container_ref': uuidutils.generate_uuid() } listener_api = self.test_create(**optionals) self.assertEqual(optionals['client_ca_tls_container_ref'], listener_api.get('client_ca_tls_container_ref')) self.assertEqual(constants.CLIENT_AUTH_NONE, listener_api.get('client_authentication')) def test_create_with_ca_cert_and_option(self): self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) optionals = { 'client_ca_tls_container_ref': uuidutils.generate_uuid(), 
'client_authentication': constants.CLIENT_AUTH_MANDATORY } listener_api = self.test_create(**optionals) self.assertEqual(optionals['client_ca_tls_container_ref'], listener_api.get('client_ca_tls_container_ref')) self.assertEqual(optionals['client_authentication'], listener_api.get('client_authentication')) def test_create_with_ca_cert_and_crl(self): # Load up sample certs to test the validation self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] optionals = { 'client_ca_tls_container_ref': uuidutils.generate_uuid(), 'client_crl_container_ref': uuidutils.generate_uuid() } listener_api = self.test_create(**optionals) self.assertEqual(optionals['client_ca_tls_container_ref'], listener_api.get('client_ca_tls_container_ref')) self.assertEqual(constants.CLIENT_AUTH_NONE, listener_api.get('client_authentication')) self.assertEqual(optionals['client_crl_container_ref'], listener_api.get('client_crl_container_ref')) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_create_with_crl_mismatch_ca_cert(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CERT, sample_certs.X509_CA_CRL] sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() lb_listener = { 'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 80, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], 'project_id': self.project_id, 'loadbalancer_id': self.lb_id, 'client_ca_tls_container_ref': uuidutils.generate_uuid(), 'client_crl_container_ref': uuidutils.generate_uuid() } body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertEqual( "Validation failure: The CRL specified is not valid for client " "certificate authority reference supplied.", response['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_create_with_ca_cert_negative_cases(self, mock_load_cert): # create just with option or crl, # no client_ca_tls_container_ref specified. sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() for opt in [{'client_authentication': constants.CLIENT_AUTH_MANDATORY, 'client_crl_container_ref': uuidutils.generate_uuid()}, {'client_authentication': constants.CLIENT_AUTH_OPTIONAL, 'client_crl_container_ref': uuidutils.generate_uuid()}]: lb_listener = { 'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 80, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], 'project_id': self.project_id, 'loadbalancer_id': self.lb_id} lb_listener.update(opt) body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertEqual( "Validation failure: Client authentication setting %s " "requires a client CA container reference." 
% opt['client_authentication'], response['faultstring']) def test_create_with_bad_ca_cert_ref(self): sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() lb_listener = { 'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 80, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], 'project_id': self.project_id, 'loadbalancer_id': self.lb_id, 'client_ca_tls_container_ref': uuidutils.generate_uuid()} body = self._build_body(lb_listener) self.cert_manager_mock().get_cert.side_effect = [ 'cert 1', 'cert 2', 'cert 3'] self.cert_manager_mock().get_secret.side_effect = [ Exception('bad ca cert')] response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertEqual("Could not retrieve certificate: ['%s']" % lb_listener['client_ca_tls_container_ref'], response['faultstring']) def test_create_with_unreachable_crl(self): sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() lb_listener = { 'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 80, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], 'project_id': self.project_id, 'loadbalancer_id': self.lb_id, 'client_ca_tls_container_ref': uuidutils.generate_uuid(), 'client_crl_container_ref': uuidutils.generate_uuid()} body = self._build_body(lb_listener) self.cert_manager_mock().get_secret.side_effect = Exception( 'bad CRL ref') response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertIn(lb_listener['client_crl_container_ref'], response['faultstring']) def test_create_with_bad_ca_cert(self): sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() lb_listener = { 'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 80, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], 'project_id': self.project_id, 'loadbalancer_id': self.lb_id, 'client_ca_tls_container_ref': uuidutils.generate_uuid()} body = self._build_body(lb_listener) self.cert_manager_mock().get_cert.side_effect = [ 'cert 1', 'cert 2', 'cert 3'] self.cert_manager_mock().get_secret.return_value = 'bad cert' response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertIn("The client authentication CA certificate is invalid. " "It must be a valid x509 PEM format certificate.", response['faultstring']) def _test_create_with_allowed_cidrs(self, allowed_cidrs): listener = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id, allowed_cidrs=allowed_cidrs) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) get_listener = self.get(listener_path).json['listener'] self.assertEqual(allowed_cidrs, get_listener.get('allowed_cidrs')) def test_create_with_allowed_cidrs_ipv4(self): allowed_cidrs = ['10.0.1.0/24', '172.16.55.0/25'] self._test_create_with_allowed_cidrs(allowed_cidrs) def test_create_with_allowed_cidrs_ipv6(self): allowed_cidrs = ['2001:db8:a0b:12f0::/64', '2a02:8071:69e::/64'] with mock.patch('octavia.db.repositories.VipRepository.' 
'get') as repo_mock: repo_mock.return_value.ip_address = "2001:db9:a1b:13f0::1" self._test_create_with_allowed_cidrs(allowed_cidrs) def test_create_with_bad_allowed_cidrs(self): allowed_cidrs = [u'10.0.1.0/33', u'172.16.55.1.0/25'] lb_listener = { 'protocol': constants.PROTOCOL_TCP, 'protocol_port': 80, 'project_id': self.project_id, 'loadbalancer_id': self.lb_id, 'allowed_cidrs': allowed_cidrs} body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertIn("Invalid input for field/attribute allowed_cidrs. " "Value: '%s'. Value should be IPv4 or IPv6 CIDR format" % allowed_cidrs, response['faultstring']) def test_create_with_incompatible_allowed_cidrs_ipv6(self): lb_listener = { 'protocol': constants.PROTOCOL_TCP, 'protocol_port': 80, 'project_id': self.project_id, 'loadbalancer_id': self.lb_id, 'allowed_cidrs': ['2001:db8:a0b:12f0::/64']} body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertIn("Validation failure: CIDR 2001:db8:a0b:12f0::/64 IP " "version incompatible with VIP 198.0.2.5 IP version.", response['faultstring']) def test_create_with_incompatible_allowed_cidrs_ipv4(self): lb_listener = { 'protocol': constants.PROTOCOL_TCP, 'protocol_port': 80, 'project_id': self.project_id, 'loadbalancer_id': self.lb_id, 'allowed_cidrs': ['10.0.1.0/24']} with mock.patch('octavia.db.repositories.VipRepository.' 'get') as repo_mock: repo_mock.return_value.ip_address = "2001:db9:a1b:13f0::1" body = self._build_body(lb_listener) response = self.post(self.LISTENERS_PATH, body, status=400).json self.assertIn("Validation failure: CIDR 10.0.1.0/24 IP version " "incompatible with VIP 2001:db9:a1b:13f0::1 IP " "version.", response['faultstring']) def test_create_with_duplicated_allowed_cidrs(self): allowed_cidrs = ['10.0.1.0/24', '10.0.2.0/24', '10.0.2.0/24'] self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id, allowed_cidrs=allowed_cidrs) def _test_negative_create_with_headers(self, protocol): req_dict = {'name': 'listener1', 'default_pool_id': None, 'description': 'desc1', 'admin_state_up': False, 'protocol': protocol, 'protocol_port': 6666, 'connection_limit': 10, 'insert_headers': { "X-Forwarded-Port": "true", "X-Forwarded-For": "true"}, 'loadbalancer_id': self.lb_id} res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), status=400) self.assertIn(protocol, res.json['faultstring']) self.assert_correct_status(lb_id=self.lb_id) def test_negative_create_HTTPS_with_headers(self): self._test_negative_create_with_headers(constants.PROTOCOL_HTTPS) def test_negative_create_PROXY_with_headers(self): self._test_negative_create_with_headers(constants.PROTOCOL_PROXY) def test_negative_create_TCP_with_headers(self): self._test_negative_create_with_headers(constants.PROTOCOL_TCP) def test_negative_create_UDP_with_headers(self): self._test_negative_create_with_headers(constants.PROTOCOL_UDP) def test_update_allowed_cidrs(self): allowed_cidrs = ['10.0.1.0/24', '10.0.2.0/24'] new_cidrs = ['10.0.1.0/24', '10.0.3.0/24'] listener = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id, allowed_cidrs=allowed_cidrs) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) lb_listener = {'allowed_cidrs': new_cidrs} body = self._build_body(lb_listener) response = self.put(listener_path, body).json.get(self.root_tag) self.assertEqual(new_cidrs, response.get('allowed_cidrs')) def test_update_unset_allowed_cidrs(self): allowed_cidrs = 
['10.0.1.0/24', '10.0.2.0/24'] listener = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id, allowed_cidrs=allowed_cidrs) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) lb_listener = {'allowed_cidrs': None} body = self._build_body(lb_listener) api_listener = self.put(listener_path, body).json.get(self.root_tag) self.assertIsNone(api_listener.get('allowed_cidrs')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_update_with_bad_provider(self, mock_provider): api_listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) new_listener = {'name': 'new_name'} mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') response = self.put( self.LISTENER_PATH.format(listener_id=api_listener.get('id')), self._build_body(new_listener), status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_with_bad_provider(self, mock_provider): api_listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_listener['provisioning_status'] = constants.ACTIVE api_listener['operating_status'] = constants.ONLINE response = self.get(self.LISTENER_PATH.format( listener_id=api_listener.get('id'))).json.get(self.root_tag) self.assertIsNone(api_listener.pop('updated_at')) self.assertIsNotNone(response.pop('updated_at')) self.assertEqual(api_listener, response) mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') self.delete(self.LISTENER_PATH.format( listener_id=api_listener.get('id')), status=500) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update(self, mock_cert_data, **options): cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) tls_uuid = uuidutils.generate_uuid() ca_tls_uuid = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=tls_uuid, default_pool_id=None, tags=['old_tag'], client_ca_tls_container_ref=ca_tls_uuid).get(self.root_tag) ori_listener = copy.deepcopy(listener) self.set_lb_status(self.lb_id) new_listener = {'name': 'listener2', 'admin_state_up': True, 'default_pool_id': self.pool_id, 'timeout_client_data': 1, 'timeout_member_connect': 2, 'timeout_member_data': 3, 'timeout_tcp_inspect': 4, 'tags': ['new_tag']} new_listener.update(options) body = self._build_body(new_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) api_listener = self.put(listener_path, body).json.get(self.root_tag) update_expect = {'provisioning_status': constants.PENDING_UPDATE, 'operating_status': constants.ONLINE} update_expect.update(new_listener) listener.update(update_expect) self.assertEqual(listener['created_at'], api_listener['created_at']) self.assertNotEqual(listener['updated_at'], api_listener['updated_at']) self.assertEqual(['new_tag'], api_listener['tags']) self.assertNotEqual(listener, 
api_listener) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.PENDING_UPDATE) self.assert_final_listener_statuses(self.lb_id, api_listener['id']) return ori_listener, api_listener def test_update_with_bad_tls_ref(self): listener = self.create_listener(constants.PROTOCOL_TCP, 443, self.lb_id) tls_uuid = uuidutils.generate_uuid() self.set_lb_status(self.lb_id) self.listener_repo.update(db_api.get_session(), listener['listener']['id'], tls_certificate_id=tls_uuid, protocol=constants.PROTOCOL_TERMINATED_HTTPS) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) update_data = {'name': 'listener2'} body = self._build_body(update_data) api_listener = self.put(listener_path, body).json.get(self.root_tag) response = self.get(self.listener_path.format( listener_id=listener['listener']['id'])) api_listener = response.json.get(self.root_tag) self.assertEqual('listener2', api_listener['name']) def test_negative_update_udp_case(self): api_listener = self.create_listener(constants.PROTOCOL_UDP, 6666, self.lb_id).get(self.root_tag) self.set_lb_status(self.lb_id) sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() new_listener = {'name': 'new-listener', 'admin_state_up': True, 'connection_limit': 10, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], 'insert_headers': { "X-Forwarded-Port": "true", "X-Forwarded-For": "true"}} listener_path = self.LISTENER_PATH.format( listener_id=api_listener['id']) expect_error_msg = ( "Validation failure: %s protocol listener does not support TLS or " "header insertion.") % constants.PROTOCOL_UDP res = self.put(listener_path, self._build_body(new_listener), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status(lb_id=self.lb_id) def test_update_bad_listener_id(self): self.put(self.listener_path.format(listener_id='SEAN-CONNERY'), body={}, status=404) def test_update_with_bad_default_pool_id(self): bad_pool_uuid = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TCP, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_pool_id=self.pool_id) self.set_lb_status(self.lb_id) new_listener = {'name': 'listener2', 'admin_state_up': True, 'default_pool_id': bad_pool_uuid} body = self._build_body(new_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) self.put(listener_path, body, status=404) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.ACTIVE) self.assert_final_listener_statuses(self.lb_id, listener['listener']['id']) def test_update_with_certs_not_terminated_https(self): listener = self.create_listener( constants.PROTOCOL_TCP, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_pool_id=None,).get(self.root_tag) self.set_lb_status(self.lb_id) lb_listener = { 'default_tls_container_ref': uuidutils.generate_uuid()} body = self._build_body(lb_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) response = self.put(listener_path, body, status=400).json fault = response.get('faultstring') self.assertIn( 'Certificate container references are only allowed on ', fault) self.assertIn( '{} protocol listeners.'.format( constants.PROTOCOL_TERMINATED_HTTPS), fault) def test_update_with_ca_cert(self): self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) optionals = { 
'client_ca_tls_container_ref': uuidutils.generate_uuid() } ori_listener, update_listener = self.test_update(**optionals) self.assertEqual(optionals['client_ca_tls_container_ref'], update_listener.get('client_ca_tls_container_ref')) self.assertNotEqual(ori_listener['client_ca_tls_container_ref'], optionals['client_ca_tls_container_ref']) def test_update_with_only_client_auth_option(self): optionals = { 'client_authentication': constants.CLIENT_AUTH_OPTIONAL } ori_listener, update_listener = self.test_update(**optionals) self.assertEqual(optionals['client_authentication'], update_listener.get('client_authentication')) self.assertNotEqual(ori_listener['client_authentication'], optionals['client_authentication']) def test_update_with_crl(self): # Load up sample certs to test the validation self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] optionals = { 'client_crl_container_ref': uuidutils.generate_uuid() } ori_listener, update_listener = self.test_update(**optionals) self.assertEqual(optionals['client_crl_container_ref'], update_listener.get('client_crl_container_ref')) self.assertNotEqual(ori_listener['client_crl_container_ref'], optionals['client_crl_container_ref']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_from_nonexist_ca_cert_to_new_ca_cert(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) tls_uuid = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=tls_uuid, default_pool_id=None).get(self.root_tag) self.set_lb_status(self.lb_id) ca_tls_uuid = uuidutils.generate_uuid() new_listener = { 'client_ca_tls_container_ref': ca_tls_uuid} body = self._build_body(new_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) api_listener = self.put(listener_path, body).json.get(self.root_tag) update_expect = {'provisioning_status': constants.PENDING_UPDATE, 'operating_status': constants.ONLINE} update_expect.update(new_listener) listener.update(update_expect) self.assertEqual(ca_tls_uuid, api_listener['client_ca_tls_container_ref']) self.assertEqual(constants.CLIENT_AUTH_NONE, api_listener['client_authentication']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_ca_cert_missing(self, mock_cert_data): # update a listener, no ca cert exist cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} tls_uuid = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=tls_uuid, default_pool_id=None).get(self.root_tag) self.set_lb_status(self.lb_id) for opt in [{'client_authentication': constants.CLIENT_AUTH_OPTIONAL, 'client_crl_container_ref': uuidutils.generate_uuid()}, {'client_authentication': constants.CLIENT_AUTH_MANDATORY, 'client_crl_container_ref': uuidutils.generate_uuid()}]: body = self._build_body(opt) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) response = 
self.put(listener_path, body, status=400).json self.assertEqual( "Validation failure: Client authentication setting %s " "requires a client CA container reference." % opt['client_authentication'], response['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_crl_but_ca_cert_missing(self, mock_cert_data): # update a listener, no ca cert exist cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1, 'client_ca_cert': None} tls_uuid = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=tls_uuid, default_pool_id=None).get(self.root_tag) self.set_lb_status(self.lb_id) body = self._build_body( {'client_crl_container_ref': uuidutils.generate_uuid()}) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) response = self.put(listener_path, body, status=400).json self.assertEqual( "Validation failure: A client authentication CA reference is " "required to specify a client authentication revocation list.", response['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_unset_ca_cert(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) tls_uuid = uuidutils.generate_uuid() ca_tls_uuid = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=tls_uuid, default_pool_id=None, client_ca_tls_container_ref=ca_tls_uuid).get(self.root_tag) self.set_lb_status(self.lb_id) lb_listener = {'client_ca_tls_container_ref': None} body = self._build_body(lb_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) api_listener = self.put(listener_path, body).json.get(self.root_tag) self.assertIsNone(api_listener.get('client_ca_tls_container_ref')) self.assertIsNone(api_listener.get('client_auth_option')) self.assertIsNone(api_listener.get('client_crl_container_ref')) @mock.patch( 'octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_unset_crl(self, mock_cert_data): # Load up sample certs to test the validation self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=uuidutils.generate_uuid(), default_pool_id=None, client_ca_tls_container_ref=uuidutils.generate_uuid(), client_crl_container_ref=uuidutils.generate_uuid(), client_authentication=constants.CLIENT_AUTH_MANDATORY).get( self.root_tag) self.set_lb_status(self.lb_id) lb_listener = {'client_crl_container_ref': None} body = self._build_body(lb_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) api_listener = self.put(listener_path, 
body).json.get(self.root_tag) self.assertEqual(listener.get('client_ca_tls_container_ref'), api_listener.get('client_ca_tls_container_ref')) self.assertEqual(listener.get('client_authentication'), api_listener.get('client_authentication')) self.assertIsNone(api_listener.get('client_crl_container_ref')) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_unset_defaults(self, mock_cert_data): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group='haproxy_amphora', timeout_client_data=20) self.conf.config(group='haproxy_amphora', timeout_member_connect=21) self.conf.config(group='haproxy_amphora', timeout_member_data=22) self.conf.config(group='haproxy_amphora', timeout_tcp_inspect=23) self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) tls_uuid = uuidutils.generate_uuid() ca_tls_uuid = uuidutils.generate_uuid() crl_tls_uuid = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=tls_uuid, default_pool_id=self.pool_id, tags=['old_tag'], insert_headers={'X-Forwarded-For': 'true'}, timeout_client_data=1, timeout_member_connect=2, timeout_member_data=3, timeout_tcp_inspect=4, client_authentication=constants.CLIENT_AUTH_OPTIONAL, client_crl_container_ref=crl_tls_uuid, client_ca_tls_container_ref=ca_tls_uuid).get(self.root_tag) self.set_lb_status(self.lb_id) unset_params = { 'name': None, 'description': None, 'connection_limit': None, 'default_tls_container_ref': None, 'sni_container_refs': None, 'insert_headers': None, 'timeout_client_data': None, 'timeout_member_connect': None, 'timeout_member_data': None, 'timeout_tcp_inspect': None, 'client_ca_tls_container_ref': None, 'client_authentication': None, 'default_pool_id': None, 'client_crl_container_ref': None} body = self._build_body(unset_params) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) api_listener = self.put(listener_path, body).json.get(self.root_tag) self.assertEqual('', api_listener['name']) self.assertEqual('', api_listener['description']) self.assertEqual(constants.DEFAULT_CONNECTION_LIMIT, api_listener['connection_limit']) self.assertIsNone(api_listener['default_tls_container_ref']) self.assertEqual([], api_listener['sni_container_refs']) self.assertEqual({}, api_listener['insert_headers']) self.assertEqual(20, api_listener['timeout_client_data']) self.assertEqual(21, api_listener['timeout_member_connect']) self.assertEqual(22, api_listener['timeout_member_data']) self.assertEqual(23, api_listener['timeout_tcp_inspect']) self.assertIsNone(api_listener['client_ca_tls_container_ref']) self.assertIsNone(api_listener['client_crl_container_ref']) self.assertEqual(constants.CLIENT_AUTH_NONE, api_listener['client_authentication']) self.assertIsNone(api_listener['default_pool_id']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_bad_ca_cert(self, mock_cert_data): cert1 = 
data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) tls_uuid = uuidutils.generate_uuid() ca_tls_uuid = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=tls_uuid, default_pool_id=None, client_ca_tls_container_ref=ca_tls_uuid).get(self.root_tag) self.set_lb_status(self.lb_id) self.cert_manager_mock().get_secret.side_effect = Exception( 'bad ca cert') self.cert_manager_mock().get_secret.side_effect = Exception( 'bad secret') lb_listener = { 'client_ca_tls_container_ref': uuidutils.generate_uuid()} body = self._build_body(lb_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) response = self.put(listener_path, body, status=400).json self.assertIn(lb_listener['client_ca_tls_container_ref'], response['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_unreachable_crl(self, mock_cert_data): # Load up sample certs to test the validation tls_cert_mock = mock.MagicMock() tls_cert_mock.get_certificate.return_value = sample_certs.X509_CA_CERT self.cert_manager_mock().get_cert.return_value = tls_cert_mock self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, Exception('bad CRL ref')] cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=uuidutils.generate_uuid(), default_pool_id=None, client_ca_tls_container_ref=uuidutils.generate_uuid(), client_crl_container_ref=uuidutils.generate_uuid()).get( self.root_tag) self.set_lb_status(self.lb_id) lb_listener = { 'client_crl_container_ref': uuidutils.generate_uuid()} body = self._build_body(lb_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) response = self.put(listener_path, body, status=400).json self.assertIn(lb_listener['client_crl_container_ref'], response['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_bad_crl(self, mock_cert_data): # Load up sample certs to test the validation tls_cert_mock = mock.MagicMock() tls_cert_mock.get_certificate.return_value = sample_certs.X509_CA_CERT self.cert_manager_mock().get_cert.return_value = tls_cert_mock self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, 'bad CRL'] cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_tls_container_ref=uuidutils.generate_uuid(), default_pool_id=None, client_ca_tls_container_ref=uuidutils.generate_uuid(), client_crl_container_ref=uuidutils.generate_uuid()).get( self.root_tag) self.set_lb_status(self.lb_id) lb_listener = { 'client_crl_container_ref': uuidutils.generate_uuid()} body = 
self._build_body(lb_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) response = self.put(listener_path, body, status=400).json self.assertIn("The client authentication certificate revocation list " "is invalid. It must be a valid x509 PEM format " "certificate revocation list.", response['faultstring']) def test_update_authorized(self): listener = self.create_listener( constants.PROTOCOL_TCP, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_pool_id=None).get(self.root_tag) self.set_lb_status(self.lb_id) new_listener = {'name': 'listener2', 'admin_state_up': True, 'default_pool_id': self.pool_id} body = self._build_body(new_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): api_listener = self.put(listener_path, body) api_listener = api_listener.json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) update_expect = {'name': 'listener2', 'admin_state_up': True, 'default_pool_id': self.pool_id, 'provisioning_status': constants.PENDING_UPDATE, 'operating_status': constants.ONLINE} listener.update(update_expect) self.assertEqual(listener['created_at'], api_listener['created_at']) self.assertNotEqual(listener['updated_at'], api_listener['updated_at']) self.assertNotEqual(listener, api_listener) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.PENDING_UPDATE) self.assert_final_listener_statuses(self.lb_id, api_listener['id']) def test_update_not_authorized(self): listener = self.create_listener( constants.PROTOCOL_TCP, 80, self.lb_id, name='listener1', description='desc1', admin_state_up=False, connection_limit=10, default_pool_id=None).get(self.root_tag) self.set_lb_status(self.lb_id) new_listener = {'name': 'listener2', 'admin_state_up': True, 'default_pool_id': self.pool_id} body = self._build_body(new_listener) listener_path = self.LISTENER_PATH.format( listener_id=listener['id']) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): api_listener = self.put(listener_path, body, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_listener.json) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.ACTIVE) def test_create_listeners_same_port(self): listener1 = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id) self.set_lb_status(self.lb_id) listener2_post = {'protocol': listener1['listener']['protocol'], 'protocol_port': listener1['listener']['protocol_port'], 
'loadbalancer_id': self.lb_id} body = self._build_body(listener2_post) self.post(self.LISTENERS_PATH, body, status=409) def test_create_listeners_tcp_https_same_port(self): listener1 = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id) self.set_lb_status(self.lb_id) listener2_post = {'protocol': constants.PROTOCOL_HTTPS, 'protocol_port': listener1['listener']['protocol_port'], 'loadbalancer_id': self.lb_id} body = self._build_body(listener2_post) self.post(self.LISTENERS_PATH, body, status=409) def test_create_listeners_tcp_udp_same_port(self): listener1 = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id) self.set_lb_status(self.lb_id) listener2_post = {'protocol': constants.PROTOCOL_UDP, 'protocol_port': listener1['listener']['protocol_port'], 'loadbalancer_id': self.lb_id} body = self._build_body(listener2_post) self.post(self.LISTENERS_PATH, body, status=201) def test_delete(self): listener = self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) self.delete(listener_path) response = self.get(listener_path) api_listener = response.json['listener'] expected = {'name': None, 'default_pool_id': None, 'description': None, 'admin_state_up': True, 'operating_status': constants.ONLINE, 'provisioning_status': constants.PENDING_DELETE, 'connection_limit': None} listener['listener'].update(expected) self.assertIsNone(listener['listener'].pop('updated_at')) self.assertIsNotNone(api_listener.pop('updated_at')) self.assertNotEqual(listener, api_listener) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.PENDING_UPDATE) self.assert_final_listener_statuses(self.lb_id, api_listener['id'], delete=True) # Problems with TLS certs should not block a delete def test_delete_with_bad_tls_ref(self): listener = self.create_listener(constants.PROTOCOL_TCP, 443, self.lb_id) tls_uuid = uuidutils.generate_uuid() self.set_lb_status(self.lb_id) self.listener_repo.update(db_api.get_session(), listener['listener']['id'], tls_certificate_id=tls_uuid, protocol=constants.PROTOCOL_TERMINATED_HTTPS) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) self.delete(listener_path) response = self.get(listener_path) api_listener = response.json['listener'] expected = {'name': None, 'default_pool_id': None, 'description': None, 'admin_state_up': True, 'operating_status': constants.ONLINE, 'provisioning_status': constants.PENDING_DELETE, 'connection_limit': None} listener['listener'].update(expected) self.assertIsNone(listener['listener'].pop('updated_at')) self.assertIsNotNone(api_listener.pop('updated_at')) self.assertNotEqual(listener, api_listener) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.PENDING_UPDATE) self.assert_final_listener_statuses(self.lb_id, api_listener['id'], delete=True) def test_delete_authorized(self): listener = self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': 
None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.delete(listener_path) self.conf.config(group='api_settings', auth_strategy=auth_strategy) response = self.get(listener_path) api_listener = response.json['listener'] expected = {'name': None, 'default_pool_id': None, 'description': None, 'admin_state_up': True, 'operating_status': constants.ONLINE, 'provisioning_status': constants.PENDING_DELETE, 'connection_limit': None} listener['listener'].update(expected) self.assertIsNone(listener['listener'].pop('updated_at')) self.assertIsNotNone(api_listener.pop('updated_at')) self.assertNotEqual(listener, api_listener) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.PENDING_UPDATE) self.assert_final_listener_statuses(self.lb_id, api_listener['id'], delete=True) def test_delete_not_authorized(self): listener = self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): self.delete(listener_path, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_lb_status(self.lb_id, constants.ONLINE, constants.ACTIVE) def test_delete_bad_listener_id(self): listener_path = self.LISTENER_PATH.format(listener_id='SEAN-CONNERY') self.delete(listener_path, status=404) def test_create_listener_bad_protocol(self): lb_listener = {'protocol': 'SEAN_CONNERY', 'protocol_port': 80} self.post(self.LISTENERS_PATH, lb_listener, status=400) def test_update_listener_bad_protocol(self): listener = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id) self.set_lb_status(self.lb_id) new_listener = {'protocol': 'SEAN_CONNERY', 'protocol_port': 80} listener_path = self.LISTENER_PATH.format( listener_id=listener['listener'].get('id')) self.put(listener_path, new_listener, status=400) def test_update_pending_create(self): lb = self.create_load_balancer(uuidutils.generate_uuid()) optionals = {'name': 'lb1', 'description': 'desc1', 'admin_state_up': False} lb.update(optionals) lb_listener = {'name': 'listener1', 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'loadbalancer_id': lb['loadbalancer']['id']} body = self._build_body(lb_listener) self.post(self.LISTENERS_PATH, body, status=409) def test_delete_pending_update(self): lb = self.create_load_balancer(uuidutils.generate_uuid()) optionals = {'name': 'lb1', 'description': 'desc1', 'admin_state_up': False} lb.update(optionals) self.set_lb_status(lb['loadbalancer']['id']) lb_listener = {'name': 'listener1', 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'loadbalancer_id': lb['loadbalancer']['id']} body = self._build_body(lb_listener) api_listener = self.post( self.LISTENERS_PATH, body).json['listener'] listener_path = self.LISTENER_PATH.format( 
listener_id=api_listener['id']) self.delete(listener_path, status=409) def test_update_empty_body(self): listener = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener'].get('id')) self.put(listener_path, {}, status=400) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_bad_tls_ref(self, mock_cert_data): cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'sni_certs': [cert2, cert3]} sni1 = uuidutils.generate_uuid() sni2 = uuidutils.generate_uuid() tls_ref = uuidutils.generate_uuid() tls_ref2 = uuidutils.generate_uuid() lb_listener = {'name': 'listener1', 'default_pool_id': None, 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 80, 'sni_container_refs': [sni1, sni2], 'default_tls_container_ref': tls_ref, 'loadbalancer_id': self.lb_id} body = self._build_body(lb_listener) api_listener = self.post( self.LISTENERS_PATH, body).json['listener'] self.set_lb_status(self.lb_id) lb_listener_put = { 'default_tls_container_ref': tls_ref2, 'sni_container_refs': [sni1, sni2] } body = self._build_body(lb_listener_put) listener_path = self.LISTENER_PATH.format( listener_id=api_listener['id']) self.cert_manager_mock().get_cert.side_effect = [ Exception("bad cert"), None, Exception("bad cert")] self.cert_manager_mock().get_secret.side_effect = [ Exception("bad secret"), Exception("bad secret")] response = self.put(listener_path, body, status=400).json self.assertIn(tls_ref2, response['faultstring']) self.assertIn(sni1, response['faultstring']) self.assertNotIn(sni2, response['faultstring']) self.assertNotIn(tls_ref, response['faultstring']) def test_update_pending_update(self): lb = self.create_load_balancer(uuidutils.generate_uuid()) optionals = {'name': 'lb1', 'description': 'desc1', 'admin_state_up': False} lb.update(optionals) self.set_lb_status(lb['loadbalancer']['id']) lb_listener = {'name': 'listener1', 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'loadbalancer_id': lb['loadbalancer']['id']} body = self._build_body(lb_listener) api_listener = self.post( self.LISTENERS_PATH, body).json['listener'] self.set_lb_status(lb['loadbalancer']['id']) self.put(self.LB_PATH.format(lb_id=lb['loadbalancer']['id']), {'loadbalancer': {'name': 'hi'}}) lb_listener_put = {'name': 'listener1_updated'} body = self._build_body(lb_listener_put) listener_path = self.LISTENER_PATH.format( listener_id=api_listener['id']) self.put(listener_path, body, status=409) def test_update_pending_delete(self): lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', description='desc1', admin_state_up=False) lb_id = lb['loadbalancer'].get('id') self.set_lb_status(lb_id) lb_listener = {'name': 'listener1', 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'loadbalancer_id': lb_id} body = self._build_body(lb_listener) api_listener = self.post( self.LISTENERS_PATH, body).json.get(self.root_tag) self.set_lb_status(lb_id) self.delete(self.LB_PATH.format(lb_id=lb_id), params={'cascade': "true"}) lb_listener_put = {'name': 'listener1_updated'} body = self._build_body(lb_listener_put) listener_path = self.LISTENER_PATH.format( 
listener_id=api_listener['id']) self.put(listener_path, body, status=409) def test_update_deleted(self): lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', description='desc1', admin_state_up=False) lb_id = lb['loadbalancer'].get('id') self.set_lb_status(lb_id) lb_listener = {'name': 'listener1', 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'loadbalancer_id': lb_id} body = self._build_body(lb_listener) api_listener = self.post( self.LISTENERS_PATH, body).json.get(self.root_tag) # This updates the child objects self.set_lb_status(lb_id, status=constants.DELETED) lb_listener_put = {'name': 'listener1_updated'} body = self._build_body(lb_listener_put) listener_path = self.LISTENER_PATH.format( listener_id=api_listener['id']) self.put(listener_path, body, status=404) def test_delete_pending_delete(self): lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', description='desc1', admin_state_up=False) lb_id = lb['loadbalancer'].get('id') self.set_lb_status(lb_id) lb_listener = {'name': 'listener1', 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'loadbalancer_id': lb_id} body = self._build_body(lb_listener) api_listener = self.post( self.LISTENERS_PATH, body).json.get(self.root_tag) self.set_lb_status(lb_id) self.delete(self.LB_PATH.format(lb_id=lb_id), params={'cascade': "true"}) listener_path = self.LISTENER_PATH.format( listener_id=api_listener['id']) self.delete(listener_path, status=409) def test_delete_already_deleted(self): lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', description='desc1', admin_state_up=False) lb_id = lb['loadbalancer'].get('id') self.set_lb_status(lb_id) lb_listener = {'name': 'listener1', 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 10, 'loadbalancer_id': lb_id} body = self._build_body(lb_listener) api_listener = self.post( self.LISTENERS_PATH, body).json.get(self.root_tag) # This updates the child objects self.set_lb_status(lb_id, status=constants.DELETED) listener_path = self.LISTENER_PATH.format( listener_id=api_listener['id']) self.delete(listener_path, status=404) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_create_with_tls_termination_data(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} cert_id = uuidutils.generate_uuid() listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, default_tls_container_ref=cert_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) get_listener = self.get(listener_path).json['listener'] self.assertEqual(cert_id, get_listener['default_tls_container_ref']) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_tls_termination_data(self, mock_cert_data): cert_id_orig = uuidutils.generate_uuid() cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} cert_id = uuidutils.generate_uuid() listener = 
self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, default_tls_container_ref=cert_id_orig) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) get_listener = self.get(listener_path).json['listener'] self.assertEqual(cert_id_orig, get_listener.get('default_tls_container_ref')) self.put(listener_path, self._build_body({'default_tls_container_ref': cert_id})) get_listener = self.get(listener_path).json['listener'] self.assertEqual(cert_id, get_listener.get('default_tls_container_ref')) def test_create_with_tls_termination_disabled(self): self.conf.config(group='api_settings', allow_tls_terminated_listeners=False) cert_id = uuidutils.generate_uuid() listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, default_tls_container_ref=cert_id, status=400) self.assertIn( 'The selected protocol is not allowed in this deployment: {0}' .format(constants.PROTOCOL_TERMINATED_HTTPS), listener.get('faultstring')) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_create_with_sni_data(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} sni_id1 = uuidutils.generate_uuid() sni_id2 = uuidutils.generate_uuid() listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, sni_container_refs=[sni_id1, sni_id2]) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) get_listener = self.get(listener_path).json['listener'] self.assertItemsEqual([sni_id1, sni_id2], get_listener['sni_container_refs']) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_tls_terminated_with_sni_data(self, mock_cert_data): cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'sni_certs': [cert2, cert3]} sni_id1 = uuidutils.generate_uuid() sni_id2 = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, default_tls_container_ref=uuidutils.generate_uuid()) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener']['id']) get_listener = self.get(listener_path).json['listener'] self.assertEqual([], get_listener.get('sni_container_refs')) self.put(listener_path, self._build_body({'sni_container_refs': [sni_id1, sni_id2]})) get_listener = self.get(listener_path).json['listener'] self.assertItemsEqual([sni_id1, sni_id2], get_listener.get('sni_container_refs')) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_non_tls_terminated_with_sni_data(self, mock_cert_data): cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'sni_certs': [cert2, cert3]} sni_id1 = uuidutils.generate_uuid() sni_id2 = uuidutils.generate_uuid() listener = self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) listener_path = self.LISTENER_PATH.format( 
listener_id=listener['listener']['id']) get_listener = self.get(listener_path).json['listener'] self.assertEqual([], get_listener.get('sni_container_refs')) body = self._build_body({'sni_container_refs': [sni_id1, sni_id2]}) response = self.put(listener_path, body, status=400).json self.assertEqual( "Validation failure: Certificate container references are only " "allowed on TERMINATED_HTTPS protocol listeners.", response['faultstring']) get_listener = self.get(listener_path).json['listener'] self.assertEqual([], get_listener.get('sni_container_refs')) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_create_with_valid_insert_headers(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} lb_listener = {'protocol': 'HTTP', 'protocol_port': 80, 'loadbalancer_id': self.lb_id, 'insert_headers': {'X-Forwarded-For': 'true', 'X-Forwarded-Port': 'true', 'X-Forwarded-Proto': 'true'}} body = self._build_body(lb_listener) self.post(self.LISTENERS_PATH, body, status=201) # test client certificate http headers self.set_lb_status(self.lb_id) header = {} for name in constants.SUPPORTED_SSL_HEADERS: header[name] = 'true' lb_listener = {'protocol': constants.PROTOCOL_TERMINATED_HTTPS, 'protocol_port': 1801, 'loadbalancer_id': self.lb_id, 'insert_headers': header, 'default_tls_container_ref': uuidutils.generate_uuid()} body = self._build_body(lb_listener) self.post(self.LISTENERS_PATH, body, status=201) def test_create_with_bad_insert_headers(self): lb_listener = {'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id, 'insert_headers': {'X-Forwarded-Four': 'true'}} body = self._build_body(lb_listener) self.post(self.LISTENERS_PATH, body, status=400) # test client certificate http headers for name in constants.SUPPORTED_SSL_HEADERS: header = {} header[name] = 'true' lb_listener['insert_headers'] = header body = self._build_body(lb_listener) listener = self.post(self.LISTENERS_PATH, body, status=400).json self.assertIn('{0} is not a valid option for {1}'.format( [name], '%s protocol listener.' 
% constants.PROTOCOL_HTTP), listener.get('faultstring')) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_valid_insert_headers(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') mock_cert_data.return_value = {'tls_cert': cert1} listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) new_listener = self._build_body( {'insert_headers': {'X-Forwarded-For': 'true', 'X-Forwarded-Port': 'true', 'X-Forwarded-Proto': 'true'}}) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener'].get('id')) update_listener = self.put( listener_path, new_listener, status=200).json self.assertNotEqual( listener[self.root_tag]['insert_headers'], update_listener[self.root_tag]['insert_headers']) self.set_lb_status(self.lb_id) # test client certificate http headers cert1_id = uuidutils.generate_uuid() listener = self.create_listener( constants.PROTOCOL_TERMINATED_HTTPS, 443, self.lb_id, default_tls_container_ref=cert1_id) self.set_lb_status(self.lb_id) header = {} for name in constants.SUPPORTED_SSL_HEADERS: header[name] = 'true' new_listener[self.root_tag]['insert_headers'] = header listener_path = self.LISTENER_PATH.format( listener_id=listener['listener'].get('id')) update_listener = self.put( listener_path, new_listener, status=200).json self.assertNotEqual( listener[self.root_tag]['insert_headers'], update_listener[self.root_tag]['insert_headers']) def test_update_with_bad_insert_headers(self): listener = self.create_listener( constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) new_listener = self._build_body( {'insert_headers': {'X-Bad-Header': 'true'}}) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener'].get('id')) update_listener = self.put( listener_path, new_listener, status=400).json self.assertIn('{0} is not a valid option for {1}'.format( '[\'X-Bad-Header\']', 'insert_headers'), update_listener.get('faultstring')) # test client certificate http headers header = {} for name in constants.SUPPORTED_SSL_HEADERS: header[name] = 'true' new_listener[self.root_tag]['insert_headers'] = header # as the order of output faultstring is not stable, so we just check # the status. 
self.put(listener_path, new_listener, status=400).json def _test_update_protocol_insert_headers_mismatch(self, protocol): listener = self.create_listener( protocol, 80, self.lb_id) self.set_lb_status(self.lb_id) new_listener = self._build_body( {'insert_headers': {'X-Forwarded-Port': 'true'}}) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener'].get('id')) update_listener = self.put( listener_path, new_listener, status=400).json self.assertIn(protocol, update_listener['faultstring']) self.assert_correct_status(lb_id=self.lb_id) def test_update_protocol_HTTPS_insert_headers(self): self._test_update_protocol_insert_headers_mismatch( constants.PROTOCOL_HTTPS) def test_update_protocol_PROXY_insert_headers(self): self._test_update_protocol_insert_headers_mismatch( constants.PROTOCOL_PROXY) def test_update_protocol_TCP_insert_headers(self): self._test_update_protocol_insert_headers_mismatch( constants.PROTOCOL_TCP) def test_update_protocol_UDP_insert_headers(self): self._test_update_protocol_insert_headers_mismatch( constants.PROTOCOL_UDP) def _getStats(self, listener_id): res = self.get(self.LISTENER_PATH.format( listener_id=listener_id + "/stats")) return res.json.get('stats') def test_statistics(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) li = self.create_listener( constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) ls = self.create_listener_stats_dynamic( listener_id=li.get('id'), amphora_id=amphora.id, bytes_in=random.randint(1, 9), bytes_out=random.randint(1, 9), total_connections=random.randint(1, 9), request_errors=random.randint(1, 9)) response = self._getStats(li['id']) self.assertEqual(ls['bytes_in'], response['bytes_in']) self.assertEqual(ls['bytes_out'], response['bytes_out']) self.assertEqual(ls['total_connections'], response['total_connections']) self.assertEqual(ls['active_connections'], response['active_connections']) self.assertEqual(ls['request_errors'], response['request_errors']) def test_statistics_authorized(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer( uuidutils.generate_uuid(), project_id=project_id).get('loadbalancer') self.set_lb_status(lb['id']) li = self.create_listener( constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) ls = self.create_listener_stats_dynamic( listener_id=li.get('id'), amphora_id=amphora.id, bytes_in=random.randint(1, 9), bytes_out=random.randint(1, 9), total_connections=random.randint(1, 9), request_errors=random.randint(1, 9)) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self._getStats(li['id']) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(ls['bytes_in'], 
response['bytes_in']) self.assertEqual(ls['bytes_out'], response['bytes_out']) self.assertEqual(ls['total_connections'], response['total_connections']) self.assertEqual(ls['active_connections'], response['active_connections']) self.assertEqual(ls['request_errors'], response['request_errors']) def test_statistics_not_authorized(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) li = self.create_listener( constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) self.create_listener_stats_dynamic( listener_id=li.get('id'), amphora_id=amphora.id, bytes_in=random.randint(1, 9), bytes_out=random.randint(1, 9), total_connections=random.randint(1, 9), request_errors=random.randint(1, 9)) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): res = self.get(self.LISTENER_PATH.format( listener_id=li['id'] + "/stats"), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, res.json) def test_statistics_get_deleted(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) li = self.create_listener( constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) self.create_listener_stats_dynamic( listener_id=li.get('id'), amphora_id=amphora.id, bytes_in=random.randint(1, 9), bytes_out=random.randint(1, 9), total_connections=random.randint(1, 9), request_errors=random.randint(1, 9)) self.set_lb_status(lb['id'], status=constants.DELETED) self.get(self.LISTENER_PATH.format( listener_id=li.get('id') + "/stats"), status=404) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_listener_pool_protocol_map_post(self, mock_cert_data): cert = data_models.TLSContainer(certificate='cert') mock_cert_data.return_value = {'sni_certs': [cert]} valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP port = 1 for listener_proto in valid_map: for pool_proto in valid_map[listener_proto]: port = port + 1 pool = self.create_pool( self.lb_id, pool_proto, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_object_status(self.lb_repo, self.lb_id) listener = {'protocol': listener_proto, 'protocol_port': port, 'loadbalancer_id': self.lb_id, 'default_pool_id': pool.get('id')} if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: listener.update( {'sni_container_refs': [uuidutils.generate_uuid()]}) body = self._build_body(listener) self.post(self.LISTENERS_PATH, body, status=201) self.set_object_status(self.lb_repo, self.lb_id) invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP port = 1 for listener_proto in invalid_map: for pool_proto in invalid_map[listener_proto]: port = port + 1 pool = self.create_pool( self.lb_id, pool_proto, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_object_status(self.lb_repo, self.lb_id) expect_error_msg = ("Validation failure: The pool protocol " "'%s' is invalid while the listener " "protocol is '%s'.") % (pool_proto, listener_proto) listener = {'protocol': listener_proto, 'protocol_port': port, 'loadbalancer_id': self.lb_id, 'default_pool_id': pool.get('id')} body = self._build_body(listener) res 
= self.post(self.LISTENERS_PATH, body, status=400,
                                expect_errors=True)
                self.assertEqual(expect_error_msg, res.json['faultstring'])
                self.assert_correct_status(lb_id=self.lb_id)

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_listener_pool_protocol_map_put(self, mock_cert_data):
        cert = data_models.TLSContainer(certificate='cert')
        mock_cert_data.return_value = {'sni_certs': [cert]}
        valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
        port = 1
        for listener_proto in valid_map:
            for pool_proto in valid_map[listener_proto]:
                port = port + 1
                pool = self.create_pool(
                    self.lb_id, pool_proto,
                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
                self.set_object_status(self.lb_repo, self.lb_id)
                opts = {}
                if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
                    opts['sni_container_refs'] = [uuidutils.generate_uuid()]
                listener = self.create_listener(
                    listener_proto, port, self.lb_id, **opts).get('listener')
                self.set_object_status(self.lb_repo, self.lb_id)
                new_listener = {'default_pool_id': pool.get('id')}
                res = self.put(
                    self.LISTENER_PATH.format(listener_id=listener.get('id')),
                    self._build_body(new_listener), status=200)
                self.set_object_status(self.lb_repo, self.lb_id)

        invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP
        port = 100
        for listener_proto in invalid_map:
            opts = {}
            if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
                opts['sni_container_refs'] = [uuidutils.generate_uuid()]
            listener = self.create_listener(
                listener_proto, port, self.lb_id, **opts).get('listener')
            self.set_object_status(self.lb_repo, self.lb_id)
            port = port + 1
            for pool_proto in invalid_map[listener_proto]:
                expect_error_msg = ("Validation failure: The pool protocol "
                                    "'%s' is invalid while the listener "
                                    "protocol is '%s'.") % (pool_proto,
                                                            listener_proto)
                pool = self.create_pool(
                    self.lb_id, pool_proto,
                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
                self.set_object_status(self.lb_repo, self.lb_id)
                new_listener = {'default_pool_id': pool.get('id')}
                res = self.put(
                    self.LISTENER_PATH.format(listener_id=listener.get('id')),
                    self._build_body(new_listener), status=400)
                self.assertEqual(expect_error_msg, res.json['faultstring'])
                self.assert_correct_status(lb_id=self.lb_id)

octavia-6.2.2/octavia/tests/functional/api/v2/test_load_balancer.py

# Copyright 2014 Rackspace
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
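# A minimal illustrative sketch (not part of the original suite) of the
# root-tag request convention the v2 API tests in this file rely on:
# request payloads are wrapped under a singular root tag before being
# posted, and responses are unwrapped with the same tag. The helper names
# (_build_body, post, LBS_PATH, root_tag) mirror the ones used below; the
# literal payload is a hypothetical example.
#
#     body = self._build_body({'name': 'lb1',
#                              'vip_subnet_id': uuidutils.generate_uuid()})
#     # body == {'loadbalancer': {'name': 'lb1', 'vip_subnet_id': '<uuid>'}}
#     api_lb = self.post(self.LBS_PATH, body).json.get(self.root_tag)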
import copy import random from unittest import mock from octavia_lib.api.drivers import exceptions as lib_exceptions from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from sqlalchemy.orm import exc as sa_exception from octavia.common import constants import octavia.common.context from octavia.common import data_models from octavia.common import exceptions from octavia.network import base as network_base from octavia.network import data_models as network_models from octavia.tests.functional.api.v2 import base class TestLoadBalancer(base.BaseAPITest): root_tag = 'loadbalancer' root_tag_list = 'loadbalancers' root_tag_links = 'loadbalancers_links' def _assert_request_matches_response(self, req, resp, **optionals): self.assertTrue(uuidutils.is_uuid_like(resp.get('id'))) req_name = req.get('name') req_description = req.get('description') if not req_name: self.assertEqual('', resp.get('name')) else: self.assertEqual(req.get('name'), resp.get('name')) if not req_description: self.assertEqual('', resp.get('description')) else: self.assertEqual(req.get('description'), resp.get('description')) self.assertEqual(constants.PENDING_CREATE, resp.get('provisioning_status')) self.assertEqual(constants.OFFLINE, resp.get('operating_status')) self.assertEqual(req.get('admin_state_up', True), resp.get('admin_state_up')) self.assertIsNotNone(resp.get('created_at')) self.assertIsNone(resp.get('updated_at')) for key, value in optionals.items(): self.assertEqual(value, req.get(key)) def test_empty_list(self): response = self.get(self.LBS_PATH) api_list = response.json.get(self.root_tag_list) self.assertEqual([], api_list) def test_create(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'tags': ['test_tag1', 'test_tag2'] } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) return api_lb # Make sure the /v2.0 alias is maintained for the life of the v2 API def test_create_v2_0(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, use_v2_0=True) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) return api_lb def test_create_using_tenant_id(self): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'tenant_id': self.project_id } body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) return api_lb def test_create_without_vip(self): lb_json = {'name': 'test1', 'project_id': self.project_id} body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) err_msg = ('Validation failure: VIP must contain one of: ' 'vip_port_id, vip_network_id, vip_subnet_id.') self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_empty_vip(self): lb_json = {'vip_subnet_id': '', 'project_id': self.project_id} body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) err_msg = ("Invalid input for field/attribute vip_subnet_id. " "Value: ''. 
Value should be UUID format") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_invalid_vip_subnet(self): subnet_id = uuidutils.generate_uuid() lb_json = {'vip_subnet_id': subnet_id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch("octavia.network.drivers.noop_driver.driver" ".NoopManager.get_subnet") as mock_get_subnet: mock_get_subnet.side_effect = network_base.SubnetNotFound response = self.post(self.LBS_PATH, body, status=400) err_msg = 'Subnet {} not found.'.format(subnet_id) self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_invalid_vip_network_subnet(self): network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[]) subnet_id = uuidutils.generate_uuid() lb_json = { 'vip_subnet_id': subnet_id, 'vip_network_id': network.id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch("octavia.network.drivers.noop_driver.driver" ".NoopManager.get_network") as mock_get_network: mock_get_network.return_value = network response = self.post(self.LBS_PATH, body, status=400) err_msg = 'Subnet {} not found.'.format(subnet_id) self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_vip_subnet_fills_network(self): subnet = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=uuidutils.generate_uuid()) lb_json = {'vip_subnet_id': subnet.id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch("octavia.network.drivers.noop_driver.driver" ".NoopManager.get_subnet") as mock_get_subnet: mock_get_subnet.return_value = subnet response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(subnet.network_id, api_lb.get('vip_network_id')) def test_create_with_vip_network_has_no_subnet(self): network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[]) lb_json = { 'vip_network_id': network.id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch("octavia.network.drivers.noop_driver.driver" ".NoopManager.get_network") as mock_get_network: mock_get_network.return_value = network response = self.post(self.LBS_PATH, body, status=400) err_msg = ("Validation failure: " "Supplied network does not contain a subnet.") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_vip_network_picks_subnet_ipv4(self): network_id = uuidutils.generate_uuid() subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=6) subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=4) network = network_models.Network(id=network_id, subnets=[subnet1.id, subnet2.id]) lb_json = {'vip_network_id': network.id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_subnet") as mock_get_subnet: mock_get_network.return_value = network mock_get_subnet.side_effect = [subnet1, subnet2] response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(subnet2.id, api_lb.get('vip_subnet_id')) self.assertEqual(network_id, api_lb.get('vip_network_id')) def 
test_create_with_vip_network_picks_subnet_ipv6(self): network_id = uuidutils.generate_uuid() subnet = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=6) network = network_models.Network(id=network_id, subnets=[subnet.id]) lb_json = {'vip_network_id': network_id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_subnet") as mock_get_subnet: mock_get_network.return_value = network mock_get_subnet.return_value = subnet response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(network_id, api_lb.get('vip_network_id')) def test_create_with_vip_network_picks_subnet_ipv4_avail_ips(self): self.conf.config( group='controller_worker', loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) network_id = uuidutils.generate_uuid() subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=4) subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=4) subnet3 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=4) network = network_models.Network(id=network_id, subnets=[subnet1.id, subnet2.id, subnet3.id]) subnet_ip_availability = [{'subnet_id': subnet1.id, 'used_ips': 254, 'total_ips': 254}, {'subnet_id': subnet2.id, 'used_ips': 128, 'total_ips': 254}, {'subnet_id': subnet3.id, 'used_ips': 254, 'total_ips': 254}] ip_avail = network_models.Network_IP_Availability( network_id=network.id, subnet_ip_availability=subnet_ip_availability) lb_json = {'vip_network_id': network.id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_subnet") as mock_get_subnet, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network_ip_availability") as ( mock_get_network_ip_availability): mock_get_network.return_value = network mock_get_subnet.side_effect = [subnet1, subnet2, subnet3] mock_get_network_ip_availability.return_value = ip_avail response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(subnet2.id, api_lb.get('vip_subnet_id')) self.assertEqual(network_id, api_lb.get('vip_network_id')) def test_create_with_vip_network_not_enough_avail_ips(self): self.conf.config( group='controller_worker', loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) network_id = uuidutils.generate_uuid() subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=4) subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=4) network = network_models.Network(id=network_id, subnets=[subnet1.id, subnet2.id]) subnet_ip_availability = [{'subnet_id': subnet1.id, 'used_ips': 254, 'total_ips': 254}, {'subnet_id': subnet2.id, 'used_ips': 254, 'total_ips': 254}] ip_avail = network_models.Network_IP_Availability( network_id=network.id, subnet_ip_availability=subnet_ip_availability) lb_json = {'vip_network_id': network.id, 'project_id': self.project_id} body = 
self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_subnet") as mock_get_subnet, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network_ip_availability") as ( mock_get_network_ip_availability): mock_get_network.return_value = network mock_get_subnet.side_effect = [subnet1, subnet2] mock_get_network_ip_availability.return_value = ip_avail response = self.post(self.LBS_PATH, body, status=400) err_msg = ('Validation failure: Subnet(s) in the supplied network do ' 'not contain enough available IPs.') self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_vip_network_and_address(self): ip_address = '198.51.100.10' network_id = uuidutils.generate_uuid() subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, cidr='2001:DB8::/32', ip_version=6) subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, cidr='198.51.100.0/24', ip_version=4) network = network_models.Network(id=network_id, subnets=[subnet1.id, subnet2.id]) lb_json = {'vip_network_id': network.id, 'vip_address': ip_address, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_subnet") as mock_get_subnet: mock_get_network.return_value = network mock_get_subnet.side_effect = [subnet1, subnet2] response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(subnet2.id, api_lb.get('vip_subnet_id')) self.assertEqual(network.id, api_lb.get('vip_network_id')) self.assertEqual(ip_address, api_lb.get('vip_address')) def test_create_with_vip_network_and_address_no_subnet_match(self): ip_address = '198.51.100.10' network_id = uuidutils.generate_uuid() subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, cidr='2001:DB8::/32', ip_version=6) subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, cidr='203.0.113.0/24', ip_version=4) network = network_models.Network(id=network_id, subnets=[subnet1.id, subnet2.id]) lb_json = {'vip_network_id': network.id, 'vip_address': ip_address, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_subnet") as mock_get_subnet: mock_get_network.return_value = network mock_get_subnet.side_effect = [subnet1, subnet2] response = self.post(self.LBS_PATH, body, status=400) err_msg = ('Validation failure: Supplied network does not contain a ' 'subnet for VIP address specified.') self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_vip_network_and_address_ipv6(self): ip_address = '2001:DB8::10' network_id = uuidutils.generate_uuid() subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, cidr='2001:DB8::/32', ip_version=6) subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, cidr='198.51.100.0/24', ip_version=4) network = network_models.Network(id=network_id, subnets=[subnet1.id, subnet2.id]) lb_json = {'vip_network_id': network.id, 
'vip_address': ip_address,
                   'project_id': self.project_id}
        body = self._build_body(lb_json)
        with mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_network") as mock_get_network, mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_subnet") as mock_get_subnet:
            mock_get_network.return_value = network
            mock_get_subnet.side_effect = [subnet1, subnet2]
            response = self.post(self.LBS_PATH, body)
        api_lb = response.json.get(self.root_tag)
        self._assert_request_matches_response(lb_json, api_lb)
        self.assertEqual(subnet1.id, api_lb.get('vip_subnet_id'))
        self.assertEqual(network.id, api_lb.get('vip_network_id'))
        self.assertEqual(ip_address, api_lb.get('vip_address'))

    # Note: This test is using the unique local address range to
    # validate that we handle a fully expanded IP address properly.
    # This is not possible with the documentation/testnet range.
    def test_create_with_vip_network_and_address_full_ipv6(self):
        ip_address = 'fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
        network_id = uuidutils.generate_uuid()
        subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(),
                                        network_id=network_id,
                                        cidr='fc00::/7',
                                        ip_version=6)
        subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(),
                                        network_id=network_id,
                                        cidr='198.51.100.0/24',
                                        ip_version=4)
        network = network_models.Network(id=network_id,
                                         subnets=[subnet1.id, subnet2.id])
        lb_json = {'vip_network_id': network.id,
                   'vip_address': ip_address,
                   'project_id': self.project_id}
        body = self._build_body(lb_json)
        with mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_network") as mock_get_network, mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_subnet") as mock_get_subnet:
            mock_get_network.return_value = network
            mock_get_subnet.side_effect = [subnet1, subnet2]
            response = self.post(self.LBS_PATH, body)
        api_lb = response.json.get(self.root_tag)
        self._assert_request_matches_response(lb_json, api_lb)
        self.assertEqual(subnet1.id, api_lb.get('vip_subnet_id'))
        self.assertEqual(network.id, api_lb.get('vip_network_id'))
        self.assertEqual(ip_address, api_lb.get('vip_address'))

    def test_create_with_vip_port_1_fixed_ip(self):
        ip_address = '198.51.100.1'
        subnet = network_models.Subnet(id=uuidutils.generate_uuid())
        network = network_models.Network(id=uuidutils.generate_uuid(),
                                         subnets=[subnet])
        fixed_ip = network_models.FixedIP(subnet_id=subnet.id,
                                          ip_address=ip_address)
        port = network_models.Port(id=uuidutils.generate_uuid(),
                                   fixed_ips=[fixed_ip],
                                   network_id=network.id)
        lb_json = {
            'name': 'test1', 'description': 'test1_desc',
            'vip_port_id': port.id, 'admin_state_up': False,
            'project_id': self.project_id}
        body = self._build_body(lb_json)
        # This test needs the provider driver to not supply the VIP port
        # so mocking noop to not supply a VIP port.
        with mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_network") as mock_get_network, mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_port") as mock_get_port, mock.patch(
                "octavia.api.drivers.noop_driver.driver.NoopManager."
"create_vip_port") as mock_provider: mock_get_network.return_value = network mock_get_port.return_value = port mock_provider.side_effect = lib_exceptions.NotImplementedError() response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(ip_address, api_lb.get('vip_address')) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(network.id, api_lb.get('vip_network_id')) self.assertEqual(port.id, api_lb.get('vip_port_id')) def test_create_with_vip_port_2_fixed_ip(self): ip_address = '198.51.100.1' subnet = network_models.Subnet(id=uuidutils.generate_uuid()) network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[subnet]) fixed_ip = network_models.FixedIP(subnet_id=subnet.id, ip_address=ip_address) fixed_ip_2 = network_models.FixedIP( subnet_id=uuidutils.generate_uuid(), ip_address='203.0.113.5') port = network_models.Port(id=uuidutils.generate_uuid(), fixed_ips=[fixed_ip, fixed_ip_2], network_id=network.id) lb_json = { 'name': 'test1', 'description': 'test1_desc', 'vip_port_id': port.id, 'admin_state_up': False, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port: mock_get_network.return_value = network mock_get_port.return_value = port response = self.post(self.LBS_PATH, body, status=400) err_msg = ("Validation failure: " "VIP port's subnet could not be determined. Please " "specify either a VIP subnet or address.") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_vip_port_and_address(self): ip_address = '198.51.100.1' subnet = network_models.Subnet(id=uuidutils.generate_uuid()) network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[subnet]) fixed_ip = network_models.FixedIP(subnet_id=subnet.id, ip_address=ip_address) port = network_models.Port(id=uuidutils.generate_uuid(), fixed_ips=[fixed_ip], network_id=network.id) lb_json = { 'name': 'test1', 'description': 'test1_desc', 'vip_port_id': port.id, 'vip_address': ip_address, 'admin_state_up': False, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port: mock_get_network.return_value = network mock_get_port.return_value = port response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(ip_address, api_lb.get('vip_address')) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(network.id, api_lb.get('vip_network_id')) self.assertEqual(port.id, api_lb.get('vip_port_id')) def test_create_with_vip_port_and_bad_address(self): ip_address = '198.51.100.1' subnet = network_models.Subnet(id=uuidutils.generate_uuid()) network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[subnet]) fixed_ip = network_models.FixedIP(subnet_id=subnet.id, ip_address=ip_address) port = network_models.Port(id=uuidutils.generate_uuid(), fixed_ips=[fixed_ip], network_id=network.id) lb_json = { 'name': 'test1', 'description': 'test1_desc', 'vip_port_id': port.id, 'vip_address': '203.0.113.7', 'admin_state_up': False, 'project_id': 
self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port: mock_get_network.return_value = network mock_get_port.return_value = port response = self.post(self.LBS_PATH, body, status=400) err_msg = ("Validation failure: " "Specified VIP address not found on the specified VIP " "port.") self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_vip_full(self): subnet = network_models.Subnet(id=uuidutils.generate_uuid()) network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[subnet]) port = network_models.Port(id=uuidutils.generate_uuid(), network_id=network.id) lb_json = { 'name': 'test1', 'description': 'test1_desc', 'vip_address': '10.0.0.1', 'vip_subnet_id': subnet.id, 'vip_network_id': network.id, 'vip_port_id': port.id, 'admin_state_up': False, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port: mock_get_network.return_value = network mock_get_port.return_value = port response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual('10.0.0.1', api_lb.get('vip_address')) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(network.id, api_lb.get('vip_network_id')) self.assertEqual(port.id, api_lb.get('vip_port_id')) def test_create_neutron_failure(self): class TestNeutronException(network_base.AllocateVIPException): def __init__(self, message, orig_msg, orig_code): super(TestNeutronException, self).__init__( message, orig_msg=orig_msg, orig_code=orig_code, ) def __str__(self): return repr(self.message) subnet = network_models.Subnet(id=uuidutils.generate_uuid()) network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[subnet]) port = network_models.Port(id=uuidutils.generate_uuid(), network_id=network.id) lb_json = { 'name': 'test1', 'description': 'test1_desc', 'vip_address': '10.0.0.1', 'vip_subnet_id': subnet.id, 'vip_network_id': network.id, 'vip_port_id': port.id, 'admin_state_up': False, 'project_id': self.project_id} body = self._build_body(lb_json) # This test needs the provider driver to not supply the VIP port # so mocking noop to not supply a VIP port. with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".allocate_vip") as mock_allocate_vip, mock.patch( "octavia.api.drivers.noop_driver.driver.NoopManager." 
"create_vip_port") as mock_provider: mock_get_network.return_value = network mock_get_port.return_value = port mock_allocate_vip.side_effect = TestNeutronException( "octavia_msg", "neutron_msg", 409) mock_provider.side_effect = lib_exceptions.NotImplementedError() response = self.post(self.LBS_PATH, body, status=409) # Make sure the faultstring contains the neutron error and not # the octavia error message self.assertIn("neutron_msg", response.json.get("faultstring")) def test_create_with_qos(self): subnet = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=uuidutils.generate_uuid()) qos_policy_id = uuidutils.generate_uuid() # Test with specific vip_qos_policy_id lb_json = {'vip_subnet_id': subnet.id, 'project_id': self.project_id, 'vip_qos_policy_id': qos_policy_id} body = self._build_body(lb_json) with mock.patch("octavia.network.drivers.noop_driver.driver" ".NoopManager.get_subnet") as mock_get_subnet: with mock.patch("octavia.common.validate." "qos_policy_exists") as mock_get_qos: mock_get_subnet.return_value = subnet mock_get_qos.return_value = qos_policy_id response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(qos_policy_id, api_lb.get('vip_qos_policy_id')) def test_create_with_qos_vip_port(self): # Test with vip_port_id which applied qos_policy subnet = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=uuidutils.generate_uuid()) port_qos_policy_id = uuidutils.generate_uuid() ip_address = '192.168.50.50' network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[subnet]) fixed_ip = network_models.FixedIP(subnet_id=subnet.id, ip_address=ip_address) port = network_models.Port(id=uuidutils.generate_uuid(), fixed_ips=[fixed_ip], network_id=network.id, qos_policy_id=port_qos_policy_id) lb_json = {'vip_port_id': port.id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver." "NoopManager.get_network") as m_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".allocate_vip") as mock_allocate_vip, mock.patch( "octavia.common.validate." 
"qos_policy_exists") as m_get_qos: m_get_qos.return_value = port_qos_policy_id mock_allocate_vip.return_value = data_models.Vip( ip_address=ip_address, subnet_id=subnet.id, network_id=network.id, port_id=port.id) m_get_network.return_value = network mock_get_port.return_value = port response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(port.id, api_lb.get('vip_port_id')) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(network.id, api_lb.get('vip_network_id')) self.assertEqual(port_qos_policy_id, api_lb.get( 'vip_qos_policy_id')) def test_create_with_qos_vip_port_and_vip_qos(self): subnet = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=uuidutils.generate_uuid()) port_qos_policy_id = uuidutils.generate_uuid() new_qos_policy_id = uuidutils.generate_uuid() ip_address = '192.168.50.50' network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[subnet]) fixed_ip = network_models.FixedIP(subnet_id=subnet.id, ip_address=ip_address) port = network_models.Port(id=uuidutils.generate_uuid(), fixed_ips=[fixed_ip], network_id=network.id, qos_policy_id=port_qos_policy_id) lb_json = {'vip_port_id': port.id, 'project_id': self.project_id, 'vip_qos_policy_id': new_qos_policy_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver." "NoopManager.get_network") as m_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".allocate_vip") as mock_allocate_vip, mock.patch( "octavia.common.validate." "qos_policy_exists") as m_get_qos: m_get_qos.return_value = mock.ANY mock_allocate_vip.return_value = data_models.Vip( ip_address=ip_address, subnet_id=subnet.id, network_id=network.id, port_id=port.id) m_get_network.return_value = network mock_get_port.return_value = port response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(port.id, api_lb.get('vip_port_id')) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(network.id, api_lb.get('vip_network_id')) self.assertEqual(new_qos_policy_id, api_lb.get( 'vip_qos_policy_id')) def test_create_with_non_exist_qos_policy_id(self): subnet = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=uuidutils.generate_uuid()) qos_policy_id = uuidutils.generate_uuid() lb_json = {'vip_subnet_id': subnet.id, 'project_id': self.project_id, 'vip_qos_policy_id': qos_policy_id} body = self._build_body(lb_json) with mock.patch("octavia.network.drivers.noop_driver.driver" ".NoopManager.get_subnet") as mock_get_subnet: with mock.patch("octavia.network.drivers.noop_driver." "driver.NoopManager." "get_qos_policy") as mock_get_qos: mock_get_subnet.return_value = subnet mock_get_qos.side_effect = Exception() response = self.post(self.LBS_PATH, body, status=400) err_msg = "qos_policy %s not found." 
% qos_policy_id self.assertEqual(err_msg, response.json.get('faultstring')) def test_create_with_long_name(self): lb_json = {'name': 'n' * 256, 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=400) self.assertIn('Invalid input for field/attribute name', response.json.get('faultstring')) def test_create_with_long_description(self): lb_json = {'description': 'n' * 256, 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=400) self.assertIn('Invalid input for field/attribute description', response.json.get('faultstring')) def test_create_with_nonuuid_vip_attributes(self): lb_json = {'vip_subnet_id': 'HI', 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=400) self.assertIn('Invalid input for field/attribute vip_subnet_id', response.json.get('faultstring')) def test_create_with_allowed_network_id(self): network_id = uuidutils.generate_uuid() self.conf.config(group="networking", valid_vip_networks=network_id) subnet = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id, ip_version=4) network = network_models.Network(id=network_id, subnets=[subnet.id]) lb_json = {'vip_network_id': network.id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_subnet") as mock_get_subnet: mock_get_network.return_value = network mock_get_subnet.return_value = subnet response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) self.assertEqual(network_id, api_lb.get('vip_network_id')) def test_create_with_disallowed_network_id(self): network_id1 = uuidutils.generate_uuid() network_id2 = uuidutils.generate_uuid() self.conf.config(group="networking", valid_vip_networks=network_id1) subnet = network_models.Subnet(id=uuidutils.generate_uuid(), network_id=network_id2, ip_version=4) network = network_models.Network(id=network_id2, subnets=[subnet.id]) lb_json = {'vip_network_id': network.id, 'project_id': self.project_id} body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_subnet") as mock_get_subnet: mock_get_network.return_value = network mock_get_subnet.return_value = subnet response = self.post(self.LBS_PATH, body, status=400) self.assertIn("Supplied VIP network_id is not allowed", response.json.get('faultstring')) def test_create_with_disallowed_vip_objects(self): self.conf.config(group="networking", allow_vip_network_id=False) self.conf.config(group="networking", allow_vip_subnet_id=False) self.conf.config(group="networking", allow_vip_port_id=False) lb_json = {'vip_network_id': uuidutils.generate_uuid(), 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=400) self.assertIn('use of vip_network_id is disallowed', response.json.get('faultstring')) lb_json = {'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=400) 
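        # The faultstring identifies which disallowed VIP field was supplied,
        # so each assertion below checks for the specific field name.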
self.assertIn('use of vip_subnet_id is disallowed', response.json.get('faultstring')) lb_json = {'vip_port_id': uuidutils.generate_uuid(), 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=400) self.assertIn('use of vip_port_id is disallowed', response.json.get('faultstring')) def test_create_with_project_id(self): project_id = uuidutils.generate_uuid() api_lb = self.test_create(project_id=project_id) self.assertEqual(project_id, api_lb.get('project_id')) def test_create_no_project_id(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid() } lb_json.update(optionals) body = self._build_body(lb_json) self.post(self.LBS_PATH, body, status=400) def test_create_context_project_id(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid() } lb_json.update(optionals) body = self._build_body(lb_json) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) def test_create_authorized(self, **optionals): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': project_id } lb_json.update(optionals) body = self._build_body(lb_json) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self._assert_request_matches_response(lb_json, api_lb) def test_create_not_authorized(self, **optionals): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid() } lb_json.update(optionals) body = self._build_body(lb_json) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.post(self.LBS_PATH, body, status=403) api_lb = response.json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_lb) def test_create_provider_octavia(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'provider': constants.OCTAVIA } lb_json.update(optionals) body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( 'oslo_messaging.get_rpc_transport'), mock.patch( 'oslo_messaging.Target'), mock.patch( 'oslo_messaging.RPCClient'): 
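            # Report the VIP network as having port security enabled; the
            # amphora (octavia) provider requires it, as the companion
            # no_port_sec test below demonstrates.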
mock_get_network.return_value = mock.MagicMock() mock_get_network.return_value.port_security_enabled = True response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_request_matches_response(lb_json, api_lb) return api_lb def test_create_provider_octavia_no_port_sec(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'provider': constants.OCTAVIA } lb_json.update(optionals) body = self._build_body(lb_json) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( 'oslo_messaging.get_rpc_transport'), mock.patch( 'oslo_messaging.Target'), mock.patch( 'oslo_messaging.RPCClient'): mock_get_network.return_value = mock.MagicMock() mock_get_network.return_value.port_security_enabled = False response = self.post(self.LBS_PATH, body, status=500) self.assertIn("Port security must be enabled on the VIP network.", response.json.get('faultstring')) def test_create_provider_bogus(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'provider': 'BOGUS' } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) self.assertIn("Provider 'BOGUS' is not enabled.", response.json.get('faultstring')) def test_create_flavor_bad_type(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'flavor_id': 'BOGUS' } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) self.assertIn("Invalid input for field/attribute flavor_id. Value: " "'BOGUS'. Value should be UUID format", response.json.get('faultstring')) def test_create_flavor_invalid(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'flavor_id': uuidutils.generate_uuid() } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) self.assertIn("Validation failure: Invalid flavor_id.", response.json.get('faultstring')) def test_create_flavor_disabled(self, **optionals): fp = self.create_flavor_profile('test1', 'noop_driver', '{"image": "ubuntu"}') flavor = self.create_flavor('name1', 'description', fp.get('id'), False) test_flavor_id = flavor.get('id') lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'flavor_id': test_flavor_id, } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) ref_faultstring = ('The selected flavor is not allowed in this ' 'deployment: {}'.format(test_flavor_id)) self.assertEqual(ref_faultstring, response.json.get('faultstring')) def test_create_flavor_missing(self, **optionals): fp = self.create_flavor_profile('test1', 'noop_driver', '{"image": "ubuntu"}') flavor = self.create_flavor('name1', 'description', fp.get('id'), True) test_flavor_id = flavor.get('id') lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'flavor_id': test_flavor_id } lb_json.update(optionals) body = self._build_body(lb_json) with mock.patch('octavia.db.repositories.FlavorRepository.' 
'get_flavor_metadata_dict', side_effect=sa_exception.NoResultFound): response = self.post(self.LBS_PATH, body, status=400) self.assertIn("Validation failure: Invalid flavor_id.", response.json.get('faultstring')) def test_create_flavor_no_provider(self, **optionals): fp = self.create_flavor_profile('test1', 'noop_driver', '{"image": "ubuntu"}') flavor = self.create_flavor('name1', 'description', fp.get('id'), True) test_flavor_id = flavor.get('id') lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'flavor_id': test_flavor_id, } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=201) api_lb = response.json.get(self.root_tag) self.assertEqual('noop_driver', api_lb.get('provider')) self.assertEqual(test_flavor_id, api_lb.get('flavor_id')) def test_create_with_availability_zone(self, **optionals): zone_name = 'nova' azp = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) az = self.create_availability_zone(zone_name, 'description', azp.get('id'), True) api_lb = self.test_create(availability_zone=az.get('name')) self.assertEqual(zone_name, api_lb.get('availability_zone')) def test_create_az_disabled(self, **optionals): zone_name = 'nova' azp = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) az = self.create_availability_zone(zone_name, 'description', azp.get('id'), False) lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'availability_zone': az.get('name'), } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) ref_faultstring = ('The selected availability_zone is not allowed in ' 'this deployment: {}'.format(zone_name)) self.assertEqual(ref_faultstring, response.json.get('faultstring')) def test_create_az_missing(self, **optionals): lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'availability_zone': 'bogus-az', } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) ref_faultstring = 'Validation failure: Invalid availability zone.' 
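        # An availability zone name with no matching record is rejected
        # during request validation, before any provider driver is called.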
self.assertEqual(ref_faultstring, response.json.get('faultstring')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_az_unsupported(self, mock_provider): zone_name = 'nova' azp = self.create_availability_zone_profile( 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) az = self.create_availability_zone(zone_name, 'description', azp.get('id'), True) mock_provider.side_effect = NotImplementedError lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'availability_zone': az.get('name'), } body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=501) ref_faultstring = ("Provider \'noop_driver\' does not support a " "requested action: This provider does not support " "availability zones.") self.assertEqual(ref_faultstring, response.json.get('faultstring')) def test_matching_providers(self, **optionals): fp = self.create_flavor_profile('test1', 'noop_driver', '{"image": "ubuntu"}') flavor = self.create_flavor('name1', 'description', fp.get('id'), True) test_flavor_id = flavor.get('id') lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'flavor_id': test_flavor_id, 'provider': 'noop_driver' } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=201) api_lb = response.json.get(self.root_tag) self.assertEqual('noop_driver', api_lb.get('provider')) self.assertEqual(test_flavor_id, api_lb.get('flavor_id')) def test_conflicting_providers(self, **optionals): fp = self.create_flavor_profile('test1', 'noop_driver', '{"image": "ubuntu"}') flavor = self.create_flavor('name1', 'description', fp.get('id'), True) test_flavor_id = flavor.get('id') lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id, 'flavor_id': test_flavor_id, 'provider': 'noop_driver-alt' } lb_json.update(optionals) body = self._build_body(lb_json) response = self.post(self.LBS_PATH, body, status=400) self.assertIn("Flavor '{}' is not compatible with provider " "'noop_driver-alt'".format(test_flavor_id), response.json.get('faultstring')) def test_get_all_admin(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=self.project_id, tags=['test_tag1']) lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project_id, tags=['test_tag2']) lb3 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb3', project_id=project_id, tags=['test_tag3']) response = self.get(self.LBS_PATH) lbs = response.json.get(self.root_tag_list) self.assertEqual(3, len(lbs)) lb_id_names = [(lb.get('id'), lb.get('name'), lb.get('tags')) for lb in lbs] lb1 = lb1.get(self.root_tag) lb2 = lb2.get(self.root_tag) lb3 = lb3.get(self.root_tag) self.assertIn((lb1.get('id'), lb1.get('name'), lb1.get('tags')), lb_id_names) self.assertIn((lb2.get('id'), lb2.get('name'), lb2.get('tags')), lb_id_names) self.assertIn((lb3.get('id'), lb3.get('name'), lb3.get('tags')), lb_id_names) def test_get_all_non_admin(self): project_id = uuidutils.generate_uuid() self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project_id) lb3 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb3', project_id=self.project_id) lb3 = lb3.get(self.root_tag) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') 
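        # Save the configured auth strategy, then switch to keystone so that
        # member-role project scoping is actually enforced for the request
        # below; the original strategy is restored afterwards.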
self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.LBS_PATH) self.conf.config(group='api_settings', auth_strategy=auth_strategy) lbs = response.json.get(self.root_tag_list) self.assertEqual(1, len(lbs)) lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] self.assertIn((lb3.get('id'), lb3.get('name')), lb_id_names) def test_get_all_unscoped_token(self): project_id = uuidutils.generate_uuid() self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project_id) lb3 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb3', project_id=self.project_id) lb3 = lb3.get(self.root_tag) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) with mock.patch.object(octavia.common.context.Context, 'project_id', None): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': None} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): result = self.get(self.LBS_PATH, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, result) def test_get_all_non_admin_global_observer(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project_id) lb3 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb3', project_id=self.project_id) lb1 = lb1.get(self.root_tag) lb2 = lb2.get(self.root_tag) lb3 = lb3.get(self.root_tag) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.KEYSTONE) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_global_observer'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.LBS_PATH) self.conf.config(group='api_settings', auth_strategy=auth_strategy) lbs = response.json.get(self.root_tag_list) self.assertEqual(3, len(lbs)) lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] self.assertIn((lb1.get('id'), 
lb1.get('name')), lb_id_names) self.assertIn((lb2.get('id'), lb2.get('name')), lb_id_names) self.assertIn((lb3.get('id'), lb3.get('name')), lb_id_names) def test_get_all_not_authorized(self): project_id = uuidutils.generate_uuid() self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=self.project_id) self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project_id) self.create_load_balancer(uuidutils.generate_uuid(), name='lb3', project_id=project_id) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) LB_PROJECT_PATH = '{}?project_id={}'.format(self.LBS_PATH, project_id) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): response = self.get(LB_PROJECT_PATH, status=403) api_lb = response.json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_lb) def test_get_all_by_project_id(self): project1_id = uuidutils.generate_uuid() project2_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project1_id) lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project1_id) lb3 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb3', project_id=project2_id) response = self.get(self.LBS_PATH, params={'project_id': project1_id}) lbs = response.json.get(self.root_tag_list) self.assertEqual(2, len(lbs)) lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] lb1 = lb1.get(self.root_tag) lb2 = lb2.get(self.root_tag) lb3 = lb3.get(self.root_tag) self.assertIn((lb1.get('id'), lb1.get('name')), lb_id_names) self.assertIn((lb2.get('id'), lb2.get('name')), lb_id_names) response = self.get(self.LBS_PATH, params={'project_id': project2_id}) lbs = response.json.get(self.root_tag_list) lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] self.assertEqual(1, len(lbs)) self.assertIn((lb3.get('id'), lb3.get('name')), lb_id_names) def test_get_all_sorted(self): self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=self.project_id) self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=self.project_id) self.create_load_balancer(uuidutils.generate_uuid(), name='lb3', project_id=self.project_id) response = self.get(self.LBS_PATH, params={'sort': 'name:desc'}) lbs_desc = response.json.get(self.root_tag_list) response = self.get(self.LBS_PATH, params={'sort': 'name:asc'}) lbs_asc = response.json.get(self.root_tag_list) self.assertEqual(3, len(lbs_desc)) self.assertEqual(3, len(lbs_asc)) lb_id_names_desc = [(lb.get('id'), lb.get('name')) for lb in lbs_desc] lb_id_names_asc = [(lb.get('id'), lb.get('name')) for lb in lbs_asc] self.assertEqual(lb_id_names_asc, list(reversed(lb_id_names_desc))) def test_get_all_sorted_by_vip_ip_address(self): self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=self.project_id, vip_address='198.51.100.2') self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=self.project_id, vip_address='198.51.100.1') self.create_load_balancer(uuidutils.generate_uuid(), name='lb3', project_id=self.project_id, vip_address='198.51.100.3') response = self.get(self.LBS_PATH, params={'sort': 'vip_address:desc'}) lbs_desc = response.json.get(self.root_tag_list) response = self.get(self.LBS_PATH, params={'sort': 'vip_address:asc'}) lbs_asc 
= response.json.get(self.root_tag_list)
        self.assertEqual(3, len(lbs_desc))
        self.assertEqual(3, len(lbs_asc))
        lb_id_names_desc = [(lb.get('id'), lb.get('name')) for lb in lbs_desc]
        lb_id_names_asc = [(lb.get('id'), lb.get('name')) for lb in lbs_asc]
        self.assertEqual(lb_id_names_asc, list(reversed(lb_id_names_desc)))
        self.assertEqual('198.51.100.1', lbs_asc[0][constants.VIP_ADDRESS])
        self.assertEqual('198.51.100.2', lbs_asc[1][constants.VIP_ADDRESS])
        self.assertEqual('198.51.100.3', lbs_asc[2][constants.VIP_ADDRESS])
        self.assertEqual('198.51.100.3', lbs_desc[0][constants.VIP_ADDRESS])
        self.assertEqual('198.51.100.2', lbs_desc[1][constants.VIP_ADDRESS])
        self.assertEqual('198.51.100.1', lbs_desc[2][constants.VIP_ADDRESS])

    def test_get_all_limited(self):
        self.create_load_balancer(uuidutils.generate_uuid(),
                                  name='lb1',
                                  project_id=self.project_id)
        self.create_load_balancer(uuidutils.generate_uuid(),
                                  name='lb2',
                                  project_id=self.project_id)
        self.create_load_balancer(uuidutils.generate_uuid(),
                                  name='lb3',
                                  project_id=self.project_id)
        # First two -- should have 'next' link
        first_two = self.get(self.LBS_PATH, params={'limit': 2}).json
        objs = first_two[self.root_tag_list]
        links = first_two[self.root_tag_links]
        self.assertEqual(2, len(objs))
        self.assertEqual(1, len(links))
        self.assertEqual('next', links[0]['rel'])
        # Third + off the end -- should have previous link
        third = self.get(self.LBS_PATH, params={
            'limit': 2,
            'marker': first_two[self.root_tag_list][1]['id']}).json
        objs = third[self.root_tag_list]
        links = third[self.root_tag_links]
        self.assertEqual(1, len(objs))
        self.assertEqual(1, len(links))
        self.assertEqual('previous', links[0]['rel'])
        # Middle -- should have both links
        middle = self.get(self.LBS_PATH, params={
            'limit': 1,
            'marker': first_two[self.root_tag_list][0]['id']}).json
        objs = middle[self.root_tag_list]
        links = middle[self.root_tag_links]
        self.assertEqual(1, len(objs))
        self.assertEqual(2, len(links))
        self.assertCountEqual(['previous', 'next'],
                              [link['rel'] for link in links])

    def test_get_all_fields_filter(self):
        self.create_load_balancer(uuidutils.generate_uuid(),
                                  name='lb1',
                                  project_id=self.project_id)
        self.create_load_balancer(uuidutils.generate_uuid(),
                                  name='lb2',
                                  project_id=self.project_id)
        self.create_load_balancer(uuidutils.generate_uuid(),
                                  name='lb3',
                                  project_id=self.project_id)
        lbs = self.get(self.LBS_PATH, params={
            'fields': ['id', 'project_id']}).json
        for lb in lbs['loadbalancers']:
            self.assertIn(u'id', lb)
            self.assertIn(u'project_id', lb)
            self.assertNotIn(u'description', lb)

    def test_get_one_fields_filter(self):
        lb1 = self.create_load_balancer(
            uuidutils.generate_uuid(), name='lb1',
            project_id=self.project_id).get(self.root_tag)
        lb = self.get(
            self.LB_PATH.format(lb_id=lb1.get('id')),
            params={'fields': ['id', 'project_id']}).json.get(self.root_tag)
        self.assertIn(u'id', lb)
        self.assertIn(u'project_id', lb)
        self.assertNotIn(u'description', lb)

    def test_get_all_admin_state_up_filter(self):
        self.create_load_balancer(uuidutils.generate_uuid(),
                                  admin_state_up=True,
                                  name='lb1',
                                  project_id=self.project_id)
        self.create_load_balancer(uuidutils.generate_uuid(),
                                  admin_state_up=False,
                                  name='lb2',
                                  project_id=self.project_id)
        lbs = self.get(self.LBS_PATH, params={'admin_state_up': 'false'}).json
        self.assertEqual(1, len(lbs['loadbalancers']))
        self.assertFalse(lbs['loadbalancers'][0]['admin_state_up'])
        self.assertEqual('lb2', lbs['loadbalancers'][0]['name'])

    def test_get_all_filter(self):
        lb1 = self.create_load_balancer(
            uuidutils.generate_uuid(),
            name='lb1',
            project_id=self.project_id,
vip_address='10.0.0.1').get(self.root_tag) self.create_load_balancer( uuidutils.generate_uuid(), name='lb2', project_id=self.project_id).get(self.root_tag) self.create_load_balancer( uuidutils.generate_uuid(), name='lb3', project_id=self.project_id).get(self.root_tag) lbs = self.get(self.LBS_PATH, params={ 'id': lb1['id'], 'vip_address': lb1['vip_address']}).json self.assertEqual(1, len(lbs['loadbalancers'])) self.assertEqual(lb1['id'], lbs['loadbalancers'][0]['id']) def test_get_all_tags_filter(self): lb1 = self.create_load_balancer( uuidutils.generate_uuid(), name='lb1', project_id=self.project_id, vip_address='10.0.0.1', tags=['test_tag1', 'test_tag2'] ).get(self.root_tag) lb2 = self.create_load_balancer( uuidutils.generate_uuid(), name='lb2', project_id=self.project_id, tags=['test_tag2', 'test_tag3'] ).get(self.root_tag) lb3 = self.create_load_balancer( uuidutils.generate_uuid(), name='lb3', project_id=self.project_id, tags=['test_tag4', 'test_tag5'] ).get(self.root_tag) lbs = self.get( self.LBS_PATH, params={'tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(2, len(lbs)) self.assertEqual( [lb1.get('id'), lb2.get('id')], [lb.get('id') for lb in lbs] ) lbs = self.get( self.LBS_PATH, params={'tags': ['test_tag2', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(1, len(lbs)) self.assertEqual( [lb2.get('id')], [lb.get('id') for lb in lbs] ) lbs = self.get( self.LBS_PATH, params={'tags': ['test_tag2,test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(1, len(lbs)) self.assertEqual( [lb2.get('id')], [lb.get('id') for lb in lbs] ) lbs = self.get( self.LBS_PATH, params={'tags-any': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(2, len(lbs)) self.assertEqual( [lb1.get('id'), lb2.get('id')], [lb.get('id') for lb in lbs] ) lbs = self.get( self.LBS_PATH, params={'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(1, len(lbs)) self.assertEqual( [lb3.get('id')], [lb.get('id') for lb in lbs] ) lbs = self.get( self.LBS_PATH, params={'not-tags-any': ['test_tag2', 'test_tag4']} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(0, len(lbs)) lbs = self.get( self.LBS_PATH, params={'tags': 'test_tag2', 'tags-any': ['test_tag1', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(2, len(lbs)) self.assertEqual( [lb1.get('id'), lb2.get('id')], [lb.get('id') for lb in lbs] ) lbs = self.get( self.LBS_PATH, params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(0, len(lbs)) def test_get_all_tags_mixed_filters(self): lb1 = self.create_load_balancer( uuidutils.generate_uuid(), name='lb1', project_id=self.project_id, vip_address='10.0.0.1', tags=['test_tag1', 'test_tag2'] ).get(self.root_tag) self.create_load_balancer( uuidutils.generate_uuid(), name='lb2', project_id=self.project_id, tags=['test_tag2', 'test_tag3'] ).get(self.root_tag) lbs = self.get( self.LBS_PATH, params={'name': 'lb1', 'tags': 'test_tag2', 'vip_address': '10.0.0.1'} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) self.assertEqual(1, len(lbs)) self.assertEqual(lb1.get('id'), lbs[0].get('id')) lbs = self.get( self.LBS_PATH, params={'tags': 'test_tag2', 'vip_address': '10.0.0.1'} ).json.get(self.root_tag_list) self.assertIsInstance(lbs, list) 
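        # The 'tags' filter combines with the other query parameters using
        # AND semantics, so only lb1 (matching tag and VIP address) returns.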
self.assertEqual(1, len(lbs))
        self.assertEqual(lb1.get('id'), lbs[0].get('id'))
        lbs = self.get(
            self.LBS_PATH,
            params={'name': 'lb1', 'tags': 'test_tag1',
                    'vip_address': '10.0.0.1'}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(lbs, list)
        self.assertEqual(1, len(lbs))
        self.assertEqual(lb1.get('id'), lbs[0].get('id'))
        lbs = self.get(
            self.LBS_PATH,
            params={'name': 'lb1', 'tags': 'test_tag3',
                    'vip_address': '10.0.0.1'}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(lbs, list)
        self.assertEqual(0, len(lbs))
        lbs = self.get(
            self.LBS_PATH,
            params={'name': 'bogus-lb', 'tags': 'test_tag2',
                    'vip_address': '10.0.0.1'}
        ).json.get(self.root_tag_list)
        self.assertIsInstance(lbs, list)
        self.assertEqual(0, len(lbs))

    def test_get_all_hides_deleted(self):
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        response = self.get(self.LBS_PATH)
        objects = response.json.get(self.root_tag_list)
        self.assertEqual(len(objects), 1)
        self.set_object_status(self.lb_repo, api_lb.get('id'),
                               provisioning_status=constants.DELETED)
        response = self.get(self.LBS_PATH)
        objects = response.json.get(self.root_tag_list)
        self.assertEqual(len(objects), 0)

    def test_get(self):
        project_id = uuidutils.generate_uuid()
        subnet = network_models.Subnet(id=uuidutils.generate_uuid())
        network = network_models.Network(id=uuidutils.generate_uuid(),
                                         subnets=[subnet])
        port = network_models.Port(id=uuidutils.generate_uuid(),
                                   network_id=network.id)
        with mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_network") as mock_get_network, mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_port") as mock_get_port:
            mock_get_network.return_value = network
            mock_get_port.return_value = port
            lb = self.create_load_balancer(subnet.id,
                                           vip_address='10.0.0.1',
                                           vip_network_id=network.id,
                                           vip_port_id=port.id,
                                           name='lb1',
                                           project_id=project_id,
                                           description='desc1',
                                           admin_state_up=False,
                                           tags=['test_tag'])
        lb_dict = lb.get(self.root_tag)
        response = self.get(
            self.LB_PATH.format(
                lb_id=lb_dict.get('id'))).json.get(self.root_tag)
        self.assertEqual('lb1', response.get('name'))
        self.assertEqual(project_id, response.get('project_id'))
        self.assertEqual('desc1', response.get('description'))
        self.assertFalse(response.get('admin_state_up'))
        self.assertEqual('10.0.0.1', response.get('vip_address'))
        self.assertEqual(subnet.id, response.get('vip_subnet_id'))
        self.assertEqual(network.id, response.get('vip_network_id'))
        self.assertEqual(port.id, response.get('vip_port_id'))
        self.assertEqual(['test_tag'], response.get('tags'))

    def test_get_deleted_gives_404(self):
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        self.set_object_status(self.lb_repo, api_lb.get('id'),
                               provisioning_status=constants.DELETED)
        self.get(self.LB_PATH.format(lb_id=api_lb.get('id')), status=404)

    def test_get_bad_lb_id(self):
        path = self.LB_PATH.format(lb_id='SEAN-CONNERY')
        self.get(path, status=404)

    def test_get_authorized(self):
        project_id = uuidutils.generate_uuid()
        subnet = network_models.Subnet(id=uuidutils.generate_uuid())
        network = network_models.Network(id=uuidutils.generate_uuid(),
                                         subnets=[subnet])
        port = network_models.Port(id=uuidutils.generate_uuid(),
                                   network_id=network.id)
        with mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_network") as mock_get_network,
mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port: mock_get_network.return_value = network mock_get_port.return_value = port lb = self.create_load_balancer(subnet.id, vip_address='10.0.0.1', vip_network_id=network.id, vip_port_id=port.id, name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.LB_PATH.format( lb_id=lb_dict.get('id'))).json.get(self.root_tag) self.assertEqual('lb1', response.get('name')) self.assertEqual(project_id, response.get('project_id')) self.assertEqual('desc1', response.get('description')) self.assertFalse(response.get('admin_state_up')) self.assertEqual('10.0.0.1', response.get('vip_address')) self.assertEqual(subnet.id, response.get('vip_subnet_id')) self.assertEqual(network.id, response.get('vip_network_id')) self.assertEqual(port.id, response.get('vip_port_id')) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_get_not_authorized(self): project_id = uuidutils.generate_uuid() subnet = network_models.Subnet(id=uuidutils.generate_uuid()) network = network_models.Network(id=uuidutils.generate_uuid(), subnets=[subnet]) port = network_models.Port(id=uuidutils.generate_uuid(), network_id=network.id) with mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_network") as mock_get_network, mock.patch( "octavia.network.drivers.noop_driver.driver.NoopManager" ".get_port") as mock_get_port: mock_get_network.return_value = network mock_get_port.return_value = port lb = self.create_load_balancer(subnet.id, vip_address='10.0.0.1', vip_network_id=network.id, vip_port_id=port.id, name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=403) api_lb = response.json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_lb) def test_create_over_quota(self): self.start_quota_mock(data_models.LoadBalancer) lb_json = {'name': 'test1', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id} body = self._build_body(lb_json) self.post(self.LBS_PATH, body, status=403) def test_update(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False, tags=['test_tag1']) lb_dict = lb.get(self.root_tag) 
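        # A load balancer must leave PENDING_CREATE (via set_lb_status below)
        # before the API accepts an update; the pending_create test above
        # shows the 409 returned otherwise.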
lb_json = self._build_body({'name': 'lb2', 'tags': ['test_tag2']}) lb = self.set_lb_status(lb_dict.get('id')) response = self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) api_lb = response.json.get(self.root_tag) self.assertIsNotNone(api_lb.get('vip_subnet_id')) self.assertEqual('lb2', api_lb.get('name')) self.assertEqual(['test_tag2'], api_lb.get('tags')) self.assertEqual(project_id, api_lb.get('project_id')) self.assertEqual('desc1', api_lb.get('description')) self.assertFalse(api_lb.get('admin_state_up')) self.assertIsNotNone(api_lb.get('created_at')) self.assertIsNotNone(api_lb.get('updated_at')) self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, constants.PENDING_UPDATE) def test_update_delete_tag(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, admin_state_up=False, tags=['test_tag1'],) lb_dict = lb.get(self.root_tag) lb_json = self._build_body({'tags': []}) self.set_lb_status(lb_dict.get('id')) response = self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) api_lb = response.json.get(self.root_tag) self.assertEqual([], api_lb.get('tags')) def test_update_with_vip(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb_json = self._build_body({'vip_subnet_id': '1234'}) lb = self.set_lb_status(lb_dict.get('id')) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=400) def test_update_with_qos(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer( uuidutils.generate_uuid(), name='lb1', project_id=project_id, vip_qos_policy_id=uuidutils.generate_uuid()) lb_dict = lb.get(self.root_tag) self.set_lb_status(lb_dict.get('id')) lb_json = self._build_body( {'vip_qos_policy_id': uuidutils.generate_uuid()}) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=200) def test_update_with_bad_qos(self): project_id = uuidutils.generate_uuid() vip_qos_policy_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, vip_qos_policy_id=vip_qos_policy_id) lb_dict = lb.get(self.root_tag) lb_json = self._build_body({'vip_qos_policy_id': 'BAD'}) self.set_lb_status(lb_dict.get('id')) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=400) def test_update_with_qos_ext_disabled(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb_dict = lb.get(self.root_tag) self.set_lb_status(lb_dict.get('id')) vip_qos_policy_id = uuidutils.generate_uuid() lb_json = self._build_body({'vip_qos_policy_id': vip_qos_policy_id}) with mock.patch("octavia.network.drivers.noop_driver.driver" ".NoopManager.qos_enabled", return_value=False): self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=400) def test_update_bad_lb_id(self): path = self.LB_PATH.format(lb_id='SEAN-CONNERY') self.put(path, body={}, status=404) def test_update_pending_create(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb_json = self._build_body({'name': 'Roberto'}) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=409) def 
test_update_authorized(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb_json = self._build_body({'name': 'lb2'}) lb = self.set_lb_status(lb_dict.get('id')) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.put( self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) api_lb = response.json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertIsNotNone(api_lb.get('vip_subnet_id')) self.assertEqual('lb2', api_lb.get('name')) self.assertEqual(project_id, api_lb.get('project_id')) self.assertEqual('desc1', api_lb.get('description')) self.assertFalse(api_lb.get('admin_state_up')) self.assertIsNotNone(api_lb.get('created_at')) self.assertIsNotNone(api_lb.get('updated_at')) self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, constants.PENDING_UPDATE) def test_update_not_authorized(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb_json = self._build_body({'name': 'lb2'}) lb = self.set_lb_status(lb_dict.get('id')) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=403) api_lb = response.json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_lb) self.assert_correct_lb_status(lb_dict.get('id'), constants.ONLINE, constants.ACTIVE) def test_delete_pending_create(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=409) def test_update_pending_update(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb_json = self._build_body({'name': 'Bob'}) lb = self.set_lb_status(lb_dict.get('id')) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=409) def test_delete_pending_update(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', 
project_id=project_id, description='desc1', admin_state_up=False) lb_json = self._build_body({'name': 'Steve'}) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=409) def test_delete_with_error_status(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id'), status=constants.ERROR) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=204) def test_update_pending_delete(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id'))) lb_json = self._build_body({'name': 'John'}) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=409) def test_delete_pending_delete(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id'))) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=409) def test_update_already_deleted(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id'), status=constants.DELETED) lb_json = self._build_body({'name': 'John'}) self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, status=404) def test_delete_already_deleted(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id'), status=constants.DELETED) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=404) def test_delete(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id'))) response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id'))) api_lb = response.json.get(self.root_tag) self.assertEqual('lb1', api_lb.get('name')) self.assertEqual('desc1', api_lb.get('description')) self.assertEqual(project_id, api_lb.get('project_id')) self.assertFalse(api_lb.get('admin_state_up')) self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, constants.PENDING_DELETE) def test_delete_authorized(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') 
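# NOTE: the *_authorized / *_not_authorized tests in this module all
# follow the fixture pattern used here: save the configured
# auth_strategy, switch it to TESTING so the policy engine actually
# evaluates the (mocked) credentials, then restore the saved value. A
# minimal sketch of the pattern, using only names already defined in
# this module:
#
#     conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
#     saved_strategy = conf.conf.api_settings.get('auth_strategy')
#     conf.config(group='api_settings', auth_strategy=constants.TESTING)
#     try:
#         ...  # issue the request under mocked context credentials
#     finally:
#         conf.config(group='api_settings', auth_strategy=saved_strategy)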
self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id'))) self.conf.config(group='api_settings', auth_strategy=auth_strategy) response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id'))) api_lb = response.json.get(self.root_tag) self.assertEqual('lb1', api_lb.get('name')) self.assertEqual('desc1', api_lb.get('description')) self.assertEqual(project_id, api_lb.get('project_id')) self.assertFalse(api_lb.get('admin_state_up')) self.assertEqual(constants.ONLINE, api_lb.get('operating_status')) self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, constants.PENDING_DELETE) def test_delete_not_authorized(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id'))) api_lb = response.json.get(self.root_tag) self.assertEqual('lb1', api_lb.get('name')) self.assertEqual('desc1', api_lb.get('description')) self.assertEqual(project_id, api_lb.get('project_id')) self.assertFalse(api_lb.get('admin_state_up')) self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, constants.ACTIVE) def test_delete_fails_with_pool(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1').get(self.root_tag) lb_id = lb.get('id') self.set_lb_status(lb_id) self.create_pool( lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN) self.set_lb_status(lb_id) self.delete(self.LB_PATH.format(lb_id=lb_id), status=400) self.assert_correct_status(lb_id=lb_id) def test_delete_fails_with_listener(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1').get(self.root_tag) lb_id = lb.get('id') self.set_lb_status(lb_id) self.create_listener(constants.PROTOCOL_HTTP, 80, lb_id) self.set_lb_status(lb_id) self.delete(self.LB_PATH.format(lb_id=lb_id), status=400) self.assert_correct_status(lb_id=lb_id) def test_cascade_delete(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1').get(self.root_tag) lb_id = lb.get('id') self.set_lb_status(lb_id) listener = self.create_listener( constants.PROTOCOL_HTTP, 80, 
lb_id).get('listener') listener_id = listener.get('id') self.set_lb_status(lb_id) self.create_pool( lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=listener_id) self.set_lb_status(lb_id) self.delete(self.LB_PATH.format(lb_id=lb_id), params={'cascade': "true"}) def test_delete_bad_lb_id(self): path = self.LB_PATH.format(lb_id='bad_uuid') self.delete(path, status=404) def test_failover(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) self.app.put(self._get_full_path( self.LB_PATH.format(lb_id=lb_dict.get('id')) + "/failover"), status=202) def test_failover_pending(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id'), status=constants.PENDING_UPDATE) self.app.put(self._get_full_path( self.LB_PATH.format(lb_id=lb_dict.get('id')) + "/failover"), status=409) def test_failover_error(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id'), status=constants.ERROR) self.app.put(self._get_full_path( self.LB_PATH.format(lb_id=lb_dict.get('id')) + "/failover"), status=202) def test_failover_not_authorized(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) path = self._get_full_path(self.LB_PATH.format( lb_id=lb_dict.get('id')) + "/failover") self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.app.put(path, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_failover_not_authorized_no_role(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) path = self._get_full_path(self.LB_PATH.format( lb_id=lb_dict.get('id')) + "/failover") self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': [], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", 
return_value=override_credentials): response = self.app.put(path, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_failover_authorized_lb_admin(self): project_id = uuidutils.generate_uuid() project_id_2 = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) path = self._get_full_path(self.LB_PATH.format( lb_id=lb_dict.get('id')) + "/failover") self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id_2): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_admin'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id_2} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.app.put(path, status=202) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_failover_authorized_no_auth(self): project_id = uuidutils.generate_uuid() project_id_2 = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) path = self._get_full_path(self.LB_PATH.format( lb_id=lb_dict.get('id')) + "/failover") self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id_2): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id_2} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.app.put(path, status=202) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_failover_deleted(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id, description='desc1', admin_state_up=False) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id'), status=constants.DELETED) path = self._get_full_path(self.LB_PATH.format( lb_id=lb_dict.get('id')) + "/failover") self.app.put(path, status=404) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_with_bad_provider(self, mock_provider): mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') lb_json = {'name': 'test-lb', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=500) self.assertIn('Provider 
\'bad_driver\' reports error: broken', response.json.get('faultstring')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_update_with_bad_provider(self, mock_provider): api_lb = self.create_load_balancer( uuidutils.generate_uuid()).get(self.root_tag) self.set_lb_status(lb_id=api_lb.get('id')) new_listener = {'name': 'new_name'} mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')), self._build_body(new_listener), status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_with_bad_provider(self, mock_provider): api_lb = self.create_load_balancer( uuidutils.generate_uuid()).get(self.root_tag) self.set_lb_status(lb_id=api_lb.get('id')) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_lb['provisioning_status'] = constants.ACTIVE api_lb['operating_status'] = constants.ONLINE response = self.get(self.LB_PATH.format( lb_id=api_lb.get('id'))).json.get(self.root_tag) self.assertIsNone(api_lb.pop('updated_at')) self.assertIsNotNone(response.pop('updated_at')) self.assertEqual(api_lb, response) mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=500) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_with_provider_not_implemented(self, mock_provider): mock_provider.side_effect = exceptions.ProviderNotImplementedError( prov='bad_driver', user_msg='broken') lb_json = {'name': 'test-lb', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=501) self.assertIn('Provider \'bad_driver\' does not support a requested ' 'action: broken', response.json.get('faultstring')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_update_with_provider_not_implemented(self, mock_provider): api_lb = self.create_load_balancer( uuidutils.generate_uuid()).get(self.root_tag) self.set_lb_status(lb_id=api_lb.get('id')) new_listener = {'name': 'new_name'} mock_provider.side_effect = exceptions.ProviderNotImplementedError( prov='bad_driver', user_msg='broken') response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')), self._build_body(new_listener), status=501) self.assertIn('Provider \'bad_driver\' does not support a requested ' 'action: broken', response.json.get('faultstring')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_with_provider_not_implemented(self, mock_provider): api_lb = self.create_load_balancer( uuidutils.generate_uuid()).get(self.root_tag) self.set_lb_status(lb_id=api_lb.get('id')) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_lb['provisioning_status'] = constants.ACTIVE api_lb['operating_status'] = constants.ONLINE response = self.get(self.LB_PATH.format( lb_id=api_lb.get('id'))).json.get(self.root_tag) self.assertIsNone(api_lb.pop('updated_at')) self.assertIsNotNone(response.pop('updated_at')) self.assertEqual(api_lb, response) mock_provider.side_effect = exceptions.ProviderNotImplementedError( prov='bad_driver', user_msg='broken') self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=501) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_with_provider_unsupport_option(self, mock_provider): mock_provider.side_effect = 
exceptions.ProviderUnsupportedOptionError( prov='bad_driver', user_msg='broken') lb_json = {'name': 'test-lb', 'vip_subnet_id': uuidutils.generate_uuid(), 'project_id': self.project_id} response = self.post(self.LBS_PATH, self._build_body(lb_json), status=501) self.assertIn('Provider \'bad_driver\' does not support a requested ' 'option: broken', response.json.get('faultstring')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_update_with_provider_unsupport_option(self, mock_provider): api_lb = self.create_load_balancer( uuidutils.generate_uuid()).get(self.root_tag) self.set_lb_status(lb_id=api_lb.get('id')) new_listener = {'name': 'new_name'} mock_provider.side_effect = exceptions.ProviderUnsupportedOptionError( prov='bad_driver', user_msg='broken') response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')), self._build_body(new_listener), status=501) self.assertIn('Provider \'bad_driver\' does not support a requested ' 'option: broken', response.json.get('faultstring')) @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_with_provider_unsupport_option(self, mock_provider): api_lb = self.create_load_balancer( uuidutils.generate_uuid()).get(self.root_tag) self.set_lb_status(lb_id=api_lb.get('id')) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_lb['provisioning_status'] = constants.ACTIVE api_lb['operating_status'] = constants.ONLINE response = self.get(self.LB_PATH.format( lb_id=api_lb.get('id'))).json.get(self.root_tag) self.assertIsNone(api_lb.pop('updated_at')) self.assertIsNotNone(response.pop('updated_at')) self.assertEqual(api_lb, response) mock_provider.side_effect = exceptions.ProviderUnsupportedOptionError( prov='bad_driver', user_msg='broken') self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=501) class TestLoadBalancerGraph(base.BaseAPITest): root_tag = 'loadbalancer' def setUp(self): super(TestLoadBalancerGraph, self).setUp() self._project_id = uuidutils.generate_uuid() def _build_body(self, json): return {self.root_tag: json} def _assert_graphs_equal(self, expected_graph, observed_graph): observed_graph_copy = copy.deepcopy(observed_graph) del observed_graph_copy['created_at'] del observed_graph_copy['updated_at'] self.assertEqual(observed_graph_copy['project_id'], observed_graph_copy.pop('tenant_id')) obs_lb_id = observed_graph_copy.pop('id') self.assertTrue(uuidutils.is_uuid_like(obs_lb_id)) expected_listeners = expected_graph.pop('listeners', []) observed_listeners = observed_graph_copy.pop('listeners', []) expected_pools = expected_graph.pop('pools', []) observed_pools = observed_graph_copy.pop('pools', []) self.assertEqual(expected_graph, observed_graph_copy) self.assertEqual(len(expected_pools), len(observed_pools)) self.assertEqual(len(expected_listeners), len(observed_listeners)) for observed_listener in observed_listeners: del observed_listener['created_at'] del observed_listener['updated_at'] self.assertEqual(observed_listener['project_id'], observed_listener.pop('tenant_id')) self.assertTrue(uuidutils.is_uuid_like( observed_listener.pop('id'))) if observed_listener.get('default_pool_id'): self.assertTrue(uuidutils.is_uuid_like( observed_listener.pop('default_pool_id'))) default_pool = observed_listener.get('default_pool') if default_pool: observed_listener.pop('default_pool_id') self.assertTrue(default_pool.get('id')) default_pool.pop('id') default_pool.pop('created_at') default_pool.pop('updated_at') hm = default_pool.get('health_monitor') if hm: self.assertTrue(hm.get('id')) 
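# NOTE: the normalization in this helper follows one idiom throughout:
# server-generated fields (ids, created_at/updated_at) are asserted to
# look sane and then popped, so the final equality checks only compare
# the deterministic parts of the graph. Roughly, per observed object:
#
#     assert uuidutils.is_uuid_like(observed.pop('id'))
#     observed.pop('created_at')
#     observed.pop('updated_at')
#     assert observed == expected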
hm.pop('id') for member in default_pool.get('members', []): self.assertTrue(member.get('id')) member.pop('id') member.pop('created_at') member.pop('updated_at') if observed_listener.get('sni_containers'): observed_listener['sni_containers'].sort() o_l7policies = observed_listener.get('l7policies') if o_l7policies: for o_l7policy in o_l7policies: o_l7policy.pop('created_at') o_l7policy.pop('updated_at') self.assertEqual(o_l7policy['project_id'], o_l7policy.pop('tenant_id')) if o_l7policy.get('redirect_pool_id'): r_pool_id = o_l7policy.pop('redirect_pool_id') self.assertTrue(uuidutils.is_uuid_like(r_pool_id)) o_l7policy_id = o_l7policy.pop('id') self.assertTrue(uuidutils.is_uuid_like(o_l7policy_id)) o_l7policy_l_id = o_l7policy.pop('listener_id') self.assertTrue(uuidutils.is_uuid_like(o_l7policy_l_id)) l7rules = o_l7policy.get('rules') or [] for l7rule in l7rules: l7rule.pop('created_at') l7rule.pop('updated_at') self.assertEqual(l7rule['project_id'], l7rule.pop('tenant_id')) self.assertTrue(l7rule.pop('id')) self.assertIn(observed_listener, expected_listeners) def _get_lb_bodies(self, create_listeners, expected_listeners, create_pools=None): create_lb = { 'name': 'lb1', 'project_id': self._project_id, 'vip_subnet_id': uuidutils.generate_uuid(), 'vip_port_id': uuidutils.generate_uuid(), 'vip_address': '198.51.100.10', 'provider': 'noop_driver', 'listeners': create_listeners, 'pools': create_pools or [] } expected_lb = { 'description': '', 'admin_state_up': True, 'availability_zone': None, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, # TODO(rm_work): vip_network_id is a special case: the API replaces it # with the value from the VIP port, which the noop network driver # generates freshly each time, so there is no sane way to pin it for # this test without disturbing a lot of other machinery. Since the # value is expected to be overwritten anyway, mock.ANY is acceptable. 
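# mock.ANY compares equal to any value, so the graph comparison
# tolerates whichever network id the noop driver generated. For any
# generated uuid the following comparison holds:
#
#     {'vip_network_id': uuidutils.generate_uuid()} == \
#         {'vip_network_id': mock.ANY}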
'vip_network_id': mock.ANY, 'vip_qos_policy_id': None, 'flavor_id': None, 'provider': 'noop_driver', 'tags': [], } expected_lb.update(create_lb) expected_lb['listeners'] = expected_listeners expected_lb['pools'] = create_pools or [] return create_lb, expected_lb def _get_listener_bodies( self, name='listener1', protocol_port=80, create_default_pool_name=None, create_default_pool_id=None, create_l7policies=None, expected_l7policies=None, create_sni_containers=None, expected_sni_containers=None, create_client_ca_tls_container=None, expected_client_ca_tls_container=None, create_protocol=constants.PROTOCOL_HTTP, create_client_authentication=None, expected_client_authentication=constants.CLIENT_AUTH_NONE, create_client_crl_container=None, expected_client_crl_container=None, create_allowed_cidrs=None, expected_allowed_cidrs=None, create_timeout_client_data=None, expected_timeout_client_data=None, create_timeout_member_connect=None, expected_timeout_member_connect=None, create_timeout_member_data=None, expected_timeout_member_data=None, create_timeout_tcp_inspect=None, expected_timeout_tcp_inspect=None): create_listener = { 'name': name, 'protocol_port': protocol_port, 'protocol': create_protocol } expected_listener = { 'description': '', 'default_tls_container_ref': None, 'sni_container_refs': [], 'connection_limit': constants.DEFAULT_CONNECTION_LIMIT, 'admin_state_up': True, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, 'insert_headers': {}, 'project_id': self._project_id, 'timeout_client_data': constants.DEFAULT_TIMEOUT_CLIENT_DATA, 'timeout_member_connect': constants.DEFAULT_TIMEOUT_MEMBER_CONNECT, 'timeout_member_data': constants.DEFAULT_TIMEOUT_MEMBER_DATA, 'timeout_tcp_inspect': constants.DEFAULT_TIMEOUT_TCP_INSPECT, 'tags': [], 'client_ca_tls_container_ref': None, 'client_authentication': constants.CLIENT_AUTH_NONE, 'client_crl_container_ref': None, 'allowed_cidrs': None, 'tls_ciphers': None } if create_sni_containers: create_listener['sni_container_refs'] = create_sni_containers expected_listener.update(create_listener) if create_default_pool_name: pool = {'name': create_default_pool_name} create_listener['default_pool'] = pool elif create_default_pool_id: create_listener['default_pool_id'] = create_default_pool_id expected_listener['default_pool_id'] = create_default_pool_id else: expected_listener['default_pool_id'] = None if create_l7policies: l7policies = create_l7policies create_listener['l7policies'] = l7policies if create_client_ca_tls_container: create_listener['client_ca_tls_container_ref'] = ( create_client_ca_tls_container) if create_client_authentication: create_listener['client_authentication'] = ( create_client_authentication) if create_client_crl_container: create_listener['client_crl_container_ref'] = ( create_client_crl_container) if create_allowed_cidrs: create_listener['allowed_cidrs'] = create_allowed_cidrs if expected_sni_containers: expected_listener['sni_container_refs'] = expected_sni_containers if expected_l7policies: expected_listener['l7policies'] = expected_l7policies else: expected_listener['l7policies'] = [] if expected_client_ca_tls_container: expected_listener['client_ca_tls_container_ref'] = ( expected_client_ca_tls_container) expected_listener['client_authentication'] = ( constants.CLIENT_AUTH_NONE) if expected_client_authentication: expected_listener[ 'client_authentication'] = expected_client_authentication if expected_client_crl_container: expected_listener['client_crl_container_ref'] = ( 
expected_client_crl_container) if expected_allowed_cidrs: expected_listener['allowed_cidrs'] = expected_allowed_cidrs if create_protocol == constants.PROTOCOL_TERMINATED_HTTPS: expected_listener['tls_ciphers'] = constants.CIPHERS_OWASP_SUITE_B if create_timeout_client_data is not None: create_listener['timeout_client_data'] = ( create_timeout_client_data) if expected_timeout_client_data is not None: expected_listener['timeout_client_data'] = ( expected_timeout_client_data) if create_timeout_member_connect is not None: create_listener['timeout_member_connect'] = ( create_timeout_member_connect) if expected_timeout_member_connect is not None: expected_listener['timeout_member_connect'] = ( expected_timeout_member_connect) if create_timeout_member_data is not None: create_listener['timeout_member_data'] = ( create_timeout_member_data) if expected_timeout_member_data is not None: expected_listener['timeout_member_data'] = ( expected_timeout_member_data) if create_timeout_tcp_inspect is not None: create_listener['timeout_tcp_inspect'] = ( create_timeout_tcp_inspect) if expected_timeout_tcp_inspect is not None: expected_listener['timeout_tcp_inspect'] = ( expected_timeout_tcp_inspect) return create_listener, expected_listener def _get_pool_bodies(self, name='pool1', create_members=None, expected_members=None, create_hm=None, expected_hm=None, protocol=constants.PROTOCOL_HTTP, session_persistence=True): create_pool = { 'name': name, 'protocol': protocol, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, } if session_persistence: create_pool['session_persistence'] = { 'type': constants.SESSION_PERSISTENCE_SOURCE_IP, 'cookie_name': None} if create_members: create_pool['members'] = create_members if create_hm: create_pool['healthmonitor'] = create_hm expected_pool = { 'description': None, 'session_persistence': None, 'members': [], 'enabled': True, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, 'project_id': self._project_id, 'tags': [] } expected_pool.update(create_pool) if expected_members: expected_pool['members'] = expected_members if expected_hm: expected_pool['healthmonitor'] = expected_hm return create_pool, expected_pool def _get_member_bodies(self, protocol_port=80): create_member = { 'address': '10.0.0.1', 'protocol_port': protocol_port } expected_member = { 'weight': 1, 'enabled': True, 'subnet_id': None, 'operating_status': constants.OFFLINE, 'project_id': self._project_id, 'tags': [] } expected_member.update(create_member) return create_member, expected_member def _get_hm_bodies(self, hm_type=constants.HEALTH_MONITOR_PING, delay=1): if hm_type == constants.HEALTH_MONITOR_UDP_CONNECT: create_hm = { 'type': constants.HEALTH_MONITOR_UDP_CONNECT, 'delay': delay, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1 } expected_hm = { 'admin_state_up': True, 'project_id': self._project_id, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, 'tags': [] } elif hm_type == constants.HEALTH_MONITOR_HTTP: create_hm = { 'type': constants.HEALTH_MONITOR_HTTP, 'delay': delay, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1 } expected_hm = { 'http_method': 'GET', 'url_path': '/', 'expected_codes': '200', 'admin_state_up': True, 'project_id': self._project_id, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, 'tags': [] } else: create_hm = { 'type': constants.HEALTH_MONITOR_PING, 'delay': delay, 'timeout': 1, 'max_retries_down': 1, 'max_retries': 1 } expected_hm = { 
'admin_state_up': True, 'project_id': self._project_id, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, 'tags': [] } expected_hm.update(create_hm) return create_hm, expected_hm def _get_sni_container_bodies(self): create_sni_container1 = uuidutils.generate_uuid() create_sni_container2 = uuidutils.generate_uuid() create_sni_containers = [create_sni_container1, create_sni_container2] expected_sni_containers = [create_sni_container1, create_sni_container2] expected_sni_containers.sort() return create_sni_containers, expected_sni_containers def _get_l7policies_bodies(self, create_pool_name=None, create_pool_id=None, create_l7rules=None, expected_l7rules=None): create_l7policies = [] if create_pool_name: create_l7policy = { 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool': {'name': create_pool_name}, 'position': 1, 'admin_state_up': False } else: create_l7policy = { 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://127.0.0.1/', 'position': 1, 'redirect_http_code': 302, 'admin_state_up': False } create_l7policies.append(create_l7policy) expected_l7policy = { 'name': '', 'description': '', 'redirect_http_code': None, 'redirect_url': None, 'redirect_prefix': None, 'rules': [], 'project_id': self._project_id, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, 'tags': [] } expected_l7policy.update(create_l7policy) expected_l7policy.pop('redirect_pool', None) expected_l7policies = [] if not create_pool_name: expected_l7policy['redirect_pool_id'] = create_pool_id expected_l7policies.append(expected_l7policy) if expected_l7rules: expected_l7policies[0]['rules'] = expected_l7rules if create_l7rules: create_l7policies[0]['rules'] = create_l7rules return create_l7policies, expected_l7policies def _get_l7rules_bodies(self, value="localhost"): create_l7rules = [{ 'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'value': value, 'invert': False, 'admin_state_up': True }] expected_l7rules = [{ 'key': None, 'project_id': self._project_id, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, 'tags': [] }] expected_l7rules[0].update(create_l7rules[0]) return create_l7rules, expected_l7rules def test_with_one_listener(self): create_listener, expected_listener = self._get_listener_bodies() create_lb, expected_lb = self._get_lb_bodies([create_listener], [expected_listener]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_one_listener_with_default_timeouts(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group='haproxy_amphora', timeout_client_data=20) self.conf.config(group='haproxy_amphora', timeout_member_connect=21) self.conf.config(group='haproxy_amphora', timeout_member_data=constants.MIN_TIMEOUT) self.conf.config(group='haproxy_amphora', timeout_tcp_inspect=constants.MAX_TIMEOUT) create_listener, expected_listener = self._get_listener_bodies( expected_timeout_client_data=20, expected_timeout_member_connect=21, expected_timeout_member_data=constants.MIN_TIMEOUT, expected_timeout_tcp_inspect=constants.MAX_TIMEOUT) create_lb, expected_lb = self._get_lb_bodies([create_listener], [expected_listener]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) 
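# NOTE: the four timeouts asserted through expected_listener above are
# not sent in the request; the API fills them in from the
# [haproxy_amphora] config group, which is why overriding that group
# via the oslo fixture is the only setup this test needs. Sketch of
# the override pattern (values are arbitrary test inputs):
#
#     conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
#     conf.config(group='haproxy_amphora', timeout_client_data=20)
#     # a subsequent POST to self.LBS_PATH returns 20 as the default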
self._assert_graphs_equal(expected_lb, api_lb) def test_with_many_listeners(self): create_listener1, expected_listener1 = self._get_listener_bodies() create_listener2, expected_listener2 = self._get_listener_bodies( name='listener2', protocol_port=81) create_lb, expected_lb = self._get_lb_bodies( [create_listener1, create_listener2], [expected_listener1, expected_listener2]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_one_listener_one_pool(self): create_pool, expected_pool = self._get_pool_bodies() create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool['name']) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_many_listeners_one_pool(self): create_pool1, expected_pool1 = self._get_pool_bodies() create_pool2, expected_pool2 = self._get_pool_bodies(name='pool2') create_listener1, expected_listener1 = self._get_listener_bodies( create_default_pool_name=create_pool1['name']) create_listener2, expected_listener2 = self._get_listener_bodies( create_default_pool_name=create_pool2['name'], name='listener2', protocol_port=81) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener1, create_listener2], expected_listeners=[expected_listener1, expected_listener2], create_pools=[create_pool1, create_pool2]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_one_listener_one_member(self): create_member, expected_member = self._get_member_bodies() create_pool, expected_pool = self._get_pool_bodies( create_members=[create_member], expected_members=[expected_member]) create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool['name']) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_one_listener_one_hm(self): create_hm, expected_hm = self._get_hm_bodies() create_pool, expected_pool = self._get_pool_bodies( create_hm=create_hm, expected_hm=expected_hm) create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool['name']) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_one_listener_one_hm_udp(self): create_hm, expected_hm = self._get_hm_bodies( hm_type=constants.HEALTH_MONITOR_UDP_CONNECT, delay=3) create_pool, expected_pool = self._get_pool_bodies( create_hm=create_hm, expected_hm=expected_hm, protocol=constants.PROTOCOL_UDP) create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool['name'], create_protocol=constants.PROTOCOL_UDP) 
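# NOTE: a UDP-CONNECT monitor's delay is validated against a configured
# minimum interval; delay=3 passes under the test defaults, while the
# companion *_validation_failure test below sends delay=1 and expects a
# 400 whose faultstring contains "request delay value 1 should be
# larger". The exact threshold comes from an api_settings option, so 3
# is only known-good for the default test configuration (assumption:
# the default minimum is 3).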
create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_one_listener_one_hm_udp_validation_failure(self): create_hm, expected_hm = self._get_hm_bodies( hm_type=constants.HEALTH_MONITOR_UDP_CONNECT, delay=1) create_pool, expected_pool = self._get_pool_bodies( create_hm=create_hm, expected_hm=expected_hm, protocol=constants.PROTOCOL_UDP) create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool['name'], create_protocol=constants.PROTOCOL_UDP) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body, status=400, expect_errors=True) error_text = response.json.get('faultstring') self.assertIn('request delay value 1 should be larger', error_text) def test_with_one_listener_allowed_cidrs(self): allowed_cidrs = ['10.0.1.0/24', '172.16.0.0/16'] create_listener, expected_listener = self._get_listener_bodies( create_allowed_cidrs=allowed_cidrs, expected_allowed_cidrs=allowed_cidrs) create_lb, expected_lb = self._get_lb_bodies([create_listener], [expected_listener]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_with_one_listener_sni_containers(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} create_sni_containers, expected_sni_containers = ( self._get_sni_container_bodies()) create_listener, expected_listener = self._get_listener_bodies( create_protocol=constants.PROTOCOL_TERMINATED_HTTPS, create_sni_containers=create_sni_containers, expected_sni_containers=expected_sni_containers) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) @mock.patch('cryptography.hazmat.backends.default_backend') @mock.patch('cryptography.x509.load_pem_x509_crl') @mock.patch('cryptography.x509.load_pem_x509_certificate') @mock.patch('octavia.api.drivers.utils._get_secret_data') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_with_full_listener_certs(self, mock_cert_data, mock_get_secret, mock_x509_cert, mock_x509_crl, mock_backend): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_get_secret.side_effect = ['ca cert', 'X509 CRL FILE'] mock_cert_data.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} cert_mock = mock.MagicMock() mock_x509_cert.return_value = cert_mock create_client_ca_tls_container, create_client_crl_container = ( uuidutils.generate_uuid(), 
uuidutils.generate_uuid()) expected_client_ca_tls_container = create_client_ca_tls_container create_client_authentication = constants.CLIENT_AUTH_MANDATORY expected_client_authentication = constants.CLIENT_AUTH_MANDATORY expected_client_crl_container = create_client_crl_container create_sni_containers, expected_sni_containers = ( self._get_sni_container_bodies()) create_listener, expected_listener = self._get_listener_bodies( create_protocol=constants.PROTOCOL_TERMINATED_HTTPS, create_sni_containers=create_sni_containers, expected_sni_containers=expected_sni_containers, create_client_ca_tls_container=create_client_ca_tls_container, expected_client_ca_tls_container=expected_client_ca_tls_container, create_client_authentication=create_client_authentication, expected_client_authentication=expected_client_authentication, create_client_crl_container=create_client_crl_container, expected_client_crl_container=expected_client_crl_container) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_l7policy_redirect_pool_no_rule(self): create_pool, expected_pool = self._get_pool_bodies(create_members=[], expected_members=[]) create_l7policies, expected_l7policies = self._get_l7policies_bodies( create_pool_name=create_pool['name']) create_listener, expected_listener = self._get_listener_bodies( create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_l7policy_redirect_pool_one_rule(self): create_pool, expected_pool = self._get_pool_bodies(create_members=[], expected_members=[]) create_l7rules, expected_l7rules = self._get_l7rules_bodies() create_l7policies, expected_l7policies = self._get_l7policies_bodies( create_pool_name=create_pool['name'], create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) create_listener, expected_listener = self._get_listener_bodies( create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_l7policies_one_redirect_pool_one_rule(self): create_pool, expected_pool = self._get_pool_bodies(create_members=[], expected_members=[]) create_l7rules, expected_l7rules = self._get_l7rules_bodies() create_l7policies, expected_l7policies = self._get_l7policies_bodies( create_pool_name=create_pool['name'], create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) c_l7policies_url, e_l7policies_url = self._get_l7policies_bodies() for policy in c_l7policies_url: policy['position'] = 2 create_l7policies.append(policy) for policy in e_l7policies_url: policy['position'] = 2 expected_l7policies.append(policy) create_listener, expected_listener = self._get_listener_bodies( create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) 
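# NOTE: l7policies within a listener are ordered by their 1-based
# 'position' field, which is why the REDIRECT_TO_URL policies appended
# above are explicitly bumped to position 2, behind the
# REDIRECT_TO_POOL policy at position 1. A generic renumbering of such
# a list would be:
#
#     for idx, policy in enumerate(create_l7policies, start=1):
#         policy['position'] = idx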
create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_l7policies_one_redirect_url_with_default_pool(self): create_pool, expected_pool = self._get_pool_bodies(create_members=[], expected_members=[]) create_l7rules, expected_l7rules = self._get_l7rules_bodies() create_l7policies, expected_l7policies = self._get_l7policies_bodies( create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool['name'], create_l7policies=create_l7policies, expected_l7policies=expected_l7policies, ) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_l7policies_redirect_pools_no_rules(self): create_pool, expected_pool = self._get_pool_bodies() create_l7policies, expected_l7policies = self._get_l7policies_bodies( create_pool_name=create_pool['name']) r_create_pool, r_expected_pool = self._get_pool_bodies(name='pool2') c_l7policies_url, e_l7policies_url = self._get_l7policies_bodies( create_pool_name=r_create_pool['name']) for policy in c_l7policies_url: policy['position'] = 2 create_l7policies.append(policy) for policy in e_l7policies_url: policy['position'] = 2 expected_l7policies.append(policy) create_listener, expected_listener = self._get_listener_bodies( create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool, r_create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_with_l7policy_redirect_pool_bad_rule(self): create_pool, expected_pool = self._get_pool_bodies(create_members=[], expected_members=[]) create_l7rules, expected_l7rules = self._get_l7rules_bodies( value="local host") create_l7policies, expected_l7policies = self._get_l7policies_bodies( create_pool_name=create_pool['name'], create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) create_listener, expected_listener = self._get_listener_bodies( create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body, status=400) self.assertIn('L7Rule: Invalid characters', response.json.get('faultstring')) def test_with_member_invalid_address(self): # 169.254.169.254 is the default invalid member address create_member = { 'address': '169.254.169.254', 'protocol_port': 80, } create_pool, _ = self._get_pool_bodies( create_members=[create_member], protocol=constants.PROTOCOL_TCP ) create_listener, _ = self._get_listener_bodies( create_default_pool_name="pool1", ) create_lb, _ = self._get_lb_bodies( [create_listener], [], create_pools=[create_pool] ) body = 
self._build_body(create_lb) response = self.post(self.LBS_PATH, body, expect_errors=True) self.assertEqual(400, response.status_code) expect_error_msg = ("169.254.169.254 is not a valid option for member " "address") self.assertEqual(expect_error_msg, response.json['faultstring']) def _test_with_one_of_everything_helper(self): create_member, expected_member = self._get_member_bodies() create_hm, expected_hm = self._get_hm_bodies() create_pool, expected_pool = self._get_pool_bodies( create_members=[create_member], expected_members=[expected_member], create_hm=create_hm, expected_hm=expected_hm, protocol=constants.PROTOCOL_HTTP) create_sni_containers, expected_sni_containers = ( self._get_sni_container_bodies()) create_l7rules, expected_l7rules = self._get_l7rules_bodies() r_create_member, r_expected_member = self._get_member_bodies( protocol_port=88) r_create_pool, r_expected_pool = self._get_pool_bodies( create_members=[r_create_member], expected_members=[r_expected_member]) create_l7policies, expected_l7policies = self._get_l7policies_bodies( create_pool_name=r_create_pool['name'], create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool['name'], create_protocol=constants.PROTOCOL_TERMINATED_HTTPS, create_l7policies=create_l7policies, expected_l7policies=expected_l7policies, create_sni_containers=create_sni_containers, expected_sni_containers=expected_sni_containers) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) return body, expected_lb # TODO(johnsom) Fix this when there is a noop certificate manager @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_with_one_of_everything(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} body, expected_lb = self._test_with_one_of_everything_helper() response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) def test_db_create_failure(self): create_listener, expected_listener = self._get_listener_bodies() create_lb, _ = self._get_lb_bodies([create_listener], [expected_listener]) body = self._build_body(create_lb) with mock.patch('octavia.db.repositories.Repositories.' 
'create_load_balancer_and_vip') as repo_mock: repo_mock.side_effect = Exception('I am a DB Error') self.post(self.LBS_PATH, body, status=500) def test_pool_names_not_unique(self): create_pool1, expected_pool1 = self._get_pool_bodies() create_pool2, expected_pool2 = self._get_pool_bodies() create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool1['name']) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool1, create_pool2]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body, status=400) self.assertIn("Pool names must be unique", response.json.get('faultstring')) def test_pool_names_must_have_specs(self): create_pool, expected_pool = self._get_pool_bodies() create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name="my_nonexistent_pool") create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body, status=400) self.assertIn("referenced but no full definition", response.json.get('faultstring')) def test_pool_mandatory_attributes(self): create_pool, expected_pool = self._get_pool_bodies() create_pool.pop('protocol') create_listener, expected_listener = self._get_listener_bodies( create_default_pool_name=create_pool['name']) create_lb, expected_lb = self._get_lb_bodies( create_listeners=[create_listener], expected_listeners=[expected_listener], create_pools=[create_pool]) body = self._build_body(create_lb) response = self.post(self.LBS_PATH, body, status=400) self.assertIn("missing required attribute: protocol", response.json.get('faultstring')) def test_create_over_quota_lb(self): body, _ = self._test_with_one_of_everything_helper() self.start_quota_mock(data_models.LoadBalancer) self.post(self.LBS_PATH, body, status=403) def test_create_over_quota_pools(self): body, _ = self._test_with_one_of_everything_helper() self.start_quota_mock(data_models.Pool) self.post(self.LBS_PATH, body, status=403) def test_create_over_quota_listeners(self): body, _ = self._test_with_one_of_everything_helper() self.start_quota_mock(data_models.Listener) self.post(self.LBS_PATH, body, status=403) def test_create_over_quota_members(self): body, _ = self._test_with_one_of_everything_helper() self.start_quota_mock(data_models.Member) self.post(self.LBS_PATH, body, status=403) def test_create_over_quota_hms(self): body, _ = self._test_with_one_of_everything_helper() self.start_quota_mock(data_models.HealthMonitor) self.post(self.LBS_PATH, body, status=403) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_create_over_quota_sanity_check(self, mock_cert_data): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_cert_data.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} # This one should create, as we don't check quotas on L7Policies body, _ = self._test_with_one_of_everything_helper() self.start_quota_mock(data_models.L7Policy) self.post(self.LBS_PATH, body) def _getStatus(self, lb_id): res = self.get(self.LB_PATH.format(lb_id=lb_id + "/status")) return res.json.get('statuses').get('loadbalancer') # Test the "statuses" alias for "status". 
# This is required for backward compatibility with neutron-lbaas def test_statuses(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') statuses = self.get(self.LB_PATH.format(lb_id=lb['id'] + "/statuses")) response = statuses.json.get('statuses').get('loadbalancer') self.assertEqual(lb['name'], response['name']) self.assertEqual(lb['id'], response['id']) self.assertEqual(lb['operating_status'], response['operating_status']) self.assertEqual(lb['provisioning_status'], response['provisioning_status']) def test_status(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') response = self._getStatus(lb['id']) self.assertEqual(lb['name'], response['name']) self.assertEqual(lb['id'], response['id']) self.assertEqual(lb['operating_status'], response['operating_status']) self.assertEqual(lb['provisioning_status'], response['provisioning_status']) def _assertLB(self, lb, response): self.assertEqual(lb['name'], response['name']) self.assertEqual(lb['id'], response['id']) self.assertEqual(constants.ONLINE, response['operating_status']) self.assertEqual(constants.PENDING_UPDATE, response['provisioning_status']) def test_statuses_listener(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) listener = self.create_listener( constants.PROTOCOL_HTTP, 80, lb['id']).get('listener') response = self._getStatus(lb['id']) self._assertLB(lb, response) response = response.get('listeners')[0] self.assertEqual(listener['name'], response['name']) self.assertEqual(listener['id'], response['id']) self.assertEqual(listener['operating_status'], response['operating_status']) self.assertEqual(listener['provisioning_status'], response['provisioning_status']) def _assertListener(self, listener, response, prov_status=constants.ACTIVE): self.assertEqual(listener['name'], response['name']) self.assertEqual(listener['id'], response['id']) self.assertEqual(constants.ONLINE, response['operating_status']) self.assertEqual(prov_status, response['provisioning_status']) def _assertListenerPending(self, listener, response): self._assertListener(listener, response, constants.PENDING_UPDATE) def test_statuses_multiple_listeners(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) listener1 = self.create_listener( constants.PROTOCOL_HTTP, 80, lb['id']).get('listener') self.set_lb_status(lb['id']) listener2 = self.create_listener( constants.PROTOCOL_HTTPS, 443, lb['id']).get('listener') response = self._getStatus(lb['id']) self._assertLB(lb, response) self._assertListener(listener1, response.get('listeners')[0]) response = response.get('listeners')[1] self.assertEqual(listener2['name'], response['name']) self.assertEqual(listener2['id'], response['id']) self.assertEqual(listener2['operating_status'], response['operating_status']) self.assertEqual(listener2['provisioning_status'], response['provisioning_status']) def test_statuses_pool(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) listener = self.create_listener( constants.PROTOCOL_HTTP, 80, lb['id']).get('listener') self.set_lb_status(lb['id']) pool = self.create_pool( lb['id'], constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=listener['id']).get('pool') response = self._getStatus(lb['id']) self._assertLB(lb, response) self._assertListenerPending(listener, response.get('listeners')[0]) response = 
response.get('listeners')[0]['pools'][0] self.assertEqual(pool['name'], response['name']) self.assertEqual(pool['id'], response['id']) self.assertEqual(pool['operating_status'], response['operating_status']) self.assertEqual(pool['provisioning_status'], response['provisioning_status']) def _assertPool(self, pool, response, prov_status=constants.ACTIVE): self.assertEqual(pool['name'], response['name']) self.assertEqual(pool['id'], response['id']) self.assertEqual(constants.ONLINE, response['operating_status']) self.assertEqual(prov_status, response['provisioning_status']) def _assertPoolPending(self, pool, response): self._assertPool(pool, response, constants.PENDING_UPDATE) def test_statuses_pools(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) listener = self.create_listener( constants.PROTOCOL_HTTP, 80, lb['id']).get('listener') self.set_lb_status(lb['id']) pool1 = self.create_pool( lb['id'], constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=listener['id']).get('pool') self.set_lb_status(lb['id']) pool2 = self.create_pool( lb['id'], constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') self.set_lb_status(lb['id']) l7_policy = self.create_l7policy( listener['id'], constants.L7POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=pool2.get('id')).get('l7policy') self.set_lb_status(lb['id']) self.create_l7rule( l7_policy['id'], constants.L7RULE_TYPE_HOST_NAME, constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'www.example.com').get(self.root_tag) response = self._getStatus(lb['id']) self._assertLB(lb, response) self._assertListenerPending(listener, response.get('listeners')[0]) self._assertPool(pool1, response.get('listeners')[0]['pools'][0]) self._assertPool(pool2, response.get('listeners')[0]['pools'][1]) def test_statuses_health_monitor(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) listener = self.create_listener( constants.PROTOCOL_HTTP, 80, lb['id']).get('listener') self.set_lb_status(lb['id']) pool = self.create_pool( lb['id'], constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=listener['id']).get('pool') self.set_lb_status(lb['id']) hm = self.create_health_monitor( pool['id'], constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get('healthmonitor') response = self._getStatus(lb['id']) self._assertLB(lb, response) self._assertListenerPending(listener, response.get('listeners')[0]) self._assertPoolPending(pool, response.get('listeners')[0]['pools'][0]) response = response.get('listeners')[0]['pools'][0]['health_monitor'] self.assertEqual(hm['name'], response['name']) self.assertEqual(hm['id'], response['id']) self.assertEqual(hm['type'], response['type']) self.assertEqual(hm['operating_status'], response['operating_status']) self.assertEqual(hm['provisioning_status'], response['provisioning_status']) def test_statuses_member(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) listener = self.create_listener( constants.PROTOCOL_HTTP, 80, lb['id']).get('listener') self.set_lb_status(lb['id']) pool = self.create_pool( lb['id'], constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=listener['id']).get('pool') self.set_lb_status(lb['id']) member = self.create_member( pool['id'], '10.0.0.1', 80).get('member') response = self._getStatus(lb['id']) self._assertLB(lb, response) self._assertListenerPending(listener, 
response.get('listeners')[0]) self._assertPoolPending(pool, response.get('listeners')[0]['pools'][0]) response = response.get('listeners')[0]['pools'][0]['members'][0] self.assertEqual(member['name'], response['name']) self.assertEqual(member['id'], response['id']) self.assertEqual(member['address'], response['address']) self.assertEqual(member['protocol_port'], response['protocol_port']) self.assertEqual(member['operating_status'], response['operating_status']) self.assertEqual(member['provisioning_status'], response['provisioning_status']) def test_statuses_members(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) listener = self.create_listener( constants.PROTOCOL_HTTP, 80, lb['id']).get('listener') self.set_lb_status(lb['id']) pool = self.create_pool( lb['id'], constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=listener['id']).get('pool') self.set_lb_status(lb['id']) member1 = self.create_member( pool['id'], '10.0.0.1', 80).get('member') self.set_lb_status(lb['id']) member2 = self.create_member( pool['id'], '10.0.0.2', 88, name='test').get('member') response = self._getStatus(lb['id']) self._assertLB(lb, response) self._assertListenerPending(listener, response.get('listeners')[0]) self._assertPoolPending(pool, response.get('listeners')[0]['pools'][0]) members = response.get('listeners')[0]['pools'][0]['members'] response = members[0] self.assertEqual(member1['name'], response['name']) self.assertEqual(member1['id'], response['id']) self.assertEqual(member1['address'], response['address']) self.assertEqual(member1['protocol_port'], response['protocol_port']) self.assertEqual(constants.ONLINE, response['operating_status']) self.assertEqual(constants.ACTIVE, response['provisioning_status']) response = members[1] self.assertEqual(member2['name'], response['name']) self.assertEqual(member2['id'], response['id']) self.assertEqual(member2['address'], response['address']) self.assertEqual(member2['protocol_port'], response['protocol_port']) def test_statuses_authorized(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer( uuidutils.generate_uuid(), project_id=project_id).get('loadbalancer') self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self._getStatus(lb['id']) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(lb['name'], response['name']) self.assertEqual(lb['id'], response['id']) self.assertEqual(lb['operating_status'], response['operating_status']) self.assertEqual(lb['provisioning_status'], response['provisioning_status']) def test_statuses_not_authorized(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') 
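# Switch to the TESTING auth strategy so the API's policy checks actually
# run (no real Keystone token needed); the request below is issued as a
# foreign project and must be rejected, after which the saved strategy is
# restored.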
self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): res = self.get(self.LB_PATH.format(lb_id=lb['id'] + "/status"), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, res.json) def test_statuses_get_deleted(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer( uuidutils.generate_uuid(), project_id=project_id).get('loadbalancer') self.set_lb_status(lb['id'], status=constants.DELETED) self.get(self.LB_PATH.format(lb_id=lb['id'] + "/status"), status=404) def _getStats(self, lb_id): res = self.get(self.LB_PATH.format(lb_id=lb_id + "/stats")) return res.json.get('stats') def test_statistics(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) li = self.create_listener( constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) ls = self.create_listener_stats_dynamic( listener_id=li.get('id'), amphora_id=amphora.id, bytes_in=random.randint(1, 9), bytes_out=random.randint(1, 9), total_connections=random.randint(1, 9), request_errors=random.randint(1, 9)) response = self._getStats(lb['id']) self.assertEqual(ls['bytes_in'], response['bytes_in']) self.assertEqual(ls['bytes_out'], response['bytes_out']) self.assertEqual(ls['total_connections'], response['total_connections']) self.assertEqual(ls['active_connections'], response['active_connections']) self.assertEqual(ls['request_errors'], response['request_errors']) def test_statistics_authorized(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer( uuidutils.generate_uuid(), project_id=project_id).get('loadbalancer') self.set_lb_status(lb['id']) li = self.create_listener( constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) ls = self.create_listener_stats_dynamic( listener_id=li.get('id'), amphora_id=amphora.id, bytes_in=random.randint(1, 9), bytes_out=random.randint(1, 9), total_connections=random.randint(1, 9), request_errors=random.randint(1, 9)) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self._getStats(lb['id']) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(ls['bytes_in'], response['bytes_in']) self.assertEqual(ls['bytes_out'], response['bytes_out']) self.assertEqual(ls['total_connections'], response['total_connections']) self.assertEqual(ls['active_connections'], response['active_connections']) self.assertEqual(ls['request_errors'], response['request_errors']) def test_statistics_not_authorized(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') 
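# Build an LB with a listener and synthetic listener stats so the /stats
# endpoint has data to serve; even so, a caller from another project must
# be turned away.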
self.set_lb_status(lb['id']) li = self.create_listener( constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) self.create_listener_stats_dynamic( listener_id=li.get('id'), amphora_id=amphora.id, bytes_in=random.randint(1, 9), bytes_out=random.randint(1, 9), total_connections=random.randint(1, 9)) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): res = self.get(self.LB_PATH.format(lb_id=lb['id'] + "/stats"), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, res.json) def test_statistics_get_deleted(self): lb = self.create_load_balancer( uuidutils.generate_uuid()).get('loadbalancer') self.set_lb_status(lb['id']) li = self.create_listener( constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) self.create_listener_stats_dynamic( listener_id=li.get('id'), amphora_id=amphora.id, bytes_in=random.randint(1, 9), bytes_out=random.randint(1, 9), total_connections=random.randint(1, 9)) self.set_lb_status(lb['id'], status=constants.DELETED) self.get(self.LB_PATH.format(lb_id=lb['id'] + "/stats"), status=404)
octavia-6.2.2/octavia/tests/functional/api/v2/test_member.py
# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
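# Functional tests for the v2 member API: single-member CRUD, list
# filtering/pagination, batch (PUT) membership updates, quota enforcement,
# and authorization handling.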
from unittest import mock from octavia_lib.api.drivers import data_models as driver_dm from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.api.drivers import utils as driver_utils from octavia.common import constants import octavia.common.context from octavia.common import data_models from octavia.common import exceptions from octavia.db import repositories from octavia.network import base as network_base from octavia.tests.functional.api.v2 import base class TestMember(base.BaseAPITest): root_tag = 'member' root_tag_list = 'members' root_tag_links = 'members_links' def setUp(self): super(TestMember, self).setUp() vip_subnet_id = uuidutils.generate_uuid() self.lb = self.create_load_balancer(vip_subnet_id) self.lb_id = self.lb.get('loadbalancer').get('id') self.project_id = self.lb.get('loadbalancer').get('project_id') self.set_lb_status(self.lb_id) self.listener = self.create_listener( constants.PROTOCOL_HTTP, 80, lb_id=self.lb_id) self.listener_id = self.listener.get('listener').get('id') self.set_lb_status(self.lb_id) self.pool = self.create_pool(self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN) self.pool_id = self.pool.get('pool').get('id') self.set_lb_status(self.lb_id) self.pool_with_listener = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id) self.pool_with_listener_id = ( self.pool_with_listener.get('pool').get('id')) self.set_lb_status(self.lb_id) self.members_path = self.MEMBERS_PATH.format( pool_id=self.pool_id) self.member_path = self.members_path + '/{member_id}' self.members_path_listener = self.MEMBERS_PATH.format( pool_id=self.pool_with_listener_id) self.member_path_listener = self.members_path_listener + '/{member_id}' self.pool_repo = repositories.PoolRepository() def test_get(self): api_member = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) response = self.get(self.member_path.format( member_id=api_member.get('id'))).json.get(self.root_tag) self.assertEqual(api_member, response) self.assertEqual(api_member.get('name'), '') self.assertEqual([], api_member['tags']) def test_get_authorized(self): api_member = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.member_path.format( member_id=api_member.get('id'))).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(api_member, response) self.assertEqual(api_member.get('name'), '') def test_get_not_authorized(self): api_member = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') 
self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.get(self.member_path.format( member_id=api_member.get('id')), status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response) def test_get_deleted_gives_404(self): api_member = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) self.set_object_status(self.member_repo, api_member.get('id'), provisioning_status=constants.DELETED) self.get(self.member_path.format(member_id=api_member.get('id')), status=404) def test_bad_get(self): self.get(self.member_path.format(member_id=uuidutils.generate_uuid()), status=404) def test_get_all(self): api_m_1 = self.create_member( self.pool_id, '192.0.2.1', 80, tags=['test_tag1']).get(self.root_tag) self.set_lb_status(self.lb_id) api_m_2 = self.create_member( self.pool_id, '192.0.2.2', 80, tags=['test_tag2']).get(self.root_tag) self.set_lb_status(self.lb_id) # Original objects didn't have the updated operating/provisioning # status that exists in the DB. for m in [api_m_1, api_m_2]: m['operating_status'] = constants.ONLINE m['provisioning_status'] = constants.ACTIVE m.pop('updated_at') response = self.get(self.members_path).json.get(self.root_tag_list) self.assertIsInstance(response, list) self.assertEqual(2, len(response)) for m in response: m.pop('updated_at') for m in [api_m_1, api_m_2]: self.assertIn(m, response) def test_get_all_hides_deleted(self): api_member = self.create_member( self.pool_id, '10.0.0.1', 80).get(self.root_tag) response = self.get(self.members_path) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 1) self.set_object_status(self.member_repo, api_member.get('id'), provisioning_status=constants.DELETED) response = self.get(self.members_path) objects = response.json.get(self.root_tag_list) self.assertEqual(len(objects), 0) def test_get_all_authorized(self): api_m_1 = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) self.set_lb_status(self.lb_id) api_m_2 = self.create_member( self.pool_id, '192.0.2.2', 80).get(self.root_tag) self.set_lb_status(self.lb_id) # Original objects didn't have the updated operating/provisioning # status that exists in the DB. 
for m in [api_m_1, api_m_2]: m['operating_status'] = constants.ONLINE m['provisioning_status'] = constants.ACTIVE m.pop('updated_at') self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.members_path) response = response.json.get(self.root_tag_list) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertIsInstance(response, list) self.assertEqual(2, len(response)) for m in response: m.pop('updated_at') for m in [api_m_1, api_m_2]: self.assertIn(m, response) def test_get_all_unscoped_token(self): api_m_1 = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) self.set_lb_status(self.lb_id) api_m_2 = self.create_member( self.pool_id, '192.0.2.2', 80).get(self.root_tag) self.set_lb_status(self.lb_id) # Original objects didn't have the updated operating/provisioning # status that exists in the DB. for m in [api_m_1, api_m_2]: m['operating_status'] = constants.ONLINE m['provisioning_status'] = constants.ACTIVE m.pop('updated_at') self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', None): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': None} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): result = self.get(self.members_path, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, result) def test_get_all_not_authorized(self): api_m_1 = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) self.set_lb_status(self.lb_id) api_m_2 = self.create_member( self.pool_id, '192.0.2.2', 80).get(self.root_tag) self.set_lb_status(self.lb_id) # Original objects didn't have the updated operating/provisioning # status that exists in the DB. 
for m in [api_m_1, api_m_2]: m['operating_status'] = constants.ONLINE m['provisioning_status'] = constants.ACTIVE m.pop('updated_at') self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.get(self.members_path, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_get_all_sorted(self): self.create_member(self.pool_id, '192.0.2.1', 80, name='member1') self.set_lb_status(self.lb_id) self.create_member(self.pool_id, '192.0.2.2', 80, name='member2') self.set_lb_status(self.lb_id) self.create_member(self.pool_id, '192.0.2.3', 80, name='member3') self.set_lb_status(self.lb_id) response = self.get(self.members_path, params={'sort': 'name:desc'}) members_desc = response.json.get(self.root_tag_list) response = self.get(self.members_path, params={'sort': 'name:asc'}) members_asc = response.json.get(self.root_tag_list) self.assertEqual(3, len(members_desc)) self.assertEqual(3, len(members_asc)) member_id_names_desc = [(member.get('id'), member.get('name')) for member in members_desc] member_id_names_asc = [(member.get('id'), member.get('name')) for member in members_asc] self.assertEqual(member_id_names_asc, list(reversed(member_id_names_desc))) def test_get_all_limited(self): self.create_member(self.pool_id, '192.0.2.1', 80, name='member1') self.set_lb_status(self.lb_id) self.create_member(self.pool_id, '192.0.2.2', 80, name='member2') self.set_lb_status(self.lb_id) self.create_member(self.pool_id, '192.0.2.3', 80, name='member3') self.set_lb_status(self.lb_id) # First two -- should have 'next' link first_two = self.get(self.members_path, params={'limit': 2}).json objs = first_two[self.root_tag_list] links = first_two[self.root_tag_links] self.assertEqual(2, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('next', links[0]['rel']) # Third + off the end -- should have previous link third = self.get(self.members_path, params={ 'limit': 2, 'marker': first_two[self.root_tag_list][1]['id']}).json objs = third[self.root_tag_list] links = third[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(1, len(links)) self.assertEqual('previous', links[0]['rel']) # Middle -- should have both links middle = self.get(self.members_path, params={ 'limit': 1, 'marker': first_two[self.root_tag_list][0]['id']}).json objs = middle[self.root_tag_list] links = middle[self.root_tag_links] self.assertEqual(1, len(objs)) self.assertEqual(2, len(links)) self.assertItemsEqual(['previous', 'next'], [link['rel'] for link in links]) def test_get_all_fields_filter(self): self.create_member(self.pool_id, '192.0.2.1', 80, name='member1') self.set_lb_status(self.lb_id) self.create_member(self.pool_id, '192.0.2.2', 80, name='member2') self.set_lb_status(self.lb_id) self.create_member(self.pool_id, '192.0.2.3', 80, name='member3') self.set_lb_status(self.lb_id) members = self.get(self.members_path, params={ 'fields': ['id', 'address']}).json for member in members['members']: self.assertIn(u'id', member) self.assertIn(u'address', member) self.assertNotIn(u'name', member) self.assertNotIn(u'monitor_address', member) def test_get_one_fields_filter(self): member1 = self.create_member( self.pool_id, '192.0.2.1', 80, name='member1').get(self.root_tag) 
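# set_lb_status simulates the provisioning workflow finishing, returning
# the LB (and its children) to ACTIVE/ONLINE so the next request is
# accepted.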
self.set_lb_status(self.lb_id) member = self.get( self.member_path.format(member_id=member1.get('id')), params={'fields': ['id', 'address']}).json.get(self.root_tag) self.assertIn(u'id', member) self.assertIn(u'address', member) self.assertNotIn(u'name', member) self.assertNotIn(u'monitor_address', member) def test_get_all_filter(self): mem1 = self.create_member(self.pool_id, '192.0.2.1', 80, name='member1').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_member(self.pool_id, '192.0.2.2', 80, name='member2').get(self.root_tag) self.set_lb_status(self.lb_id) self.create_member(self.pool_id, '192.0.2.3', 80, name='member3').get(self.root_tag) self.set_lb_status(self.lb_id) members = self.get(self.members_path, params={ 'id': mem1['id']}).json self.assertEqual(1, len(members['members'])) self.assertEqual(mem1['id'], members['members'][0]['id']) def test_get_all_tags_filter(self): mem1 = self.create_member( self.pool_id, '192.0.2.1', 80, name='member1', tags=['test_tag1', 'test_tag2'] ).get(self.root_tag) self.set_lb_status(self.lb_id) mem2 = self.create_member( self.pool_id, '192.0.2.2', 80, name='member2', tags=['test_tag2', 'test_tag3'] ).get(self.root_tag) self.set_lb_status(self.lb_id) mem3 = self.create_member( self.pool_id, '192.0.2.3', 80, name='member3', tags=['test_tag4', 'test_tag5'] ).get(self.root_tag) self.set_lb_status(self.lb_id) mems = self.get( self.members_path, params={'tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(mems, list) self.assertEqual(2, len(mems)) self.assertEqual( [mem1.get('id'), mem2.get('id')], [mem.get('id') for mem in mems] ) mems = self.get( self.members_path, params={'tags': ['test_tag2', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(mems, list) self.assertEqual(1, len(mems)) self.assertEqual( [mem2.get('id')], [mem.get('id') for mem in mems] ) mems = self.get( self.members_path, params={'tags-any': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(mems, list) self.assertEqual(2, len(mems)) self.assertEqual( [mem1.get('id'), mem2.get('id')], [mem.get('id') for mem in mems] ) mems = self.get( self.members_path, params={'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(mems, list) self.assertEqual(1, len(mems)) self.assertEqual( [mem3.get('id')], [mem.get('id') for mem in mems] ) mems = self.get( self.members_path, params={'not-tags-any': ['test_tag2', 'test_tag4']} ).json.get(self.root_tag_list) self.assertIsInstance(mems, list) self.assertEqual(0, len(mems)) mems = self.get( self.members_path, params={'tags': 'test_tag2', 'tags-any': ['test_tag1', 'test_tag3']} ).json.get(self.root_tag_list) self.assertIsInstance(mems, list) self.assertEqual(2, len(mems)) self.assertEqual( [mem1.get('id'), mem2.get('id')], [mem.get('id') for mem in mems] ) mems = self.get( self.members_path, params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} ).json.get(self.root_tag_list) self.assertIsInstance(mems, list) self.assertEqual(0, len(mems)) def test_empty_get_all(self): response = self.get(self.members_path).json.get(self.root_tag_list) self.assertIsInstance(response, list) self.assertEqual(0, len(response)) def test_create_sans_listener(self): api_member = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) self.assertEqual('192.0.2.1', api_member['address']) self.assertEqual(80, api_member['protocol_port']) self.assertIsNotNone(api_member['created_at']) self.assertIsNone(api_member['updated_at']) self.assert_correct_status( lb_id=self.lb_id, 
listener_id=self.listener_id, pool_id=self.pool_id, member_id=api_member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_CREATE, member_op_status=constants.NO_MONITOR) self.set_lb_status(self.lb_id) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, member_id=api_member.get('id')) def test_create_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): api_member = self.create_member( self.pool_id, '192.0.2.1', 80, tags=['test_tag']).get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual('192.0.2.1', api_member['address']) self.assertEqual(80, api_member['protocol_port']) self.assertEqual(['test_tag'], api_member['tags']) self.assertIsNotNone(api_member['created_at']) self.assertIsNone(api_member['updated_at']) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, member_id=api_member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_CREATE, member_op_status=constants.NO_MONITOR) self.set_lb_status(self.lb_id) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, member_id=api_member.get('id')) def test_create_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): api_member = self.create_member( self.pool_id, '192.0.2.1', 80, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, api_member) def test_create_pool_in_error(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) pool1 = self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') pool1_id = pool1.get('id') self.set_lb_status(lb1_id) self.set_object_status(self.pool_repo, pool1_id, provisioning_status=constants.ERROR) api_member = self.create_member(pool1_id, '192.0.2.1', 80, status=409) ref_msg = 'Pool %s is immutable and cannot be updated.' 
% pool1_id self.assertEqual(ref_msg, api_member.get('faultstring')) # TODO(rm_work) Remove after deprecation of project_id in POST (R series) def test_create_with_project_id_is_ignored(self): pid = uuidutils.generate_uuid() api_member = self.create_member( self.pool_id, '192.0.2.1', 80, project_id=pid).get(self.root_tag) self.assertEqual(self.project_id, api_member['project_id']) def test_create_backup(self): api_member = self.create_member( self.pool_id, '192.0.2.1', 80, backup=True).get(self.root_tag) self.assertTrue(api_member['backup']) self.set_lb_status(self.lb_id) api_member = self.create_member( self.pool_id, '192.0.2.1', 81, backup=False).get(self.root_tag) self.assertFalse(api_member['backup']) def test_bad_create(self): member = {'name': 'test1'} self.post(self.members_path, self._build_body(member), status=400) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_with_bad_provider(self, mock_provider): mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') response = self.create_member(self.pool_id, '192.0.2.1', 80, status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.get('faultstring')) @mock.patch('octavia.api.drivers.driver_factory.get_driver') @mock.patch('octavia.api.drivers.utils.call_provider') def test_full_batch_members(self, mock_provider, mock_get_driver): mock_driver = mock.MagicMock() mock_driver.name = 'noop_driver' mock_get_driver.return_value = mock_driver member1 = {'address': '192.0.2.1', 'protocol_port': 80, 'project_id': self.project_id} member2 = {'address': '192.0.2.2', 'protocol_port': 80, 'project_id': self.project_id} member3 = {'address': '192.0.2.3', 'protocol_port': 80, 'project_id': self.project_id} member4 = {'address': '192.0.2.4', 'protocol_port': 80, 'project_id': self.project_id} member5 = {'address': '192.0.2.5', 'protocol_port': 80, 'project_id': self.project_id} member6 = {'address': '192.0.2.6', 'protocol_port': 80, 'project_id': self.project_id} members = [member1, member2, member3, member4] for m in members: self.create_member(pool_id=self.pool_id, **m) self.set_lb_status(self.lb_id) # We are only concerned about the batch update, so clear out the # create members calls above. 
mock_provider.reset_mock() req_dict = [member1, member2, member5, member6] body = {self.root_tag_list: req_dict} path = self.MEMBERS_PATH.format(pool_id=self.pool_id) self.put(path, body, status=202) returned_members = self.get( self.MEMBERS_PATH.format(pool_id=self.pool_id) ).json.get(self.root_tag_list) expected_members = [ ('192.0.2.1', 80, 'PENDING_UPDATE'), ('192.0.2.2', 80, 'PENDING_UPDATE'), ('192.0.2.3', 80, 'PENDING_DELETE'), ('192.0.2.4', 80, 'PENDING_DELETE'), ('192.0.2.5', 80, 'PENDING_CREATE'), ('192.0.2.6', 80, 'PENDING_CREATE'), ] provider_creates = [] provider_updates = [] for rm in returned_members: self.assertIn( (rm['address'], rm['protocol_port'], rm['provisioning_status']), expected_members) provider_dict = driver_utils.member_dict_to_provider_dict(rm) # Adjust for API response provider_dict['pool_id'] = self.pool_id if rm['provisioning_status'] == 'PENDING_UPDATE': del provider_dict['name'] del provider_dict['subnet_id'] provider_updates.append(driver_dm.Member(**provider_dict)) elif rm['provisioning_status'] == 'PENDING_CREATE': provider_dict['name'] = None provider_creates.append(driver_dm.Member(**provider_dict)) # Order matters here provider_creates += provider_updates mock_provider.assert_called_once_with(u'noop_driver', mock_driver.member_batch_update, self.pool_id, provider_creates) @mock.patch('octavia.api.drivers.driver_factory.get_driver') @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_batch_members(self, mock_provider, mock_get_driver): mock_driver = mock.MagicMock() mock_driver.name = 'noop_driver' mock_get_driver.return_value = mock_driver member5 = {'address': '192.0.2.5', 'protocol_port': 80, 'tags': ['test_tag1']} member6 = {'address': '192.0.2.6', 'protocol_port': 80, 'tags': ['test_tag2']} req_dict = [member5, member6] body = {self.root_tag_list: req_dict} path = self.MEMBERS_PATH.format(pool_id=self.pool_id) self.put(path, body, status=202) returned_members = self.get( self.MEMBERS_PATH.format(pool_id=self.pool_id) ).json.get(self.root_tag_list) expected_members = [ ('192.0.2.5', 80, 'PENDING_CREATE', ['test_tag1']), ('192.0.2.6', 80, 'PENDING_CREATE', ['test_tag2']), ] provider_members = [] for rm in returned_members: self.assertIn( (rm['address'], rm['protocol_port'], rm['provisioning_status'], rm['tags']), expected_members) provider_dict = driver_utils.member_dict_to_provider_dict(rm) # Adjust for API response provider_dict['pool_id'] = self.pool_id provider_dict['name'] = None provider_members.append(driver_dm.Member(**provider_dict)) mock_provider.assert_called_once_with(u'noop_driver', mock_driver.member_batch_update, self.pool_id, provider_members) def test_create_batch_members_with_bad_subnet(self): subnet_id = uuidutils.generate_uuid() member5 = {'address': '10.0.0.5', 'protocol_port': 80, 'subnet_id': subnet_id} req_dict = [member5] body = {self.root_tag_list: req_dict} path = self.MEMBERS_PATH.format(pool_id=self.pool_id) with mock.patch( 'octavia.common.utils.get_network_driver') as net_mock: net_mock.return_value.get_subnet = mock.Mock( side_effect=network_base.SubnetNotFound('Subnet not found')) response = self.put(path, body, status=400).json err_msg = 'Subnet ' + subnet_id + ' not found.' 
self.assertEqual(response.get('faultstring'), err_msg) def test_create_batch_members_with_invalid_address(self): # 169.254.169.254 is the default invalid member address member5 = {'address': '169.254.169.254', 'protocol_port': 80} req_dict = [member5] body = {self.root_tag_list: req_dict} path = self.MEMBERS_PATH.format(pool_id=self.pool_id) response = self.put(path, body, status=400).json err_msg = ("169.254.169.254 is not a valid option for member address") self.assertEqual(err_msg, response.get('faultstring')) @mock.patch('octavia.api.drivers.driver_factory.get_driver') @mock.patch('octavia.api.drivers.utils.call_provider') def test_additive_only_batch_members(self, mock_provider, mock_get_driver): mock_driver = mock.MagicMock() mock_driver.name = 'noop_driver' mock_get_driver.return_value = mock_driver member1 = {'address': '192.0.2.1', 'protocol_port': 80} member2 = {'address': '192.0.2.2', 'protocol_port': 80} member3 = {'address': '192.0.2.3', 'protocol_port': 80} member4 = {'address': '192.0.2.4', 'protocol_port': 80} member5 = {'address': '192.0.2.5', 'protocol_port': 80} member6 = {'address': '192.0.2.6', 'protocol_port': 80} members = [member1, member2, member3, member4] for m in members: self.create_member(pool_id=self.pool_id, **m) self.set_lb_status(self.lb_id) # We are only concerned about the batch update, so clear out the # create members calls above. mock_provider.reset_mock() req_dict = [member1, member2, member5, member6] body = {self.root_tag_list: req_dict} path = self.MEMBERS_PATH.format(pool_id=self.pool_id) path = "{}?additive_only=True".format(path) self.put(path, body, status=202) returned_members = self.get( self.MEMBERS_PATH.format(pool_id=self.pool_id) ).json.get(self.root_tag_list) # Members 1+2 should be updated, 3+4 left alone, and 5+6 created expected_members = [ ('192.0.2.1', 80, 'PENDING_UPDATE'), ('192.0.2.2', 80, 'PENDING_UPDATE'), ('192.0.2.3', 80, 'ACTIVE'), ('192.0.2.4', 80, 'ACTIVE'), ('192.0.2.5', 80, 'PENDING_CREATE'), ('192.0.2.6', 80, 'PENDING_CREATE'), ] provider_creates = [] provider_updates = [] provider_ignored = [] for rm in returned_members: self.assertIn( (rm['address'], rm['protocol_port'], rm['provisioning_status']), expected_members) provider_dict = driver_utils.member_dict_to_provider_dict(rm) # Adjust for API response provider_dict['pool_id'] = self.pool_id if rm['provisioning_status'] == 'PENDING_UPDATE': del provider_dict['name'] del provider_dict['subnet_id'] provider_updates.append(driver_dm.Member(**provider_dict)) elif rm['provisioning_status'] == 'PENDING_CREATE': provider_dict['name'] = None provider_creates.append(driver_dm.Member(**provider_dict)) elif rm['provisioning_status'] == 'ACTIVE': provider_dict['name'] = None provider_ignored.append(driver_dm.Member(**provider_dict)) # Order matters here provider_creates += provider_updates provider_creates += provider_ignored mock_provider.assert_called_once_with(u'noop_driver', mock_driver.member_batch_update, self.pool_id, provider_creates) @mock.patch('octavia.api.drivers.driver_factory.get_driver') @mock.patch('octavia.api.drivers.utils.call_provider') def test_update_batch_members(self, mock_provider, mock_get_driver): mock_driver = mock.MagicMock() mock_driver.name = 'noop_driver' mock_get_driver.return_value = mock_driver member1 = {'address': '192.0.2.1', 'protocol_port': 80, 'project_id': self.project_id} member2 = {'address': '192.0.2.2', 'protocol_port': 80, 'project_id': self.project_id} members = [member1, member2] for m in members: 
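# Pre-create both members; resubmitting the identical set through the
# batch PUT should mark each one PENDING_UPDATE (members omitted from a
# non-additive batch PUT are deleted instead).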
self.create_member(pool_id=self.pool_id, **m) self.set_lb_status(self.lb_id) # We are only concerned about the batch update, so clear out the # create members calls above. mock_provider.reset_mock() req_dict = [member1, member2] body = {self.root_tag_list: req_dict} path = self.MEMBERS_PATH.format(pool_id=self.pool_id) self.put(path, body, status=202) returned_members = self.get( self.MEMBERS_PATH.format(pool_id=self.pool_id) ).json.get(self.root_tag_list) expected_members = [ ('192.0.2.1', 80, 'PENDING_UPDATE'), ('192.0.2.2', 80, 'PENDING_UPDATE'), ] provider_members = [] for rm in returned_members: self.assertIn( (rm['address'], rm['protocol_port'], rm['provisioning_status']), expected_members) provider_dict = driver_utils.member_dict_to_provider_dict(rm) # Adjust for API response provider_dict['pool_id'] = self.pool_id del provider_dict['name'] del provider_dict['subnet_id'] provider_members.append(driver_dm.Member(**provider_dict)) mock_provider.assert_called_once_with(u'noop_driver', mock_driver.member_batch_update, self.pool_id, provider_members) @mock.patch('octavia.api.drivers.driver_factory.get_driver') @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_batch_members(self, mock_provider, mock_get_driver): mock_driver = mock.MagicMock() mock_driver.name = 'noop_driver' mock_get_driver.return_value = mock_driver member3 = {'address': '192.0.2.3', 'protocol_port': 80} member4 = {'address': '192.0.2.4', 'protocol_port': 80} members = [member3, member4] for m in members: self.create_member(pool_id=self.pool_id, **m) self.set_lb_status(self.lb_id) # We are only concerned about the batch update, so clear out the # create members calls above. mock_provider.reset_mock() req_dict = [] body = {self.root_tag_list: req_dict} path = self.MEMBERS_PATH.format(pool_id=self.pool_id) self.put(path, body, status=202) returned_members = self.get( self.MEMBERS_PATH.format(pool_id=self.pool_id) ).json.get(self.root_tag_list) expected_members = [ ('192.0.2.3', 80, 'PENDING_DELETE'), ('192.0.2.4', 80, 'PENDING_DELETE'), ] provider_members = [] for rm in returned_members: self.assertIn( (rm['address'], rm['protocol_port'], rm['provisioning_status']), expected_members) mock_provider.assert_called_once_with(u'noop_driver', mock_driver.member_batch_update, self.pool_id, provider_members) @mock.patch('octavia.api.drivers.driver_factory.get_driver') @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_batch_members_already_empty(self, mock_provider, mock_get_driver): mock_driver = mock.MagicMock() mock_driver.name = 'noop_driver' mock_get_driver.return_value = mock_driver req_dict = [] body = {self.root_tag_list: req_dict} path = self.MEMBERS_PATH.format(pool_id=self.pool_id) self.put(path, body, status=202) returned_members = self.get( self.MEMBERS_PATH.format(pool_id=self.pool_id) ).json.get(self.root_tag_list) self.assertEqual([], returned_members) mock_provider.assert_not_called() def test_create_with_attached_listener(self): api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) self.assertEqual('192.0.2.1', api_member['address']) self.assertEqual(80, api_member['protocol_port']) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_CREATE, 
member_op_status=constants.NO_MONITOR) self.set_lb_status(self.lb_id) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id')) def test_create_with_monitor_address_and_port(self): api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80, monitor_address='192.0.2.3', monitor_port=80).get(self.root_tag) self.assertEqual('192.0.2.1', api_member['address']) self.assertEqual(80, api_member['protocol_port']) self.assertEqual('192.0.2.3', api_member['monitor_address']) self.assertEqual(80, api_member['monitor_port']) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_CREATE, member_op_status=constants.NO_MONITOR) self.set_lb_status(self.lb_id) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id')) def test_create_with_health_monitor(self): self.create_health_monitor(self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1) self.set_lb_status(self.lb_id) api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_CREATE, member_op_status=constants.OFFLINE) def test_duplicate_create(self): member = {'address': '192.0.2.1', 'protocol_port': 80, 'project_id': self.project_id} self.post(self.members_path, self._build_body(member)) self.set_lb_status(self.lb_id) self.post(self.members_path, self._build_body(member), status=409) def test_create_with_bad_subnet(self): with mock.patch( 'octavia.common.utils.get_network_driver') as net_mock: net_mock.return_value.get_subnet = mock.Mock( side_effect=network_base.SubnetNotFound('Subnet not found')) subnet_id = uuidutils.generate_uuid() response = self.create_member(self.pool_id, '192.0.2.1', 80, subnet_id=subnet_id, status=400) err_msg = 'Subnet ' + subnet_id + ' not found.' 
self.assertEqual(response.get('faultstring'), err_msg) def test_create_with_valid_subnet(self): with mock.patch( 'octavia.common.utils.get_network_driver') as net_mock: subnet_id = uuidutils.generate_uuid() net_mock.return_value.get_subnet.return_value = subnet_id response = self.create_member( self.pool_id, '192.0.2.1', 80, subnet_id=subnet_id).get(self.root_tag) self.assertEqual('192.0.2.1', response['address']) self.assertEqual(80, response['protocol_port']) self.assertEqual(subnet_id, response['subnet_id']) def test_create_bad_port_number(self): member = {'address': '192.0.2.3', 'protocol_port': constants.MIN_PORT_NUMBER - 1} resp = self.post(self.members_path, self._build_body(member), status=400) self.assertIn('Value should be greater or equal to', resp.json.get('faultstring')) member = {'address': '192.0.2.3', 'protocol_port': constants.MAX_PORT_NUMBER + 1} resp = self.post(self.members_path, self._build_body(member), status=400) self.assertIn('Value should be lower or equal to', resp.json.get('faultstring')) def test_create_over_quota(self): self.start_quota_mock(data_models.Member) member = {'address': '192.0.2.3', 'protocol_port': 81} self.post(self.members_path, self._build_body(member), status=403) def test_update_with_attached_listener(self): old_name = "name1" new_name = "name2" api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80, name=old_name).get(self.root_tag) self.set_lb_status(self.lb_id) new_member = {'name': new_name} response = self.put( self.member_path_listener.format(member_id=api_member.get('id')), self._build_body(new_member)).json.get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_UPDATE) self.set_lb_status(self.lb_id) self.assertEqual(new_name, response.get('name')) self.assertEqual(api_member.get('created_at'), response.get('created_at')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id')) def test_update_authorized(self): old_name = "name1" new_name = "name2" api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80, name=old_name, tags=['old_tag']).get(self.root_tag) self.set_lb_status(self.lb_id) new_member = {'name': new_name, 'tags': ['new_tag']} self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): member_path = self.member_path_listener.format( member_id=api_member.get('id')) response = self.put( member_path, self._build_body(new_member)).json.get(self.root_tag) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, 
listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_UPDATE) self.set_lb_status(self.lb_id) self.assertEqual(new_name, response.get('name')) self.assertEqual(['new_tag'], response['tags']) self.assertEqual(api_member.get('created_at'), response.get('created_at')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id')) def test_update_not_authorized(self): old_name = "name1" new_name = "name2" api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80, name=old_name).get(self.root_tag) self.set_lb_status(self.lb_id) new_member = {'name': new_name} self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): member_path = self.member_path_listener.format( member_id=api_member.get('id')) response = self.put( member_path, self._build_body(new_member), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), lb_prov_status=constants.ACTIVE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.ACTIVE, member_prov_status=constants.ACTIVE) def test_update_sans_listener(self): old_name = "name1" new_name = "name2" api_member = self.create_member( self.pool_id, '192.0.2.1', 80, name=old_name).get(self.root_tag) self.set_lb_status(self.lb_id) member_path = self.member_path.format( member_id=api_member.get('id')) new_member = {'name': new_name} response = self.put( member_path, self._build_body(new_member)).json.get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, member_id=api_member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_UPDATE) self.set_lb_status(self.lb_id) self.assertEqual(new_name, response.get('name')) self.assertEqual(api_member.get('created_at'), response.get('created_at')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_id, member_id=api_member.get('id')) def test_bad_update(self): api_member = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) new_member = {'protocol_port': 'ten'} self.put(self.member_path.format(member_id=api_member.get('id')), self._build_body(new_member), status=400) @mock.patch('octavia.api.drivers.utils.call_provider') def test_update_with_bad_provider(self, mock_provider): api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80, name="member1").get(self.root_tag) self.set_lb_status(self.lb_id) new_member = {'name': "member2"} mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') response = self.put(self.member_path_listener.format( member_id=api_member.get('id')), self._build_body(new_member), status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', 
response.json.get('faultstring')) def test_update_unset_defaults(self): old_name = "name1" api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80, name=old_name, backup=True, monitor_address='192.0.2.2', monitor_port=8888, weight=10).get(self.root_tag) self.set_lb_status(self.lb_id) unset_params = {'name': None, 'backup': None, 'monitor_address': None, 'monitor_port': None, 'weight': None} member_path = self.member_path_listener.format( member_id=api_member.get('id')) response = self.put(member_path, self._build_body(unset_params)) response = response.json.get(self.root_tag) self.assertFalse(response['backup']) self.assertIsNone(response['monitor_address']) self.assertIsNone(response['monitor_port']) self.assertEqual('', response['name']) self.assertEqual(constants.DEFAULT_WEIGHT, response['weight']) def test_delete(self): api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) self.set_lb_status(self.lb_id) member = self.get(self.member_path_listener.format( member_id=api_member.get('id'))).json.get(self.root_tag) api_member['provisioning_status'] = constants.ACTIVE api_member['operating_status'] = constants.ONLINE self.assertIsNone(api_member.pop('updated_at')) self.assertIsNotNone(member.pop('updated_at')) self.assertEqual(api_member, member) self.delete(self.member_path_listener.format( member_id=api_member.get('id'))) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, member_prov_status=constants.PENDING_DELETE) self.set_lb_status(self.lb_id) member = self.get(self.member_path_listener.format( member_id=api_member.get('id')), status=404) def test_delete_authorized(self): api_member = self.create_member( self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) self.set_lb_status(self.lb_id) member = self.get(self.member_path_listener.format( member_id=api_member.get('id'))).json.get(self.root_tag) api_member['provisioning_status'] = constants.ACTIVE api_member['operating_status'] = constants.ONLINE self.assertIsNone(api_member.pop('updated_at')) self.assertIsNotNone(member.pop('updated_at')) self.assertEqual(api_member, member) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.delete(self.member_path_listener.format( member_id=api_member.get('id'))) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=self.pool_with_listener_id, member_id=member.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE, 
            member_prov_status=constants.PENDING_DELETE)
        self.set_lb_status(self.lb_id)
        member = self.get(self.member_path_listener.format(
            member_id=api_member.get('id')), status=404)

    def test_delete_not_authorized(self):
        api_member = self.create_member(
            self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        member = self.get(self.member_path_listener.format(
            member_id=api_member.get('id'))).json.get(self.root_tag)
        api_member['provisioning_status'] = constants.ACTIVE
        api_member['operating_status'] = constants.ONLINE
        self.assertIsNone(api_member.pop('updated_at'))
        self.assertIsNotNone(member.pop('updated_at'))
        self.assertEqual(api_member, member)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            self.delete(self.member_path_listener.format(
                member_id=api_member.get('id')), status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=self.pool_with_listener_id,
            member_id=member.get('id'),
            lb_prov_status=constants.ACTIVE,
            listener_prov_status=constants.ACTIVE,
            pool_prov_status=constants.ACTIVE,
            member_prov_status=constants.ACTIVE)

    def test_bad_delete(self):
        self.delete(self.member_path.format(
            member_id=uuidutils.generate_uuid()), status=404)
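    # NOTE: Member URLs are scoped to the pool in the path, so a lookup
    # through a pool that does not own the member is treated as "not found"
    # (404) rather than revealing that the member exists elsewhere.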
    def test_delete_mismatch_pool(self):
        # Create a pool that will not have the member, but is valid.
        self.pool = self.create_pool(self.lb_id, constants.PROTOCOL_HTTP,
                                     constants.LB_ALGORITHM_ROUND_ROBIN)
        bad_pool_id = self.pool.get('pool').get('id')
        self.set_lb_status(self.lb_id)
        # Create a member on our reference pool
        api_member = self.create_member(
            self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        # Attempt to delete the member using the wrong pool in the path
        member_path = self.MEMBERS_PATH.format(
            pool_id=bad_pool_id) + '/' + api_member['id']
        result = self.delete(member_path, status=404).json
        ref_msg = 'Member %s not found.' % api_member['id']
        self.assertEqual(ref_msg, result.get('faultstring'))

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_delete_with_bad_provider(self, mock_provider):
        api_member = self.create_member(
            self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        member = self.get(self.member_path_listener.format(
            member_id=api_member.get('id'))).json.get(self.root_tag)
        api_member['provisioning_status'] = constants.ACTIVE
        api_member['operating_status'] = constants.ONLINE
        self.assertIsNone(api_member.pop('updated_at'))
        self.assertIsNotNone(member.pop('updated_at'))
        self.assertEqual(api_member, member)
        mock_provider.side_effect = exceptions.ProviderDriverError(
            prov='bad_driver', user_msg='broken')
        self.delete(self.member_path_listener.format(
            member_id=api_member.get('id')), status=500)

    def test_create_when_lb_pending_update(self):
        self.create_member(self.pool_id, address="192.0.2.2",
                           protocol_port=80)
        self.set_lb_status(self.lb_id)
        self.put(self.LB_PATH.format(lb_id=self.lb_id),
                 body={'loadbalancer': {'name': 'test_name_change'}})
        member = {'address': '192.0.2.1',
                  'protocol_port': 80,
                  'project_id': self.project_id}
        self.post(self.members_path,
                  body=self._build_body(member), status=409)

    def test_update_when_lb_pending_update(self):
        member = self.create_member(
            self.pool_id, address="192.0.2.1", protocol_port=80,
            name="member1").get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.put(self.LB_PATH.format(lb_id=self.lb_id),
                 body={'loadbalancer': {'name': 'test_name_change'}})
        self.put(
            self.member_path.format(member_id=member.get('id')),
            body=self._build_body({'name': "member2"}), status=409)

    def test_delete_when_lb_pending_update(self):
        member = self.create_member(
            self.pool_id, address="192.0.2.1",
            protocol_port=80).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.put(self.LB_PATH.format(lb_id=self.lb_id),
                 body={'loadbalancer': {'name': 'test_name_change'}})
        self.delete(self.member_path.format(
            member_id=member.get('id')), status=409)

    def test_create_when_lb_pending_delete(self):
        self.create_member(self.pool_id, address="192.0.2.1",
                           protocol_port=80)
        self.set_lb_status(self.lb_id)
        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
                    params={'cascade': "true"})
        member = {'address': '192.0.2.2',
                  'protocol_port': 88,
                  'project_id': self.project_id}
        self.post(self.members_path,
                  body=self._build_body(member), status=409)

    def test_update_when_lb_pending_delete(self):
        member = self.create_member(
            self.pool_id, address="192.0.2.1", protocol_port=80,
            name="member1").get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
                    params={'cascade': "true"})
        self.put(self.member_path.format(member_id=member.get('id')),
                 body=self._build_body({'name': "member2"}), status=409)

    def test_update_when_deleted(self):
        member = self.create_member(
            self.pool_id, address="10.0.0.1",
            protocol_port=80).get(self.root_tag)
        self.set_lb_status(self.lb_id, status=constants.DELETED)
        self.put(self.member_path.format(member_id=member.get('id')),
                 body=self._build_body({'name': "member2"}), status=404)

    def test_delete_when_lb_pending_delete(self):
        member = self.create_member(
            self.pool_id, address="192.0.2.1",
            protocol_port=80).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
                    params={'cascade': "true"})
        self.delete(self.member_path.format(
            member_id=member.get('id')), status=409)
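    # NOTE: As the *_pending_* tests above show, an object tree that is
    # mid-operation is immutable and rejects changes with 409; once the
    # load balancer has been marked DELETED, its children return 404.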
    def test_delete_already_deleted(self):
        member = self.create_member(
            self.pool_id, address="192.0.2.1",
            protocol_port=80).get(self.root_tag)
        self.set_lb_status(self.lb_id, status=constants.DELETED)
        self.delete(self.member_path.format(
            member_id=member.get('id')), status=404)

octavia-6.2.2/octavia/tests/functional/api/v2/test_pool.py

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

from octavia.common import constants
import octavia.common.context
from octavia.common import data_models
from octavia.common import exceptions
from octavia.db import api as db_api
from octavia.tests.common import constants as c_const
from octavia.tests.common import sample_certs
from octavia.tests.functional.api.v2 import base


class TestPool(base.BaseAPITest):

    root_tag = 'pool'
    root_tag_list = 'pools'
    root_tag_links = 'pools_links'

    def setUp(self):
        super(TestPool, self).setUp()
        self.lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get('loadbalancer')
        self.lb_id = self.lb.get('id')
        self.project_id = self.lb.get('project_id')
        self.set_lb_status(self.lb_id)
        self.listener = self.create_listener(
            constants.PROTOCOL_HTTP, 80, self.lb_id).get('listener')
        self.listener_id = self.listener.get('id')
        self.set_lb_status(self.lb_id)
        self._setup_udp_lb_resources()

    def _setup_udp_lb_resources(self):
        self.udp_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get('loadbalancer')
        self.udp_lb_id = self.udp_lb.get('id')
        self.set_lb_status(self.udp_lb_id)
        self.udp_listener = self.create_listener(
            constants.PROTOCOL_UDP, 8888, self.udp_lb_id).get('listener')
        self.udp_listener_id = self.udp_listener.get('id')
        self.set_lb_status(self.udp_lb_id)

    def test_get(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            tags=['test_tag']).get(self.root_tag)
        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
        api_pool['provisioning_status'] = constants.ACTIVE
        api_pool['operating_status'] = constants.ONLINE
        api_pool.pop('updated_at')
        self.set_lb_status(lb_id=self.lb_id)
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        response.pop('updated_at')
        self.assertEqual(api_pool, response)
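    # NOTE: The authorization tests below all follow the same pattern: flip
    # auth_strategy to TESTING (or KEYSTONE), pin the request context's
    # project_id, and, for the positive cases, mock
    # RequestContext.to_policy_values so the policy engine sees a fixed set
    # of credentials; the original strategy is restored afterwards.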
    def test_get_authorized(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
        api_pool['provisioning_status'] = constants.ACTIVE
        api_pool['operating_status'] = constants.ONLINE
        api_pool.pop('updated_at')
        self.set_lb_status(lb_id=self.lb_id)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.get(self.POOL_PATH.format(
                    pool_id=api_pool.get('id'))).json.get(self.root_tag)
        response.pop('updated_at')
        self.assertEqual(api_pool, response)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

    def test_get_not_authorized(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
        api_pool['provisioning_status'] = constants.ACTIVE
        api_pool['operating_status'] = constants.ONLINE
        api_pool.pop('updated_at')
        self.set_lb_status(lb_id=self.lb_id)
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            response = self.get(self.POOL_PATH.format(
                pool_id=api_pool.get('id')), status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)

    def test_get_deleted_gives_404(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.set_object_status(self.pool_repo, api_pool.get('id'),
                               provisioning_status=constants.DELETED)
        self.get(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 status=404)

    def test_bad_get(self):
        self.get(self.POOL_PATH.format(pool_id=uuidutils.generate_uuid()),
                 status=404)

    def test_get_all(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            tags=['test_tag']).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        pools = self.get(self.POOLS_PATH).json.get(self.root_tag_list)
        self.assertIsInstance(pools, list)
        self.assertEqual(1, len(pools))
        self.assertEqual(api_pool.get('id'), pools[0].get('id'))
        self.assertEqual(['test_tag'], pools[0]['tags'])

    def test_get_all_hides_deleted(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        response = self.get(self.POOLS_PATH)
        objects = response.json.get(self.root_tag_list)
        self.assertEqual(len(objects), 1)
        self.set_object_status(self.pool_repo, api_pool.get('id'),
                               provisioning_status=constants.DELETED)
        response = self.get(self.POOLS_PATH)
        objects = response.json.get(self.root_tag_list)
        self.assertEqual(len(objects), 0)

    def test_get_all_admin(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1',
                                        project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        pool1 = self.create_pool(
            lb1_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        pool2 = self.create_pool(
            lb1_id, constants.PROTOCOL_HTTPS,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        pool3 = self.create_pool(
            lb1_id, constants.PROTOCOL_TCP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        pools = self.get(self.POOLS_PATH).json.get(self.root_tag_list)
        self.assertEqual(3, len(pools))
        pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools]
        self.assertIn((pool1.get('id'), pool1.get('protocol')),
                      pool_id_protocols)
        self.assertIn((pool2.get('id'), pool2.get('protocol')),
                      pool_id_protocols)
        self.assertIn((pool3.get('id'), pool3.get('protocol')),
                      pool_id_protocols)

    def test_get_all_non_admin(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1',
                                        project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        self.create_pool(
            lb1_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        self.create_pool(
            lb1_id, constants.PROTOCOL_HTTPS,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        pool3 = self.create_pool(
            self.lb_id, constants.PROTOCOL_TCP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               pool3['project_id']):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                pools = self.get(self.POOLS_PATH).json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(1, len(pools))
        pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools]
        self.assertIn((pool3.get('id'), pool3.get('protocol')),
                      pool_id_protocols)

    def test_get_all_unscoped_token(self):
        project_id = uuidutils.generate_uuid()
        lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1',
                                        project_id=project_id)
        lb1_id = lb1.get('loadbalancer').get('id')
        self.set_lb_status(lb1_id)
        self.create_pool(
            lb1_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        self.create_pool(
            lb1_id, constants.PROTOCOL_HTTPS,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_TCP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               None):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': None}
            with mock.patch(
"oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): result = self.get(self.POOLS_PATH, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, result) def test_get_all_non_admin_global_observer(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) pool1 = self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) self.set_lb_status(lb1_id) pool2 = self.create_pool( lb1_id, constants.PROTOCOL_HTTPS, constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) self.set_lb_status(lb1_id) pool3 = self.create_pool( lb1_id, constants.PROTOCOL_TCP, constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) self.set_lb_status(lb1_id) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_global_observer'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): pools = self.get(self.POOLS_PATH).json.get(self.root_tag_list) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(3, len(pools)) pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools] self.assertIn((pool1.get('id'), pool1.get('protocol')), pool_id_protocols) self.assertIn((pool2.get('id'), pool2.get('protocol')), pool_id_protocols) self.assertIn((pool3.get('id'), pool3.get('protocol')), pool_id_protocols) def test_get_all_not_authorized(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) self.create_pool( lb1_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) self.set_lb_status(lb1_id) self.create_pool( lb1_id, constants.PROTOCOL_HTTPS, constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) self.set_lb_status(lb1_id) self.create_pool( lb1_id, constants.PROTOCOL_TCP, constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) self.set_lb_status(lb1_id) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): pools = self.get(self.POOLS_PATH, status=403).json self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, pools) def test_get_by_project_id(self): project1_id = uuidutils.generate_uuid() project2_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', project_id=project1_id) lb1_id = lb1.get('loadbalancer').get('id') self.set_lb_status(lb1_id) lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', project_id=project2_id) lb2_id = lb2.get('loadbalancer').get('id') 
        self.set_lb_status(lb2_id)
        pool1 = self.create_pool(
            lb1_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        pool2 = self.create_pool(
            lb1_id, constants.PROTOCOL_HTTPS,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb1_id)
        pool3 = self.create_pool(
            lb2_id, constants.PROTOCOL_TCP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.set_lb_status(lb2_id)
        pools = self.get(
            self.POOLS_PATH,
            params={'project_id': project1_id}).json.get(self.root_tag_list)
        self.assertEqual(2, len(pools))
        pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools]
        self.assertIn((pool1.get('id'), pool1.get('protocol')),
                      pool_id_protocols)
        self.assertIn((pool2.get('id'), pool2.get('protocol')),
                      pool_id_protocols)
        pools = self.get(
            self.POOLS_PATH,
            params={'project_id': project2_id}).json.get(self.root_tag_list)
        self.assertEqual(1, len(pools))
        pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools]
        self.assertIn((pool3.get('id'), pool3.get('protocol')),
                      pool_id_protocols)

    def test_get_all_with_listener(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        response = self.get(self.POOLS_PATH).json.get(self.root_tag_list)
        self.assertIsInstance(response, list)
        self.assertEqual(1, len(response))
        self.assertEqual(api_pool.get('id'), response[0].get('id'))

    def test_get_all_sorted(self):
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool1')
        self.set_lb_status(lb_id=self.lb_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool2')
        self.set_lb_status(lb_id=self.lb_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool3')
        self.set_lb_status(lb_id=self.lb_id)
        response = self.get(self.POOLS_PATH, params={'sort': 'name:desc'})
        pools_desc = response.json.get(self.root_tag_list)
        response = self.get(self.POOLS_PATH, params={'sort': 'name:asc'})
        pools_asc = response.json.get(self.root_tag_list)
        self.assertEqual(3, len(pools_desc))
        self.assertEqual(3, len(pools_asc))
        pool_id_names_desc = [(pool.get('id'), pool.get('name'))
                              for pool in pools_desc]
        pool_id_names_asc = [(pool.get('id'), pool.get('name'))
                             for pool in pools_asc]
        self.assertEqual(pool_id_names_asc,
                         list(reversed(pool_id_names_desc)))
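    # NOTE: List pagination in this API is marker/limit based: each page
    # carries 'next'/'previous' links derived from the first and last
    # objects returned, which is what the three slices below assert.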
    def test_get_all_limited(self):
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool1')
        self.set_lb_status(lb_id=self.lb_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool2')
        self.set_lb_status(lb_id=self.lb_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool3')
        self.set_lb_status(lb_id=self.lb_id)
        # First two -- should have 'next' link
        first_two = self.get(self.POOLS_PATH, params={'limit': 2}).json
        objs = first_two[self.root_tag_list]
        links = first_two[self.root_tag_links]
        self.assertEqual(2, len(objs))
        self.assertEqual(1, len(links))
        self.assertEqual('next', links[0]['rel'])
        # Third + off the end -- should have previous link
        third = self.get(self.POOLS_PATH, params={
            'limit': 2,
            'marker': first_two[self.root_tag_list][1]['id']}).json
        objs = third[self.root_tag_list]
        links = third[self.root_tag_links]
        self.assertEqual(1, len(objs))
        self.assertEqual(1, len(links))
        self.assertEqual('previous', links[0]['rel'])
        # Middle -- should have both links
        middle = self.get(self.POOLS_PATH, params={
            'limit': 1,
            'marker': first_two[self.root_tag_list][0]['id']}).json
        objs = middle[self.root_tag_list]
        links = middle[self.root_tag_links]
        self.assertEqual(1, len(objs))
        self.assertEqual(2, len(links))
        # assertItemsEqual is Python 2 only; assertCountEqual is the
        # Python 3 equivalent.
        self.assertCountEqual(['previous', 'next'],
                              [link['rel'] for link in links])

    def test_get_all_fields_filter(self):
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool1')
        self.set_lb_status(lb_id=self.lb_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool2')
        self.set_lb_status(lb_id=self.lb_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool3')
        self.set_lb_status(lb_id=self.lb_id)
        pools = self.get(self.POOLS_PATH, params={
            'fields': ['id', 'project_id']}).json
        for pool in pools['pools']:
            self.assertIn(u'id', pool)
            self.assertIn(u'project_id', pool)
            self.assertNotIn(u'description', pool)

    def test_get_one_fields_filter(self):
        pool1 = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool1').get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        pool = self.get(
            self.POOL_PATH.format(pool_id=pool1.get('id')),
            params={'fields': ['id', 'project_id']}).json.get(self.root_tag)
        self.assertIn(u'id', pool)
        self.assertIn(u'project_id', pool)
        self.assertNotIn(u'description', pool)

    def test_get_all_filter(self):
        po1 = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool1').get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        hm = self.create_health_monitor(po1['id'],
                                        constants.HEALTH_MONITOR_HTTP,
                                        1, 1, 1, 1).get('healthmonitor')
        self.set_lb_status(lb_id=self.lb_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool2').get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool3').get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        pools = self.get(self.POOLS_PATH, params={
            'id': po1['id'], 'healthmonitor_id': hm['id']}).json
        self.assertEqual(1, len(pools['pools']))
        self.assertEqual(po1['id'], pools['pools'][0]['id'])
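    # NOTE: For list filtering, 'tags' requires every listed tag (AND),
    # 'tags-any' requires at least one of them (OR), and the 'not-'
    # variants invert those checks; the combinations below pin that
    # behaviour down.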
    def test_get_all_tags_filter(self):
        po1 = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool1',
            tags=['test_tag1', 'test_tag2']).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        po2 = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool2',
            tags=['test_tag2', 'test_tag3']).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        po3 = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            name='pool3',
            tags=['test_tag4', 'test_tag5']).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        pos = self.get(
            self.POOLS_PATH,
            params={'tags': 'test_tag2'}).json.get(self.root_tag_list)
        self.assertIsInstance(pos, list)
        self.assertEqual(2, len(pos))
        self.assertEqual([po1.get('id'), po2.get('id')],
                         [po.get('id') for po in pos])
        pos = self.get(
            self.POOLS_PATH,
            params={'tags': ['test_tag2',
                             'test_tag3']}).json.get(self.root_tag_list)
        self.assertIsInstance(pos, list)
        self.assertEqual(1, len(pos))
        self.assertEqual([po2.get('id')],
                         [po.get('id') for po in pos])
        pos = self.get(
            self.POOLS_PATH,
            params={'tags-any': 'test_tag2'}).json.get(self.root_tag_list)
        self.assertIsInstance(pos, list)
        self.assertEqual(2, len(pos))
        self.assertEqual([po1.get('id'), po2.get('id')],
                         [po.get('id') for po in pos])
        pos = self.get(
            self.POOLS_PATH,
            params={'not-tags': 'test_tag2'}).json.get(self.root_tag_list)
        self.assertIsInstance(pos, list)
        self.assertEqual(1, len(pos))
        self.assertEqual([po3.get('id')],
                         [po.get('id') for po in pos])
        pos = self.get(
            self.POOLS_PATH,
            params={'not-tags-any': ['test_tag2',
                                     'test_tag4']}).json.get(
            self.root_tag_list)
        self.assertIsInstance(pos, list)
        self.assertEqual(0, len(pos))
        pos = self.get(
            self.POOLS_PATH,
            params={'tags': 'test_tag2',
                    'tags-any': ['test_tag1',
                                 'test_tag3']}).json.get(self.root_tag_list)
        self.assertIsInstance(pos, list)
        self.assertEqual(2, len(pos))
        self.assertEqual([po1.get('id'), po2.get('id')],
                         [po.get('id') for po in pos])
        pos = self.get(
            self.POOLS_PATH,
            params={'tags': 'test_tag2',
                    'not-tags': 'test_tag2'}).json.get(self.root_tag_list)
        self.assertIsInstance(pos, list)
        self.assertEqual(0, len(pos))

    def test_empty_get_all(self):
        response = self.get(self.POOLS_PATH).json.get(self.root_tag_list)
        self.assertIsInstance(response, list)
        self.assertEqual(0, len(response))

    def test_create(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            tags=['test_tag']).get(self.root_tag)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)
        self.set_lb_status(self.lb_id)
        self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol'))
        self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
                         api_pool.get('lb_algorithm'))
        self.assertEqual(['test_tag'], api_pool['tags'])
        self.assertIsNotNone(api_pool.get('created_at'))
        self.assertIsNone(api_pool.get('updated_at'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'))
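    # NOTE: A freshly created pool sits in PENDING_CREATE/OFFLINE (with the
    # parent load balancer and listener in PENDING_UPDATE) until the worker
    # finishes; set_lb_status() stands in for that completion here, flipping
    # the whole tree back to ACTIVE/ONLINE.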
    def test_create_authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                api_pool = self.create_pool(
                    self.lb_id, constants.PROTOCOL_HTTP,
                    constants.LB_ALGORITHM_ROUND_ROBIN,
                    listener_id=self.listener_id).get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)
        self.set_lb_status(self.lb_id)
        self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol'))
        self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
                         api_pool.get('lb_algorithm'))
        self.assertIsNotNone(api_pool.get('created_at'))
        self.assertIsNone(api_pool.get('updated_at'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'))

    def test_create_not_authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            api_pool = self.create_pool(
                self.lb_id, constants.PROTOCOL_HTTP,
                constants.LB_ALGORITHM_ROUND_ROBIN,
                listener_id=self.listener_id, status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_pool)

    def test_create_with_proxy_protocol(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_PROXY,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)
        self.set_lb_status(self.lb_id)
        self.assertEqual(constants.PROTOCOL_PROXY, api_pool.get('protocol'))
        self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
                         api_pool.get('lb_algorithm'))
        self.assertIsNotNone(api_pool.get('created_at'))
        self.assertIsNone(api_pool.get('updated_at'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'))

    def test_create_sans_listener(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag)
        self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol'))
        self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
                         api_pool.get('lb_algorithm'))
        # Make sure listener status is unchanged, but LB status is changed.
        # LB should still be locked even with pool and subordinate object
        # updates.
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.ACTIVE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)

    def test_create_sans_loadbalancer_id(self):
        api_pool = self.create_pool(
            None, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol'))
        self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
                         api_pool.get('lb_algorithm'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)

    def test_create_with_listener_id_in_pool_dict(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)
        self.set_lb_status(self.lb_id)
        self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol'))
        self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
                         api_pool.get('lb_algorithm'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'))

    def test_create_with_project_id(self):
        optionals = {
            'listener_id': self.listener_id,
            'project_id': self.project_id}
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            **optionals).get(self.root_tag)
        self.assertEqual(self.project_id, api_pool.get('project_id'))

    def test_create_udp_case_source_ip(self):
        sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP,
              "persistence_timeout": 3,
              "persistence_granularity": '255.255.255.0'}
        api_pool = self.create_pool(
            None, constants.PROTOCOL_UDP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.udp_listener_id,
            session_persistence=sp).get(self.root_tag)
        self.assertEqual(constants.PROTOCOL_UDP, api_pool.get('protocol'))
        self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN,
                         api_pool.get('lb_algorithm'))
        self.assertEqual(constants.SESSION_PERSISTENCE_SOURCE_IP,
                         api_pool.get('session_persistence')['type'])
        self.assertEqual(3, api_pool.get(
            'session_persistence')['persistence_timeout'])
        self.assertEqual('255.255.255.0', api_pool.get(
            'session_persistence')['persistence_granularity'])
        self.assertIsNone(api_pool.get(
            'session_persistence')['cookie_name'])
        self.assert_correct_status(
            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)
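    # NOTE: The next group of tests covers the pool's backend re-encryption
    # options: tls_enabled switches member-side traffic to TLS,
    # tls_container_ref supplies a client certificate, and
    # ca_tls_container_ref plus crl_container_ref control how member
    # certificates are validated.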
    def test_create_with_tls_enabled_only(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            tls_enabled=True).get(self.root_tag)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)
        self.set_lb_status(self.lb_id)
        self.assertTrue(api_pool.get('tls_enabled'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'))

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_create_with_tls_container_ref(self, mock_cert_data):
        tls_container_ref = uuidutils.generate_uuid()
        pool_cert = data_models.TLSContainer(certificate='pool cert')
        mock_cert_data.return_value = {'tls_cert': pool_cert,
                                       'sni_certs': [],
                                       'client_ca_cert': None}
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            tls_container_ref=tls_container_ref).get(self.root_tag)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)
        self.set_lb_status(self.lb_id)
        self.assertEqual(tls_container_ref, api_pool.get('tls_container_ref'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'))

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_create_with_ca_and_crl(self, mock_cert_data):
        self.cert_manager_mock().get_secret.side_effect = [
            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
        ca_tls_container_ref = uuidutils.generate_uuid()
        crl_container_ref = uuidutils.generate_uuid()
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            ca_tls_container_ref=ca_tls_container_ref,
            crl_container_ref=crl_container_ref).get(self.root_tag)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_CREATE,
            pool_op_status=constants.OFFLINE)
        self.set_lb_status(self.lb_id)
        self.assertEqual(ca_tls_container_ref,
                         api_pool.get('ca_tls_container_ref'))
        self.assertEqual(crl_container_ref, api_pool.get('crl_container_ref'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'))

    def test_create_with_bad_tls_container_ref(self):
        tls_container_ref = uuidutils.generate_uuid()
        self.cert_manager_mock().get_cert.side_effect = [Exception(
            "bad cert")]
        self.cert_manager_mock().get_secret.side_effect = [Exception(
            "bad secret")]
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            tls_container_ref=tls_container_ref, status=400)
        self.assertIn(tls_container_ref, api_pool['faultstring'])

    def test_create_with_bad_ca_tls_container_ref(self):
        ca_tls_container_ref = uuidutils.generate_uuid()
        self.cert_manager_mock().get_cert.side_effect = [Exception(
            "bad ca cert")]
        self.cert_manager_mock().get_secret.side_effect = [Exception(
            "bad ca secret")]
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            ca_tls_container_ref=ca_tls_container_ref, status=400)
        self.assertIn(ca_tls_container_ref, api_pool['faultstring'])

    def test_create_with_unreachable_crl(self):
        ca_tls_container_ref = uuidutils.generate_uuid()
        crl_container_ref = uuidutils.generate_uuid()
        self.cert_manager_mock().get_cert.side_effect = [
            'cert 1',
            Exception('unknown/bad cert')]
        self.cert_manager_mock().get_secret.side_effect = [Exception(
            'bad secret')]
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            ca_tls_container_ref=ca_tls_container_ref,
            crl_container_ref=crl_container_ref, status=400)
        self.assertIn(crl_container_ref, api_pool['faultstring'])

    def test_create_with_crl_only(self):
        crl_container_ref = uuidutils.generate_uuid()
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            crl_container_ref=crl_container_ref, status=400)
        self.assertIn(
            'A CA certificate reference is required to specify a '
            'revocation list.', api_pool['faultstring'])

    def test_negative_create_udp_case(self):
        # Error: create a pool with the UDP protocol but a non-UDP session
        # persistence type.
        sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE,
              "cookie_name": 'test-cookie-name'}
        req_dict = {
            'listener_id': self.udp_listener_id,
            'protocol': constants.PROTOCOL_UDP,
            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
            'session_persistence': sp}
        expect_error_msg = (
            "Validation failure: Cookie names are not "
            "supported for %s pools.") % constants.PROTOCOL_UDP
        res = self.post(self.POOLS_PATH, self._build_body(req_dict),
                        status=400, expect_errors=True)
        self.assertEqual(expect_error_msg, res.json['faultstring'])
        self.assert_correct_status(
            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)

        # Error: create a UDP pool with any of the non-UDP session
        # persistence types.
        sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP,
              "persistence_timeout": 3,
              "persistence_granularity": '255.255.255.0'}
        req_dict = {
            'listener_id': self.udp_listener_id,
            'protocol': constants.PROTOCOL_UDP,
            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
            'session_persistence': None}
        for type in [constants.SESSION_PERSISTENCE_HTTP_COOKIE,
                     constants.SESSION_PERSISTENCE_APP_COOKIE]:
            expect_error_msg = ("Validation failure: Session persistence of "
                                "type %s is not supported for %s protocol "
                                "pools.") % (type, constants.PROTOCOL_UDP)
            sp.update({'type': type})
            req_dict['session_persistence'] = sp
            res = self.post(self.POOLS_PATH, self._build_body(req_dict),
                            status=400, expect_errors=True)
            self.assertEqual(expect_error_msg, res.json['faultstring'])
            self.assert_correct_status(
                lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)

        # Error: create a pool with SOURCE_IP session persistence and
        # extra, unsupported options.
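        # (SOURCE_IP persistence on UDP pools accepts only 'type',
        # 'persistence_timeout' and 'persistence_granularity'; the
        # 'cookie_name' added below is what triggers the 400.)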
sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, "persistence_timeout": 3, "persistence_granularity": '255.255.255.0', "cookie_name": 'test-cookie-name'} req_dict = { 'listener_id': self.udp_listener_id, 'protocol': constants.PROTOCOL_UDP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'session_persistence': sp} expect_error_msg = ( "Validation failure: session_persistence %s type for %s " "protocol only accepts: type, persistence_timeout, " "persistence_granularity.") % ( constants.SESSION_PERSISTENCE_SOURCE_IP, constants.PROTOCOL_UDP) res = self.post(self.POOLS_PATH, self._build_body(req_dict), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) # Error create non-udp pool with udp session persistence sps = [{"type": constants.SESSION_PERSISTENCE_SOURCE_IP, "persistence_timeout": 3, "persistence_granularity": '255.255.255.0'}, {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "persistence_timeout": 3, "persistence_granularity": '255.255.255.0'}] req_dict = { 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN} expect_error_msg = ("Validation failure: persistence_timeout and " "persistence_granularity is only for %s protocol " "pools.") % constants.PROTOCOL_UDP for s in sps: req_dict.update({'session_persistence': s}) res = self.post(self.POOLS_PATH, self._build_body(req_dict), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id) def test_bad_create(self): pool = {'name': 'test1'} self.post(self.POOLS_PATH, self._build_body(pool), status=400) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id) def test_create_with_listener_with_default_pool_id_set(self): self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id) self.set_lb_status(self.lb_id) lb_pool = { 'loadbalancer_id': self.lb_id, 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'project_id': self.project_id} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=409) def test_create_bad_protocol(self): lb_pool = { 'loadbalancer_id': self.lb_id, 'protocol': 'STUPID_PROTOCOL', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) @mock.patch('octavia.api.drivers.utils.call_provider') def test_create_with_bad_provider(self, mock_provider): mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') lb_pool = { 'loadbalancer_id': self.lb_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'project_id': self.project_id} response = self.post(self.POOLS_PATH, self._build_body(lb_pool), status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) def test_create_over_quota(self): self.start_quota_mock(data_models.Pool) lb_pool = { 'loadbalancer_id': self.lb_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'project_id': self.project_id} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=403) def test_update(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, 
            listener_id=self.listener_id,
            tags=['old_tag']).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        new_pool = {'name': 'new_name', 'tags': ['new_tag']}
        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 self._build_body(new_pool))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_UPDATE)
        self.set_lb_status(self.lb_id)
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        self.assertEqual('new_name', response.get('name'))
        self.assertEqual(['new_tag'], response['tags'])
        self.assertIsNotNone(response.get('created_at'))
        self.assertIsNotNone(response.get('updated_at'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=response.get('id'))

    def test_update_authorized(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        new_pool = {'name': 'new_name'}
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                         self._build_body(new_pool))
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_UPDATE)
        self.set_lb_status(self.lb_id)
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        self.assertEqual('new_name', response.get('name'))
        self.assertIsNotNone(response.get('created_at'))
        self.assertIsNotNone(response.get('updated_at'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=response.get('id'))

    def test_update_not_authorized(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        new_pool = {'name': 'new_name'}
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            api_pool = self.put(
                self.POOL_PATH.format(pool_id=api_pool.get('id')),
                self._build_body(new_pool), status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_pool.json)
        self.assert_correct_lb_status(self.lb_id, constants.ONLINE,
                                      constants.ACTIVE)
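    # NOTE: On update, session persistence sub-fields that are omitted from
    # the request body are re-read from the pool's existing record in the
    # database, which is the behaviour the next test locks in.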
    def test_update_get_session_persistence_from_db_if_no_request(self):
        sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP,
              "persistence_timeout": 3,
              "persistence_granularity": '255.255.255.0'}
        optionals = {"listener_id": self.udp_listener_id,
                     "session_persistence": sp}
        api_pool = self.create_pool(
            None, constants.PROTOCOL_UDP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            **optionals).get(self.root_tag)
        self.set_lb_status(lb_id=self.udp_lb_id)
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        sess_p = response.get('session_persistence')
        ty = sess_p.pop('type')
        sess_p['persistence_timeout'] = 4
        sess_p['persistence_granularity'] = "255.255.0.0"
        new_pool = {'session_persistence': sess_p}
        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 self._build_body(new_pool))
        sess_p['type'] = ty
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        self.assertEqual(sess_p, response.get('session_persistence'))
        self.assert_correct_status(
            listener_id=self.udp_listener_id,
            pool_id=api_pool.get('id'),
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_UPDATE)

    def test_update_udp_case_source_ip(self):
        sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP,
              "persistence_timeout": 3,
              "persistence_granularity": '255.255.255.0'}
        optionals = {"listener_id": self.udp_listener_id,
                     "session_persistence": sp}
        api_pool = self.create_pool(
            None, constants.PROTOCOL_UDP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            **optionals).get(self.root_tag)
        self.set_lb_status(lb_id=self.udp_lb_id)
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        sess_p = response.get('session_persistence')
        sess_p['persistence_timeout'] = 4
        sess_p['persistence_granularity'] = "255.255.0.0"
        new_pool = {'session_persistence': sess_p}
        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 self._build_body(new_pool))
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        self.assertEqual(sess_p, response.get('session_persistence'))
        self.assert_correct_status(
            listener_id=self.udp_listener_id,
            pool_id=api_pool.get('id'),
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_UPDATE)
        self.set_lb_status(self.udp_lb_id)
        self.set_object_status(self.pool_repo, api_pool.get('id'))

        # Negative cases
        # Error when updating a pool with a non-UDP type and a cookie_name.
        expect_error_msg = (
            "Validation failure: Cookie names are not supported for %s"
            " pools.") % constants.PROTOCOL_UDP
        sess_p['type'] = constants.SESSION_PERSISTENCE_HTTP_COOKIE
        sess_p['cookie_name'] = 'test-cookie-name'
        new_pool = {'session_persistence': sess_p}
        res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                       self._build_body(new_pool), status=400,
                       expect_errors=True)
        self.assertEqual(expect_error_msg, res.json['faultstring'])
        self.assert_correct_status(
            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)

        # Error when updating a pool with the SOURCE_IP type and extra
        # options.
        expect_error_msg = (
            "Validation failure: session_persistence %s type for %s protocol "
            "only accepts: type, persistence_timeout, "
            "persistence_granularity.") % (
            constants.SESSION_PERSISTENCE_SOURCE_IP, constants.PROTOCOL_UDP)
        sess_p['type'] = constants.SESSION_PERSISTENCE_SOURCE_IP
        sess_p['cookie_name'] = 'test-cookie-name'
        sess_p['persistence_timeout'] = 4
        sess_p['persistence_granularity'] = "255.255.0.0"
        res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                       self._build_body(new_pool), status=400,
                       expect_errors=True)
        self.assertEqual(expect_error_msg, res.json['faultstring'])
        self.assert_correct_status(
            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)

        # Error when updating a pool with a non-UDP session persistence
        # type.
        sess_p['cookie_name'] = None
        for ty in [constants.SESSION_PERSISTENCE_APP_COOKIE,
                   constants.SESSION_PERSISTENCE_HTTP_COOKIE]:
            expect_error_msg = ("Validation failure: Session persistence of "
                                "type %s is not supported for %s protocol "
                                "pools.") % (ty, constants.PROTOCOL_UDP)
            sess_p['type'] = ty
            res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                           self._build_body(new_pool), status=400,
                           expect_errors=True)
            self.assertEqual(expect_error_msg, res.json['faultstring'])
            self.assert_correct_status(
                lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)

    def test_update_with_tls_enabled_only(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        self.assertFalse(api_pool['tls_enabled'])
        new_pool = {'tls_enabled': True}
        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 self._build_body(new_pool))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_UPDATE)
        self.set_lb_status(self.lb_id)
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        self.assertTrue(response.get('tls_enabled'))
        self.assertIsNotNone(response.get('created_at'))
        self.assertIsNotNone(response.get('updated_at'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=response.get('id'))

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_update_with_tls_enabled_only_on_pool_certs_exist(
            self, mock_cert_data):
        tls_container_ref = uuidutils.generate_uuid()
        ca_tls_container_ref = uuidutils.generate_uuid()
        crl_container_ref = uuidutils.generate_uuid()
        pool_cert = data_models.TLSContainer(certificate='pool cert')
        mock_cert_data.return_value = {'tls_cert': pool_cert,
                                       'sni_certs': [],
                                       'client_ca_cert': None}
        self.cert_manager_mock().get_secret.side_effect = [
            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id,
            tls_container_ref=tls_container_ref,
            ca_tls_container_ref=ca_tls_container_ref,
            crl_container_ref=crl_container_ref).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        self.assertFalse(api_pool['tls_enabled'])
        new_pool = {'tls_enabled': True}
        self.cert_manager_mock().get_cert.reset_mock()
        self.cert_manager_mock().get_secret.reset_mock()
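        # Clearing the recorded calls from pool creation means any cert
        # manager access observed from this point on is attributable to the
        # update request alone.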
        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 self._build_body(new_pool))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_UPDATE)
        self.set_lb_status(self.lb_id)
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        self.assertTrue(response.get('tls_enabled'))
        self.assertIsNotNone(response.get('created_at'))
        self.assertIsNotNone(response.get('updated_at'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=response.get('id'))

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_update_with_tls_container_ref(self, mock_cert_data):
        tls_container_ref = uuidutils.generate_uuid()
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        new_pool = {'tls_container_ref': tls_container_ref}
        pool_cert = data_models.TLSContainer(certificate='pool cert')
        mock_cert_data.return_value = {'tls_cert': pool_cert,
                                       'sni_certs': [],
                                       'client_ca_cert': None}
        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 self._build_body(new_pool))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'),
            lb_prov_status=constants.PENDING_UPDATE,
            listener_prov_status=constants.PENDING_UPDATE,
            pool_prov_status=constants.PENDING_UPDATE)
        self.set_lb_status(self.lb_id)
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        self.assertEqual(tls_container_ref, response.get('tls_container_ref'))
        self.assertIsNotNone(response.get('created_at'))
        self.assertIsNotNone(response.get('updated_at'))
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=response.get('id'))

    def test_update_with_bad_tls_ref(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.set_lb_status(lb_id=self.lb_id)
        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
        api_pool['provisioning_status'] = constants.ACTIVE
        api_pool['operating_status'] = constants.ONLINE
        api_pool.pop('updated_at')
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        response.pop('updated_at')
        self.assertEqual(api_pool, response)
        tls_uuid = uuidutils.generate_uuid()
        self.pool_repo.update(db_api.get_session(), api_pool.get('id'),
                              tls_certificate_id=tls_uuid)
        update_data = {'name': 'pool2'}
        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 self._build_body(update_data))
        response = self.get(self.POOL_PATH.format(
            pool_id=api_pool.get('id'))).json.get(self.root_tag)
        self.assertEqual('pool2', response.get('name'))

    def test_bad_update(self):
        api_pool = self.create_pool(
            self.lb_id, constants.PROTOCOL_HTTP,
            constants.LB_ALGORITHM_ROUND_ROBIN,
            listener_id=self.listener_id).get(self.root_tag)
        self.set_lb_status(self.lb_id)
        new_pool = {'enabled': 'one'}
        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
                 self._build_body(new_pool), status=400)
        self.assert_correct_status(
            lb_id=self.lb_id, listener_id=self.listener_id,
            pool_id=api_pool.get('id'))

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_update_with_bad_provider(self, mock_provider):
        api_pool = self.create_pool(
self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) new_pool = {'name': 'new_name'} mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') response = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=500) self.assertIn('Provider \'bad_driver\' reports error: broken', response.json.get('faultstring')) def test_bad_update_non_udp_pool_with_udp_fields(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "persistence_timeout": 3, "persistence_granularity": '255.255.255.0'} self.set_lb_status(self.lb_id) new_pool = {'session_persistence': sp} expect_error_msg = ("Validation failure: persistence_timeout and " "persistence_granularity is only for %s " "protocol pools.") % constants.PROTOCOL_UDP res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status( lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) def test_update_with_bad_tls_container_ref(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) tls_container_ref = uuidutils.generate_uuid() new_pool = {'tls_container_ref': tls_container_ref} self.cert_manager_mock().get_cert.side_effect = [Exception( "bad cert")] self.cert_manager_mock().get_secret.side_effect = [Exception( "bad secret")] resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400).json self.assertIn(tls_container_ref, resp['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_ca_and_crl(self, mock_cert_data): self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] ca_tls_container_ref = uuidutils.generate_uuid() crl_container_ref = uuidutils.generate_uuid() api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) new_pool = {'ca_tls_container_ref': ca_tls_container_ref, 'crl_container_ref': crl_container_ref} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool)) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE) self.set_lb_status(self.lb_id) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) self.assertEqual(ca_tls_container_ref, response.get('ca_tls_container_ref')) self.assertEqual(crl_container_ref, response.get('crl_container_ref')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=response.get('id')) def test_update_with_bad_ca_tls_container_ref(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, 
listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) ca_tls_container_ref = uuidutils.generate_uuid() new_pool = {'ca_tls_container_ref': ca_tls_container_ref} self.cert_manager_mock().get_cert.side_effect = [Exception( "bad cert")] self.cert_manager_mock().get_secret.side_effect = [Exception( "bad secret")] resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400).json self.assertIn(ca_tls_container_ref, resp['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_crl(self, mock_cert_data): ca_tls_container_ref = uuidutils.generate_uuid() crl_container_ref = uuidutils.generate_uuid() self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id, ca_tls_container_ref=ca_tls_container_ref, crl_container_ref=crl_container_ref).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) new_crl_container_ref = uuidutils.generate_uuid() new_pool = {'crl_container_ref': new_crl_container_ref} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool)) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE) self.set_lb_status(self.lb_id) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) self.assertEqual(new_crl_container_ref, response.get('crl_container_ref')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=response.get('id')) def test_update_with_crl_only_negative_case(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) crl_container_ref = uuidutils.generate_uuid() new_pool = {'crl_container_ref': crl_container_ref} resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400).json self.assertIn( 'A CA reference is required to specify a certificate revocation ' 'list.', resp['faultstring']) def test_update_with_crl_only_none_ca(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) crl_container_ref = uuidutils.generate_uuid() new_pool = {'ca_tls_container_ref': None, 'crl_container_ref': crl_container_ref} resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400).json self.assertIn( 'A CA reference is required to specify a certificate revocation ' 'list.', resp['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_with_unreachable_crl(self, mock_cert_data): crl_container_ref = uuidutils.generate_uuid() new_crl_container_ref = uuidutils.generate_uuid() ca_tls_container_ref = uuidutils.generate_uuid() self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, 
sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id, ca_tls_container_ref=ca_tls_container_ref, crl_container_ref=crl_container_ref).get(self.root_tag) self.set_lb_status(self.lb_id) new_crl_container_ref = uuidutils.generate_uuid() new_pool = {'crl_container_ref': new_crl_container_ref} self.cert_manager_mock().get_secret.side_effect = [ exceptions.CertificateRetrievalException( ref=new_crl_container_ref)] resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400).json self.assertIn(new_crl_container_ref, resp['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_unset_ca_cert(self, mock_cert_data): self.cert_manager_mock().get_secret.return_value = ( sample_certs.X509_CA_CERT) ca_tls_uuid = uuidutils.generate_uuid() api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id, ca_tls_container_ref=ca_tls_uuid).get(self.root_tag) self.set_lb_status(self.lb_id) new_pool = {'ca_tls_container_ref': None} body = self._build_body(new_pool) listener_path = self.POOL_PATH.format( pool_id=api_pool['id']) api_pool = self.put(listener_path, body).json.get(self.root_tag) self.assertIsNone(api_pool.get('ca_tls_container_ref')) self.assertIsNone(api_pool.get('crl_container_ref')) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_unset_ca_cert_with_crl(self, mock_cert_data): self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] ca_tls_uuid = uuidutils.generate_uuid() crl_uuid = uuidutils.generate_uuid() api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id, ca_tls_container_ref=ca_tls_uuid, crl_container_ref=crl_uuid).get(self.root_tag) self.set_lb_status(self.lb_id) new_pool = {'ca_tls_container_ref': None} body = self._build_body(new_pool) listener_path = self.POOL_PATH.format( pool_id=api_pool['id']) response = self.put(listener_path, body, status=400).json self.assertIn('A CA reference cannot be removed when a certificate ' 'revocation list is present.', response['faultstring']) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_update_unset_crl(self, mock_cert_data): self.cert_manager_mock().get_secret.side_effect = [ sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] ca_tls_uuid = uuidutils.generate_uuid() crl_uuid = uuidutils.generate_uuid() api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id, ca_tls_container_ref=ca_tls_uuid, crl_container_ref=crl_uuid).get(self.root_tag) self.set_lb_status(self.lb_id) new_pool = {'crl_container_ref': None} body = self._build_body(new_pool) listener_path = self.POOL_PATH.format( pool_id=api_pool['id']) update_pool = self.put(listener_path, body).json.get(self.root_tag) self.assertEqual(api_pool.get('ca_tls_container_ref'), 
update_pool.get('ca_tls_container_ref')) self.assertIsNone(update_pool.get('crl_container_ref')) def test_delete(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_pool['provisioning_status'] = constants.ACTIVE api_pool['operating_status'] = constants.ONLINE api_pool.pop('updated_at') response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) response.pop('updated_at') self.assertEqual(api_pool, response) self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id'))) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_DELETE) # Problems with TLS certs should not block a delete def test_delete_with_bad_tls_ref(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_pool['provisioning_status'] = constants.ACTIVE api_pool['operating_status'] = constants.ONLINE api_pool.pop('updated_at') response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) response.pop('updated_at') self.assertEqual(api_pool, response) tls_uuid = uuidutils.generate_uuid() self.pool_repo.update(db_api.get_session(), api_pool.get('id'), tls_certificate_id=tls_uuid) self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id'))) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_DELETE) def test_delete_authorize(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_pool['provisioning_status'] = constants.ACTIVE api_pool['operating_status'] = constants.ONLINE api_pool.pop('updated_at') response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) response.pop('updated_at') self.assertEqual(api_pool, response) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', self.project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id'))) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, 
pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_DELETE) def test_delete_not_authorize(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_pool['provisioning_status'] = constants.ACTIVE api_pool['operating_status'] = constants.ONLINE api_pool.pop('updated_at') response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) response.pop('updated_at') self.assertEqual(api_pool, response) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.ACTIVE, listener_prov_status=constants.ACTIVE, pool_prov_status=constants.ACTIVE) def test_bad_delete(self): self.delete(self.POOL_PATH.format( pool_id=uuidutils.generate_uuid()), status=404) def test_delete_with_l7policy(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(self.lb_id) self.create_l7policy( self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=api_pool.get('id')) self.set_lb_status(self.lb_id) self.delete(self.POOL_PATH.format( pool_id=api_pool.get('id')), status=409) @mock.patch('octavia.api.drivers.utils.call_provider') def test_delete_with_bad_provider(self, mock_provider): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) # Set status to ACTIVE/ONLINE because set_lb_status did it in the db api_pool['provisioning_status'] = constants.ACTIVE api_pool['operating_status'] = constants.ONLINE response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) self.assertIsNone(api_pool.pop('updated_at')) self.assertIsNotNone(response.pop('updated_at')) self.assertEqual(api_pool, response) mock_provider.side_effect = exceptions.ProviderDriverError( prov='bad_driver', user_msg='broken') self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), status=500) def test_create_with_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "cookie_name": "test_cookie_name"} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_CREATE, pool_op_status=constants.OFFLINE) self.set_lb_status(self.lb_id) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) 
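        # The API echoes the session_persistence sub-object back on GET, so
        # the assertions below can verify both the persistence type and the
        # cookie name that were submitted at create time.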
sess_p = response.get('session_persistence') self.assertIsNotNone(sess_p) self.assertEqual(constants.SESSION_PERSISTENCE_APP_COOKIE, sess_p.get('type')) self.assertEqual('test_cookie_name', sess_p.get('cookie_name')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id')) def test_create_with_bad_session_persistence(self): sp = {"type": "persistence_type", "cookie_name": "test_cookie_name"} lb_pool = { 'loadbalancer_id': self.lb_id, 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'session_persistence': sp} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) def test_create_with_bad_SP_type_HTTP_cookie(self): sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, "cookie_name": "test_cookie_name"} lb_pool = { 'loadbalancer_id': self.lb_id, 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'session_persistence': sp} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) def test_create_with_bad_SP_type_IP_cookie(self): sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, "cookie_name": "test_cookie_name"} lb_pool = { 'loadbalancer_id': self.lb_id, 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'session_persistence': sp} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) def test_create_with_bad_SP_cookie_name(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "cookie_name": "b@d_cookie_name"} lb_pool = { 'loadbalancer_id': self.lb_id, 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'session_persistence': sp} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) def test_create_with_missing_cookie_name(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE} lb_pool = { 'loadbalancer_id': self.lb_id, 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'session_persistence': sp} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) def test_add_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "cookie_name": "test_cookie_name", 'persistence_granularity': None, 'persistence_timeout': None} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) new_pool = {'session_persistence': sp} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool)) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) self.assertEqual(sp, response.get('session_persistence')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE) def test_update_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "cookie_name": "test_cookie_name"} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) response = 
self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) sess_p = response.get('session_persistence') sess_p['cookie_name'] = None sess_p['type'] = constants.SESSION_PERSISTENCE_SOURCE_IP new_pool = {'session_persistence': sess_p} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool)) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) self.assertEqual(sess_p, response.get('session_persistence')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE) def test_update_preserve_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "cookie_name": "test_cookie_name", 'persistence_granularity': None, 'persistence_timeout': None} optionals = {"listener_id": self.listener_id, "name": "name", "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) new_pool = {'name': 'update_name'} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool)) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) self.assertEqual(sp, response.get('session_persistence')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE) def test_update_bad_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "cookie_name": "test_cookie_name"} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) sess_p = response.get('session_persistence') sess_p['type'] = 'fake_type' new_pool = {'session_persistence': sess_p} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400) def test_update_with_bad_SP_type_HTTP_cookie(self): sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) sess_p = response.get('session_persistence') sess_p['type'] = constants.SESSION_PERSISTENCE_HTTP_COOKIE sess_p['cookie_name'] = 'test_cookie_name' new_pool = {'session_persistence': sess_p} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400) def test_update_with_bad_SP_type_IP_cookie(self): sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) response = self.get(self.POOL_PATH.format( 
pool_id=api_pool.get('id'))).json.get(self.root_tag) sess_p = response.get('session_persistence') sess_p['type'] = constants.SESSION_PERSISTENCE_SOURCE_IP sess_p['cookie_name'] = 'test_cookie_name' new_pool = {'session_persistence': sess_p} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400) def test_update_with_bad_SP_cookie_name(self): sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) sess_p = response.get('session_persistence') sess_p['type'] = constants.SESSION_PERSISTENCE_APP_COOKIE sess_p['cookie_name'] = 'b@d_cookie_name' new_pool = {'session_persistence': sess_p} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400) def test_update_with_missing_SP_cookie_name(self): sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) response = self.get(self.POOL_PATH.format( pool_id=api_pool.get('id'))).json.get(self.root_tag) sess_p = response.get('session_persistence') sess_p['type'] = constants.SESSION_PERSISTENCE_APP_COOKIE new_pool = {'session_persistence': sess_p} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=400) def test_delete_with_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "cookie_name": "test_cookie_name"} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id'))) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_DELETE) def test_delete_session_persistence(self): sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, "cookie_name": "test_cookie_name"} optionals = {"listener_id": self.listener_id, "session_persistence": sp} api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, **optionals).get(self.root_tag) self.set_lb_status(lb_id=self.lb_id) new_sp = {"pool": {"session_persistence": None}} response = self.put(self.POOL_PATH.format( pool_id=api_pool.get('id')), new_sp).json.get(self.root_tag) self.assertIsNone(response.get('session_persistence')) self.assert_correct_status( lb_id=self.lb_id, listener_id=self.listener_id, pool_id=api_pool.get('id'), lb_prov_status=constants.PENDING_UPDATE, listener_prov_status=constants.PENDING_UPDATE, pool_prov_status=constants.PENDING_UPDATE) def test_create_when_lb_pending_update(self): self.put(self.LB_PATH.format(lb_id=self.lb_id), {'loadbalancer': {'name': 'test_name_change'}}) lb_pool = { 'loadbalancer_id': self.lb_id, 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': 
constants.LB_ALGORITHM_ROUND_ROBIN, 'project_id': self.project_id} self.post(self.POOLS_PATH, self._build_body(lb_pool), status=409) def test_update_when_lb_pending_update(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(self.lb_id) self.put(self.LB_PATH.format(lb_id=self.lb_id), {'loadbalancer': {'name': 'test_name_change'}}) new_pool = {'admin_state_up': False} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=409) def test_delete_when_lb_pending_update(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(self.lb_id) self.put(self.LB_PATH.format(lb_id=self.lb_id), {"loadbalancer": {'name': 'test_name_change'}}) self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), status=409) def test_create_when_lb_pending_delete(self): self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) new_pool = { 'loadbalancer_id': self.lb_id, 'listener_id': self.listener_id, 'protocol': constants.PROTOCOL_HTTP, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'project_id': self.project_id} self.post(self.POOLS_PATH, self._build_body(new_pool), status=409) def test_update_when_lb_pending_delete(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(self.lb_id) self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) new_pool = {'admin_state_up': False} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=409) def test_delete_when_lb_pending_delete(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) self.set_lb_status(self.lb_id) self.delete(self.LB_PATH.format(lb_id=self.lb_id), params={'cascade': "true"}) self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), status=409) def test_update_already_deleted(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) # This updates the child objects self.set_lb_status(self.lb_id, status=constants.DELETED) new_pool = {'admin_state_up': False} self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), self._build_body(new_pool), status=404) def test_delete_already_deleted(self): api_pool = self.create_pool( self.lb_id, constants.PROTOCOL_HTTP, constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id).get(self.root_tag) # This updates the child objects self.set_lb_status(self.lb_id, status=constants.DELETED) self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), status=404) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_valid_listener_pool_protocol(self, mock_cert_data): cert = data_models.TLSContainer(certificate='cert') lb_pool = { 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'project_id': self.project_id} mock_cert_data.return_value = {'sni_certs': [cert]} valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP port = 1 for listener_proto in valid_map: for pool_proto in valid_map[listener_proto]: port = port + 1 opts = {} if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: 
opts['sni_container_refs'] = [uuidutils.generate_uuid()] listener = self.create_listener( listener_proto, port, self.lb_id, **opts).get('listener') self.set_object_status(self.lb_repo, self.lb_id) if listener['default_pool_id'] is None: lb_pool['protocol'] = pool_proto lb_pool['listener_id'] = listener.get('id') self.post(self.POOLS_PATH, self._build_body(lb_pool), status=201) self.set_object_status(self.lb_repo, self.lb_id) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_invalid_listener_pool_protocol_map(self, mock_cert_data): cert = data_models.TLSContainer(certificate='cert') lb_pool = { 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'project_id': self.project_id} mock_cert_data.return_value = {'sni_certs': [cert]} invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP port = 1 for listener_proto in invalid_map: opts = {} if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: opts['sni_container_refs'] = [uuidutils.generate_uuid()] listener = self.create_listener( listener_proto, port, self.lb_id, **opts).get('listener') self.set_object_status(self.lb_repo, self.lb_id) port = port + 1 for pool_proto in invalid_map[listener_proto]: expect_error_msg = ("Validation failure: The pool protocol " "'%s' is invalid while the listener " "protocol is '%s'.") % (pool_proto, listener_proto) if listener['default_pool_id'] is None: lb_pool['protocol'] = pool_proto lb_pool['listener_id'] = listener.get('id') res = self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) self.assert_correct_status(lb_id=self.lb_id)

octavia-6.2.2/octavia/tests/functional/api/v2/test_provider.py

# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
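# Functional tests for the v2 provider endpoints: the provider list plus the
# flavor and availability zone capabilities reported by each enabled
# provider driver.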
from unittest import mock from octavia_lib.api.drivers import exceptions as lib_exceptions from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context from octavia.tests.functional.api.v2 import base class TestProvider(base.BaseAPITest): root_tag_list = 'providers' def setUp(self): super(TestProvider, self).setUp() def test_get_all_providers(self): octavia_dict = {u'description': u'Octavia driver.', u'name': u'octavia'} amphora_dict = {u'description': u'Amp driver.', u'name': u'amphora'} noop_dict = {u'description': u'NoOp driver.', u'name': u'noop_driver'} providers = self.get(self.PROVIDERS_PATH).json.get(self.root_tag_list) self.assertEqual(4, len(providers)) self.assertIn(octavia_dict, providers) self.assertIn(amphora_dict, providers) self.assertIn(noop_dict, providers) def test_get_all_providers_fields(self): octavia_dict = {u'name': u'octavia'} amphora_dict = {u'name': u'amphora'} noop_dict = {u'name': u'noop_driver'} providers = self.get(self.PROVIDERS_PATH, params={'fields': ['name']}) providers_list = providers.json.get(self.root_tag_list) self.assertEqual(4, len(providers_list)) self.assertIn(octavia_dict, providers_list) self.assertIn(amphora_dict, providers_list) self.assertIn(noop_dict, providers_list) class TestFlavorCapabilities(base.BaseAPITest): root_tag = 'flavor_capabilities' def setUp(self): super(TestFlavorCapabilities, self).setUp() def test_nonexistent_provider(self): self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='bogus'), status=400) def test_noop_provider(self): ref_capabilities = [{'description': 'The glance image tag to use for ' 'this load balancer.', 'name': 'amp_image_tag'}] result = self.get( self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver')) self.assertEqual(ref_capabilities, result.json.get(self.root_tag)) def test_amphora_driver(self): ref_description = ("The load balancer topology. One of: SINGLE - One " "amphora per load balancer. ACTIVE_STANDBY - Two " "amphora per load balancer.") result = self.get( self.FLAVOR_CAPABILITIES_PATH.format(provider='amphora')) capabilities = result.json.get(self.root_tag) capability_dict = [i for i in capabilities if i['name'] == 'loadbalancer_topology'][0] self.assertEqual(ref_description, capability_dict['description']) # Some drivers might not have implemented this yet, test that case @mock.patch('octavia.api.drivers.noop_driver.driver.NoopProviderDriver.' 
'get_supported_flavor_metadata') def test_not_implemented(self, mock_get_metadata): mock_get_metadata.side_effect = lib_exceptions.NotImplementedError() self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver'), status=501) def test_authorized(self): ref_capabilities = [{'description': 'The glance image tag to use ' 'for this load balancer.', 'name': 'amp_image_tag'}] self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): result = self.get(self.FLAVOR_CAPABILITIES_PATH.format( provider='noop_driver')) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(ref_capabilities, result.json.get(self.root_tag)) def test_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver'), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_amphora_driver_one_filter(self): ref_description = ("The compute driver flavor ID.") result = self.get( self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA), params={constants.NAME: 'compute_flavor'}) capabilities = result.json.get(self.root_tag) self.assertEqual(1, len(capabilities)) self.assertEqual(2, len(capabilities[0])) self.assertEqual(ref_description, capabilities[0][constants.DESCRIPTION]) def test_amphora_driver_two_filters(self): ref_description = ("The compute driver flavor ID.") result = self.get( self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA), params={constants.NAME: 'compute_flavor', constants.DESCRIPTION: ref_description}) capabilities = result.json.get(self.root_tag) self.assertEqual(1, len(capabilities)) self.assertEqual(ref_description, capabilities[0][constants.DESCRIPTION]) def test_amphora_driver_filter_no_match(self): result = self.get( self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA), params={constants.NAME: 'bogus'}) capabilities = result.json.get(self.root_tag) self.assertEqual([], capabilities) def test_amphora_driver_one_filter_one_field(self): result = self.get( self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA), params={constants.NAME: 'compute_flavor', constants.FIELDS: constants.NAME}) capabilities = result.json.get(self.root_tag) self.assertEqual(1, len(capabilities)) self.assertEqual(1, len(capabilities[0])) self.assertEqual('compute_flavor', capabilities[0][constants.NAME]) class TestAvailabilityZoneCapabilities(base.BaseAPITest): root_tag = 'availability_zone_capabilities' def setUp(self): super(TestAvailabilityZoneCapabilities, self).setUp() def test_nonexistent_provider(self): self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider='bogus'), status=400) def 
test_noop_provider(self): ref_capabilities = [{'description': 'The compute availability zone to ' 'use for this loadbalancer.', 'name': constants.COMPUTE_ZONE}] result = self.get( self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider='noop_driver')) self.assertEqual(ref_capabilities, result.json.get(self.root_tag)) def test_amphora_driver(self): ref_description1 = 'The compute availability zone.' ref_description2 = 'The management network ID for the amphora.' result = self.get( self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider='amphora')) capabilities = result.json.get(self.root_tag) capability_dict = [i for i in capabilities if i['name'] == constants.COMPUTE_ZONE][0] self.assertEqual(ref_description1, capability_dict['description']) capability_dict = [i for i in capabilities if i['name'] == constants.MANAGEMENT_NETWORK][0] self.assertEqual(ref_description2, capability_dict['description']) # Some drivers might not have implemented this yet, test that case @mock.patch('octavia.api.drivers.noop_driver.driver.NoopProviderDriver.' 'get_supported_availability_zone_metadata') def test_not_implemented(self, mock_get_metadata): mock_get_metadata.side_effect = lib_exceptions.NotImplementedError() self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider='noop_driver'), status=501) def test_authorized(self): ref_capabilities = [{'description': 'The compute availability zone to ' 'use for this loadbalancer.', 'name': constants.COMPUTE_ZONE}] self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() with mock.patch.object(octavia.common.context.Context, 'project_id', project_id): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': ['load-balancer_member'], 'user_id': None, 'is_admin': True, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): result = self.get( self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider='noop_driver')) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(ref_capabilities, result.json.get(self.root_tag)) def test_not_authorized(self): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider='noop_driver'), status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) def test_amphora_driver_one_filter(self): ref_description = 'The compute availability zone.' result = self.get( self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider=constants.AMPHORA), params={constants.NAME: constants.COMPUTE_ZONE}) capabilities = result.json.get(self.root_tag) self.assertEqual(1, len(capabilities)) self.assertEqual(2, len(capabilities[0])) self.assertEqual(ref_description, capabilities[0][constants.DESCRIPTION]) ref_description = 'The management network ID for the amphora.' 
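        # Query again with the second capability name to confirm that the
        # filter matches on name rather than simply returning the full
        # capability list.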
result = self.get( self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider=constants.AMPHORA), params={constants.NAME: constants.MANAGEMENT_NETWORK}) capabilities = result.json.get(self.root_tag) self.assertEqual(1, len(capabilities)) self.assertEqual(2, len(capabilities[0])) self.assertEqual(ref_description, capabilities[0][constants.DESCRIPTION]) def test_amphora_driver_two_filters(self): ref_description = 'The compute availability zone.' result = self.get( self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider=constants.AMPHORA), params={constants.NAME: constants.COMPUTE_ZONE, constants.DESCRIPTION: ref_description}) capabilities = result.json.get(self.root_tag) self.assertEqual(1, len(capabilities)) self.assertEqual(ref_description, capabilities[0][constants.DESCRIPTION]) def test_amphora_driver_filter_no_match(self): result = self.get( self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider=constants.AMPHORA), params={constants.NAME: 'bogus'}) capabilities = result.json.get(self.root_tag) self.assertEqual([], capabilities) def test_amphora_driver_one_filter_one_field(self): result = self.get( self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( provider=constants.AMPHORA), params={constants.NAME: constants.COMPUTE_ZONE, constants.FIELDS: constants.NAME}) capabilities = result.json.get(self.root_tag) self.assertEqual(1, len(capabilities)) self.assertEqual(1, len(capabilities[0])) self.assertEqual(constants.COMPUTE_ZONE, capabilities[0][constants.NAME])

octavia-6.2.2/octavia/tests/functional/api/v2/test_quotas.py

# Copyright 2016 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
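# Functional tests for the v2 quota endpoints, covering defaults pulled from
# configuration, per-project overrides, the deprecated attribute aliases
# (loadbalancer/healthmonitor), and the RBAC checks applied to each role.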
import random from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context from octavia.tests.functional.api.v2 import base CONF = cfg.CONF class TestQuotas(base.BaseAPITest): root_tag = 'quota' root_tag_list = 'quotas' root_tag_links = 'quotas_links' def setUp(self): super(TestQuotas, self).setUp() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config( group="quotas", default_load_balancer_quota=random.randrange( constants.QUOTA_UNLIMITED, 9000)) conf.config( group="quotas", default_listener_quota=random.randrange( constants.QUOTA_UNLIMITED, 9000)) conf.config( group="quotas", default_member_quota=random.randrange( constants.QUOTA_UNLIMITED, 9000)) # We need to make sure unlimited gets tested each pass conf.config(group="quotas", default_pool_quota=constants.QUOTA_UNLIMITED) conf.config( group="quotas", default_health_monitor_quota=random.randrange( constants.QUOTA_UNLIMITED, 9000)) self.project_id = uuidutils.generate_uuid() def _assert_quotas_equal(self, observed, expected=None): if not expected: expected = {'load_balancer': CONF.quotas.default_load_balancer_quota, 'listener': CONF.quotas.default_listener_quota, 'pool': CONF.quotas.default_pool_quota, 'health_monitor': CONF.quotas.default_health_monitor_quota, 'member': CONF.quotas.default_member_quota} self.assertEqual(expected['load_balancer'], observed['load_balancer']) self.assertEqual(expected['listener'], observed['listener']) self.assertEqual(expected['pool'], observed['pool']) self.assertEqual(expected['health_monitor'], observed['health_monitor']) self.assertEqual(expected['member'], observed['member']) def test_get_all_quotas_no_quotas(self): response = self.get(self.QUOTAS_PATH) quota_list = response.json self.assertEqual({'quotas': [], 'quotas_links': []}, quota_list) def test_get_all_quotas_with_quotas(self): project_id1 = uuidutils.generate_uuid() project_id2 = uuidutils.generate_uuid() quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, 'pool': 30, 'health_monitor': 30, 'member': 30} body1 = {'quota': quota1} self.put(quota_path1, body1, status=202) quota_path2 = self.QUOTA_PATH.format(project_id=project_id2) quota2 = {'load_balancer': 50, 'listener': 50, 'pool': 50, 'health_monitor': 50, 'member': 50} body2 = {'quota': quota2} self.put(quota_path2, body2, status=202) response = self.get(self.QUOTAS_PATH) quota_list = response.json quota1['project_id'] = quota1['tenant_id'] = project_id1 quota2['project_id'] = quota2['tenant_id'] = project_id2 # Expected deprecated names until T quota1['healthmonitor'] = quota1['health_monitor'] quota1['loadbalancer'] = quota1['load_balancer'] quota2['healthmonitor'] = quota2['health_monitor'] quota2['loadbalancer'] = quota2['load_balancer'] expected = {'quotas': [quota1, quota2], 'quotas_links': []} self.assertEqual(expected, quota_list) def test_deprecated_get_and_put_vars(self): project_id1 = uuidutils.generate_uuid() project_id2 = uuidutils.generate_uuid() quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, 'pool': 30, 'health_monitor': 30, 'member': 30} body1 = {'quota': quota1} self.put(quota_path1, body1, status=202) quota_path2 = self.QUOTA_PATH.format(project_id=project_id2) quota2 = {'loadbalancer': 50, 'listener': 50, 'pool': 50, 'healthmonitor': 50, 'member': 50} body2 = 
{'quota': quota2} self.put(quota_path2, body2, status=202) response = self.get(self.QUOTAS_PATH) quota_list = response.json quota1['project_id'] = quota1['tenant_id'] = project_id1 quota2['project_id'] = quota2['tenant_id'] = project_id2 # Expected deprecated names until T quota1['healthmonitor'] = quota1['health_monitor'] quota1['loadbalancer'] = quota1['load_balancer'] quota2['health_monitor'] = quota2['healthmonitor'] quota2['load_balancer'] = quota2['loadbalancer'] expected = {'quotas': [quota1, quota2], 'quotas_links': []} self.assertEqual(expected, quota_list) def test_get_all_not_Authorized(self): project_id1 = uuidutils.generate_uuid() project_id2 = uuidutils.generate_uuid() quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, 'pool': 30, 'health_monitor': 30, 'member': 30} body1 = {'quota': quota1} self.put(quota_path1, body1, status=202) quota_path2 = self.QUOTA_PATH.format(project_id=project_id2) quota2 = {'load_balancer': 50, 'listener': 50, 'pool': 50, 'health_monitor': 50, 'member': 50} body2 = {'quota': quota2} self.put(quota_path2, body2, status=202) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', uuidutils.generate_uuid()): response = self.get(self.QUOTAS_PATH, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_get_all_not_Authorized_no_role(self): project_id1 = uuidutils.generate_uuid() quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, 'pool': 30, 'health_monitor': 30, 'member': 30} body1 = {'quota': quota1} self.put(quota_path1, body1, status=202) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) with mock.patch.object(octavia.common.context.Context, 'project_id', project_id1): override_credentials = { 'service_user_id': None, 'user_domain_id': None, 'is_admin_project': True, 'service_project_domain_id': None, 'service_project_id': None, 'roles': [], 'user_id': None, 'is_admin': False, 'service_user_domain_id': None, 'project_domain_id': None, 'service_roles': [], 'project_id': self.project_id} with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): response = self.get(self.QUOTAS_PATH, status=403) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) def test_get_all_not_Authorized_bogus_role(self): project_id1 = uuidutils.generate_uuid() project_id2 = uuidutils.generate_uuid() quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, 'pool': 30, 'health_monitor': 30, 'member': 30} body1 = {'quota': quota1} self.put(quota_path1, body1, status=202) quota_path2 = self.QUOTA_PATH.format(project_id=project_id2) quota2 = {'load_balancer': 50, 'listener': 50, 'pool': 50, 'health_monitor': 50, 'member': 50} body2 = {'quota': quota2} self.put(quota_path2, body2, status=202) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = 
self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_bogus'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.get(self.QUOTAS_PATH, status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)

    def test_get_all_admin(self):
        project_id1 = uuidutils.generate_uuid()
        project_id2 = uuidutils.generate_uuid()
        project_id3 = uuidutils.generate_uuid()
        quota1 = self.create_quota(
            project_id=project_id1, lb_quota=1, member_quota=1
        ).get(self.root_tag)
        quota2 = self.create_quota(
            project_id=project_id2, lb_quota=2, member_quota=2
        ).get(self.root_tag)
        quota3 = self.create_quota(
            project_id=project_id3, lb_quota=3, member_quota=3
        ).get(self.root_tag)
        quotas = self.get(self.QUOTAS_PATH).json.get(self.root_tag_list)
        self.assertEqual(3, len(quotas))
        quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member'))
                                  for lb in quotas]
        self.assertIn((quota1.get('load_balancer'), quota1.get('member')),
                      quota_lb_member_quotas)
        self.assertIn((quota2.get('load_balancer'), quota2.get('member')),
                      quota_lb_member_quotas)
        self.assertIn((quota3.get('load_balancer'), quota3.get('member')),
                      quota_lb_member_quotas)

    def test_get_all_non_admin_global_observer(self):
        project_id1 = uuidutils.generate_uuid()
        project_id2 = uuidutils.generate_uuid()
        project_id3 = uuidutils.generate_uuid()
        quota1 = self.create_quota(
            project_id=project_id1, lb_quota=1, member_quota=1
        ).get(self.root_tag)
        quota2 = self.create_quota(
            project_id=project_id2, lb_quota=2, member_quota=2
        ).get(self.root_tag)
        quota3 = self.create_quota(
            project_id=project_id3, lb_quota=3, member_quota=3
        ).get(self.root_tag)

        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_global_observer'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                quotas = self.get(self.QUOTAS_PATH)
                quotas = quotas.json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(3, len(quotas))
        quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member'))
                                  for lb in quotas]
        self.assertIn((quota1.get('load_balancer'), quota1.get('member')),
                      quota_lb_member_quotas)
        self.assertIn((quota2.get('load_balancer'), quota2.get('member')),
                      quota_lb_member_quotas)
        self.assertIn((quota3.get('load_balancer'), quota3.get('member')),
                      quota_lb_member_quotas)

    def test_get_all_quota_admin(self):
        project_id1 = uuidutils.generate_uuid()
        project_id2 = uuidutils.generate_uuid()
        project_id3 = uuidutils.generate_uuid()
        quota1 = self.create_quota(
            project_id=project_id1, lb_quota=1, member_quota=1
        ).get(self.root_tag)
        quota2 = self.create_quota(
            project_id=project_id2, lb_quota=2, member_quota=2
        ).get(self.root_tag)
        quota3 = self.create_quota(
            project_id=project_id3, lb_quota=3, member_quota=3
        ).get(self.root_tag)

        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_quota_admin'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                quotas = self.get(self.QUOTAS_PATH)
                quotas = quotas.json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(3, len(quotas))
        quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member'))
                                  for lb in quotas]
        self.assertIn((quota1.get('load_balancer'), quota1.get('member')),
                      quota_lb_member_quotas)
        self.assertIn((quota2.get('load_balancer'), quota2.get('member')),
                      quota_lb_member_quotas)
        self.assertIn((quota3.get('load_balancer'), quota3.get('member')),
                      quota_lb_member_quotas)

    def test_get_all_non_admin(self):
        project1_id = uuidutils.generate_uuid()
        project2_id = uuidutils.generate_uuid()
        project3_id = uuidutils.generate_uuid()
        self.create_quota(
            project_id=project1_id, lb_quota=1, member_quota=1
        ).get(self.root_tag)
        self.create_quota(
            project_id=project2_id, lb_quota=2, member_quota=2
        ).get(self.root_tag)
        quota3 = self.create_quota(
            project_id=project3_id, lb_quota=3, member_quota=3
        ).get(self.root_tag)

        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project3_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': project3_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                quotas = self.get(self.QUOTAS_PATH)
                quotas = quotas.json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(1, len(quotas))
        quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member'))
                                  for lb in quotas]
        self.assertIn((quota3.get('load_balancer'), quota3.get('member')),
                      quota_lb_member_quotas)

    def test_get_all_non_admin_observer(self):
        project1_id = uuidutils.generate_uuid()
        project2_id = uuidutils.generate_uuid()
        project3_id = uuidutils.generate_uuid()
        self.create_quota(
            project_id=project1_id, lb_quota=1, member_quota=1
        ).get(self.root_tag)
        self.create_quota(
            project_id=project2_id, lb_quota=2, member_quota=2
        ).get(self.root_tag)
        quota3 = self.create_quota(
            project_id=project3_id, lb_quota=3, member_quota=3
        ).get(self.root_tag)

        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.KEYSTONE)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project3_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_observer'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': project3_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                quotas = self.get(self.QUOTAS_PATH)
                quotas = quotas.json.get(self.root_tag_list)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(1, len(quotas))
        quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member'))
                                  for lb in quotas]
        self.assertIn((quota3.get('load_balancer'), quota3.get('member')),
                      quota_lb_member_quotas)

    def test_get_by_project_id(self):
        project1_id = uuidutils.generate_uuid()
        project2_id = uuidutils.generate_uuid()
        quota1 = self.create_quota(
            project_id=project1_id, lb_quota=1, member_quota=1
        ).get(self.root_tag)
        quota2 = self.create_quota(
            project_id=project2_id, lb_quota=2, member_quota=2
        ).get(self.root_tag)
        quotas = self.get(
            self.QUOTA_PATH.format(project_id=project1_id)
        ).json.get(self.root_tag)
        self._assert_quotas_equal(quotas, quota1)
        quotas = self.get(
            self.QUOTA_PATH.format(project_id=project2_id)
        ).json.get(self.root_tag)
        self._assert_quotas_equal(quotas, quota2)

    def test_get_Authorized_member(self):
        self._test_get_Authorized('load-balancer_member')

    def test_get_Authorized_observer(self):
        self._test_get_Authorized('load-balancer_observer')

    def test_get_Authorized_global_observer(self):
        self._test_get_Authorized('load-balancer_global_observer')

    def test_get_Authorized_quota_admin(self):
        self._test_get_Authorized('load-balancer_quota_admin')

    def _test_get_Authorized(self, role):
        project1_id = uuidutils.generate_uuid()
        quota1 = self.create_quota(
            project_id=project1_id, lb_quota=1, member_quota=1
        ).get(self.root_tag)

        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project1_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': [role],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': project1_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                quotas = self.get(
                    self.QUOTA_PATH.format(project_id=project1_id)
                ).json.get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self._assert_quotas_equal(quotas, quota1)

    def test_get_not_Authorized(self):
        project1_id = uuidutils.generate_uuid()
        self.create_quota(
            project_id=project1_id, lb_quota=1, member_quota=1
        ).get(self.root_tag)

        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            quotas = self.get(self.QUOTA_PATH.format(project_id=project1_id),
                              status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(self.NOT_AUTHORIZED_BODY, quotas.json)

    def test_get_not_Authorized_bogus_role(self):
        project1_id = uuidutils.generate_uuid()
        self.create_quota(
            project_id=project1_id, lb_quota=1, member_quota=1
        ).get(self.root_tag)

        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project1_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer:bogus'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': project1_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                quotas = self.get(
                    self.QUOTA_PATH.format(project_id=project1_id),
                    status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(self.NOT_AUTHORIZED_BODY, quotas.json)

    def test_get_not_Authorized_no_role(self):
        project1_id = uuidutils.generate_uuid()
        self.create_quota(
            project_id=project1_id, lb_quota=1, member_quota=1
        ).get(self.root_tag)

        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project1_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': [],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': project1_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                quotas = self.get(
                    self.QUOTA_PATH.format(project_id=project1_id),
                    status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

        self.assertEqual(self.NOT_AUTHORIZED_BODY, quotas.json)

    def test_get_all_sorted(self):
        project1_id = uuidutils.generate_uuid()
        project2_id = uuidutils.generate_uuid()
        project3_id = uuidutils.generate_uuid()
        self.create_quota(
            project_id=project1_id, lb_quota=3, member_quota=8
        ).get(self.root_tag)
        self.create_quota(
            project_id=project2_id, lb_quota=2, member_quota=10
        ).get(self.root_tag)
        self.create_quota(
            project_id=project3_id, lb_quota=1, member_quota=9
        ).get(self.root_tag)
        response = self.get(self.QUOTAS_PATH,
                            params={'sort': 'load_balancer:desc'})
        quotas_desc = response.json.get(self.root_tag_list)
        response = self.get(self.QUOTAS_PATH,
                            params={'sort': 'load_balancer:asc'})
        quotas_asc = response.json.get(self.root_tag_list)

        self.assertEqual(3, len(quotas_desc))
        self.assertEqual(3, len(quotas_asc))

        quota_lb_member_desc = [(lb.get('load_balancer'), lb.get('member'))
                                for lb in quotas_desc]
        quota_lb_member_asc = [(lb.get('load_balancer'), lb.get('member'))
                               for lb in quotas_asc]
        self.assertEqual(quota_lb_member_asc,
                         list(reversed(quota_lb_member_desc)))

    def test_get_all_limited(self):
        self.skipTest("No idea how this should work yet")
        # TODO(rm_work): Figure out how to make this ... work
        project1_id = uuidutils.generate_uuid()
        project2_id = uuidutils.generate_uuid()
        project3_id = uuidutils.generate_uuid()
        self.create_quota(
            project_id=project1_id, lb_quota=3, member_quota=8
        ).get(self.root_tag)
        self.create_quota(
            project_id=project2_id, lb_quota=2, member_quota=10
        ).get(self.root_tag)
        self.create_quota(
            project_id=project3_id, lb_quota=1, member_quota=9
        ).get(self.root_tag)

        # First two -- should have 'next' link
        first_two = self.get(self.QUOTAS_PATH, params={'limit': 2}).json
        objs = first_two[self.root_tag_list]
        links = first_two[self.root_tag_links]
        self.assertEqual(2, len(objs))
        self.assertEqual(1, len(links))
        self.assertEqual('next', links[0]['rel'])

        # Third + off the end -- should have previous link
        third = self.get(self.QUOTAS_PATH, params={
            'limit': 2,
            'marker': first_two[self.root_tag_list][1]['id']}).json
        objs = third[self.root_tag_list]
        links = third[self.root_tag_links]
        self.assertEqual(1, len(objs))
        self.assertEqual(1, len(links))
        self.assertEqual('previous', links[0]['rel'])

        # Middle -- should have both links
        middle = self.get(self.QUOTAS_PATH, params={
            'limit': 1,
            'marker': first_two[self.root_tag_list][0]['id']}).json
        objs = middle[self.root_tag_list]
        links = middle[self.root_tag_links]
        self.assertEqual(1, len(objs))
        self.assertEqual(2, len(links))
        self.assertItemsEqual(['previous', 'next'],
                              [link['rel'] for link in links])

    def test_get_default_quotas(self):
        response = self.get(self.QUOTA_DEFAULT_PATH.format(
            project_id=self.project_id))
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'])

    def test_get_default_quotas_Authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.get(self.QUOTA_DEFAULT_PATH.format(
                    project_id=self.project_id))
                quota_dict = response.json
                self._assert_quotas_equal(quota_dict['quota'])
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

    def test_get_default_quotas_not_Authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            response = self.get(self.QUOTA_DEFAULT_PATH.format(
                project_id=self.project_id), status=403)
            self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

    def test_custom_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body, status=202)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'], expected=body['quota'])
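
    # A minimal sketch of the quota update the tests above and below drive,
    # as it might look outside the test harness.  OCTAVIA_ENDPOINT, TOKEN
    # and project_id are illustrative placeholders, not names defined in
    # this tree, and the path assumes the v2 quotas resource:
    #
    #     import requests
    #
    #     body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
    #                       'health_monitor': 30, 'member': 30}}
    #     resp = requests.put(
    #         OCTAVIA_ENDPOINT + '/v2.0/lbaas/quotas/' + project_id,
    #         json=body, headers={'X-Auth-Token': TOKEN})
    #     assert resp.status_code == 202  # the same 202 the tests expect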
    def test_custom_quotas_quota_admin(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_quota_admin'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.put(quota_path, body, status=202)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'], expected=body['quota'])

    def test_custom_quotas_not_Authorized_member(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.put(quota_path, body, status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)

    def test_custom_partial_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': None, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        expected_body = {'quota': {
            'load_balancer': 30,
            'listener': CONF.quotas.default_listener_quota,
            'pool': 30, 'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body, status=202)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'],
                                  expected=expected_body['quota'])

    def test_custom_missing_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        expected_body = {'quota': {
            'load_balancer': 30,
            'listener': CONF.quotas.default_listener_quota,
            'pool': 30, 'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body, status=202)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'],
                                  expected=expected_body['quota'])

    def test_delete_custom_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body, status=202)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'], expected=body['quota'])
        self.delete(quota_path, status=202)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'])

    def test_delete_custom_quotas_admin(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body, status=202)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'], expected=body['quota'])
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_quota_admin'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.delete(quota_path, status=202)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'])

    def test_delete_quotas_not_Authorized_member(self):
        quota_path = self.QUOTA_PATH.format(project_id=self.project_id)
        body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30,
                          'health_monitor': 30, 'member': 30}}
        self.put(quota_path, body, status=202)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'], expected=body['quota'])
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            override_credentials = {
                'service_user_id': None, 'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None, 'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None, 'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.delete(quota_path, status=403)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        response = self.get(quota_path)
        quota_dict = response.json
        self._assert_quotas_equal(quota_dict['quota'], expected=body['quota'])

    def test_delete_non_existent_custom_quotas(self):
        quota_path = self.QUOTA_PATH.format(project_id='bogus')
        self.delete(quota_path, status=404)
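
# The tests above repeat one fixture dance: save auth_strategy, switch to
# constants.TESTING, patch RequestContext.to_policy_values with a canned
# credential dict, then restore.  A sketch of a reusable context manager for
# that pattern -- hypothetical, not part of this tree; the credential dict
# here is trimmed relative to what the tests pass, so a real helper would
# forward the full set of keys:
#
#     import contextlib
#     from unittest import mock
#
#     @contextlib.contextmanager
#     def override_policy_role(conf, project_id, roles):
#         saved = conf.conf.api_settings.get('auth_strategy')
#         conf.config(group='api_settings',
#                     auth_strategy=constants.TESTING)
#         creds = {'roles': roles, 'project_id': project_id,
#                  'is_admin': False, 'service_roles': []}
#         try:
#             with mock.patch(
#                     "oslo_context.context.RequestContext"
#                     ".to_policy_values", return_value=creds):
#                 yield
#         finally:
#             conf.config(group='api_settings', auth_strategy=saved)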
# ---- octavia-6.2.2/octavia/tests/functional/db/__init__.py ----

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# ---- octavia-6.2.2/octavia/tests/functional/db/base.py ----

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import test_base

from octavia.common import config
from octavia.common import constants
from octavia.db import api as db_api
from octavia.db import base_models
from octavia.db import models


class OctaviaDBTestBase(test_base.DbTestCase):

    def setUp(self, connection_string='sqlite://'):
        super(OctaviaDBTestBase, self).setUp()
        # NOTE(blogan): doing this for now because using the engine and
        # session set up in the fixture for test_base.DbTestCase does not work
        # with the API functional tests.  Need to investigate more if this
        # becomes a problem
        conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
        conf.config(group="database", connection=connection_string)

        # We need to get our own Facade so that the file backed sqlite tests
        # don't use the _FACADE singleton.  Some tests will use in-memory
        # sqlite, some will use a file backed sqlite.
        if 'sqlite:///' in connection_string:
            facade = db_session.EngineFacade.from_config(
                cfg.CONF, sqlite_fk=True)
            engine = facade.get_engine()
            self.session = facade.get_session(expire_on_commit=True,
                                              autocommit=True)
        else:
            engine = db_api.get_engine()
            self.session = db_api.get_session()
        base_models.BASE.metadata.create_all(engine)
        self._seed_lookup_tables(self.session)

        def clear_tables():
            """Unregister all data models."""
            base_models.BASE.metadata.drop_all(engine)
            # If we created a file, clean it up too
            if 'sqlite:///' in connection_string:
                os.remove(connection_string.replace('sqlite:///', ''))
        self.addCleanup(clear_tables)

    def _seed_lookup_tables(self, session):
        self._seed_lookup_table(
            session, constants.SUPPORTED_PROVISIONING_STATUSES,
            models.ProvisioningStatus)
        self._seed_lookup_table(
            session, constants.SUPPORTED_HEALTH_MONITOR_TYPES,
            models.HealthMonitorType)
        self._seed_lookup_table(
            session, constants.SUPPORTED_LB_ALGORITHMS, models.Algorithm)
        self._seed_lookup_table(
            session, constants.SUPPORTED_PROTOCOLS, models.Protocol)
        self._seed_lookup_table(
            session, constants.SUPPORTED_OPERATING_STATUSES,
            models.OperatingStatus)
        self._seed_lookup_table(
            session, constants.SUPPORTED_SP_TYPES,
            models.SessionPersistenceType)
        self._seed_lookup_table(session, constants.SUPPORTED_AMPHORA_ROLES,
                                models.AmphoraRoles)
        self._seed_lookup_table(session, constants.SUPPORTED_LB_TOPOLOGIES,
                                models.LBTopology)
        self._seed_lookup_table(session, constants.SUPPORTED_VRRP_AUTH,
                                models.VRRPAuthMethod)
        self._seed_lookup_table(session, constants.SUPPORTED_L7RULE_TYPES,
                                models.L7RuleType)
        self._seed_lookup_table(session,
                                constants.SUPPORTED_L7RULE_COMPARE_TYPES,
                                models.L7RuleCompareType)
        self._seed_lookup_table(session, constants.SUPPORTED_L7POLICY_ACTIONS,
                                models.L7PolicyAction)
        self._seed_lookup_table(session, constants.SUPPORTED_CLIENT_AUTH_MODES,
                                models.ClientAuthenticationMode)

        # Add in the id='DELETED' placeholders
        deleted_flavor_profile = models.FlavorProfile(
            id=constants.NIL_UUID, name='DELETED-PLACEHOLDER',
            provider_name=constants.DELETED, flavor_data='{}')
        session.add(deleted_flavor_profile)
        session.flush()
        deleted_flavor = models.Flavor(
            id=constants.NIL_UUID, flavor_profile_id=constants.NIL_UUID,
            name='DELETED-PLACEHOLDER', enabled=False,
            description='Placeholder for DELETED LBs with DELETED flavors')
        session.add(deleted_flavor)
        session.flush()
        deleted_az_profile = models.AvailabilityZoneProfile(
            id=constants.NIL_UUID, name='DELETED-PLACEHOLDER',
            provider_name=constants.DELETED, availability_zone_data='{}')
        session.add(deleted_az_profile)
        session.flush()
        deleted_az = models.AvailabilityZone(
            availability_zone_profile_id=constants.NIL_UUID,
            name=constants.NIL_UUID, enabled=False,
            description='Placeholder for DELETED LBs with DELETED '
                        'availability zones')
        session.add(deleted_az)
        session.flush()

    def _seed_lookup_table(self, session, name_list, model_cls):
        for name in name_list:
            with session.begin():
                model = model_cls(name=name)
                session.add(model)
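
# OctaviaDBTestBase branches on 'sqlite:///' above so that file-backed runs
# get their own EngineFacade instead of the _FACADE singleton.  A sketch of
# a subclass opting into that path -- the class name and file path are
# hypothetical, used only for illustration:
#
#     class FileBackedDBTest(OctaviaDBTestBase):
#
#         def setUp(self):
#             # A real file triggers the EngineFacade branch, and
#             # clear_tables() removes the file again on cleanup.
#             super(FileBackedDBTest, self).setUp(
#                 connection_string='sqlite:////tmp/octavia-test.db')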
# ---- octavia-6.2.2/octavia/tests/functional/db/test_models.py ----

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

from oslo_utils import uuidutils

from octavia.common import constants
from octavia.common import data_models
from octavia.db import models
from octavia.tests.functional.db import base

from sqlalchemy.orm import collections


class ModelTestMixin(object):

    FAKE_IP = '10.0.0.1'
    FAKE_UUID_1 = uuidutils.generate_uuid()
    FAKE_UUID_2 = uuidutils.generate_uuid()
    FAKE_AZ = 'zone1'

    def _insert(self, session, model_cls, model_kwargs):
        with session.begin():
            model = model_cls(**model_kwargs)
            session.add(model)
        return model

    def create_flavor_profile(self, session, **overrides):
        kwargs = {'id': self.FAKE_UUID_1,
                  'name': 'fake_profile',
                  'provider_name': 'fake_provider',
                  'flavor_data': "{'glance_image': 'ubuntu-16.04.03'}"}
        kwargs.update(overrides)
        return self._insert(session, models.FlavorProfile, kwargs)

    def create_flavor(self, session, profile, **overrides):
        kwargs = {'id': self.FAKE_UUID_1,
                  'name': 'fake_flavor',
                  'flavor_profile_id': profile,
                  'description': 'fake flavor',
                  'enabled': True}
        kwargs.update(overrides)
        return self._insert(session, models.Flavor, kwargs)

    def associate_amphora(self, load_balancer, amphora):
        load_balancer.amphorae.append(amphora)

    def create_listener(self, session, **overrides):
        kwargs = {'project_id': self.FAKE_UUID_1,
                  'id': self.FAKE_UUID_1,
                  'protocol': constants.PROTOCOL_HTTP,
                  'protocol_port': 80,
                  'provisioning_status': constants.ACTIVE,
                  'operating_status': constants.ONLINE,
                  'enabled': True}
        kwargs.update(overrides)
        return self._insert(session, models.Listener, kwargs)

    def create_listener_statistics(self, session, listener_id, amphora_id,
                                   **overrides):
        kwargs = {'listener_id': listener_id,
                  'amphora_id': amphora_id,
                  'bytes_in': 0,
                  'bytes_out': 0,
                  'active_connections': 0,
                  'total_connections': 0,
                  'request_errors': 0}
        kwargs.update(overrides)
        return self._insert(session, models.ListenerStatistics, kwargs)

    def create_pool(self, session, **overrides):
        kwargs = {'project_id': self.FAKE_UUID_1,
                  'id': self.FAKE_UUID_1,
                  'protocol': constants.PROTOCOL_HTTP,
                  'lb_algorithm': constants.LB_ALGORITHM_LEAST_CONNECTIONS,
                  'provisioning_status': constants.ACTIVE,
                  'operating_status': constants.ONLINE,
                  'enabled': True,
                  'tls_enabled': False}
        kwargs.update(overrides)
        return self._insert(session, models.Pool, kwargs)

    def create_session_persistence(self, session, pool_id, **overrides):
        kwargs = {'pool_id': pool_id,
                  'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE,
                  'cookie_name': 'cookie_name'}
        kwargs.update(overrides)
        return self._insert(session, models.SessionPersistence, kwargs)

    def create_health_monitor(self, session, pool_id, **overrides):
        kwargs = {'id': pool_id,
                  'pool_id': pool_id,
                  'type': constants.HEALTH_MONITOR_HTTP,
                  'delay': 1,
                  'timeout': 1,
                  'fall_threshold': 1,
                  'rise_threshold': 1,
                  'enabled': True,
                  'operating_status': constants.ONLINE,
                  'provisioning_status': constants.ACTIVE,
                  'project_id': self.FAKE_UUID_1}
        kwargs.update(overrides)
        return self._insert(session, models.HealthMonitor, kwargs)

    def create_member(self, session, pool_id, **overrides):
        kwargs = {'project_id': self.FAKE_UUID_1,
                  'id': self.FAKE_UUID_1,
                  'pool_id': pool_id,
                  'ip_address': '10.0.0.1',
                  'protocol_port': 80,
                  'provisioning_status': constants.ACTIVE,
                  'operating_status': constants.ONLINE,
                  'enabled': True,
                  'backup': False}
        kwargs.update(overrides)
        return self._insert(session, models.Member, kwargs)

    def create_load_balancer(self, session, **overrides):
        kwargs = {'project_id': self.FAKE_UUID_1,
                  'id': self.FAKE_UUID_1,
                  'provisioning_status': constants.ACTIVE,
                  'operating_status': constants.ONLINE,
                  'enabled': True,
                  'server_group_id': self.FAKE_UUID_1}
        kwargs.update(overrides)
        return self._insert(session, models.LoadBalancer, kwargs)

    def create_vip(self, session, load_balancer_id, **overrides):
        kwargs = {'load_balancer_id': load_balancer_id}
        kwargs.update(overrides)
        return self._insert(session, models.Vip, kwargs)

    def create_sni(self, session, **overrides):
        kwargs = {'listener_id': self.FAKE_UUID_1,
                  'tls_container_id': self.FAKE_UUID_1}
        kwargs.update(overrides)
        return self._insert(session, models.SNI, kwargs)

    def create_amphora(self, session, **overrides):
        kwargs = {'id': self.FAKE_UUID_1,
                  'compute_id': self.FAKE_UUID_1,
                  'status': constants.ACTIVE,
                  'vrrp_ip': self.FAKE_IP,
                  'ha_ip': self.FAKE_IP,
                  'vrrp_port_id': self.FAKE_UUID_1,
                  'ha_port_id': self.FAKE_UUID_2,
                  'lb_network_ip': self.FAKE_IP,
                  'cert_expiration': datetime.datetime.utcnow(),
                  'cert_busy': False,
                  'cached_zone': self.FAKE_AZ}
        kwargs.update(overrides)
        return self._insert(session, models.Amphora, kwargs)

    def create_amphora_health(self, session, **overrides):
        kwargs = {'amphora_id': self.FAKE_UUID_1,
                  'last_update': datetime.date.today(),
                  'busy': True}
        kwargs.update(overrides)
        return self._insert(session, models.AmphoraHealth, kwargs)

    def create_l7policy(self, session, listener_id, **overrides):
        kwargs = {'id': self.FAKE_UUID_1,
                  'listener_id': listener_id,
                  'action': constants.L7POLICY_ACTION_REJECT,
                  'position': 1,
                  'provisioning_status': constants.ACTIVE,
                  'operating_status': constants.ONLINE,
                  'enabled': True}
        kwargs.update(overrides)
        return self._insert(session, models.L7Policy, kwargs)

    def create_l7rule(self, session, l7policy_id, **overrides):
        kwargs = {'id': self.FAKE_UUID_1,
                  'l7policy_id': l7policy_id,
                  'type': constants.L7RULE_TYPE_PATH,
                  'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
                  'value': '/api',
                  'provisioning_status': constants.ACTIVE,
                  'operating_status': constants.ONLINE,
                  'enabled': True}
        kwargs.update(overrides)
        return self._insert(session, models.L7Rule, kwargs)

    def create_listener_cidr(self, session, listener_id, cidr):
        kwargs = {'listener_id': listener_id,
                  'cidr': cidr}
        return self._insert(session, models.ListenerCidr, kwargs)


class PoolModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def test_create(self):
        pool = self.create_pool(self.session)
        self.assertIsNotNone(pool.created_at)
        self.assertIsNone(pool.updated_at)

    def test_update(self):
        pool = self.create_pool(self.session)
        self.assertIsNone(pool.updated_at)
        id = pool.id
        pool.enabled = False
        new_pool = self.session.query(
            models.Pool).filter_by(id=id).first()
        self.assertFalse(new_pool.enabled)
        self.assertIsNotNone(new_pool.updated_at)

    def test_delete(self):
        pool = self.create_pool(self.session)
        id = pool.id
        with self.session.begin():
            self.session.delete(pool)
            self.session.flush()
        new_pool = self.session.query(
            models.Pool).filter_by(id=id).first()
        self.assertIsNone(new_pool)

    def test_member_relationship(self):
        pool = self.create_pool(self.session)
        self.create_member(self.session, pool.id, id=self.FAKE_UUID_1,
                           ip_address="10.0.0.1")
        self.create_member(self.session, pool.id, id=self.FAKE_UUID_2,
                           ip_address="10.0.0.2")
        new_pool = self.session.query(
            models.Pool).filter_by(id=pool.id).first()
        self.assertIsNotNone(new_pool.members)
        self.assertEqual(2, len(new_pool.members))
        self.assertIsInstance(new_pool.members[0], models.Member)

    def test_health_monitor_relationship(self):
        pool = self.create_pool(self.session)
        self.create_health_monitor(self.session, pool.id)
        new_pool = self.session.query(models.Pool).filter_by(
            id=pool.id).first()
        self.assertIsNotNone(new_pool.health_monitor)
        self.assertIsInstance(new_pool.health_monitor, models.HealthMonitor)

    def test_session_persistence_relationship(self):
        pool = self.create_pool(self.session)
        self.create_session_persistence(self.session, pool_id=pool.id)
        new_pool = self.session.query(models.Pool).filter_by(
            id=pool.id).first()
        self.assertIsNotNone(new_pool.session_persistence)
        self.assertIsInstance(new_pool.session_persistence,
                              models.SessionPersistence)

    def test_listener_relationship(self):
        pool = self.create_pool(self.session)
        listener = self.create_listener(self.session, default_pool_id=pool.id)
        new_pool = self.session.query(models.Pool).filter_by(
            id=pool.id).first()
        self.assertIsNotNone(new_pool.listeners)
        self.assertIsInstance(new_pool.listeners, list)
        self.assertIsInstance(new_pool.listeners[0], models.Listener)
        self.assertIn(listener.id, [li.id for li in new_pool.listeners])


class MemberModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(MemberModelTest, self).setUp()
        self.pool = self.create_pool(self.session)

    def test_create(self):
        member = self.create_member(self.session, self.pool.id)
        self.assertIsNotNone(member.created_at)
        self.assertIsNone(member.updated_at)

    def test_update(self):
        member = self.create_member(self.session, self.pool.id)
        self.assertIsNone(member.updated_at)
        member_id = member.id
        member.enabled = False
        new_member = self.session.query(
            models.Member).filter_by(id=member_id).first()
        self.assertFalse(new_member.enabled)
        self.assertIsNotNone(new_member.updated_at)

    def test_delete(self):
        member = self.create_member(self.session, self.pool.id)
        member_id = member.id
        with self.session.begin():
            self.session.delete(member)
            self.session.flush()
        new_member = self.session.query(
            models.Member).filter_by(id=member_id).first()
        self.assertIsNone(new_member)

    def test_pool_relationship(self):
        member = self.create_member(self.session, self.pool.id,
                                    id=self.FAKE_UUID_1,
                                    ip_address="10.0.0.1")
        self.create_member(self.session, self.pool.id, id=self.FAKE_UUID_2,
                           ip_address="10.0.0.2")
        new_member = self.session.query(models.Member).filter_by(
            id=member.id).first()
        self.assertIsNotNone(new_member.pool)
        self.assertIsInstance(new_member.pool, models.Pool)


class SessionPersistenceModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(SessionPersistenceModelTest, self).setUp()
        self.pool = self.create_pool(self.session)

    def test_create(self):
        self.create_session_persistence(self.session, self.pool.id)

    def test_update(self):
        session_persistence = self.create_session_persistence(self.session,
                                                              self.pool.id)
        session_persistence.name = 'test1'
        new_session_persistence = self.session.query(
            models.SessionPersistence).filter_by(pool_id=self.pool.id).first()
        self.assertEqual('test1', new_session_persistence.name)

    def test_delete(self):
        session_persistence = self.create_session_persistence(self.session,
                                                              self.pool.id)
        with self.session.begin():
            self.session.delete(session_persistence)
            self.session.flush()
        new_session_persistence = self.session.query(
            models.SessionPersistence).filter_by(pool_id=self.pool.id).first()
        self.assertIsNone(new_session_persistence)

    def test_pool_relationship(self):
        self.create_session_persistence(self.session, self.pool.id)
        new_persistence = self.session.query(
            models.SessionPersistence).filter_by(pool_id=self.pool.id).first()
        self.assertIsNotNone(new_persistence.pool)
        self.assertIsInstance(new_persistence.pool, models.Pool)


class ListenerModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def test_create(self):
        listener = self.create_listener(self.session)
        self.assertIsNotNone(listener.created_at)
        self.assertIsNone(listener.updated_at)

    def test_create_with_timeouts(self):
        timeouts = {
            'timeout_client_data': 1,
            'timeout_member_connect': 2,
            'timeout_member_data': constants.MIN_TIMEOUT,
            'timeout_tcp_inspect': constants.MAX_TIMEOUT,
        }
        listener = self.create_listener(self.session, **timeouts)
        for item in timeouts:
            self.assertEqual(timeouts[item], getattr(listener, item))

    def test_update(self):
        listener = self.create_listener(self.session)
        self.assertIsNone(listener.updated_at)
        listener_id = listener.id
        listener.name = 'test1'
        new_listener = self.session.query(
            models.Listener).filter_by(id=listener_id).first()
        self.assertEqual('test1', new_listener.name)
        self.assertIsNotNone(new_listener.updated_at)

    def test_update_with_timeouts(self):
        listener = self.create_listener(self.session)
        listener_id = listener.id
        timeouts = {
            'timeout_client_data': 1,
            'timeout_member_connect': 2,
            'timeout_member_data': 3,
            'timeout_tcp_inspect': 4,
        }
        for item in timeouts:
            setattr(listener, item, timeouts[item])
        new_listener = self.session.query(
            models.Listener).filter_by(id=listener_id).first()
        for item in timeouts:
            self.assertEqual(timeouts[item], getattr(new_listener, item))

    def test_delete(self):
        listener = self.create_listener(self.session)
        listener_id = listener.id
        with self.session.begin():
            self.session.delete(listener)
            self.session.flush()
        new_listener = self.session.query(
            models.Listener).filter_by(id=listener_id).first()
        self.assertIsNone(new_listener)

    def test_load_balancer_relationship(self):
        lb = self.create_load_balancer(self.session)
        listener = self.create_listener(self.session, load_balancer_id=lb.id)
        new_listener = self.session.query(
            models.Listener).filter_by(id=listener.id).first()
        self.assertIsNotNone(new_listener.load_balancer)
        self.assertIsInstance(new_listener.load_balancer, models.LoadBalancer)

    def test_default_pool_relationship(self):
        pool = self.create_pool(self.session)
        listener = self.create_listener(self.session, default_pool_id=pool.id)
        new_listener = self.session.query(models.Listener).filter_by(
            id=listener.id).first()
        self.assertIsNotNone(new_listener.default_pool)
        self.assertIsInstance(new_listener.default_pool, models.Pool)
        self.assertIsInstance(new_listener.pools, list)
        self.assertIn(pool.id, [p.id for p in new_listener.pools])

    def test_sni_relationship(self):
        listener = self.create_listener(self.session)
        self.create_sni(self.session, listener_id=listener.id,
                        tls_container_id=self.FAKE_UUID_1)
        self.create_sni(self.session, listener_id=listener.id,
                        tls_container_id=self.FAKE_UUID_2)
        new_listener = self.session.query(models.Listener).filter_by(
            id=listener.id).first()
        self.assertIsNotNone(new_listener.sni_containers)
        self.assertEqual(2, len(new_listener.sni_containers))

    def test_pools_list(self):
        pool = self.create_pool(self.session)
        listener = self.create_listener(self.session, default_pool_id=pool.id)
        new_listener = self.session.query(models.Listener).filter_by(
            id=listener.id).first()
        self.assertIsNotNone(new_listener.pools)
        self.assertIsInstance(new_listener.pools, list)
        self.assertIsInstance(new_listener.pools[0], models.Pool)
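
# Each model test class in this module follows the shape set up by
# ModelTestMixin: a create_* helper builds default kwargs, applies
# overrides, and writes through _insert() inside session.begin().  A sketch
# of extending the mixin for a new model (the Widget name is hypothetical,
# not a model in this tree):
#
#     def create_widget(self, session, **overrides):
#         kwargs = {'id': self.FAKE_UUID_1, 'name': 'fake_widget'}
#         kwargs.update(overrides)
#         return self._insert(session, models.Widget, kwargs)
#
# with tests that mutate one column and re-query via
# session.query(...).filter_by(...).first() to prove the change persisted.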
class ListenerStatisticsModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(ListenerStatisticsModelTest, self).setUp()
        self.listener = self.create_listener(self.session)
        self.amphora = self.create_amphora(self.session)

    def test_create(self):
        self.create_listener_statistics(self.session, self.listener.id,
                                        self.amphora.id)

    def test_create_with_negative_int(self):
        overrides = {'bytes_in': -1}
        self.assertRaises(ValueError,
                          self.create_listener_statistics,
                          self.session, self.listener.id, self.amphora.id,
                          **overrides)

    def test_update(self):
        stats = self.create_listener_statistics(self.session,
                                                self.listener.id,
                                                self.amphora.id)
        stats.name = 'test1'
        new_stats = self.session.query(models.ListenerStatistics).filter_by(
            listener_id=self.listener.id).first()
        self.assertEqual('test1', new_stats.name)

    def test_delete(self):
        stats = self.create_listener_statistics(self.session,
                                                self.listener.id,
                                                self.amphora.id)
        with self.session.begin():
            self.session.delete(stats)
            self.session.flush()
        new_stats = self.session.query(models.ListenerStatistics).filter_by(
            listener_id=self.listener.id).first()
        self.assertIsNone(new_stats)


class HealthMonitorModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(HealthMonitorModelTest, self).setUp()
        self.pool = self.create_pool(self.session)

    def test_create(self):
        self.create_health_monitor(self.session, self.pool.id)

    def test_update(self):
        health_monitor = self.create_health_monitor(self.session,
                                                    self.pool.id)
        health_monitor.name = 'test1'
        new_health_monitor = self.session.query(
            models.HealthMonitor).filter_by(
            pool_id=health_monitor.pool_id).first()
        self.assertEqual('test1', new_health_monitor.name)

    def test_delete(self):
        health_monitor = self.create_health_monitor(self.session,
                                                    self.pool.id)
        with self.session.begin():
            self.session.delete(health_monitor)
            self.session.flush()
        new_health_monitor = self.session.query(
            models.HealthMonitor).filter_by(
            pool_id=health_monitor.pool_id).first()
        self.assertIsNone(new_health_monitor)

    def test_pool_relationship(self):
        health_monitor = self.create_health_monitor(self.session,
                                                    self.pool.id)
        new_health_monitor = self.session.query(
            models.HealthMonitor).filter_by(
            pool_id=health_monitor.pool_id).first()
        self.assertIsNotNone(new_health_monitor.pool)
        self.assertIsInstance(new_health_monitor.pool, models.Pool)


class LoadBalancerModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def test_create(self):
        load_balancer = self.create_load_balancer(self.session)
        self.assertIsNotNone(load_balancer.created_at)
        self.assertIsNone(load_balancer.updated_at)

    def test_update(self):
        load_balancer = self.create_load_balancer(self.session)
        self.assertIsNone(load_balancer.updated_at)
        lb_id = load_balancer.id
        load_balancer.enabled = False
        new_load_balancer = self.session.query(
            models.LoadBalancer).filter_by(id=lb_id).first()
        self.assertFalse(new_load_balancer.enabled)
        self.assertIsNotNone(new_load_balancer.updated_at)

    def test_delete(self):
        load_balancer = self.create_load_balancer(self.session)
        lb_id = load_balancer.id
        with self.session.begin():
            self.session.delete(load_balancer)
            self.session.flush()
        new_load_balancer = self.session.query(
            models.LoadBalancer).filter_by(id=lb_id).first()
        self.assertIsNone(new_load_balancer)

    def test_listener_relationship(self):
        load_balancer = self.create_load_balancer(self.session)
        self.create_listener(self.session, load_balancer_id=load_balancer.id)
        new_load_balancer = self.session.query(
            models.LoadBalancer).filter_by(id=load_balancer.id).first()
        self.assertIsNotNone(new_load_balancer.listeners)
        self.assertEqual(1, len(new_load_balancer.listeners))

    def test_load_balancer_amphora_relationship(self):
        load_balancer = self.create_load_balancer(self.session)
        amphora = self.create_amphora(self.session)
        self.associate_amphora(load_balancer, amphora)
        new_load_balancer = self.session.query(
            models.LoadBalancer).filter_by(id=load_balancer.id).first()
        self.assertIsNotNone(new_load_balancer.amphorae)
        self.assertEqual(1, len(new_load_balancer.amphorae))

    def test_load_balancer_vip_relationship(self):
        load_balancer = self.create_load_balancer(self.session)
        self.create_vip(self.session, load_balancer.id)
        new_load_balancer = self.session.query(
            models.LoadBalancer).filter_by(id=load_balancer.id).first()
        self.assertIsNotNone(new_load_balancer.vip)
        self.assertIsInstance(new_load_balancer.vip, models.Vip)


class VipModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(VipModelTest, self).setUp()
        self.load_balancer = self.create_load_balancer(self.session)

    def test_create(self):
        self.create_vip(self.session, self.load_balancer.id)

    def test_update(self):
        vip = self.create_vip(self.session, self.load_balancer.id)
        vip.ip_address = "10.0.0.1"
        new_vip = self.session.query(models.Vip).filter_by(
            load_balancer_id=self.load_balancer.id).first()
        self.assertEqual("10.0.0.1", new_vip.ip_address)

    def test_delete(self):
        vip = self.create_vip(self.session, self.load_balancer.id)
        with self.session.begin():
            self.session.delete(vip)
            self.session.flush()
        new_vip = self.session.query(models.Vip).filter_by(
            load_balancer_id=vip.load_balancer_id).first()
        self.assertIsNone(new_vip)

    def test_vip_load_balancer_relationship(self):
        self.create_vip(self.session, self.load_balancer.id)
        new_vip = self.session.query(models.Vip).filter_by(
            load_balancer_id=self.load_balancer.id).first()
        self.assertIsNotNone(new_vip.load_balancer)
        self.assertIsInstance(new_vip.load_balancer, models.LoadBalancer)


class SNIModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(SNIModelTest, self).setUp()
        self.listener = self.create_listener(self.session)

    def test_create(self):
        self.create_sni(self.session, listener_id=self.listener.id)

    def test_update(self):
        sni = self.create_sni(self.session, listener_id=self.listener.id)
        sni.tls_container_id = self.FAKE_UUID_2
        new_sni = self.session.query(
            models.SNI).filter_by(listener_id=self.FAKE_UUID_1).first()
        self.assertEqual(self.FAKE_UUID_2, new_sni.tls_container_id)

    def test_delete(self):
        sni = self.create_sni(self.session, listener_id=self.listener.id)
        with self.session.begin():
            self.session.delete(sni)
            self.session.flush()
        new_sni = self.session.query(
            models.SNI).filter_by(listener_id=self.listener.id).first()
        self.assertIsNone(new_sni)

    def test_sni_relationship(self):
        self.create_sni(self.session, listener_id=self.listener.id)
        new_sni = self.session.query(models.SNI).filter_by(
            listener_id=self.listener.id).first()
        self.assertIsNotNone(new_sni.listener)
        self.assertIsInstance(new_sni.listener, models.Listener)


class AmphoraModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(AmphoraModelTest, self).setUp()
        self.load_balancer = self.create_load_balancer(self.session)

    def test_create(self):
        self.create_amphora(self.session)

    def test_update(self):
        amphora = self.create_amphora(
            self.session)
        amphora.amphora_id = self.FAKE_UUID_2
        new_amphora = self.session.query(models.Amphora).filter_by(
            id=amphora.id).first()
        self.assertEqual(self.FAKE_UUID_2, new_amphora.amphora_id)

    def test_delete(self):
        amphora = self.create_amphora(
            self.session)
        with self.session.begin():
            self.session.delete(amphora)
            self.session.flush()
        new_amphora = self.session.query(
            models.Amphora).filter_by(id=amphora.id).first()
        self.assertIsNone(new_amphora)

    def test_load_balancer_relationship(self):
        amphora = self.create_amphora(self.session)
        self.associate_amphora(self.load_balancer, amphora)
        new_amphora = self.session.query(models.Amphora).filter_by(
            id=amphora.id).first()
        self.assertIsNotNone(new_amphora.load_balancer)
        self.assertIsInstance(new_amphora.load_balancer, models.LoadBalancer)


class AmphoraHealthModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(AmphoraHealthModelTest, self).setUp()
        self.amphora = self.create_amphora(self.session)

    def test_create(self):
        self.create_amphora_health(self.session)

    def test_update(self):
        amphora_health = self.create_amphora_health(self.session)
        d = datetime.date.today()
        newdate = d.replace(day=d.day)
        amphora_health.last_update = newdate
        new_amphora_health = self.session.query(
            models.AmphoraHealth).filter_by(
            amphora_id=amphora_health.amphora_id).first()
        self.assertEqual(newdate, new_amphora_health.last_update.date())

    def test_delete(self):
        amphora_health = self.create_amphora_health(
            self.session)
        with self.session.begin():
            self.session.delete(amphora_health)
            self.session.flush()
        new_amphora_health = self.session.query(
            models.AmphoraHealth).filter_by(
            amphora_id=amphora_health.amphora_id).first()
        self.assertIsNone(new_amphora_health)


class L7PolicyModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(L7PolicyModelTest, self).setUp()
        self.listener = self.create_listener(self.session)

    def test_create(self):
        l7policy = self.create_l7policy(self.session, self.listener.id)
        self.assertIsInstance(l7policy, models.L7Policy)

    def test_update(self):
        l7policy = self.create_l7policy(self.session, self.listener.id)
        pool = self.create_pool(self.session)
        l7policy.action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL
        l7policy.redirect_pool_id = pool.id
        new_l7policy = self.session.query(
            models.L7Policy).filter_by(id=l7policy.id).first()
        self.assertEqual(pool.id, new_l7policy.redirect_pool_id)
        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
                         new_l7policy.action)

    def test_delete(self):
        l7policy = self.create_l7policy(self.session, self.listener.id)
        l7policy_id = l7policy.id
        with self.session.begin():
            self.session.delete(l7policy)
            self.session.flush()
        new_l7policy = self.session.query(
            models.L7Policy).filter_by(id=l7policy_id).first()
        self.assertIsNone(new_l7policy)

    def test_l7rule_relationship(self):
        l7policy = self.create_l7policy(self.session, self.listener.id)
        self.create_l7rule(
            self.session, l7policy.id, id=self.FAKE_UUID_1,
            type=constants.L7RULE_TYPE_HOST_NAME,
            compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
            value='www.example.com')
        self.create_l7rule(
            self.session, l7policy.id, id=self.FAKE_UUID_2,
            type=constants.L7RULE_TYPE_PATH,
            compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
            value='/api')
        new_l7policy = self.session.query(
            models.L7Policy).filter_by(id=l7policy.id).first()
        self.assertIsNotNone(new_l7policy.l7rules)
        self.assertEqual(2, len(new_l7policy.l7rules))
        self.assertIsInstance(new_l7policy.l7rules[0], models.L7Rule)
        self.assertIsInstance(new_l7policy.l7rules[1], models.L7Rule)

    def test_pool_relationship(self):
        l7policy = self.create_l7policy(self.session, self.listener.id)
        self.create_pool(self.session, id=self.FAKE_UUID_2)
        l7policy.action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL
        l7policy.redirect_pool_id = self.FAKE_UUID_2
        new_l7policy = self.session.query(
            models.L7Policy).filter_by(id=l7policy.id).first()
        self.assertIsNotNone(new_l7policy.redirect_pool)
        self.assertIsInstance(new_l7policy.redirect_pool, models.Pool)

    def test_listener_relationship(self):
        l7policy = self.create_l7policy(self.session, self.listener.id,
                                        id=self.FAKE_UUID_1)
        self.create_l7policy(self.session, self.listener.id,
                             id=self.FAKE_UUID_2, position=1)
        new_l7policy = self.session.query(models.L7Policy).filter_by(
            id=l7policy.id).first()
        self.assertIsNotNone(new_l7policy.listener)
        self.assertIsInstance(new_l7policy.listener, models.Listener)

    def test_listeners_pools_refs_with_l7policy_with_l7rule(self):
        pool = self.create_pool(self.session, id=self.FAKE_UUID_2)
        l7policy = self.create_l7policy(
            self.session, self.listener.id,
            action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            redirect_pool_id=pool.id)
        self.create_l7rule(self.session, l7policy.id, id=self.FAKE_UUID_1)
        new_pool = self.session.query(models.Pool).filter_by(
            id=pool.id).first()
        new_listener = self.session.query(models.Listener).filter_by(
            id=self.listener.id).first()
        self.assertIsInstance(new_pool.listeners, list)
        self.assertIn(new_listener.id, [li.id for li in new_pool.listeners])
        self.assertIsInstance(new_listener.pools, list)
        self.assertIn(new_pool.id, [p.id for p in new_listener.pools])

    def test_listeners_pools_refs_with_l7policy_without_l7rule(self):
        pool = self.create_pool(self.session, id=self.FAKE_UUID_2)
        self.create_l7policy(
            self.session, self.listener.id,
            action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            redirect_pool_id=pool.id)
        new_pool = self.session.query(models.Pool).filter_by(
            id=pool.id).first()
        new_listener = self.session.query(models.Listener).filter_by(
            id=self.listener.id).first()
        self.assertIsInstance(new_pool.listeners, list)
        self.assertNotIn(new_listener.id,
                         [li.id for li in new_pool.listeners])
        self.assertIsInstance(new_listener.pools, list)
        self.assertNotIn(new_pool.id, [p.id for p in new_listener.pools])

    def test_listeners_pools_refs_with_disabled_l7policy(self):
        pool = self.create_pool(self.session, id=self.FAKE_UUID_2)
        l7policy = self.create_l7policy(
            self.session, self.listener.id,
            action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            redirect_pool_id=pool.id, enabled=False)
        self.create_l7rule(self.session, l7policy.id, id=self.FAKE_UUID_1)
        new_pool = self.session.query(models.Pool).filter_by(
            id=pool.id).first()
        new_listener = self.session.query(models.Listener).filter_by(
            id=self.listener.id).first()
        self.assertIsInstance(new_pool.listeners, list)
        self.assertNotIn(new_listener.id,
                         [li.id for li in new_pool.listeners])
        self.assertIsInstance(new_listener.pools, list)
        self.assertNotIn(new_pool.id, [p.id for p in new_listener.pools])


class L7RuleModelTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(L7RuleModelTest, self).setUp()
        self.listener = self.create_listener(self.session)
        self.l7policy = self.create_l7policy(self.session, self.listener.id)

    def test_create(self):
        l7rule = self.create_l7rule(self.session, self.l7policy.id)
        self.assertIsInstance(l7rule, models.L7Rule)

    def test_update(self):
        l7rule = self.create_l7rule(self.session, self.l7policy.id)
        l7rule_id = l7rule.id
        l7rule.value = '/images'
        new_l7rule = self.session.query(
            models.L7Rule).filter_by(id=l7rule_id).first()
        self.assertEqual('/images', new_l7rule.value)

    def test_delete(self):
        l7rule = self.create_l7rule(self.session, self.l7policy.id)
        l7rule_id = l7rule.id
        with self.session.begin():
            self.session.delete(l7rule)
            self.session.flush()
        new_l7rule = self.session.query(
            models.L7Rule).filter_by(id=l7rule_id).first()
        self.assertIsNone(new_l7rule)

    def test_l7policy_relationship(self):
        l7rule = self.create_l7rule(
            self.session, self.l7policy.id, id=self.FAKE_UUID_1,
            type=constants.L7RULE_TYPE_HOST_NAME,
            compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
            value='www.example.com')
        self.create_l7rule(
            self.session, self.l7policy.id, id=self.FAKE_UUID_2,
            type=constants.L7RULE_TYPE_PATH,
            compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
            value='/api')
        new_l7rule = self.session.query(models.L7Rule).filter_by(
            id=l7rule.id).first()
        self.assertIsNotNone(new_l7rule.l7policy)
        self.assertIsInstance(new_l7rule.l7policy, models.L7Policy)


class TestDataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin):

    def setUp(self):
        super(TestDataModelConversionTest, self).setUp()
        self.lb = self.create_load_balancer(self.session)
        self.amphora = self.create_amphora(self.session)
        self.associate_amphora(self.lb, self.amphora)
        self.amphora_health = self.create_amphora_health(self.session)
        self.pool = self.create_pool(self.session,
                                     load_balancer_id=self.lb.id)
        self.hm = self.create_health_monitor(self.session, self.pool.id)
        self.member = self.create_member(self.session, self.pool.id,
                                         id=self.FAKE_UUID_1,
                                         ip_address='10.0.0.1')
        self.sp = self.create_session_persistence(self.session, self.pool.id)
        self.vip = self.create_vip(self.session, self.lb.id)
        self.listener = self.create_listener(self.session,
                                             default_pool_id=self.pool.id,
                                             load_balancer_id=self.lb.id)
        self.stats = self.create_listener_statistics(self.session,
                                                     self.listener.id,
                                                     self.amphora.id)
        self.sni = self.create_sni(self.session, listener_id=self.listener.id)
        self.l7policy = self.create_l7policy(
            self.session, listener_id=self.listener.id,
            action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
            redirect_pool_id=self.pool.id)
        self.l7rule = self.create_l7rule(self.session,
                                         l7policy_id=self.l7policy.id)
        self.listener_cidr = self.create_listener_cidr(
            self.session, listener_id=self.listener.id, cidr='10.0.1.0/24')

    @staticmethod
    def _get_unique_key(obj):
        """Returns a unique key for passed object for data model building."""
        # First handle all objects with their own ID, then handle subordinate
        # objects.
        if obj.__class__.__name__ in ['Member', 'Pool', 'LoadBalancer',
                                      'Listener', 'Amphora', 'L7Policy',
                                      'L7Rule']:
            return obj.__class__.__name__ + obj.id
        elif obj.__class__.__name__ in ['SessionPersistence',
                                        'HealthMonitor']:
            return obj.__class__.__name__ + obj.pool_id
        elif obj.__class__.__name__ in ['ListenerStatistics']:
            return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
        elif obj.__class__.__name__ in ['ListenerCidr']:
            return obj.__class__.__name__ + obj.listener_id + obj.cidr
        elif obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
            return obj.__class__.__name__ + obj.load_balancer_id
        elif obj.__class__.__name__ in ['AmphoraHealth']:
            return obj.__class__.__name__ + obj.amphora_id
        elif obj.__class__.__name__ in ['SNI']:
            return (obj.__class__.__name__ +
                    obj.listener_id + obj.tls_container_id)
        else:
            raise NotImplementedError

    def count_graph_nodes(self, node, _graph_nodes=None):
        """Counts connected BaseDataModel nodes in a graph given the
        starting node.

        Node should be a data model in any case.
        """
        _graph_nodes = _graph_nodes or []
        total = 0
        mykey = self._get_unique_key(node)
        if mykey in _graph_nodes:
            # Seen this node already
            return total
        else:
            total += 1
            _graph_nodes.append(mykey)
            attr_names = [attr_name for attr_name in dir(node)
                          if not attr_name.startswith('_')]
            for attr_name in attr_names:
                attr = getattr(node, attr_name)
                if isinstance(attr, data_models.BaseDataModel):
                    total += self.count_graph_nodes(
                        attr, _graph_nodes=_graph_nodes)
                elif isinstance(attr, (collections.InstrumentedList, list)):
                    for item in attr:
                        if isinstance(item, data_models.BaseDataModel):
                            total += self.count_graph_nodes(
                                item, _graph_nodes=_graph_nodes)
        return total

    def test_unique_key_generation(self):
        self.assertEqual(self._get_unique_key(self.lb),
                         self.lb.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.amphora),
                         self.amphora.to_data_model()._get_unique_key())
        self.assertEqual(
            self._get_unique_key(self.amphora_health),
            self.amphora_health.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.pool),
                         self.pool.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.hm),
                         self.hm.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.member),
                         self.member.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.sp),
                         self.sp.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.vip),
                         self.vip.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.listener),
                         self.listener.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.stats),
                         self.stats.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.sni),
                         self.sni.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.l7policy),
                         self.l7policy.to_data_model()._get_unique_key())
        self.assertEqual(self._get_unique_key(self.l7rule),
                         self.l7rule.to_data_model()._get_unique_key())
        self.assertEqual(
            self._get_unique_key(self.listener_cidr),
            self.listener_cidr.to_data_model()._get_unique_key())

    def test_graph_completeness(self):
        # Generate equivalent graphs starting arbitrarily from different
        # nodes within it; Make sure the resulting graphs all contain the
        # same number of nodes.
        lb_dm = self.session.query(models.LoadBalancer).filter_by(
            id=self.lb.id).first().to_data_model()
        lb_graph_count = self.count_graph_nodes(lb_dm)
        p_dm = self.session.query(models.Pool).filter_by(
            id=self.pool.id).first().to_data_model()
        p_graph_count = self.count_graph_nodes(p_dm)
        mem_dm = self.session.query(models.Member).filter_by(
            id=self.member.id).first().to_data_model()
        mem_graph_count = self.count_graph_nodes(mem_dm)
        self.assertNotEqual(0, lb_graph_count)
        self.assertNotEqual(1, lb_graph_count)
        self.assertEqual(lb_graph_count, p_graph_count)
        self.assertEqual(lb_graph_count, mem_graph_count)

    def test_data_model_graph_traversal(self):
        lb_dm = self.session.query(models.LoadBalancer).filter_by(
            id=self.lb.id).first().to_data_model()
        # This is an arbitrary traversal that covers one of each type
        # of parent and child relationship.
        lb_id = (lb_dm.listeners[0].default_pool.members[0].pool.
                 session_persistence.pool.health_monitor.pool.listeners[0].
                 sni_containers[0].listener.load_balancer.
                 listeners[0].load_balancer.pools[0].listeners[0].
                 load_balancer.listeners[0].pools[0].load_balancer.vip.
                 load_balancer.id)
        self.assertEqual(lb_dm.id, lb_id)
        mem_dm = self.session.query(models.Member).filter_by(
            id=self.member.id).first().to_data_model()
        # Same as the above, but we generate the graph starting with an
        # arbitrary member.
        m_lb_id = (mem_dm.pool.listeners[0].load_balancer.vip.load_balancer.
                   pools[0].session_persistence.pool.health_monitor.pool.
                   listeners[0].sni_containers[0].listener.
                   load_balancer.pools[0].members[0].pool.load_balancer.id)
        self.assertEqual(lb_dm.id, m_lb_id)

    def test_update_data_model_listener_default_pool_id(self):
        lb_dm = self.create_load_balancer(
            self.session, id=uuidutils.generate_uuid()).to_data_model()
        pool1_dm = self.create_pool(
            self.session, id=uuidutils.generate_uuid(),
            load_balancer_id=lb_dm.id).to_data_model()
        pool2_dm = self.create_pool(
            self.session, id=uuidutils.generate_uuid(),
            load_balancer_id=lb_dm.id).to_data_model()
        listener_dm = self.create_listener(
            self.session, id=uuidutils.generate_uuid(),
            load_balancer_id=lb_dm.id,
            default_pool_id=pool1_dm.id).to_data_model()
        self.assertEqual(pool1_dm.id, listener_dm.default_pool.id)
        listener_dm.update({'default_pool_id': pool2_dm.id})
        self.assertEqual(listener_dm.default_pool.id, pool2_dm.id)

    def test_load_balancer_tree(self):
        lb_db = self.session.query(models.LoadBalancer).filter_by(
            id=self.lb.id).first()
        self.check_load_balancer(lb_db.to_data_model())

    def test_vip_tree(self):
        vip_db = self.session.query(models.Vip).filter_by(
            load_balancer_id=self.lb.id).first()
        self.check_vip(vip_db.to_data_model())

    def test_listener_tree(self):
        listener_db = self.session.query(models.Listener).filter_by(
            id=self.listener.id).first()
        self.check_listener(listener_db.to_data_model())

    def test_sni_tree(self):
        sni_db = self.session.query(models.SNI).filter_by(
            listener_id=self.listener.id).first()
        self.check_sni(sni_db.to_data_model())

    def test_listener_statistics_tree(self):
        stats_db = self.session.query(models.ListenerStatistics).filter_by(
            listener_id=self.listener.id).first()
        self.check_listener_statistics(stats_db.to_data_model())

    def test_pool_tree(self):
        pool_db = self.session.query(models.Pool).filter_by(
            id=self.pool.id).first()
        self.check_pool(pool_db.to_data_model())

    def test_session_persistence_tree(self):
        sp_db = self.session.query(models.SessionPersistence).filter_by(
            pool_id=self.pool.id).first()
        self.check_session_persistence(sp_db.to_data_model())

    def test_health_monitor_tree(self):
        hm_db = self.session.query(models.HealthMonitor).filter_by(
            pool_id=self.hm.pool_id).first()
        self.check_health_monitor(hm_db.to_data_model())

    def test_member_tree(self):
        member_db = self.session.query(models.Member).filter_by(
            id=self.member.id).first()
        self.check_member(member_db.to_data_model())

    def test_l7policy_tree(self):
        l7policy_db = self.session.query(models.L7Policy).filter_by(
            id=self.l7policy.id).first()
        self.check_l7policy(l7policy_db.to_data_model())

    def test_l7rule_tree(self):
        l7rule_db = self.session.query(models.L7Rule).filter_by(
            id=self.l7rule.id).first()
        self.check_l7rule(l7rule_db.to_data_model())

    def check_load_balancer(self, lb, check_listeners=True,
                            check_amphorae=True, check_vip=True,
                            check_pools=True):
        self.assertIsInstance(lb, data_models.LoadBalancer)
        self.check_load_balancer_data_model(lb)
        self.assertIsInstance(lb.listeners, list)
        self.assertIsInstance(lb.amphorae, list)
        if check_listeners:
            for listener in lb.listeners:
                self.check_listener(listener, check_lb=False,
                                    check_pools=check_pools)
        if check_amphorae:
            for amphora in lb.amphorae:
                self.check_amphora(amphora, check_load_balancer=False)
        if check_vip:
            self.check_vip(lb.vip, check_lb=False)
        if check_pools:
            for pool in lb.pools:
                self.check_pool(pool, check_lb=False,
                                check_listeners=check_listeners)

    def check_vip(self, vip, check_lb=True):
        self.assertIsInstance(vip, data_models.Vip)
        self.check_vip_data_model(vip)
        if check_lb:
            self.check_load_balancer(vip.load_balancer, check_vip=False)

    def check_sni(self, sni, check_listener=True):
        self.assertIsInstance(sni, data_models.SNI)
        self.check_sni_data_model(sni)
        if check_listener:
            self.check_listener(sni.listener, check_sni=False)

    def check_listener_statistics(self, stats, check_listener=True):
        self.assertIsInstance(stats, data_models.ListenerStatistics)
        self.check_listener_statistics_data_model(stats)
        if check_listener:
            listener_db = (self.session.query(models.Listener)
                           .filter_by(id=stats.listener_id).first())
            self.check_listener(listener_db.to_data_model())

    def check_amphora(self, amphora, check_load_balancer=True):
        self.assertIsInstance(amphora, data_models.Amphora)
        self.check_amphora_data_model(amphora)
        if check_load_balancer:
            self.check_load_balancer(amphora.load_balancer)

    def check_listener(self, listener, check_sni=True, check_pools=True,
                       check_lb=True, check_l7policies=True):
        self.assertIsInstance(listener, data_models.Listener)
        self.check_listener_data_model(listener)
        if check_lb:
            self.check_load_balancer(listener.load_balancer,
                                     check_listeners=False,
                                     check_pools=check_pools)
        if check_sni:
            c_containers = listener.sni_containers
            self.assertIsInstance(c_containers, list)
            for sni in c_containers:
                self.check_sni(sni, check_listener=False)
        if check_pools:
            for pool in listener.pools:
                self.check_pool(pool, check_listeners=False,
                                check_lb=check_lb)
        if check_l7policies:
            c_l7policies = listener.l7policies
            self.assertIsInstance(c_l7policies, list)
            for policy in c_l7policies:
                self.check_l7policy(policy, check_listener=False,
                                    check_pool=check_pools,
                                    check_lb=check_lb)

    def check_session_persistence(self, session_persistence, check_pool=True):
        self.assertIsInstance(session_persistence,
                              data_models.SessionPersistence)
        self.check_session_persistence_data_model(session_persistence)
        if check_pool:
            self.check_pool(session_persistence.pool, check_sp=False)

    def check_member(self, member, check_pool=True):
        self.assertIsInstance(member, data_models.Member)
        self.check_member_data_model(member)
        if check_pool:
            self.check_pool(member.pool, check_members=False)

    def check_l7policy(self, l7policy, check_listener=True, check_pool=True,
                       check_l7rules=True, check_lb=True):
        self.assertIsInstance(l7policy, data_models.L7Policy)
        self.check_l7policy_data_model(l7policy)
        if check_listener:
            self.check_listener(l7policy.listener, check_l7policies=False,
                                check_pools=check_pool, check_lb=check_lb)
        if check_l7rules:
            c_l7rules = l7policy.l7rules
            self.assertIsInstance(c_l7rules, list)
            for rule in c_l7rules:
                self.check_l7rule(rule, check_l7policy=False)
        if check_pool and l7policy.redirect_pool is not None:
            self.assertEqual(l7policy.action,
                             constants.L7POLICY_ACTION_REDIRECT_TO_POOL)
            self.check_pool(l7policy.redirect_pool,
                            check_listeners=check_listener,
                            check_l7policies=False, check_lb=check_lb)

    def check_l7rule(self, l7rule, check_l7policy=True):
        self.assertIsInstance(l7rule, data_models.L7Rule)
        self.check_l7rule_data_model(l7rule)
        if check_l7policy:
            self.check_l7policy(l7rule.l7policy)

    def check_health_monitor(self, health_monitor, check_pool=True):
        self.assertIsInstance(health_monitor, data_models.HealthMonitor)
        self.check_health_monitor_data_model(health_monitor)
        if check_pool:
            self.check_pool(health_monitor.pool,
check_hm=False) def check_pool(self, pool, check_listeners=True, check_sp=True, check_hm=True, check_members=True, check_l7policies=True, check_lb=True): self.assertIsInstance(pool, data_models.Pool) self.check_pool_data_model(pool) if check_listeners: for listener in pool.listeners: self.check_listener(listener, check_pools=False, check_lb=check_lb) if check_sp: self.check_session_persistence(pool.session_persistence, check_pool=False) if check_members: c_members = pool.members self.assertIsNotNone(c_members) self.assertEqual(1, len(c_members)) for c_member in c_members: self.check_member(c_member, check_pool=False) if check_hm: self.check_health_monitor(pool.health_monitor, check_pool=False) if check_lb: self.check_load_balancer(pool.load_balancer, check_pools=False, check_listeners=check_listeners) if check_l7policies: c_l7policies = pool.l7policies self.assertIsInstance(c_l7policies, list) for policy in c_l7policies: self.check_l7policy(policy, check_pool=False, check_listener=check_listeners, check_lb=check_lb) def check_load_balancer_data_model(self, lb): self.assertEqual(self.FAKE_UUID_1, lb.project_id) self.assertEqual(self.FAKE_UUID_1, lb.id) self.assertEqual(constants.ACTIVE, lb.provisioning_status) self.assertTrue(lb.enabled) def check_vip_data_model(self, vip): self.assertEqual(self.FAKE_UUID_1, vip.load_balancer_id) def check_listener_data_model(self, listener): self.assertEqual(self.FAKE_UUID_1, listener.project_id) self.assertEqual(self.FAKE_UUID_1, listener.id) self.assertEqual(constants.PROTOCOL_HTTP, listener.protocol) self.assertEqual(80, listener.protocol_port) self.assertEqual(constants.ACTIVE, listener.provisioning_status) self.assertEqual(constants.ONLINE, listener.operating_status) self.assertTrue(listener.enabled) def check_sni_data_model(self, sni): self.assertEqual(self.FAKE_UUID_1, sni.listener_id) self.assertEqual(self.FAKE_UUID_1, sni.tls_container_id) def check_listener_statistics_data_model(self, stats): self.assertEqual(self.listener.id, stats.listener_id) self.assertEqual(0, stats.bytes_in) self.assertEqual(0, stats.bytes_out) self.assertEqual(0, stats.active_connections) self.assertEqual(0, stats.total_connections) def check_pool_data_model(self, pool): self.assertEqual(self.FAKE_UUID_1, pool.project_id) self.assertEqual(self.FAKE_UUID_1, pool.id) self.assertEqual(constants.PROTOCOL_HTTP, pool.protocol) self.assertEqual(constants.LB_ALGORITHM_LEAST_CONNECTIONS, pool.lb_algorithm) self.assertEqual(constants.ONLINE, pool.operating_status) self.assertTrue(pool.enabled) def check_session_persistence_data_model(self, sp): self.assertEqual(self.pool.id, sp.pool_id) self.assertEqual(constants.SESSION_PERSISTENCE_HTTP_COOKIE, sp.type) def check_health_monitor_data_model(self, hm): self.assertEqual(constants.HEALTH_MONITOR_HTTP, hm.type) self.assertEqual(1, hm.delay) self.assertEqual(1, hm.timeout) self.assertEqual(1, hm.fall_threshold) self.assertEqual(1, hm.rise_threshold) self.assertTrue(hm.enabled) def check_member_data_model(self, member): self.assertEqual(self.FAKE_UUID_1, member.project_id) self.assertEqual(self.FAKE_UUID_1, member.id) self.assertEqual(self.pool.id, member.pool_id) self.assertEqual('10.0.0.1', member.ip_address) self.assertEqual(80, member.protocol_port) self.assertEqual(constants.ONLINE, member.operating_status) self.assertTrue(member.enabled) def check_l7policy_data_model(self, l7policy): self.assertEqual(self.FAKE_UUID_1, l7policy.id) self.assertEqual(self.listener.id, l7policy.listener_id) 
self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL, l7policy.action) self.assertEqual(self.pool.id, l7policy.redirect_pool_id) self.assertEqual(1, l7policy.position) def check_l7rule_data_model(self, l7rule): self.assertEqual(self.FAKE_UUID_1, l7rule.id) self.assertEqual(self.l7policy.id, l7rule.l7policy_id) self.assertEqual(constants.L7RULE_TYPE_PATH, l7rule.type) self.assertEqual(constants.L7RULE_COMPARE_TYPE_STARTS_WITH, l7rule.compare_type) self.assertEqual('/api', l7rule.value) self.assertFalse(l7rule.invert) def check_amphora_data_model(self, amphora): self.assertEqual(self.FAKE_UUID_1, amphora.id) self.assertEqual(self.FAKE_UUID_1, amphora.compute_id) self.assertEqual(constants.ACTIVE, amphora.status) def check_load_balancer_amphora_data_model(self, amphora): self.assertEqual(self.FAKE_UUID_1, amphora.amphora_id) self.assertEqual(self.FAKE_UUID_1, amphora.load_balancer_id) class TestDataModelManipulations(base.OctaviaDBTestBase, ModelTestMixin): def setUp(self): super(TestDataModelManipulations, self).setUp() self.lb = self.create_load_balancer(self.session) self.amphora = self.create_amphora(self.session) self.associate_amphora(self.lb, self.amphora) # This pool will be the listener's default_pool and be referenced # by self.l7policy self.pool = self.create_pool(self.session, load_balancer_id=self.lb.id) self.hm = self.create_health_monitor(self.session, self.pool.id) self.member = self.create_member(self.session, self.pool.id, id=self.FAKE_UUID_1, ip_address='10.0.0.1') self.sp = self.create_session_persistence(self.session, self.pool.id) self.vip = self.create_vip(self.session, self.lb.id) self.listener = self.create_listener(self.session, default_pool_id=self.pool.id, load_balancer_id=self.lb.id) self.stats = self.create_listener_statistics(self.session, self.listener.id, self.amphora.id) self.sni = self.create_sni(self.session, listener_id=self.listener.id) self.l7policy = self.create_l7policy( self.session, listener_id=self.listener.id, action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=self.pool.id) self.l7rule = self.create_l7rule(self.session, l7policy_id=self.l7policy.id) # This pool, l7policy and l7rule are connected to the listener, # but are not the default_pool self.pool2 = self.create_pool( self.session, load_balancer_id=self.lb.id, id=uuidutils.generate_uuid()) self.l7policy2 = self.create_l7policy( self.session, listener_id=self.listener.id, action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=self.pool2.id, id=uuidutils.generate_uuid(), position=2) self.l7rule2 = self.create_l7rule( self.session, l7policy_id=self.l7policy2.id, id=uuidutils.generate_uuid()) # This pool is not connected to the listener at all self.pool3 = self.create_pool( self.session, load_balancer_id=self.lb.id, id=uuidutils.generate_uuid()) def test_dm_simple_update(self): lb = self.lb.to_data_model() self.assertIsNone(lb.name) lb.update({'name': 'test_name_change'}) self.assertEqual(lb.name, 'test_name_change') def test_dm_session_persistence_delete(self): sp = self.sp.to_data_model() pool = sp.pool sp.delete() self.assertIsNone(pool.session_persistence) def test_dm_health_monitor_delete(self): hm = self.hm.to_data_model() pool = hm.pool hm.delete() self.assertIsNone(pool.health_monitor) def test_dm_pool_simple_update(self): pool = self.pool.to_data_model() self.assertIsNone(pool.name) pool.update({'name': 'new_pool_name'}) self.assertEqual(pool.name, 'new_pool_name') def test_dm_pool_session_persistence_update(self): pool = self.pool.to_data_model() 
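        # The next few tests exercise the special handling Pool.update()
        # gives the nested 'session_persistence' key: a non-empty dict is
        # applied to the existing child object in place, an empty dict ({})
        # deletes it, and a dict carrying a 'type' creates a new child when
        # none exists. A minimal sketch of the pattern (pool_dm is just an
        # illustrative name for a pool data model with an existing child):
        #
        #     pool_dm.update({'session_persistence': {'cookie_name': 'x'}})
        #     pool_dm.update({'session_persistence': {}})  # deletes the child
        #     pool_dm.update({'session_persistence': {
        #         'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE,
        #         'cookie_name': 'x'}})  # (re)creates the child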
self.assertEqual(pool.session_persistence.cookie_name, 'cookie_name') sp_dict = {'cookie_name': 'new_name'} pool.update({'session_persistence': sp_dict}) self.assertEqual(pool.session_persistence.cookie_name, 'new_name') def test_dm_pool_session_persistence_delete(self): pool = self.pool.to_data_model() self.assertEqual(pool.session_persistence.cookie_name, 'cookie_name') sp_dict = {} pool.update({'session_persistence': sp_dict}) self.assertIsNone(pool.session_persistence) def test_dm_pool_session_persistence_create(self): pool = self.pool.to_data_model() pool.update({'session_persistence': {}}) self.assertIsNone(pool.session_persistence) sp_dict = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, 'cookie_name': 'cookie_name'} pool.update({'session_persistence': sp_dict}) self.assertEqual(pool.session_persistence.type, constants.SESSION_PERSISTENCE_HTTP_COOKIE) self.assertEqual(pool.session_persistence.pool_id, pool.id) def test_dm_pool_delete(self): pool = self.pool.to_data_model() listener = pool.listeners[0] lb = pool.load_balancer l7policy = pool.l7policies[0] self.assertIn(pool, listener.pools) self.assertIn(pool, lb.pools) self.assertEqual(pool.id, l7policy.redirect_pool_id) pool.delete() self.assertNotIn(pool, listener.pools) self.assertIsNone(listener.default_pool) self.assertIsNone(listener.default_pool_id) self.assertNotIn(pool, lb.pools) self.assertEqual(l7policy.action, constants.L7POLICY_ACTION_REJECT) self.assertIsNone(l7policy.redirect_pool_id) self.assertIsNone(l7policy.redirect_pool) def test_dm_member_delete(self): member = self.member.to_data_model() pool = member.pool self.assertIn(member, pool.members) member.delete() self.assertNotIn(member, pool.members) def test_dm_listener_update_and_clear_default_pool(self): listener = self.listener.to_data_model() new_pool = listener._find_in_graph('Pool' + self.pool3.id) self.assertNotEqual(new_pool.id, listener.default_pool_id) self.assertNotIn(listener, new_pool.listeners) self.assertNotIn(new_pool, listener.pools) listener.update({'default_pool_id': new_pool.id}) self.assertEqual(new_pool.id, listener.default_pool_id) self.assertIn(listener, new_pool.listeners) self.assertIn(new_pool, listener.pools) listener.update({'default_pool_id': None}) self.assertIsNone(listener.default_pool_id) self.assertIsNone(listener.default_pool) self.assertNotIn(listener, new_pool.listeners) self.assertNotIn(new_pool, listener.pools) def test_dm_listener_update_clear_default_pool_with_l7p_referral(self): listener = self.listener.to_data_model() pool = listener.default_pool self.assertEqual(pool.id, listener.default_pool_id) self.assertIn(listener, pool.listeners) self.assertIn(pool, listener.pools) listener.update({'default_pool_id': None}) self.assertIsNone(listener.default_pool_id) self.assertIsNone(listener.default_pool) self.assertIn(listener, pool.listeners) self.assertIn(pool, listener.pools) def test_dm_listener_delete(self): listener = self.listener.to_data_model() lb = listener.load_balancer pools = listener.pools self.assertIn(listener, lb.listeners) for pool in pools: self.assertIn(listener, pool.listeners) listener.delete() self.assertNotIn(listener, lb.listeners) for pool in pools: self.assertNotIn(listener, pool.listeners) def test_dm_amphora_delete(self): amphora = self.amphora.to_data_model() lb = amphora.load_balancer self.assertIn(amphora, lb.amphorae) amphora.delete() self.assertNotIn(amphora, lb.amphorae) def test_dm_l7rule_delete(self): l7r = self.l7rule2.to_data_model() l7p = l7r.l7policy listener = l7p.listener pool2 = 
l7p.redirect_pool self.assertIn(pool2, listener.pools) self.assertNotEqual(pool2.id, listener.default_pool_id) self.assertIn(l7r, l7p.l7rules) self.assertEqual(1, len(l7p.l7rules)) l7r.delete() self.assertNotIn(l7r, l7p.l7rules) self.assertNotIn(pool2, listener.pools) def test_dm_l7policy_delete_with_listener_default_pool_ref(self): l7p = self.l7policy.to_data_model() listener = l7p.listener pool = l7p.redirect_pool self.assertIn(pool, listener.pools) self.assertEqual(pool.id, listener.default_pool_id) self.assertIn(l7p, listener.l7policies) self.assertIn(l7p, pool.l7policies) l7p.delete() self.assertIn(pool, listener.pools) self.assertEqual(pool.id, listener.default_pool_id) self.assertNotIn(l7p, listener.l7policies) self.assertNotIn(l7p, pool.l7policies) def test_dm_l7policy_delete_not_listener_default_pool(self): l7p = self.l7policy2.to_data_model() listener = l7p.listener pool2 = l7p.redirect_pool self.assertIn(pool2, listener.pools) self.assertNotEqual(pool2.id, listener.default_pool_id) self.assertIn(l7p, listener.l7policies) self.assertIn(l7p, pool2.l7policies) l7p.delete() self.assertNotIn(pool2, listener.pools) self.assertNotIn(l7p, listener.l7policies) self.assertNotIn(l7p, pool2.l7policies) def test_dm_l7policy_update_simple(self): l7p = self.l7policy.to_data_model() self.assertIsNone(l7p.name) l7p.update({'name': 'new_name'}) self.assertEqual(l7p.name, 'new_name') def test_dm_l7policy_update_action_rdr_url_no_default_pool_link(self): l7p = self.l7policy2.to_data_model() listener = l7p.listener pool2 = l7p.redirect_pool self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_POOL) self.assertIn(pool2, listener.pools) self.assertIn(listener, pool2.listeners) self.assertIsNone(l7p.redirect_url) update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://www.example.com/'} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_URL) self.assertEqual(l7p.redirect_url, 'http://www.example.com/') self.assertIsNone(l7p.redirect_pool_id) self.assertIsNone(l7p.redirect_pool) self.assertNotIn(pool2, listener.pools) self.assertNotIn(listener, pool2.listeners) def test_dm_l7policy_update_action_rdr_url_with_default_pool_link(self): l7p = self.l7policy.to_data_model() listener = l7p.listener pool = l7p.redirect_pool self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_POOL) self.assertIn(pool, listener.pools) self.assertIn(listener, pool.listeners) self.assertIsNone(l7p.redirect_url) update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://www.example.com/'} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_URL) self.assertEqual(l7p.redirect_url, 'http://www.example.com/') self.assertIsNone(l7p.redirect_pool_id) self.assertIsNone(l7p.redirect_pool) self.assertIn(pool, listener.pools) self.assertIn(listener, pool.listeners) def test_dm_l7policy_update_action_reject_no_default_pool_link(self): l7p = self.l7policy2.to_data_model() listener = l7p.listener pool2 = l7p.redirect_pool self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_POOL) self.assertIn(pool2, listener.pools) self.assertIn(listener, pool2.listeners) self.assertIsNone(l7p.redirect_url) update_dict = {'action': constants.L7POLICY_ACTION_REJECT} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) self.assertIsNone(l7p.redirect_url) self.assertIsNone(l7p.redirect_pool_id) self.assertIsNone(l7p.redirect_pool) 
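        # Because pool2 is reachable from the listener only through this
        # policy's redirect, switching the action to REJECT should also drop
        # the listener<->pool2 graph edges, which the next two assertions
        # verify; the *_with_default_pool_link variant below shows the edges
        # surviving when the pool is also the listener's default_pool.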
self.assertNotIn(pool2, listener.pools) self.assertNotIn(listener, pool2.listeners) def test_dm_l7policy_update_action_reject_with_default_pool_link(self): l7p = self.l7policy.to_data_model() listener = l7p.listener pool = l7p.redirect_pool self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_POOL) self.assertIn(pool, listener.pools) self.assertIn(listener, pool.listeners) self.assertIsNone(l7p.redirect_url) update_dict = {'action': constants.L7POLICY_ACTION_REJECT} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) self.assertIsNone(l7p.redirect_url) self.assertIsNone(l7p.redirect_pool_id) self.assertIsNone(l7p.redirect_pool) self.assertIn(pool, listener.pools) self.assertIn(listener, pool.listeners) def test_dm_l7policy_update_position(self): l7p = self.l7policy.to_data_model() listener = l7p.listener self.assertEqual(l7p, listener.l7policies[l7p.position - 1]) update_dict = {'position': 1} l7p.update(update_dict) self.assertEqual(l7p, listener.l7policies[0]) update_dict = {'position': 2} l7p.update(update_dict) self.assertEqual(l7p, listener.l7policies[1]) def test_dm_l7policy_update_reject_to_rdr_pool(self): l7p = self.l7policy.to_data_model() listener = l7p.listener new_pool = listener._find_in_graph('Pool' + self.pool3.id) update_dict = {'action': constants.L7POLICY_ACTION_REJECT} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) self.assertIsNone(l7p.redirect_url) self.assertIsNone(l7p.redirect_pool_id) self.assertIsNone(l7p.redirect_pool) self.assertNotIn(new_pool, listener.pools) self.assertNotIn(listener, new_pool.listeners) update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool_id': new_pool.id} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_POOL) self.assertIsNone(l7p.redirect_url) self.assertEqual(l7p.redirect_pool_id, new_pool.id) self.assertEqual(l7p.redirect_pool, new_pool) self.assertIn(new_pool, listener.pools) self.assertIn(listener, new_pool.listeners) def test_dm_l7policy_update_reject_to_rdr_pool_with_no_l7rules(self): l7p = self.l7policy.to_data_model() listener = l7p.listener new_pool = listener._find_in_graph('Pool' + self.pool3.id) update_dict = {'action': constants.L7POLICY_ACTION_REJECT} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) self.assertIsNone(l7p.redirect_url) self.assertIsNone(l7p.redirect_pool_id) self.assertIsNone(l7p.redirect_pool) self.assertNotIn(new_pool, listener.pools) self.assertNotIn(listener, new_pool.listeners) l7p.l7rules[0].delete() update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool_id': new_pool.id} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_POOL) self.assertIsNone(l7p.redirect_url) self.assertEqual(l7p.redirect_pool_id, new_pool.id) self.assertEqual(l7p.redirect_pool, new_pool) self.assertNotIn(new_pool, listener.pools) self.assertNotIn(listener, new_pool.listeners) def test_dm_l7policy_update_reject_to_rdr_pool_with_disabled_policy(self): l7p = self.l7policy.to_data_model() listener = l7p.listener new_pool = listener._find_in_graph('Pool' + self.pool3.id) update_dict = {'action': constants.L7POLICY_ACTION_REJECT} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) self.assertIsNone(l7p.redirect_url) self.assertIsNone(l7p.redirect_pool_id) self.assertIsNone(l7p.redirect_pool) self.assertNotIn(new_pool, listener.pools) 
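        # A disabled policy is the interesting case in this test: the update
        # that follows records redirect_pool_id/redirect_pool on the policy,
        # but because the policy is not enabled it must not re-add the
        # listener<->pool graph edges.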
self.assertNotIn(listener, new_pool.listeners) update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool_id': new_pool.id, 'enabled': False} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_POOL) self.assertIsNone(l7p.redirect_url) self.assertEqual(l7p.redirect_pool_id, new_pool.id) self.assertEqual(l7p.redirect_pool, new_pool) self.assertNotIn(new_pool, listener.pools) self.assertNotIn(listener, new_pool.listeners) def test_dm_l7policy_update_enable_and_disable(self): l7p = self.l7policy2.to_data_model() listener = l7p.listener self.assertIn(l7p.redirect_pool, listener.pools) update_dict = {'enabled': False} l7p.update(update_dict) self.assertNotIn(l7p.redirect_pool, listener.pools) update_dict = {'enabled': True} l7p.update(update_dict) self.assertIn(l7p.redirect_pool, listener.pools) def test_dm_l7policy_update_disable_with_default_pool_link(self): l7p = self.l7policy.to_data_model() listener = l7p.listener self.assertIn(l7p.redirect_pool, listener.pools) update_dict = {'enabled': False} l7p.update(update_dict) self.assertIn(l7p.redirect_pool, listener.pools) def test_dm_l7policy_update_enable_with_reject_to_rdr_pool(self): l7p = self.l7policy.to_data_model() listener = l7p.listener new_pool = listener._find_in_graph('Pool' + self.pool3.id) update_dict = {'action': constants.L7POLICY_ACTION_REJECT, 'enabled': False} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) self.assertIsNone(l7p.redirect_url) self.assertIsNone(l7p.redirect_pool_id) self.assertIsNone(l7p.redirect_pool) self.assertNotIn(new_pool, listener.pools) self.assertNotIn(listener, new_pool.listeners) update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool_id': new_pool.id, 'enabled': True} l7p.update(update_dict) self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REDIRECT_TO_POOL) self.assertIsNone(l7p.redirect_url) self.assertEqual(l7p.redirect_pool_id, new_pool.id) self.assertEqual(l7p.redirect_pool, new_pool) self.assertIn(new_pool, listener.pools) self.assertIn(listener, new_pool.listeners) class FlavorModelTest(base.OctaviaDBTestBase, ModelTestMixin): def setUp(self): super(FlavorModelTest, self).setUp() self.profile = self.create_flavor_profile(self.session) def test_create(self): flavor = self.create_flavor(self.session, self.profile.id) self.assertIsNotNone(flavor.id) def test_delete(self): flavor = self.create_flavor(self.session, self.profile.id) self.assertIsNotNone(flavor.id) id = flavor.id with self.session.begin(): self.session.delete(flavor) self.session.flush() new_flavor = self.session.query( models.Flavor).filter_by(id=id).first() self.assertIsNone(new_flavor) class FlavorProfileModelTest(base.OctaviaDBTestBase, ModelTestMixin): def test_create(self): fp = self.create_flavor_profile(self.session) self.assertIsNotNone(fp.id) def test_delete(self): fp = self.create_flavor_profile(self.session) self.assertIsNotNone(fp.id) id = fp.id with self.session.begin(): self.session.delete(fp) self.session.flush() new_fp = self.session.query( models.FlavorProfile).filter_by(id=id).first() self.assertIsNone(new_fp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/functional/db/test_repositories.py0000664000175000017500000070516300000000000024665 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file 
except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import random from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_db import exception as db_exception from oslo_utils import uuidutils from sqlalchemy.orm import defer from sqlalchemy.orm import exc as sa_exception from octavia.common import constants from octavia.common import data_models as models from octavia.common import exceptions from octavia.db import api as db_api from octavia.db import models as db_models from octavia.db import repositories as repo from octavia.tests.functional.db import base CONF = cfg.CONF class BaseRepositoryTest(base.OctaviaDBTestBase): FAKE_IP = "192.0.2.1" FAKE_UUID_1 = uuidutils.generate_uuid() FAKE_UUID_2 = uuidutils.generate_uuid() FAKE_UUID_3 = uuidutils.generate_uuid() FAKE_UUID_4 = uuidutils.generate_uuid() FAKE_UUID_5 = uuidutils.generate_uuid() FAKE_UUID_6 = uuidutils.generate_uuid() FAKE_UUID_7 = uuidutils.generate_uuid() FAKE_EXP_AGE = 10 def setUp(self): super(BaseRepositoryTest, self).setUp() self.pool_repo = repo.PoolRepository() self.member_repo = repo.MemberRepository() self.lb_repo = repo.LoadBalancerRepository() self.vip_repo = repo.VipRepository() self.listener_repo = repo.ListenerRepository() self.listener_stats_repo = repo.ListenerStatisticsRepository() self.sp_repo = repo.SessionPersistenceRepository() self.hm_repo = repo.HealthMonitorRepository() self.sni_repo = repo.SNIRepository() self.amphora_repo = repo.AmphoraRepository() self.amphora_health_repo = repo.AmphoraHealthRepository() self.vrrp_group_repo = repo.VRRPGroupRepository() self.l7policy_repo = repo.L7PolicyRepository() self.l7rule_repo = repo.L7RuleRepository() self.quota_repo = repo.QuotasRepository() self.flavor_repo = repo.FlavorRepository() self.flavor_profile_repo = repo.FlavorProfileRepository() def test_get_all_return_value(self): pool_list, _ = self.pool_repo.get_all(self.session, project_id=self.FAKE_UUID_2) self.assertIsInstance(pool_list, list) lb_list, _ = self.lb_repo.get_all(self.session, project_id=self.FAKE_UUID_2) self.assertIsInstance(lb_list, list) listener_list, _ = self.listener_repo.get_all( self.session, project_id=self.FAKE_UUID_2) self.assertIsInstance(listener_list, list) member_list, _ = self.member_repo.get_all(self.session, project_id=self.FAKE_UUID_2) self.assertIsInstance(member_list, list) fp_list, _ = self.flavor_profile_repo.get_all( self.session, id=self.FAKE_UUID_2) self.assertIsInstance(fp_list, list) flavor_list, _ = self.flavor_repo.get_all( self.session, id=self.FAKE_UUID_2) self.assertIsInstance(flavor_list, list) class AllRepositoriesTest(base.OctaviaDBTestBase): FAKE_UUID_1 = uuidutils.generate_uuid() FAKE_UUID_2 = uuidutils.generate_uuid() FAKE_UUID_3 = uuidutils.generate_uuid() FAKE_IP = '192.0.2.44' def setUp(self): super(AllRepositoriesTest, self).setUp() self.repos = repo.Repositories() self.load_balancer = self.repos.load_balancer.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, 
operating_status=constants.ONLINE, enabled=True) self.listener = self.repos.listener.create( self.session, protocol=constants.PROTOCOL_HTTP, protocol_port=80, enabled=True, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, load_balancer_id=self.load_balancer.id) self.amphora = self.repos.amphora.create( self.session, id=uuidutils.generate_uuid(), load_balancer_id=self.load_balancer.id, compute_id=self.FAKE_UUID_3, status=constants.ACTIVE, vrrp_ip=self.FAKE_IP, lb_network_ip=self.FAKE_IP) def test_all_repos_has_correct_repos(self): repo_attr_names = ('load_balancer', 'vip', 'health_monitor', 'session_persistence', 'pool', 'member', 'listener', 'listener_stats', 'amphora', 'sni', 'amphorahealth', 'vrrpgroup', 'l7rule', 'l7policy', 'amp_build_slots', 'amp_build_req', 'quotas', 'flavor', 'flavor_profile', 'spares_pool', 'listener_cidr', 'availability_zone', 'availability_zone_profile') for repo_attr in repo_attr_names: single_repo = getattr(self.repos, repo_attr, None) message = ("Class Repositories should have %s instance" " variable.") % repo_attr self.assertIsNotNone(single_repo, message=message) message = (("instance variable, %(repo_name)s, of class " "Repositories should be an instance of %(base)s") % {'repo_name': repo_attr, 'base': repo.BaseRepository.__name__}) self.assertIsInstance(single_repo, repo.BaseRepository, msg=message) for attr in vars(self.repos): if attr.startswith('_') or attr in repo_attr_names: continue possible_repo = getattr(self.repos, attr, None) message = ('Class Repositories is not expected to have %s instance' ' variable as a repository.' % attr) self.assertNotIsInstance(possible_repo, repo.BaseRepository, msg=message) def test_create_load_balancer_and_vip(self): lb = {'name': 'test1', 'description': 'desc1', 'enabled': True, 'provisioning_status': constants.PENDING_UPDATE, 'operating_status': constants.OFFLINE, 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, 'vrrp_group': None, 'provider': 'amphora', 'server_group_id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid(), 'flavor_id': None, 'tags': ['test_tag']} vip = {'ip_address': '192.0.2.1', 'port_id': uuidutils.generate_uuid(), 'subnet_id': uuidutils.generate_uuid(), 'network_id': uuidutils.generate_uuid(), 'qos_policy_id': None, 'octavia_owned': True} lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip) lb_dm_dict = lb_dm.to_dict() del lb_dm_dict['vip'] del lb_dm_dict['listeners'] del lb_dm_dict['amphorae'] del lb_dm_dict['pools'] del lb_dm_dict['created_at'] del lb_dm_dict['updated_at'] self.assertIsNone(lb_dm_dict.pop('availability_zone')) self.assertEqual(lb, lb_dm_dict) vip_dm_dict = lb_dm.vip.to_dict() vip_dm_dict['load_balancer_id'] = lb_dm.id del vip_dm_dict['load_balancer'] self.assertEqual(vip, vip_dm_dict) def test_create_pool_on_listener_without_sp(self): pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid(), 'provisioning_status': constants.ACTIVE, 'tags': ['test_tag'], 'tls_certificate_id': uuidutils.generate_uuid(), 'tls_enabled': False, 'tls_ciphers': None} pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) pool_dm_dict = pool_dm.to_dict() # These are not defined in the sample pool dict but will # be in the live data.
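        # The to_dict() comparison pattern used throughout these tests, in
        # sketch form: seed a plain dict, create the real object, dump the
        # returned data model with to_dict(), strip the keys that only exist
        # on live objects (relationship backrefs and timestamps), and assert
        # the remainder equals the seed dict, e.g.:
        #
        #     for key in ('members', 'listeners', 'created_at', 'updated_at'):
        #         del pool_dm_dict[key]
        #     self.assertEqual(pool, pool_dm_dict)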
del pool_dm_dict['members'] del pool_dm_dict['health_monitor'] del pool_dm_dict['session_persistence'] del pool_dm_dict['listeners'] del pool_dm_dict['load_balancer'] del pool_dm_dict['load_balancer_id'] del pool_dm_dict['l7policies'] del pool_dm_dict['created_at'] del pool_dm_dict['updated_at'] del pool_dm_dict['ca_tls_certificate_id'] del pool_dm_dict['crl_container_id'] self.assertEqual(pool, pool_dm_dict) new_listener = self.repos.listener.get(self.session, id=self.listener.id) self.assertEqual(pool_dm.id, new_listener.default_pool_id) def test_create_pool_on_listener_with_sp(self): pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid(), 'provisioning_status': constants.ACTIVE, 'tags': ['test_tag'], 'tls_certificate_id': uuidutils.generate_uuid(), 'tls_enabled': False, 'tls_ciphers': None} sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, 'cookie_name': 'cookie_monster', 'pool_id': pool['id'], 'persistence_granularity': None, 'persistence_timeout': None} pool.update({'session_persistence': sp}) pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) pool_dm_dict = pool_dm.to_dict() # These are not defined in the sample pool dict but will # be in the live data.
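        # Creating the pool with a nested 'session_persistence' dict should
        # persist both rows in one call; besides comparing the returned data
        # model below, the test re-reads the session_persistence row through
        # its own repository to prove it actually reached the database.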
del pool_dm_dict['members'] del pool_dm_dict['health_monitor'] del pool_dm_dict['session_persistence'] del pool_dm_dict['listeners'] del pool_dm_dict['load_balancer'] del pool_dm_dict['load_balancer_id'] del pool_dm_dict['l7policies'] del pool_dm_dict['created_at'] del pool_dm_dict['updated_at'] del pool_dm_dict['ca_tls_certificate_id'] del pool_dm_dict['crl_container_id'] self.assertEqual(pool, pool_dm_dict) sp_dm_dict = pool_dm.session_persistence.to_dict() del sp_dm_dict['pool'] sp['pool_id'] = pool_dm.id self.assertEqual(sp, sp_dm_dict) new_listener = self.repos.listener.get(self.session, id=self.listener.id) self.assertEqual(pool_dm.id, new_listener.default_pool_id) new_sp = self.repos.session_persistence.get(self.session, pool_id=pool_dm.id) self.assertIsNotNone(new_sp) def test_update_pool_without_sp(self): pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid(), 'provisioning_status': constants.ACTIVE, 'tags': ['test_tag'], 'tls_enabled': False, 'tls_ciphers': None} pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} new_pool_dm = self.repos.update_pool_and_sp( self.session, pool_dm.id, update_pool) pool_dm_dict = new_pool_dm.to_dict() # These are not defined in the sample pool dict but will # be in the live data. del pool_dm_dict['members'] del pool_dm_dict['health_monitor'] del pool_dm_dict['session_persistence'] del pool_dm_dict['listeners'] del pool_dm_dict['load_balancer'] del pool_dm_dict['load_balancer_id'] del pool_dm_dict['l7policies'] del pool_dm_dict['created_at'] del pool_dm_dict['updated_at'] del pool_dm_dict['ca_tls_certificate_id'] del pool_dm_dict['crl_container_id'] pool.update(update_pool) pool['tls_certificate_id'] = None self.assertEqual(pool, pool_dm_dict) self.assertIsNone(new_pool_dm.session_persistence) def test_update_pool_with_existing_sp(self): pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid(), 'provisioning_status': constants.ACTIVE, 'tags': ['test_tag'], 'tls_certificate_id': uuidutils.generate_uuid(), 'tls_enabled': False, 'tls_ciphers': None} sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, 'cookie_name': 'cookie_monster', 'pool_id': pool['id'], 'persistence_granularity': None, 'persistence_timeout': None} pool.update({'session_persistence': sp}) pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} update_sp = {'type': constants.SESSION_PERSISTENCE_SOURCE_IP} update_pool.update({'session_persistence': update_sp}) new_pool_dm = self.repos.update_pool_and_sp( self.session, pool_dm.id, update_pool) pool_dm_dict = new_pool_dm.to_dict() # These are not defined in the sample pool dict but will # be in the live data. del pool_dm_dict['members'] del pool_dm_dict['health_monitor'] del pool_dm_dict['session_persistence'] del pool_dm_dict['listeners'] del pool_dm_dict['load_balancer'] del pool_dm_dict['load_balancer_id'] del pool_dm_dict['l7policies'] del pool_dm_dict['created_at'] del pool_dm_dict['updated_at'] del pool_dm_dict['ca_tls_certificate_id'] del pool_dm_dict['crl_container_id'] pool.update(update_pool) self.assertEqual(pool, pool_dm_dict) sp_dm_dict = new_pool_dm.session_persistence.to_dict() del sp_dm_dict['pool'] sp['pool_id'] = pool_dm.id sp.update(update_sp) self.assertEqual(sp, sp_dm_dict) def test_update_pool_with_nonexisting_sp(self): pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'provisioning_status': constants.ACTIVE, 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid()} pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} update_sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, 'cookie_name': 'monster_cookie', 'persistence_granularity': None, 'persistence_timeout': None} update_pool.update({'session_persistence': update_sp}) new_pool_dm = self.repos.update_pool_and_sp( self.session, pool_dm.id, update_pool) sp_dm_dict = new_pool_dm.session_persistence.to_dict() del sp_dm_dict['pool'] update_sp['pool_id'] = pool_dm.id self.assertEqual(update_sp, sp_dm_dict) def test_update_pool_with_nonexisting_sp_delete_sp(self): pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE,
'provisioning_status': constants.ACTIVE, 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid()} pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool', 'session_persistence': None} new_pool_dm = self.repos.update_pool_and_sp( self.session, pool_dm.id, update_pool) self.assertIsNone(new_pool_dm.session_persistence) def test_update_pool_with_existing_sp_delete_sp(self): pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'provisioning_status': constants.PENDING_CREATE, 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid()} sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, 'cookie_name': 'cookie_monster', 'pool_id': pool['id']} pool.update({'session_persistence': sp}) pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool', 'session_persistence': {}} new_pool_dm = self.repos.update_pool_and_sp( self.session, pool_dm.id, update_pool) self.assertIsNone(new_pool_dm.session_persistence) def test_update_pool_with_cert(self): pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid(), 'provisioning_status': constants.ACTIVE, 'tls_enabled': False, 'tls_ciphers': None} pool_dm = self.repos.create_pool_on_load_balancer( self.session, pool, listener_id=self.listener.id) update_pool = {'tls_certificate_id': uuidutils.generate_uuid()} new_pool_dm = self.repos.update_pool_and_sp( self.session, pool_dm.id, update_pool) pool_dm_dict = new_pool_dm.to_dict() # These are not defined in the sample pool dict but will # be in the live data. del pool_dm_dict['members'] del pool_dm_dict['health_monitor'] del pool_dm_dict['session_persistence'] del pool_dm_dict['listeners'] del pool_dm_dict['load_balancer'] del pool_dm_dict['load_balancer_id'] del pool_dm_dict['l7policies'] del pool_dm_dict['created_at'] del pool_dm_dict['updated_at'] del pool_dm_dict['tags'] del pool_dm_dict['ca_tls_certificate_id'] del pool_dm_dict['crl_container_id'] pool.update(update_pool) self.assertEqual(pool, pool_dm_dict) def test_create_load_balancer_tree(self): self.skipTest("SQLAlchemy/PySqlite transaction handling is broken.
" "Version 1.3.16 of sqlachemy changes how sqlite3 " "transactions are handled and this test fails as " "The LB created early in this process now disappears " "from the transaction context.") project_id = uuidutils.generate_uuid() member = {'project_id': project_id, 'ip_address': '11.0.0.1', 'protocol_port': 80, 'enabled': True, 'backup': False, 'operating_status': constants.ONLINE, 'provisioning_status': constants.PENDING_CREATE, 'id': uuidutils.generate_uuid()} health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'fall_threshold': 1, 'rise_threshold': 1, 'enabled': True, 'operating_status': constants.OFFLINE, 'provisioning_status': constants.PENDING_CREATE} sp = {'type': constants.SESSION_PERSISTENCE_APP_COOKIE, 'cookie_name': 'cookie_name'} pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'listener_id': None, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'provisioning_status': constants.PENDING_CREATE, 'project_id': project_id, 'members': [member], 'health_monitor': health_monitor, 'session_persistence': sp, 'id': uuidutils.generate_uuid()} sp['pool_id'] = pool.get('id') member['pool_id'] = pool.get('id') health_monitor['pool_id'] = pool.get('id') l7rule = {'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'operating_status': constants.ONLINE, 'provisioning_status': constants.PENDING_CREATE, 'value': 'localhost', 'enabled': True} r_health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'fall_threshold': 1, 'rise_threshold': 1, 'enabled': True, 'operating_status': constants.OFFLINE, 'provisioning_status': constants.PENDING_CREATE} redirect_pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'project_id': project_id, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'provisioning_status': constants.PENDING_CREATE, 'id': uuidutils.generate_uuid(), 'health_monitor': r_health_monitor} l7policy = {'name': 'l7policy1', 'enabled': True, 'description': 'l7policy_description', 'position': 1, 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool': redirect_pool, 'l7rules': [l7rule], 'redirect_pool_id': redirect_pool.get('id'), 'id': uuidutils.generate_uuid(), 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE} l7rule['l7policy_id'] = l7policy.get('id') listener = {'project_id': project_id, 'name': 'listener1', 'description': 'listener_description', 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 1, 'enabled': True, 'default_pool': pool, 'l7policies': [l7policy], 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} l7policy['listener_id'] = listener.get('id') vip = {'ip_address': '192.0.2.1', 'port_id': uuidutils.generate_uuid(), 'subnet_id': uuidutils.generate_uuid()} lb = {'name': 'lb1', 'description': 'desc1', 'enabled': True, 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, 'vrrp_group': None, 'server_group_id': uuidutils.generate_uuid(), 'project_id': project_id, 'vip': vip, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid(), 'listeners': [listener]} listener['load_balancer_id'] = lb.get('id') pool['load_balancer_id'] = lb.get('id') redirect_pool['load_balancer_id'] = lb.get('id') lock_session = 
db_api.get_session(autocommit=False) db_lb = self.repos.create_load_balancer_tree(self.session, lock_session, lb) self.assertIsNotNone(db_lb) self.assertIsInstance(db_lb, models.LoadBalancer) def test_sqlite_transactions_broken(self): """This test is a canary for pysqlite fixing transaction handling. When this test starts failing, we can fix and un-skip the deadlock test below: `test_create_load_balancer_tree_quotas`. """ self.skipTest("SQLAlchemy/PySqlite transaction handling is broken. " "Version 1.3.16 of SQLAlchemy changes how sqlite3 " "transactions are handled and this test fails as " "The LB created early in this process now disappears " "from the transaction context.") project_id = uuidutils.generate_uuid() vip = {'ip_address': '192.0.2.1', 'port_id': uuidutils.generate_uuid(), 'subnet_id': uuidutils.generate_uuid()} lb = {'name': 'lb1', 'description': 'desc1', 'enabled': True, 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, 'vrrp_group': None, 'server_group_id': uuidutils.generate_uuid(), 'project_id': project_id, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} session = db_api.get_session() lock_session = db_api.get_session(autocommit=False) lbs = lock_session.query(db_models.LoadBalancer).filter_by( project_id=project_id).all() self.assertEqual(0, len(lbs)) # Initially: 0 self.repos.create_load_balancer_and_vip(lock_session, lb, vip) lbs = lock_session.query(db_models.LoadBalancer).filter_by( project_id=project_id).all() self.assertEqual(1, len(lbs)) # After create: 1 lock_session.rollback() lbs = lock_session.query(db_models.LoadBalancer).filter_by( project_id=project_id).all() self.assertEqual(0, len(lbs)) # After rollback: 0 self.repos.create_load_balancer_and_vip(lock_session, lb, vip) lbs = lock_session.query(db_models.LoadBalancer).filter_by( project_id=project_id).all() self.assertEqual(1, len(lbs)) # After create: 1 lock_session.rollback() lbs = lock_session.query(db_models.LoadBalancer).filter_by( project_id=project_id).all() self.assertEqual(0, len(lbs)) # After rollback: 0 # Force a count(), which breaks transaction integrity in pysqlite session.query(db_models.LoadBalancer).filter( db_models.LoadBalancer.project_id == project_id).count() self.repos.create_load_balancer_and_vip(lock_session, lb, vip) lbs = lock_session.query(db_models.LoadBalancer).filter_by( project_id=project_id).all() self.assertEqual(1, len(lbs)) # After create: 1 lock_session.rollback() lbs = lock_session.query(db_models.LoadBalancer).filter_by( project_id=project_id).all() self.assertEqual(1, len(lbs)) # After rollback: 1 (broken!) def test_create_load_balancer_tree_quotas(self): self.skipTest("PySqlite transaction handling is broken.
We can unskip" "this when `test_sqlite_transactions_broken` fails.") conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.TESTING) project_id = uuidutils.generate_uuid() member = {'project_id': project_id, 'ip_address': '11.0.0.1', 'protocol_port': 80, 'enabled': True, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} member2 = {'project_id': project_id, 'ip_address': '11.0.0.2', 'protocol_port': 81, 'enabled': True, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} member3 = {'project_id': project_id, 'ip_address': '11.0.0.3', 'protocol_port': 81, 'enabled': True, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'fall_threshold': 1, 'rise_threshold': 1, 'enabled': True} sp = {'type': constants.SESSION_PERSISTENCE_APP_COOKIE, 'cookie_name': 'cookie_name'} pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'listener_id': None, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'project_id': project_id, 'members': [member], 'health_monitor': health_monitor, 'session_persistence': sp, 'id': uuidutils.generate_uuid()} pool2 = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool2', 'description': 'desc1', 'listener_id': None, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'project_id': project_id, 'members': [member2], 'health_monitor': health_monitor, 'id': uuidutils.generate_uuid()} sp['pool_id'] = pool.get('id') member['pool_id'] = pool.get('id') health_monitor['pool_id'] = pool.get('id') l7rule = {'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'value': 'localhost'} r_health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'fall_threshold': 1, 'rise_threshold': 1, 'enabled': True} redirect_pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', 'description': 'desc1', 'project_id': project_id, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid(), 'health_monitor': r_health_monitor, 'members': [member3]} l7policy = {'name': 'l7policy1', 'enabled': True, 'description': 'l7policy_description', 'position': 1, 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool': redirect_pool, 'l7rules': [l7rule], 'redirect_pool_id': redirect_pool.get('id'), 'id': uuidutils.generate_uuid()} l7rule['l7policy_id'] = l7policy.get('id') listener = {'project_id': project_id, 'name': 'listener1', 'description': 'listener_description', 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'connection_limit': 1, 'enabled': True, 'default_pool': pool, 'l7policies': [l7policy], 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} listener2 = {'project_id': project_id, 'name': 'listener2', 'description': 'listener_description', 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 83, 'connection_limit': 1, 'enabled': True, 'default_pool': pool2, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} l7policy['listener_id'] = listener.get('id') vip = {'ip_address': '192.0.2.1', 'port_id': uuidutils.generate_uuid(), 'subnet_id': uuidutils.generate_uuid()} lb 
= {'name': 'lb1', 'description': 'desc1', 'enabled': True, 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, 'vrrp_group': None, 'server_group_id': uuidutils.generate_uuid(), 'project_id': project_id, 'vip': vip, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid(), 'listeners': [listener, listener2]} listener['load_balancer_id'] = lb.get('id') listener2['load_balancer_id'] = lb.get('id') pool['load_balancer_id'] = lb.get('id') redirect_pool['load_balancer_id'] = lb.get('id') lb2_health_monitor = {'type': constants.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'fall_threshold': 1, 'rise_threshold': 1, 'enabled': True} lb2_member = {'project_id': project_id, 'ip_address': '11.0.0.3', 'protocol_port': 80, 'enabled': True, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} lb2_pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'lb2_pool', 'description': 'desc1', 'listener_id': None, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, 'enabled': True, 'operating_status': constants.ONLINE, 'project_id': project_id, 'members': [lb2_member], 'health_monitor': lb2_health_monitor, 'session_persistence': sp, 'id': uuidutils.generate_uuid()} lb2_listener = {'project_id': project_id, 'name': 'lb2_listener', 'description': 'listener_description', 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 83, 'connection_limit': 1, 'enabled': True, 'default_pool': lb2_pool, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid()} lb2 = {'name': 'lb2', 'description': 'desc2', 'enabled': True, 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, 'vrrp_group': None, 'server_group_id': uuidutils.generate_uuid(), 'project_id': project_id, 'vip': vip, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.ONLINE, 'id': uuidutils.generate_uuid(), 'listeners': [lb2_listener]} lb2_listener['load_balancer_id'] = lb2.get('id') lb2_pool['load_balancer_id'] = lb2.get('id') # Test zero quota quota = {'load_balancer': 0, 'listener': 10, 'pool': 10, 'health_monitor': 10, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb1')) quota = {'load_balancer': 10, 'listener': 0, 'pool': 10, 'health_monitor': 10, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb1')) quota = {'load_balancer': 10, 'listener': 10, 'pool': 0, 'health_monitor': 10, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb1')) quota = {'load_balancer': 10, 'listener': 10, 'pool': 10, 'health_monitor': 0, 'member': 
10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb1')) quota = {'load_balancer': 10, 'listener': 10, 'pool': 10, 'health_monitor': 10, 'member': 0} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb1')) # Test l7policy quota for pools quota = {'load_balancer': 10, 'listener': 10, 'pool': 1, 'health_monitor': 10, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb1')) # Test l7policy quota for health monitor quota = {'load_balancer': 10, 'listener': 10, 'pool': 10, 'health_monitor': 1, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb1')) # Test l7policy quota for member quota = {'load_balancer': 10, 'listener': 10, 'pool': 10, 'health_monitor': 10, 'member': 1} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb1')) # ### Test load balancer quota # Test one quota, attempt to create another quota = {'load_balancer': 1, 'listener': 10, 'pool': 10, 'health_monitor': 10, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.repos.create_load_balancer_tree(self.session, lock_session, copy.deepcopy(lb)) # Check if first LB build passed quota checks self.assertIsNotNone(self.repos.load_balancer.get(self.session, name='lb1')) # Try building another LB, it should fail lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb2)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb2')) # ### Test listener quota # Create with custom quotas and limit to two listener (lb has two), # expect error of too many listeners/over quota quota = {'load_balancer': 10, 'listener': 2, 'pool': 10, 'health_monitor': 10, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) 
self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb2)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb2')) # ### Test pool quota # Create with custom quotas and limit to two pools (lb has two), # expect error of too many pool/over quota quota = {'load_balancer': 10, 'listener': 10, 'pool': 2, 'health_monitor': 10, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb2)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb2')) # ### Test health monitor quota # Create with custom quotas and limit to one health monitor, # expect error of too many health monitor/over quota quota = {'load_balancer': 10, 'listener': 10, 'pool': 10, 'health_monitor': 1, 'member': 10} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb2)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb2')) # ### Test member quota # Create with custom quotas and limit to two member (lb has two), # expect error of too many member/over quota quota = {'load_balancer': 10, 'listener': 10, 'pool': 10, 'health_monitor': 10, 'member': 2} self.repos.quotas.update(self.session, project_id, quota=quota) lock_session = db_api.get_session(autocommit=False) self.assertRaises( exceptions.QuotaException, self.repos.create_load_balancer_tree, self.session, lock_session, copy.deepcopy(lb2)) # Make sure we didn't create the load balancer anyway self.assertIsNone(self.repos.load_balancer.get(self.session, name='lb2')) def test_check_quota_met(self): project_id = uuidutils.generate_uuid() # Test auth_strategy == NOAUTH conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) conf.config(group='api_settings', auth_strategy=constants.TESTING) # Test check for missing project_id self.assertRaises(exceptions.MissingProjectID, self.repos.check_quota_met, self.session, self.session, models.LoadBalancer, None) # Test non-quota object project_id = uuidutils.generate_uuid() self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.SessionPersistence, project_id)) # Test DB deadlock case project_id = uuidutils.generate_uuid() mock_session = mock.MagicMock() mock_session.query = mock.MagicMock( side_effect=db_exception.DBDeadlock) self.assertRaises(exceptions.ProjectBusyException, self.repos.check_quota_met, self.session, mock_session, models.LoadBalancer, project_id) # ### Test load balancer quota # Test with no pre-existing quota record default 0 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_load_balancer_quota=0) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertIsNone(self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test with no pre-existing quota record 
default 1 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_load_balancer_quota=1) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test with no pre-existing quota record default unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_load_balancer_quota=constants.QUOTA_UNLIMITED) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test above project adding another load balancer self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test upgrade case with pre-quota load balancers project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_load_balancer_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) # Test upgrade case with pre-quota deleted load balancers project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_load_balancer_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.DELETED, operating_status=constants.ONLINE, enabled=True) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test pre-existing quota with quota of zero project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_load_balancer_quota=10) quota = {'load_balancer': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) # Test pre-existing quota with quota of one project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_load_balancer_quota=0) quota = {'load_balancer': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test pre-existing quota with quota of unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_load_balancer_quota=0) quota = {'load_balancer': constants.QUOTA_UNLIMITED} 
self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test above project adding another load balancer self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.LoadBalancer, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # ### Test listener quota # Test with no pre-existing quota record default 0 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_listener_quota=0) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertIsNone(self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test with no pre-existing quota record default 1 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_listener_quota=1) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test with no pre-existing quota record default unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_listener_quota=constants.QUOTA_UNLIMITED) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test above project adding another listener self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test upgrade case with pre-quota listener project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_listener_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) self.repos.listener.create( self.session, protocol=constants.PROTOCOL_HTTP, protocol_port=80, enabled=True, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, project_id=project_id, load_balancer_id=lb.id) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) # Test upgrade case with pre-quota deleted listener project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_listener_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) self.repos.listener.create( self.session, protocol=constants.PROTOCOL_HTTP, protocol_port=80, enabled=True, provisioning_status=constants.DELETED, operating_status=constants.ONLINE, project_id=project_id, load_balancer_id=lb.id) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(1, self.repos.quotas.get( 
self.session, project_id=project_id).in_use_listener) # Test pre-existing quota with quota of zero project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_listener_quota=10) quota = {'listener': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) # Test pre-existing quota with quota of one project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_listener_quota=0) quota = {'listener': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test pre-existing quota with quota of unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_listener_quota=0) quota = {'listener': constants.QUOTA_UNLIMITED} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test above project adding another listener self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Listener, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # ### Test pool quota # Test with no pre-existing quota record default 0 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_pool_quota=0) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertIsNone(self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test with no pre-existing quota record default 1 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_pool_quota=1) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test with no pre-existing quota record default unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_pool_quota=constants.QUOTA_UNLIMITED) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test above project adding another pool self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test upgrade case with pre-quota pool project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_pool_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", 
provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) pool = self.repos.pool.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="pool1", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=lb.id) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) # Test upgrade case with pre-quota deleted pool project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_pool_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) pool = self.repos.pool.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="pool1", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.DELETED, operating_status=constants.ONLINE, enabled=True, load_balancer_id=lb.id) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test pre-existing quota with quota of zero project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_pool_quota=10) quota = {'pool': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) # Test pre-existing quota with quota of one project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_pool_quota=0) quota = {'pool': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test pre-existing quota with quota of unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_pool_quota=0) quota = {'pool': constants.QUOTA_UNLIMITED} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test above project adding another pool self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Pool, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # ### Test health monitor quota # Test with no pre-existing quota record default 0 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_health_monitor_quota=0) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertIsNone(self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test with no pre-existing quota record default 1 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_health_monitor_quota=1) 
self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test with no pre-existing quota record default unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_health_monitor_quota=constants.QUOTA_UNLIMITED) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test above project adding another health monitor self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test upgrade case with pre-quota health monitor project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_health_monitor_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) pool = self.repos.pool.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="pool1", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=lb.id) self.repos.health_monitor.create( self.session, project_id=project_id, name="health_mon1", type=constants.HEALTH_MONITOR_HTTP, delay=1, timeout=1, fall_threshold=1, rise_threshold=1, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, pool_id=pool.id) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) # Test upgrade case with pre-quota deleted health monitor project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_health_monitor_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) pool = self.repos.pool.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="pool1", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=lb.id) self.repos.health_monitor.create( self.session, project_id=project_id, name="health_mon1", type=constants.HEALTH_MONITOR_HTTP, delay=1, timeout=1, fall_threshold=1, rise_threshold=1, provisioning_status=constants.DELETED, operating_status=constants.OFFLINE, enabled=True, pool_id=pool.id) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test pre-existing quota with quota of zero project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_health_monitor_quota=10) quota = 
{'health_monitor': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) # Test pre-existing quota with quota of one project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_health_monitor_quota=0) quota = {'health_monitor': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test pre-existing quota with quota of unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_health_monitor_quota=0) quota = {'health_monitor': constants.QUOTA_UNLIMITED} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test above project adding another health monitor self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.HealthMonitor, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # ### Test member quota # Test with no pre-existing quota record default 0 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_member_quota=0) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertIsNone(self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test with no pre-existing quota record default 1 project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_member_quota=1) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test with no pre-existing quota record default unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_member_quota=constants.QUOTA_UNLIMITED) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test above project adding another member self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test upgrade case with pre-quota member project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_member_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) pool = 
self.repos.pool.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="pool1", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=lb.id) self.repos.member.create( self.session, project_id=project_id, ip_address='192.0.2.1', protocol_port=80, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, pool_id=pool.id, backup=False) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) # Test upgrade case with pre-quota deleted member project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_member_quota=1) lb = self.repos.load_balancer.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) pool = self.repos.pool.create( self.session, id=uuidutils.generate_uuid(), project_id=project_id, name="pool1", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=lb.id) self.repos.member.create( self.session, project_id=project_id, ip_address='192.0.2.1', protocol_port=80, provisioning_status=constants.DELETED, operating_status=constants.ONLINE, enabled=True, pool_id=pool.id, backup=False) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test pre-existing quota with quota of zero project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_member_quota=10) quota = {'member': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) # Test pre-existing quota with quota of one project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_member_quota=0) quota = {'member': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test above project is now at quota self.assertTrue(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test pre-existing quota with quota of unlimited project_id = uuidutils.generate_uuid() conf.config(group='quotas', default_member_quota=0) quota = {'member': constants.QUOTA_UNLIMITED} self.repos.quotas.update(self.session, project_id, quota=quota) self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(1, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test above project adding another member self.assertFalse(self.repos.check_quota_met(self.session, self.session, models.Member, project_id)) self.assertEqual(2, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) def test_decrement_quota(self): # Test decrement on non-existent quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) 
conf.config(group='api_settings', auth_strategy=constants.NOAUTH) self.repos.decrement_quota(self.session, models.LoadBalancer, project_id) self.assertEqual(0, self.repos.quotas.count(self.session, project_id=project_id)) conf.config(group='api_settings', auth_strategy=constants.TESTING) # Test decrement on non-existent quota project_id = uuidutils.generate_uuid() self.repos.decrement_quota(self.session, models.LoadBalancer, project_id) self.assertEqual(0, self.repos.quotas.count(self.session, project_id=project_id)) # Test DB deadlock case project_id = uuidutils.generate_uuid() mock_session = mock.MagicMock() mock_session.query = mock.MagicMock( side_effect=db_exception.DBDeadlock) self.assertRaises(exceptions.ProjectBusyException, self.repos.decrement_quota, mock_session, models.LoadBalancer, project_id) # ### Test load balancer quota # Test decrement on zero in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_load_balancer': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.LoadBalancer, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test decrement on zero in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_load_balancer': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.LoadBalancer, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) conf.config(group='api_settings', auth_strategy=constants.TESTING) # Test decrement on in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_load_balancer': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.LoadBalancer, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) # Test decrement on in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_load_balancer': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.LoadBalancer, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_load_balancer) conf.config(group='api_settings', auth_strategy=constants.TESTING) # ### Test listener quota # Test decrement on zero in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_listener': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Listener, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test decrement on zero in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_listener': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Listener, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) conf.config(group='api_settings', auth_strategy=constants.TESTING) # 
Test decrement on in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_listener': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Listener, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) # Test decrement on in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_listener': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Listener, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_listener) conf.config(group='api_settings', auth_strategy=constants.TESTING) # ### Test pool quota # Test decrement on zero in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_pool': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Pool, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test decrement on zero in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_pool': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Pool, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) conf.config(group='api_settings', auth_strategy=constants.TESTING) # Test decrement on in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_pool': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Pool, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) # Test decrement on in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_pool': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Pool, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_pool) conf.config(group='api_settings', auth_strategy=constants.TESTING) # ### Test health monitor quota # Test decrement on zero in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_health_monitor': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.HealthMonitor, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test decrement on zero in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_health_monitor': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.HealthMonitor, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) conf.config(group='api_settings', auth_strategy=constants.TESTING) # Test decrement on in use 
quota project_id = uuidutils.generate_uuid() quota = {'in_use_health_monitor': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.HealthMonitor, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) # Test decrement on in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_health_monitor': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.HealthMonitor, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_health_monitor) conf.config(group='api_settings', auth_strategy=constants.TESTING) # ### Test member quota # Test decrement on zero in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_member': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Member, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test decrement on zero in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_member': 0} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Member, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) conf.config(group='api_settings', auth_strategy=constants.TESTING) # Test decrement on in use quota project_id = uuidutils.generate_uuid() quota = {'in_use_member': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Member, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) # Test decrement on in use quota with noauth project_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='api_settings', auth_strategy=constants.NOAUTH) quota = {'in_use_member': 1} self.repos.quotas.update(self.session, project_id, quota=quota) self.repos.decrement_quota(self.session, models.Member, project_id) self.assertEqual(0, self.repos.quotas.get( self.session, project_id=project_id).in_use_member) conf.config(group='api_settings', auth_strategy=constants.TESTING) def test_get_amphora_stats(self): listener2_id = uuidutils.generate_uuid() self.repos.listener_stats.create( self.session, listener_id=self.listener.id, amphora_id=self.amphora.id, bytes_in=1, bytes_out=2, active_connections=3, total_connections=4, request_errors=5) self.repos.listener_stats.create( self.session, listener_id=listener2_id, amphora_id=self.amphora.id, bytes_in=6, bytes_out=7, active_connections=8, total_connections=9, request_errors=10) amp_stats = self.repos.get_amphora_stats(self.session, self.amphora.id) self.assertEqual(2, len(amp_stats)) for stats in amp_stats: if stats['listener_id'] == self.listener.id: self.assertEqual(self.load_balancer.id, stats['loadbalancer_id']) self.assertEqual(self.listener.id, stats['listener_id']) self.assertEqual(self.amphora.id, stats['id']) self.assertEqual(1, stats['bytes_in']) self.assertEqual(2, stats['bytes_out']) self.assertEqual(3, stats['active_connections']) 
self.assertEqual(4, stats['total_connections']) self.assertEqual(5, stats['request_errors']) else: self.assertEqual(self.load_balancer.id, stats['loadbalancer_id']) self.assertEqual(listener2_id, stats['listener_id']) self.assertEqual(self.amphora.id, stats['id']) self.assertEqual(6, stats['bytes_in']) self.assertEqual(7, stats['bytes_out']) self.assertEqual(8, stats['active_connections']) self.assertEqual(9, stats['total_connections']) self.assertEqual(10, stats['request_errors']) class PoolRepositoryTest(BaseRepositoryTest): def create_pool(self, pool_id, project_id): pool = self.pool_repo.create( self.session, id=pool_id, project_id=project_id, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, tags=['test_tag']) return pool def test_get(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) new_pool = self.pool_repo.get(self.session, id=pool.id) self.assertIsInstance(new_pool, models.Pool) self.assertEqual(pool, new_pool) def test_get_all(self): pool_one = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) pool_two = self.create_pool(pool_id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2) pool_list, _ = self.pool_repo.get_all(self.session, project_id=self.FAKE_UUID_2) self.assertIsInstance(pool_list, list) self.assertEqual(2, len(pool_list)) self.assertEqual(pool_one, pool_list[0]) self.assertEqual(pool_two, pool_list[1]) def test_create(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) self.assertIsInstance(pool, models.Pool) self.assertEqual(self.FAKE_UUID_2, pool.project_id) self.assertEqual("pool_test", pool.name) self.assertEqual("pool_description", pool.description) self.assertEqual(constants.PROTOCOL_HTTP, pool.protocol) self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, pool.lb_algorithm) self.assertEqual(constants.ONLINE, pool.operating_status) def test_update(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) self.pool_repo.update(self.session, pool.id, description="other_pool_description") new_pool = self.pool_repo.get(self.session, id=self.FAKE_UUID_1) self.assertEqual("other_pool_description", new_pool.description) def test_delete(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) self.pool_repo.delete(self.session, id=pool.id) self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) def test_delete_with_member(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) member = self.member_repo.create(self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, pool_id=pool.id, ip_address="192.0.2.1", protocol_port=80, enabled=True, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, backup=False) new_pool = self.pool_repo.get(self.session, id=pool.id) self.assertEqual(1, len(new_pool.members)) self.assertEqual(member, new_pool.members[0]) self.pool_repo.delete(self.session, id=pool.id) self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) self.assertIsNone(self.member_repo.get(self.session, id=member.id)) def test_delete_with_health_monitor(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) hm = self.hm_repo.create(self.session, pool_id=pool.id, type=constants.HEALTH_MONITOR_HTTP, delay=1, timeout=1, fall_threshold=1, rise_threshold=1, enabled=True, 
provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE) new_pool = self.pool_repo.get(self.session, id=pool.id) self.assertEqual(pool, new_pool) self.assertEqual(hm, new_pool.health_monitor) self.pool_repo.delete(self.session, id=pool.id) self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) self.assertIsNone(self.hm_repo.get(self.session, pool_id=hm.pool_id)) def test_delete_with_session_persistence(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) sp = self.sp_repo.create( self.session, pool_id=pool.id, type=constants.SESSION_PERSISTENCE_HTTP_COOKIE, cookie_name="cookie_name") new_pool = self.pool_repo.get(self.session, id=pool.id) self.assertEqual(pool, new_pool) self.assertEqual(sp, new_pool.session_persistence) self.pool_repo.delete(self.session, id=new_pool.id) self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) self.assertIsNone(self.sp_repo.get(self.session, pool_id=sp.pool_id)) def test_delete_with_all_children(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) hm = self.hm_repo.create(self.session, pool_id=pool.id, type=constants.HEALTH_MONITOR_HTTP, delay=1, timeout=1, fall_threshold=1, rise_threshold=1, enabled=True, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE) member = self.member_repo.create(self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, pool_id=pool.id, ip_address="192.0.2.1", protocol_port=80, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, backup=False) sp = self.sp_repo.create( self.session, pool_id=pool.id, type=constants.SESSION_PERSISTENCE_HTTP_COOKIE, cookie_name="cookie_name") new_pool = self.pool_repo.get(self.session, id=pool.id) self.assertEqual(pool, new_pool) self.assertEqual(1, len(new_pool.members)) new_member = self.member_repo.get(self.session, id=member.id) self.assertEqual(new_member, new_pool.members[0]) self.assertEqual(hm, new_pool.health_monitor) self.assertEqual(sp, new_pool.session_persistence) self.pool_repo.delete(self.session, id=pool.id) self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) self.assertIsNone(self.member_repo.get(self.session, id=member.id)) self.assertIsNone(self.hm_repo.get(self.session, pool_id=hm.pool_id)) self.assertIsNone(self.sp_repo.get(self.session, pool_id=sp.pool_id)) def test_get_children_count(self): pool = self.create_pool(pool_id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2) hm_count, member_count = ( self.pool_repo.get_children_count(self.session, pool.id)) self.assertEqual(0, hm_count) self.assertEqual(0, member_count) self.hm_repo.create(self.session, pool_id=pool.id, type=constants.HEALTH_MONITOR_HTTP, delay=1, timeout=1, fall_threshold=1, rise_threshold=1, enabled=True, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE) hm_count, member_count = ( self.pool_repo.get_children_count(self.session, pool.id)) self.assertEqual(1, hm_count) self.assertEqual(0, member_count) self.member_repo.create(self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, pool_id=pool.id, ip_address="192.0.2.1", protocol_port=80, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, backup=False) self.member_repo.create(self.session, id=self.FAKE_UUID_4, project_id=self.FAKE_UUID_2, pool_id=pool.id, ip_address="192.0.2.2", protocol_port=80, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, backup=False) hm_count, member_count = ( 
self.pool_repo.get_children_count(self.session, pool.id)) self.assertEqual(1, hm_count) self.assertEqual(2, member_count) class MemberRepositoryTest(BaseRepositoryTest): def setUp(self): super(MemberRepositoryTest, self).setUp() self.pool = self.pool_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, tags=['test_tag']) def create_member(self, member_id, project_id, pool_id, ip_address): member = self.member_repo.create(self.session, id=member_id, project_id=project_id, pool_id=pool_id, ip_address=ip_address, protocol_port=80, operating_status=constants.ONLINE, provisioning_status=constants.ACTIVE, enabled=True, backup=False) return member def test_get(self): member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2, self.pool.id, "192.0.2.1") new_member = self.member_repo.get(self.session, id=member.id) self.assertIsInstance(new_member, models.Member) self.assertEqual(member, new_member) def test_get_all(self): member_one = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2, self.pool.id, "192.0.2.1") member_two = self.create_member(self.FAKE_UUID_3, self.FAKE_UUID_2, self.pool.id, "192.0.2.2") member_list, _ = self.member_repo.get_all(self.session, project_id=self.FAKE_UUID_2) self.assertIsInstance(member_list, list) self.assertEqual(2, len(member_list)) self.assertEqual(member_one, member_list[0]) self.assertEqual(member_two, member_list[1]) def test_create(self): member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2, self.pool.id, ip_address="192.0.2.1") new_member = self.member_repo.get(self.session, id=member.id) self.assertEqual(self.FAKE_UUID_1, new_member.id) self.assertEqual(self.FAKE_UUID_2, new_member.project_id) self.assertEqual(self.pool.id, new_member.pool_id) self.assertEqual("192.0.2.1", new_member.ip_address) self.assertEqual(80, new_member.protocol_port) self.assertEqual(constants.ONLINE, new_member.operating_status) self.assertTrue(new_member.enabled) def test_update(self): ip_address_change = "192.0.2.2" member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2, self.pool.id, "192.0.2.1") self.member_repo.update(self.session, id=member.id, ip_address=ip_address_change) new_member = self.member_repo.get(self.session, id=member.id) self.assertEqual(ip_address_change, new_member.ip_address) def test_delete(self): member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2, self.pool.id, "192.0.2.1") self.member_repo.delete(self.session, id=member.id) self.assertIsNone(self.member_repo.get(self.session, id=member.id)) new_pool = self.pool_repo.get(self.session, id=self.pool.id) self.assertIsNotNone(new_pool) self.assertEqual(0, len(new_pool.members)) def test_update_pool_members(self): member1 = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2, self.pool.id, "192.0.2.1") member2 = self.create_member(self.FAKE_UUID_3, self.FAKE_UUID_2, self.pool.id, "192.0.2.2") self.member_repo.update_pool_members( self.session, pool_id=self.pool.id, operating_status=constants.OFFLINE) new_member1 = self.member_repo.get(self.session, id=member1.id) new_member2 = self.member_repo.get(self.session, id=member2.id) self.assertEqual(constants.OFFLINE, new_member1.operating_status) self.assertEqual(constants.OFFLINE, new_member2.operating_status) class SessionPersistenceRepositoryTest(BaseRepositoryTest): def setUp(self): 
super(SessionPersistenceRepositoryTest, self).setUp() self.pool = self.pool_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) def create_session_persistence(self, pool_id): sp = self.sp_repo.create( self.session, pool_id=pool_id, type=constants.SESSION_PERSISTENCE_HTTP_COOKIE, cookie_name="cookie_name") return sp def test_get(self): sp = self.create_session_persistence(self.pool.id) new_sp = self.sp_repo.get(self.session, pool_id=sp.pool_id) self.assertIsInstance(new_sp, models.SessionPersistence) self.assertEqual(sp, new_sp) def test_create(self): sp = self.create_session_persistence(self.pool.id) new_sp = self.sp_repo.get(self.session, pool_id=sp.pool_id) self.assertEqual(self.pool.id, new_sp.pool_id) self.assertEqual(constants.SESSION_PERSISTENCE_HTTP_COOKIE, new_sp.type) self.assertEqual("cookie_name", new_sp.cookie_name) def test_update(self): name_change = "new_cookie_name" sp = self.create_session_persistence(self.pool.id) self.sp_repo.update(self.session, pool_id=sp.pool_id, cookie_name=name_change) new_sp = self.sp_repo.get(self.session, pool_id=sp.pool_id) self.assertEqual(name_change, new_sp.cookie_name) def test_delete(self): sp = self.create_session_persistence(self.pool.id) self.sp_repo.delete(self.session, pool_id=sp.pool_id) self.assertIsNone(self.member_repo.get(self.session, pool_id=sp.pool_id)) new_pool = self.pool_repo.get(self.session, id=self.pool.id) self.assertIsNotNone(new_pool) self.assertIsNone(new_pool.session_persistence) class TestListenerRepositoryTest(BaseRepositoryTest): def setUp(self): super(TestListenerRepositoryTest, self).setUp() self.load_balancer = self.lb_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, server_group_id=self.FAKE_UUID_1) def create_listener(self, listener_id, port, default_pool_id=None, provisioning_status=constants.ACTIVE): listener = self.listener_repo.create( self.session, id=listener_id, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", protocol=constants.PROTOCOL_HTTP, protocol_port=port, connection_limit=1, load_balancer_id=self.load_balancer.id, default_pool_id=default_pool_id, operating_status=constants.ONLINE, provisioning_status=provisioning_status, enabled=True, peer_port=1025, tags=['test_tag']) return listener def create_amphora(self, amphora_id, loadbalancer_id): amphora = self.amphora_repo.create(self.session, id=amphora_id, load_balancer_id=loadbalancer_id, compute_id=self.FAKE_UUID_3, status=constants.ACTIVE, vrrp_ip=self.FAKE_IP, lb_network_ip=self.FAKE_IP) return amphora def create_loadbalancer(self, lb_id): lb = self.lb_repo.create(self.session, id=lb_id, project_id=self.FAKE_UUID_2, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) return lb def test_get(self): listener = self.create_listener(self.FAKE_UUID_1, 80) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsInstance(new_listener, models.Listener) self.assertEqual(listener, new_listener) def test_get_all(self): listener_one = self.create_listener(self.FAKE_UUID_1, 80) listener_two = 
self.create_listener(self.FAKE_UUID_3, 88) listener_list, _ = self.listener_repo.get_all( self.session, project_id=self.FAKE_UUID_2) self.assertIsInstance(listener_list, list) self.assertEqual(2, len(listener_list)) self.assertEqual(listener_one, listener_list[0]) self.assertEqual(listener_two, listener_list[1]) def test_create(self): listener = self.create_listener(self.FAKE_UUID_1, 80) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertEqual(self.FAKE_UUID_1, new_listener.id) self.assertEqual(self.FAKE_UUID_2, new_listener.project_id) self.assertEqual("listener_name", new_listener.name) self.assertEqual("listener_description", new_listener.description) self.assertEqual(constants.PROTOCOL_HTTP, new_listener.protocol) self.assertEqual(80, new_listener.protocol_port) self.assertEqual(1, new_listener.connection_limit) self.assertEqual(self.load_balancer.id, new_listener.load_balancer_id) self.assertEqual(constants.ACTIVE, new_listener.provisioning_status) self.assertEqual(constants.ONLINE, new_listener.operating_status) self.assertEqual(1025, new_listener.peer_port) self.assertTrue(new_listener.enabled) def test_create_no_peer_port(self): lb = self.create_loadbalancer(uuidutils.generate_uuid()) listener = self.listener_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, protocol_port=80, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertEqual(1025, new_listener.peer_port) def test_create_no_peer_port_increments(self): lb = self.create_loadbalancer(uuidutils.generate_uuid()) listener_a = self.listener_repo.create( self.session, id=uuidutils.generate_uuid(), project_id=self.FAKE_UUID_2, load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, protocol_port=80, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) listener_b = self.listener_repo.create( self.session, id=uuidutils.generate_uuid(), project_id=self.FAKE_UUID_2, load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, protocol_port=81, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) new_listener_a = self.listener_repo.get(self.session, id=listener_a.id) new_listener_b = self.listener_repo.get(self.session, id=listener_b.id) self.assertEqual(1025, new_listener_a.peer_port) self.assertEqual(1026, new_listener_b.peer_port) def test_create_listener_on_different_lb_than_default_pool(self): load_balancer2 = self.lb_repo.create( self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, name="lb_name2", description="lb_description2", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) pool = self.pool_repo.create( self.session, id=self.FAKE_UUID_4, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=load_balancer2.id) self.assertRaises(exceptions.NotFound, self.create_listener, self.FAKE_UUID_1, 80, default_pool_id=pool.id) def test_create_2_sni_containers(self): listener = self.create_listener(self.FAKE_UUID_1, 80) container1 = {'listener_id': listener.id, 'tls_container_id': self.FAKE_UUID_1} container2 = {'listener_id': listener.id, 'tls_container_id': self.FAKE_UUID_2} container1_dm = 
models.SNI(**container1) container2_dm = models.SNI(**container2) self.sni_repo.create(self.session, **container1) self.sni_repo.create(self.session, **container2) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIn(container1_dm, new_listener.sni_containers) self.assertIn(container2_dm, new_listener.sni_containers) def test_update(self): name_change = "new_listener_name" listener = self.create_listener(self.FAKE_UUID_1, 80) self.listener_repo.update(self.session, listener.id, name=name_change) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertEqual(name_change, new_listener.name) def test_update_with_sni(self): listener = self.create_listener(self.FAKE_UUID_1, 80) container1 = {'listener_id': listener.id, 'tls_container_id': self.FAKE_UUID_2} container1_dm = models.SNI(**container1) self.listener_repo.update(self.session, listener.id, sni_containers=[self.FAKE_UUID_2]) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIn(container1_dm, new_listener.sni_containers) def test_update_bad_id(self): self.assertRaises(exceptions.NotFound, self.listener_repo.update, self.session, id=uuidutils.generate_uuid()) def test_delete(self): listener = self.create_listener(self.FAKE_UUID_1, 80) self.listener_repo.delete(self.session, id=listener.id) self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) def test_delete_with_sni(self): listener = self.create_listener(self.FAKE_UUID_1, 80) sni = self.sni_repo.create(self.session, listener_id=listener.id, tls_container_id=self.FAKE_UUID_3) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsNotNone(new_listener) self.assertEqual(sni, new_listener.sni_containers[0]) self.listener_repo.delete(self.session, id=new_listener.id) self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) self.assertIsNone(self.sni_repo.get(self.session, listener_id=listener.id)) def test_delete_with_stats(self): listener = self.create_listener(self.FAKE_UUID_1, 80) lb = self.create_loadbalancer(uuidutils.generate_uuid()) amphora = self.create_amphora(uuidutils.generate_uuid(), lb.id) self.listener_stats_repo.create( self.session, listener_id=listener.id, amphora_id=amphora.id, bytes_in=1, bytes_out=1, active_connections=1, total_connections=1, request_errors=1) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsNotNone(new_listener) self.assertIsNotNone(self.listener_stats_repo.get( self.session, listener_id=listener.id)) self.listener_repo.delete(self.session, id=listener.id) self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) # ListenerStatistics should stick around self.assertIsNotNone(self.listener_stats_repo.get( self.session, listener_id=listener.id)) def test_delete_with_pool(self): pool = self.pool_repo.create( self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=self.load_balancer.id) listener = self.create_listener(self.FAKE_UUID_1, 80, default_pool_id=pool.id) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsNotNone(new_listener) self.assertEqual(pool, new_listener.default_pool) self.listener_repo.delete(self.session, id=new_listener.id) self.assertIsNone(self.listener_repo.get(self.session, 
id=listener.id)) # Pool should stick around self.assertIsNotNone(self.pool_repo.get(self.session, id=pool.id)) def test_delete_with_all_children(self): pool = self.pool_repo.create( self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=self.load_balancer.id) listener = self.create_listener(self.FAKE_UUID_1, 80, default_pool_id=pool.id) sni = self.sni_repo.create(self.session, listener_id=listener.id, tls_container_id=self.FAKE_UUID_3) lb = self.create_loadbalancer(uuidutils.generate_uuid()) amphora = self.create_amphora(uuidutils.generate_uuid(), lb.id) self.listener_stats_repo.create( self.session, listener_id=listener.id, amphora_id=amphora.id, bytes_in=1, bytes_out=1, active_connections=1, total_connections=1, request_errors=1) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsNotNone(new_listener) self.assertEqual(pool, new_listener.default_pool) self.assertEqual(sni, new_listener.sni_containers[0]) self.listener_repo.delete(self.session, id=listener.id) self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) self.assertIsNone(self.sni_repo.get(self.session, listener_id=listener.id)) # ListenerStatistics should stick around self.assertIsNotNone(self.listener_stats_repo.get( self.session, listener_id=sni.listener_id)) # Pool should stick around self.assertIsNotNone(self.pool_repo.get(self.session, id=pool.id)) def test_delete_default_pool_from_beneath_listener(self): pool = self.pool_repo.create( self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, load_balancer_id=self.load_balancer.id) listener = self.create_listener(self.FAKE_UUID_1, 80, default_pool_id=pool.id) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsNotNone(new_listener) self.assertEqual(pool, new_listener.default_pool) self.pool_repo.delete(self.session, id=pool.id) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsNone(new_listener.default_pool) def test_prov_status_active_if_not_error_active(self): listener = self.create_listener(self.FAKE_UUID_1, 80, provisioning_status=constants.ACTIVE) self.listener_repo.prov_status_active_if_not_error(self.session, listener.id) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertEqual(constants.ACTIVE, new_listener.provisioning_status) def test_prov_status_active_if_not_error_error(self): listener = self.create_listener(self.FAKE_UUID_1, 80, provisioning_status=constants.ERROR) self.listener_repo.prov_status_active_if_not_error(self.session, listener.id) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertEqual(constants.ERROR, new_listener.provisioning_status) def test_prov_status_active_if_not_error_pending_update(self): listener = self.create_listener( self.FAKE_UUID_1, 80, provisioning_status=constants.PENDING_UPDATE) self.listener_repo.prov_status_active_if_not_error(self.session, listener.id) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertEqual(constants.ACTIVE, new_listener.provisioning_status) def 
test_prov_status_active_if_not_error_bogus_listener(self): listener = self.create_listener( self.FAKE_UUID_1, 80, provisioning_status=constants.PENDING_UPDATE) # Should not raise an exception nor change any status self.listener_repo.prov_status_active_if_not_error(self.session, 'bogus_id') new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertEqual(constants.PENDING_UPDATE, new_listener.provisioning_status) class ListenerStatisticsRepositoryTest(BaseRepositoryTest): def setUp(self): super(ListenerStatisticsRepositoryTest, self).setUp() self.listener = self.listener_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", protocol=constants.PROTOCOL_HTTP, protocol_port=80, connection_limit=1, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, peer_port=1025) self.lb = self.lb_repo.create(self.session, id=uuidutils.generate_uuid(), project_id=self.FAKE_UUID_2, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) self.amphora = self.amphora_repo.create(self.session, id=uuidutils.generate_uuid(), load_balancer_id=self.lb.id, compute_id=self.FAKE_UUID_3, status=constants.ACTIVE, vrrp_ip=self.FAKE_IP, lb_network_ip=self.FAKE_IP) def create_listener_stats(self, listener_id, amphora_id): stats = self.listener_stats_repo.create( self.session, listener_id=listener_id, amphora_id=amphora_id, bytes_in=1, bytes_out=1, active_connections=1, total_connections=1, request_errors=1) return stats def test_get(self): stats = self.create_listener_stats(self.listener.id, self.amphora.id) new_stats = self.listener_stats_repo.get(self.session, listener_id=stats.listener_id) self.assertIsInstance(new_stats, models.ListenerStatistics) self.assertEqual(stats.listener_id, new_stats.listener_id) def test_create(self): stats = self.create_listener_stats(self.listener.id, self.amphora.id) new_stats = self.listener_stats_repo.get(self.session, listener_id=stats.listener_id) self.assertEqual(self.listener.id, new_stats.listener_id) self.assertEqual(1, new_stats.bytes_in) self.assertEqual(1, new_stats.bytes_out) self.assertEqual(1, new_stats.active_connections) self.assertEqual(1, new_stats.total_connections) self.assertEqual(1, new_stats.request_errors) def test_update(self): bytes_in_change = 2 stats = self.create_listener_stats(self.listener.id, self.amphora.id) self.listener_stats_repo.update(self.session, stats.listener_id, bytes_in=bytes_in_change) new_stats = self.listener_stats_repo.get(self.session, listener_id=stats.listener_id) self.assertIsInstance(new_stats, models.ListenerStatistics) self.assertEqual(stats.listener_id, new_stats.listener_id) self.assertEqual(bytes_in_change, new_stats.bytes_in) def test_delete(self): stats = self.create_listener_stats(self.listener.id, self.amphora.id) self.listener_stats_repo.delete(self.session, listener_id=stats.listener_id) self.assertIsNone(self.listener_stats_repo.get( self.session, listener_id=stats.listener_id)) new_listener = self.listener_repo.get(self.session, id=self.listener.id) self.assertIsNotNone(new_listener) self.assertIsNone(new_listener.stats) def test_replace(self): # Test the create path bytes_in = random.randrange(1000000000) bytes_out = random.randrange(1000000000) active_conns = random.randrange(1000000000) total_conns = random.randrange(1000000000) request_errors = random.randrange(1000000000) self.assertIsNone(self.listener_stats_repo.get( self.session, listener_id=self.listener.id))
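# replace() behaves as an upsert: no stats row exists yet, so the call below should take the create path and insert one; the update path is exercised afterwards with a second set of random values.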
self.listener_stats_repo.replace(self.session, self.listener.id, self.amphora.id, bytes_in=bytes_in, bytes_out=bytes_out, active_connections=active_conns, total_connections=total_conns, request_errors=request_errors) obj = self.listener_stats_repo.get(self.session, listener_id=self.listener.id) self.assertIsNotNone(obj) self.assertEqual(self.listener.id, obj.listener_id) self.assertEqual(bytes_in, obj.bytes_in) self.assertEqual(bytes_out, obj.bytes_out) self.assertEqual(active_conns, obj.active_connections) self.assertEqual(total_conns, obj.total_connections) self.assertEqual(request_errors, obj.request_errors) # Test the update path bytes_in_2 = random.randrange(1000000000) bytes_out_2 = random.randrange(1000000000) active_conns_2 = random.randrange(1000000000) total_conns_2 = random.randrange(1000000000) request_errors_2 = random.randrange(1000000000) self.listener_stats_repo.replace(self.session, self.listener.id, self.amphora.id, bytes_in=bytes_in_2, bytes_out=bytes_out_2, active_connections=active_conns_2, total_connections=total_conns_2, request_errors=request_errors_2) obj = self.listener_stats_repo.get(self.session, listener_id=self.listener.id) self.assertIsNotNone(obj) self.assertEqual(self.listener.id, obj.listener_id) self.assertEqual(bytes_in_2, obj.bytes_in) self.assertEqual(bytes_out_2, obj.bytes_out) self.assertEqual(active_conns_2, obj.active_connections) self.assertEqual(total_conns_2, obj.total_connections) self.assertEqual(request_errors_2, obj.request_errors) class HealthMonitorRepositoryTest(BaseRepositoryTest): def setUp(self): super(HealthMonitorRepositoryTest, self).setUp() self.pool = self.pool_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) self.pool2 = self.pool_repo.create( self.session, id=self.FAKE_UUID_2, project_id=self.FAKE_UUID_2, name="pool2_test", description="pool2_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) def create_health_monitor(self, hm_id, pool_id): health_monitor = self.hm_repo.create( self.session, type=constants.HEALTH_MONITOR_HTTP, id=hm_id, pool_id=pool_id, delay=1, timeout=1, fall_threshold=1, rise_threshold=1, http_method="POST", url_path="http://localhost:80/index.php", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, expected_codes="200", enabled=True, tags=['test_tag']) self.assertEqual(hm_id, health_monitor.id) return health_monitor def test_get(self): hm = self.create_health_monitor(self.FAKE_UUID_3, self.pool.id) new_hm = self.hm_repo.get(self.session, id=hm.id) self.assertIsInstance(new_hm, models.HealthMonitor) self.assertEqual(hm, new_hm) def test_create(self): hm = self.create_health_monitor(self.FAKE_UUID_3, self.pool.id) new_hm = self.hm_repo.get(self.session, id=hm.id) self.assertEqual(constants.HEALTH_MONITOR_HTTP, new_hm.type) self.assertEqual(self.pool.id, new_hm.pool_id) self.assertEqual(1, new_hm.delay) self.assertEqual(1, new_hm.timeout) self.assertEqual(1, new_hm.fall_threshold) self.assertEqual(1, new_hm.rise_threshold) self.assertEqual("POST", new_hm.http_method) self.assertEqual("http://localhost:80/index.php", new_hm.url_path) self.assertEqual("200", new_hm.expected_codes) self.assertTrue(new_hm.enabled) def 
test_update(self): delay_change = 2 hm = self.create_health_monitor(self.FAKE_UUID_3, self.pool.id) self.hm_repo.update( self.session, hm.id, delay=delay_change) new_hm = self.hm_repo.get(self.session, id=hm.id) self.assertEqual(delay_change, new_hm.delay) def test_delete(self): hm = self.create_health_monitor(self.FAKE_UUID_3, self.pool.id) self.hm_repo.delete(self.session, id=hm.id) self.assertIsNone(self.hm_repo.get(self.session, id=hm.id)) new_pool = self.pool_repo.get(self.session, id=self.pool.id) self.assertIsNotNone(new_pool) self.assertIsNone(new_pool.health_monitor) class LoadBalancerRepositoryTest(BaseRepositoryTest): def create_loadbalancer(self, lb_id, **overrides): settings = dict( id=lb_id, project_id=self.FAKE_UUID_2, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, tags=['test_tag'], ) settings.update(**overrides) lb = self.lb_repo.create(self.session, **settings) return lb def test_get(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) new_lb = self.lb_repo.get(self.session, id=lb.id) self.assertIsInstance(new_lb, models.LoadBalancer) self.assertEqual(lb, new_lb) def test_get_all(self): lb_one = self.create_loadbalancer(self.FAKE_UUID_1) lb_two = self.create_loadbalancer(self.FAKE_UUID_3) lb_list, _ = self.lb_repo.get_all(self.session, project_id=self.FAKE_UUID_2) self.assertEqual(2, len(lb_list)) self.assertEqual(lb_one, lb_list[0]) self.assertEqual(lb_two, lb_list[1]) def test_create(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) self.assertEqual(self.FAKE_UUID_1, lb.id) self.assertEqual(self.FAKE_UUID_2, lb.project_id) self.assertEqual("lb_name", lb.name) self.assertEqual("lb_description", lb.description) self.assertEqual(constants.ACTIVE, lb.provisioning_status) self.assertEqual(constants.ONLINE, lb.operating_status) self.assertTrue(lb.enabled) def test_update(self): name_change = "load_balancer_name" lb = self.create_loadbalancer(self.FAKE_UUID_1) self.lb_repo.update(self.session, lb.id, name=name_change) new_lb = self.lb_repo.get(self.session, id=lb.id) self.assertEqual(name_change, new_lb.name) def test_delete(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) self.lb_repo.delete(self.session, id=lb.id) self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) def test_delete_with_amphora(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) amphora = self.amphora_repo.create(self.session, id=self.FAKE_UUID_1, load_balancer_id=lb.id, compute_id=self.FAKE_UUID_3, status=constants.ACTIVE, vrrp_ip=self.FAKE_IP, lb_network_ip=self.FAKE_IP) new_lb = self.lb_repo.get(self.session, id=lb.id) self.assertIsNotNone(new_lb) self.assertEqual(1, len(new_lb.amphorae)) self.assertEqual(amphora, new_lb.amphorae[0]) self.lb_repo.delete(self.session, id=new_lb.id) self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) new_amphora = self.amphora_repo.get(self.session, id=amphora.id) self.assertIsNotNone(new_amphora) self.assertIsNone(new_amphora.load_balancer_id) def test_delete_with_many_amphora(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) amphora_1 = self.amphora_repo.create(self.session, id=self.FAKE_UUID_1, load_balancer_id=lb.id, compute_id=self.FAKE_UUID_3, status=constants.ACTIVE) amphora_2 = self.amphora_repo.create(self.session, id=self.FAKE_UUID_3, load_balancer_id=lb.id, compute_id=self.FAKE_UUID_3, lb_network_ip=self.FAKE_IP, vrrp_ip=self.FAKE_IP, status=constants.ACTIVE) new_lb = self.lb_repo.get(self.session, id=lb.id) self.assertIsNotNone(new_lb) 
self.assertEqual(2, len(new_lb.amphorae)) self.assertIn(amphora_1, new_lb.amphorae) self.assertIn(amphora_2, new_lb.amphorae) self.lb_repo.delete(self.session, id=new_lb.id) self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) new_amphora_1 = self.amphora_repo.get(self.session, id=amphora_1.id) new_amphora_2 = self.amphora_repo.get(self.session, id=amphora_2.id) self.assertIsNotNone(new_amphora_1) self.assertIsNotNone(new_amphora_2) self.assertIsNone(new_amphora_1.load_balancer_id) self.assertIsNone(new_amphora_2.load_balancer_id) def test_delete_with_vip(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) vip = self.vip_repo.create(self.session, load_balancer_id=lb.id, ip_address="192.0.2.1") new_lb = self.lb_repo.get(self.session, id=lb.id) self.assertIsNotNone(new_lb) self.assertIsNotNone(new_lb.vip) self.assertEqual(vip, new_lb.vip) self.lb_repo.delete(self.session, id=new_lb.id) self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) self.assertIsNone(self.vip_repo.get(self.session, load_balancer_id=lb.id)) def test_delete_with_listener(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) listener = self.listener_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, protocol_port=80, connection_limit=1, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) new_lb = self.lb_repo.get(self.session, id=lb.id) self.assertIsNotNone(new_lb) self.assertEqual(1, len(new_lb.listeners)) self.assertEqual(listener, new_lb.listeners[0]) self.lb_repo.delete(self.session, id=new_lb.id) self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) def test_delete_with_many_listeners(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) listener_1 = self.listener_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, protocol_port=80, connection_limit=1, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) listener_2 = self.listener_repo.create( self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTPS, protocol_port=443, connection_limit=1, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) new_lb = self.lb_repo.get(self.session, id=lb.id) self.assertIsNotNone(new_lb) self.assertEqual(2, len(new_lb.listeners)) self.assertIn(listener_1, new_lb.listeners) self.assertIn(listener_2, new_lb.listeners) self.lb_repo.delete(self.session, id=new_lb.id) self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) self.assertIsNone(self.listener_repo.get(self.session, id=listener_1.id)) self.assertIsNone(self.listener_repo.get(self.session, id=listener_2.id)) def test_delete_with_all_children(self): lb = self.create_loadbalancer(self.FAKE_UUID_1) amphora = self.amphora_repo.create(self.session, id=self.FAKE_UUID_1, load_balancer_id=lb.id, compute_id=self.FAKE_UUID_3, lb_network_ip=self.FAKE_IP, status=constants.ACTIVE) vip = self.vip_repo.create(self.session, load_balancer_id=lb.id, ip_address="192.0.2.1") listener = self.listener_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="listener_name", 
description="listener_description", load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, protocol_port=80, connection_limit=1, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) new_lb = self.lb_repo.get(self.session, id=lb.id) self.assertIsNotNone(new_lb) self.assertIsNotNone(new_lb.vip) self.assertEqual(vip, new_lb.vip) self.assertEqual(1, len(new_lb.amphorae)) self.assertEqual(1, len(new_lb.listeners)) self.assertEqual(amphora, new_lb.amphorae[0]) self.assertEqual(listener, new_lb.listeners[0]) self.lb_repo.delete(self.session, id=new_lb.id) self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) new_amphora = self.amphora_repo.get(self.session, id=amphora.id) self.assertIsNotNone(new_amphora) self.assertIsNone(new_amphora.load_balancer_id) self.assertIsNone(self.vip_repo.get(self.session, load_balancer_id=lb.id)) self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) def test_test_and_set_provisioning_status_immutable(self): lb_id = uuidutils.generate_uuid() self.lb_repo.create(self.session, id=lb_id, provisioning_status=constants.PENDING_CREATE, operating_status=constants.OFFLINE, enabled=True) self.assertFalse(self.lb_repo.test_and_set_provisioning_status( self.session, lb_id, constants.PENDING_UPDATE)) lb = self.lb_repo.get(self.session, id=lb_id) self.assertEqual(constants.PENDING_CREATE, lb.provisioning_status) def test_test_and_set_provisioning_status_immutable_raise(self): lb_id = uuidutils.generate_uuid() self.lb_repo.create(self.session, id=lb_id, provisioning_status=constants.PENDING_CREATE, operating_status=constants.OFFLINE, enabled=True) self.assertRaises(exceptions.ImmutableObject, self.lb_repo.test_and_set_provisioning_status, self.session, lb_id, status=constants.PENDING_UPDATE, raise_exception=True) lb = self.lb_repo.get(self.session, id=lb_id) self.assertEqual(constants.PENDING_CREATE, lb.provisioning_status) def test_test_and_set_provisioning_status_mutable(self): lb_id = uuidutils.generate_uuid() self.lb_repo.create(self.session, id=lb_id, provisioning_status=constants.ACTIVE, operating_status=constants.OFFLINE, enabled=True) self.lb_repo.test_and_set_provisioning_status( self.session, lb_id, constants.PENDING_UPDATE) lb = self.lb_repo.get(self.session, id=lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) def test_test_and_set_provisioning_status_error_on_delete(self): lb_id = uuidutils.generate_uuid() self.lb_repo.create(self.session, id=lb_id, provisioning_status=constants.ERROR, operating_status=constants.OFFLINE, enabled=True) self.lb_repo.test_and_set_provisioning_status( self.session, lb_id, constants.PENDING_DELETE) lb = self.lb_repo.get(self.session, id=lb_id) self.assertEqual(constants.PENDING_DELETE, lb.provisioning_status) def test_set_status_for_failover_immutable(self): lb_id = uuidutils.generate_uuid() self.lb_repo.create(self.session, id=lb_id, provisioning_status=constants.PENDING_CREATE, operating_status=constants.OFFLINE, enabled=True) self.assertFalse(self.lb_repo.set_status_for_failover( self.session, lb_id, constants.PENDING_UPDATE)) lb = self.lb_repo.get(self.session, id=lb_id) self.assertEqual(constants.PENDING_CREATE, lb.provisioning_status) def test_set_status_for_failover_immutable_raise(self): lb_id = uuidutils.generate_uuid() self.lb_repo.create(self.session, id=lb_id, provisioning_status=constants.PENDING_CREATE, operating_status=constants.OFFLINE, enabled=True) self.assertRaises(exceptions.ImmutableObject, self.lb_repo.set_status_for_failover, 
self.session, lb_id, status=constants.PENDING_UPDATE, raise_exception=True) lb = self.lb_repo.get(self.session, id=lb_id) self.assertEqual(constants.PENDING_CREATE, lb.provisioning_status) def test_set_status_for_failover_mutable(self): lb_id = uuidutils.generate_uuid() self.lb_repo.create(self.session, id=lb_id, provisioning_status=constants.ACTIVE, operating_status=constants.OFFLINE, enabled=True) self.lb_repo.set_status_for_failover( self.session, lb_id, constants.PENDING_UPDATE) lb = self.lb_repo.get(self.session, id=lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) def test_set_status_for_failover_error(self): lb_id = uuidutils.generate_uuid() self.lb_repo.create(self.session, id=lb_id, provisioning_status=constants.ERROR, operating_status=constants.OFFLINE, enabled=True) self.lb_repo.set_status_for_failover( self.session, lb_id, constants.PENDING_UPDATE) lb = self.lb_repo.get(self.session, id=lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) def test_get_all_deleted_expiring_load_balancer(self): exp_age = datetime.timedelta(seconds=self.FAKE_EXP_AGE) updated_at = datetime.datetime.utcnow() - exp_age lb1 = self.create_loadbalancer( self.FAKE_UUID_1, updated_at=updated_at, provisioning_status=constants.DELETED) lb2 = self.create_loadbalancer( self.FAKE_UUID_2, provisioning_status=constants.DELETED) expiring_ids = self.lb_repo.get_all_deleted_expiring( self.session, exp_age=exp_age) self.assertIn(lb1.id, expiring_ids) self.assertNotIn(lb2.id, expiring_ids) class VipRepositoryTest(BaseRepositoryTest): def setUp(self): super(VipRepositoryTest, self).setUp() self.lb = self.lb_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) def create_vip(self, lb_id): vip = self.vip_repo.create(self.session, load_balancer_id=lb_id, ip_address="192.0.2.1") return vip def test_get(self): vip = self.create_vip(self.lb.id) new_vip = self.vip_repo.get(self.session, load_balancer_id=vip.load_balancer_id) self.assertIsInstance(new_vip, models.Vip) self.assertEqual(vip, new_vip) def test_create(self): vip = self.create_vip(self.lb.id) self.assertEqual(self.lb.id, vip.load_balancer_id) self.assertEqual("192.0.2.1", vip.ip_address) def test_update(self): address_change = "192.0.2.2" vip = self.create_vip(self.lb.id) self.vip_repo.update(self.session, vip.load_balancer_id, ip_address=address_change) new_vip = self.vip_repo.get(self.session, load_balancer_id=vip.load_balancer_id) self.assertEqual(address_change, new_vip.ip_address) def test_delete(self): vip = self.create_vip(self.lb.id) self.vip_repo.delete(self.session, load_balancer_id=vip.load_balancer_id) self.assertIsNone(self.vip_repo.get( self.session, load_balancer_id=vip.load_balancer_id)) new_lb = self.lb_repo.get(self.session, id=self.lb.id) self.assertIsNotNone(new_lb) self.assertIsNone(new_lb.vip) def test_create_ipv6(self): vip = self.vip_repo.create(self.session, load_balancer_id=self.lb.id, ip_address="2001:DB8::10") self.assertEqual(self.lb.id, vip.load_balancer_id) self.assertEqual("2001:DB8::10", vip.ip_address) # Note: This test uses the unique local address range to # validate that we handle a fully expanded IP address properly. # This is not possible with the documentation/testnet range.
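# (fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff is the last address of the # fc00::/7 unique local range; every hextet is populated, so there is # no zero run for "::" compression to shorten.)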
def test_create_ipv6_full(self): vip = self.vip_repo.create( self.session, load_balancer_id=self.lb.id, ip_address="fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") self.assertEqual(self.lb.id, vip.load_balancer_id) self.assertEqual("fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", vip.ip_address) class SNIRepositoryTest(BaseRepositoryTest): def setUp(self): super(SNIRepositoryTest, self).setUp() self.listener = self.listener_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", protocol=constants.PROTOCOL_HTTP, protocol_port=80, connection_limit=1, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, peer_port=1025) def create_sni(self, listener_id): sni = self.sni_repo.create(self.session, listener_id=listener_id, tls_container_id=self.FAKE_UUID_3, position=0) return sni def test_get(self): sni = self.create_sni(self.listener.id) new_sni = self.sni_repo.get(self.session, listener_id=sni.listener_id) self.assertIsInstance(new_sni, models.SNI) self.assertEqual(sni, new_sni) def test_create(self): sni = self.create_sni(self.listener.id) new_sni = self.sni_repo.get(self.session, listener_id=sni.listener_id) self.assertEqual(self.listener.id, new_sni.listener_id) self.assertEqual(self.FAKE_UUID_3, new_sni.tls_container_id) self.assertEqual(0, new_sni.position) def test_update(self): position_change = 10 sni = self.create_sni(self.listener.id) self.sni_repo.update(self.session, listener_id=sni.listener_id, position=position_change) new_sni = self.sni_repo.get(self.session, listener_id=sni.listener_id) self.assertEqual(position_change, new_sni.position) def test_delete(self): sni = self.create_sni(self.listener.id) self.sni_repo.delete(self.session, listener_id=sni.listener_id) self.assertIsNone(self.sni_repo.get(self.session, listener_id=sni.listener_id)) new_listener = self.listener_repo.get(self.session, id=self.listener.id) self.assertIsNotNone(new_listener) self.assertEqual(0, len(new_listener.sni_containers)) class AmphoraRepositoryTest(BaseRepositoryTest): def setUp(self): super(AmphoraRepositoryTest, self).setUp() self.lb = self.lb_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) def create_amphora(self, amphora_id, **overrides): settings = { 'id': amphora_id, 'compute_id': self.FAKE_UUID_3, 'status': constants.ACTIVE, 'lb_network_ip': self.FAKE_IP, 'vrrp_ip': self.FAKE_IP, 'ha_ip': self.FAKE_IP, 'role': constants.ROLE_MASTER, 'cert_expiration': datetime.datetime.utcnow(), 'cert_busy': False } settings.update(overrides) amphora = self.amphora_repo.create(self.session, **settings) return amphora def test_get(self): amphora = self.create_amphora(self.FAKE_UUID_1) new_amphora = self.amphora_repo.get(self.session, id=amphora.id) self.assertIsInstance(new_amphora, models.Amphora) self.assertEqual(amphora, new_amphora) def test_count(self): comp_id = uuidutils.generate_uuid() self.create_amphora(self.FAKE_UUID_1, compute_id=comp_id) self.create_amphora(self.FAKE_UUID_2, compute_id=comp_id, status=constants.DELETED) amp_count = self.amphora_repo.count(self.session, compute_id=comp_id) self.assertEqual(2, amp_count) def test_count_not_deleted(self): comp_id = uuidutils.generate_uuid() self.create_amphora(self.FAKE_UUID_1, compute_id=comp_id) self.create_amphora(self.FAKE_UUID_2, compute_id=comp_id, status=constants.DELETED) amp_count = 
self.amphora_repo.count(self.session, compute_id=comp_id, show_deleted=False) self.assertEqual(1, amp_count) def test_create(self): amphora = self.create_amphora(self.FAKE_UUID_1) self.assertEqual(self.FAKE_UUID_1, amphora.id) self.assertEqual(self.FAKE_UUID_3, amphora.compute_id) self.assertEqual(constants.ACTIVE, amphora.status) self.assertEqual(constants.ROLE_MASTER, amphora.role) def test_exists_true(self): amphora = self.create_amphora(self.FAKE_UUID_1) exist = self.amphora_repo.exists(self.session, id=amphora.id) self.assertTrue(exist) def test_exists_false(self): self.create_amphora(self.FAKE_UUID_1) exist = self.amphora_repo.exists(self.session, id='test') self.assertFalse(exist) def test_update(self): status_change = constants.PENDING_UPDATE amphora = self.create_amphora(self.FAKE_UUID_1) self.amphora_repo.update(self.session, amphora.id, status=status_change) new_amphora = self.amphora_repo.get(self.session, id=amphora.id) self.assertEqual(status_change, new_amphora.status) def test_delete(self): amphora = self.create_amphora(self.FAKE_UUID_1) self.amphora_repo.delete(self.session, id=amphora.id) self.assertIsNone(self.amphora_repo.get(self.session, id=amphora.id)) def test_associate_amphora_load_balancer(self): amphora = self.create_amphora(self.FAKE_UUID_1) self.amphora_repo.associate(self.session, self.lb.id, amphora.id) new_amphora = self.amphora_repo.get(self.session, id=amphora.id) self.assertIsNotNone(new_amphora.load_balancer) self.assertIsInstance(new_amphora.load_balancer, models.LoadBalancer) def test_delete_amphora_with_load_balancer(self): amphora = self.create_amphora(self.FAKE_UUID_1) self.amphora_repo.associate(self.session, self.lb.id, amphora.id) self.amphora_repo.delete(self.session, id=amphora.id) self.assertIsNone(self.amphora_repo.get(self.session, id=amphora.id)) new_lb = self.lb_repo.get(self.session, id=self.lb.id) self.assertEqual(0, len(new_lb.amphorae)) def test_allocate_and_associate(self): new_amphora = self.amphora_repo.allocate_and_associate(self.session, self.lb.id) self.assertIsNone(new_amphora) amphora = self.create_amphora(self.FAKE_UUID_1) self.amphora_repo.update(self.session, amphora.id, status=constants.AMPHORA_READY) new_amphora = self.amphora_repo.allocate_and_associate(self.session, self.lb.id) self.assertIsNotNone(new_amphora) self.assertIsInstance(new_amphora, models.Amphora) def test_get_lb_for_amphora(self): # TODO(bzhao) This test raises an error because the join statement # ends up with more than 64 tables in the SQLite env. The issue first # appeared when we introduced resource tags and client certificates, # both of which are 1:1 relationships. But a load balancer with many # associated subresources (listeners, pools, members and L7 # resources) would hit the same limit in SQLite even without the tags # and client certificate features.
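# (SQLite documents a hard limit of 64 tables per join; loading the # full relationship graph of a load balancer in a single query is # presumably what pushes the statement past it.)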
self.skipTest("No idea how this should work yet") amphora = self.create_amphora(self.FAKE_UUID_1) self.amphora_repo.associate(self.session, self.lb.id, amphora.id) lb = self.amphora_repo.get_lb_for_amphora(self.session, amphora.id) self.assertIsNotNone(lb) self.assertEqual(self.lb, lb) def test_get_all_deleted_expiring_amphora(self): exp_age = datetime.timedelta(seconds=self.FAKE_EXP_AGE) updated_at = datetime.datetime.utcnow() - exp_age amphora1 = self.create_amphora( self.FAKE_UUID_1, updated_at=updated_at, status=constants.DELETED) amphora2 = self.create_amphora( self.FAKE_UUID_2, status=constants.DELETED) expiring_ids = self.amphora_repo.get_all_deleted_expiring( self.session, exp_age=exp_age) self.assertIn(amphora1.id, expiring_ids) self.assertNotIn(amphora2.id, expiring_ids) def test_get_spare_amphora_count(self): count = self.amphora_repo.get_spare_amphora_count(self.session) self.assertEqual(0, count) amphora1 = self.create_amphora(self.FAKE_UUID_1) self.amphora_repo.update(self.session, amphora1.id, status=constants.AMPHORA_READY) amphora2 = self.create_amphora(self.FAKE_UUID_2) self.amphora_repo.update(self.session, amphora2.id, status=constants.AMPHORA_READY) count = self.amphora_repo.get_spare_amphora_count(self.session) self.assertEqual(2, count) def test_get_spare_amphora_count_check_booting_amphora_true(self): count = self.amphora_repo.get_spare_amphora_count( self.session, check_booting_amphora=True) self.assertEqual(0, count) amphora1 = self.create_amphora(self.FAKE_UUID_1) self.amphora_repo.update(self.session, amphora1.id, status=constants.AMPHORA_READY,) amphora2 = self.create_amphora(self.FAKE_UUID_2) self.amphora_repo.update(self.session, amphora2.id, status=constants.AMPHORA_BOOTING) count = self.amphora_repo.get_spare_amphora_count( self.session, check_booting_amphora=True) self.assertEqual(2, count) def test_get_none_cert_expired_amphora(self): # test with no expired amphora amp = self.amphora_repo.get_cert_expiring_amphora(self.session) self.assertIsNone(amp) amphora = self.create_amphora(self.FAKE_UUID_1) expired_interval = CONF.house_keeping.cert_expiry_buffer expiration = datetime.datetime.utcnow() + datetime.timedelta( seconds=2 * expired_interval) self.amphora_repo.update(self.session, amphora.id, cert_expiration=expiration) amp = self.amphora_repo.get_cert_expiring_amphora(self.session) self.assertIsNone(amp) def test_get_cert_expired_amphora(self): # test with expired amphora amphora2 = self.create_amphora(self.FAKE_UUID_2) expiration = datetime.datetime.utcnow() + datetime.timedelta( seconds=1) self.amphora_repo.update(self.session, amphora2.id, cert_expiration=expiration) cert_expired_amphora = self.amphora_repo.get_cert_expiring_amphora( self.session) self.assertEqual(cert_expired_amphora.cert_expiration, expiration) self.assertEqual(cert_expired_amphora.id, amphora2.id) def test_get_cert_expired_amphora_deleted(self): amphora = self.create_amphora(self.FAKE_UUID_3) expiration = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) self.amphora_repo.update(self.session, amphora.id, status=constants.DELETED, cert_expiration=expiration) cert_expired_amphora = self.amphora_repo.get_cert_expiring_amphora( self.session) self.assertIsNone(cert_expired_amphora) def test_get_lb_for_health_update(self): amphora1 = self.create_amphora(self.FAKE_UUID_1) amphora2 = self.create_amphora(self.FAKE_UUID_3) self.amphora_repo.associate(self.session, self.lb.id, amphora1.id) self.amphora_repo.associate(self.session, self.lb.id, amphora2.id) lb_ref = {'enabled': True, 
'id': self.lb.id, 'operating_status': constants.ONLINE, 'provisioning_status': constants.ACTIVE} # Test with just a load balancer lb = self.amphora_repo.get_lb_for_health_update(self.session, self.FAKE_UUID_1) self.assertEqual(lb_ref, lb) pool = self.pool_repo.create( self.session, id=self.FAKE_UUID_4, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, load_balancer_id=self.lb.id, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) pool_ref = {pool.id: {'members': {}, 'operating_status': constants.ONLINE}} lb_ref['pools'] = pool_ref # Test with an LB and a pool lb = self.amphora_repo.get_lb_for_health_update(self.session, self.FAKE_UUID_1) self.assertEqual(lb_ref, lb) listener = self.listener_repo.create( self.session, id=self.FAKE_UUID_5, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", protocol=constants.PROTOCOL_HTTP, protocol_port=80, connection_limit=1, operating_status=constants.ONLINE, load_balancer_id=self.lb.id, provisioning_status=constants.ACTIVE, enabled=True, peer_port=1025, default_pool_id=pool.id) listener_ref = {listener.id: {'operating_status': constants.ONLINE, 'protocol': constants.PROTOCOL_HTTP, 'enabled': 1}} lb_ref['listeners'] = listener_ref # Test with an LB, pool, and listener (no members) lb = self.amphora_repo.get_lb_for_health_update(self.session, self.FAKE_UUID_1) self.assertEqual(lb_ref, lb) member1 = self.member_repo.create(self.session, id=self.FAKE_UUID_6, project_id=self.FAKE_UUID_2, pool_id=pool.id, ip_address="192.0.2.1", protocol_port=80, enabled=True, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, backup=False) member2 = self.member_repo.create(self.session, id=self.FAKE_UUID_7, project_id=self.FAKE_UUID_2, pool_id=pool.id, ip_address="192.0.2.21", protocol_port=80, enabled=True, provisioning_status=constants.ACTIVE, operating_status=constants.OFFLINE, backup=False) member_ref = {member1.id: {'operating_status': constants.ONLINE}, member2.id: {'operating_status': constants.OFFLINE}} lb_ref['pools'][pool.id]['members'] = member_ref # Test with an LB, pool, listener, and members lb = self.amphora_repo.get_lb_for_health_update(self.session, self.FAKE_UUID_1) self.assertEqual(lb_ref, lb) class AmphoraHealthRepositoryTest(BaseRepositoryTest): def setUp(self): super(AmphoraHealthRepositoryTest, self).setUp() self.amphora = self.amphora_repo.create(self.session, id=self.FAKE_UUID_1, compute_id=self.FAKE_UUID_3, status=constants.ACTIVE, lb_network_ip=self.FAKE_IP) def create_amphora_health(self, amphora_id): newdate = datetime.datetime.utcnow() - datetime.timedelta(minutes=10) amphora_health = self.amphora_health_repo.create( self.session, amphora_id=amphora_id, last_update=newdate, busy=False) return amphora_health def test_replace(self): amphora_id = uuidutils.generate_uuid() now = datetime.datetime.utcnow() self.assertIsNone(self.amphora_health_repo.get( self.session, amphora_id=amphora_id)) self.amphora_health_repo.replace(self.session, amphora_id, last_update=now) obj = self.amphora_health_repo.get(self.session, amphora_id=amphora_id) self.assertIsNotNone(obj) self.assertEqual(amphora_id, obj.amphora_id) self.assertEqual(now, obj.last_update) now += datetime.timedelta(seconds=69) self.amphora_health_repo.replace(self.session, amphora_id, last_update=now) obj = self.amphora_health_repo.get(self.session, amphora_id=amphora_id) 
self.assertIsNotNone(obj) self.assertEqual(amphora_id, obj.amphora_id) self.assertEqual(now, obj.last_update) def test_get(self): amphora_health = self.create_amphora_health(self.amphora.id) new_amphora_health = self.amphora_health_repo.get( self.session, amphora_id=amphora_health.amphora_id) self.assertIsInstance(new_amphora_health, models.AmphoraHealth) self.assertEqual(amphora_health, new_amphora_health) def test_check_amphora_expired_default_exp_age(self): """When exp_age defaults to CONF.house_keeping.amphora_expiry_age.""" self.create_amphora_health(self.amphora.id) checkres = self.amphora_health_repo.check_amphora_health_expired( self.session, self.amphora.id) # Default amphora_expiry_age value is 1 week so amphora shouldn't be # considered expired. self.assertFalse(checkres) def test_check_amphora_expired_with_exp_age(self): """When exp_age is passed as an argument.""" exp_age = datetime.timedelta( seconds=self.FAKE_EXP_AGE) self.create_amphora_health(self.amphora.id) checkres = self.amphora_health_repo.check_amphora_health_expired( self.session, self.amphora.id, exp_age) self.assertTrue(checkres) def test_check_amphora_expired_with_no_age(self): """When the amphora_health entry is missing in the DB.""" checkres = self.amphora_health_repo.check_amphora_health_expired( self.session, self.amphora.id) self.assertTrue(checkres) def test_get_stale_amphora(self): stale_amphora = self.amphora_health_repo.get_stale_amphora( self.session) self.assertIsNone(stale_amphora) self.create_amphora_health(self.amphora.id) stale_amphora = self.amphora_health_repo.get_stale_amphora( self.session) self.assertEqual(self.amphora.id, stale_amphora.amphora_id) def test_create(self): amphora_health = self.create_amphora_health(self.FAKE_UUID_1) self.assertEqual(self.FAKE_UUID_1, amphora_health.amphora_id) newcreatedtime = datetime.datetime.utcnow() oldcreatetime = amphora_health.last_update diff = newcreatedtime - oldcreatetime self.assertEqual(600, diff.seconds) def test_update(self): d = datetime.datetime.today() amphora_health = self.create_amphora_health(self.FAKE_UUID_1) self.amphora_health_repo.update(self.session, amphora_health.amphora_id, last_update=d) new_amphora_health = self.amphora_health_repo.get( self.session, amphora_id=amphora_health.amphora_id) self.assertEqual(d, new_amphora_health.last_update) def test_delete(self): amphora_health = self.create_amphora_health(self.FAKE_UUID_1) self.amphora_health_repo.delete( self.session, amphora_id=amphora_health.amphora_id) self.assertIsNone(self.amphora_health_repo.get( self.session, amphora_id=amphora_health.amphora_id)) class VRRPGroupRepositoryTest(BaseRepositoryTest): def setUp(self): super(VRRPGroupRepositoryTest, self).setUp() self.lb = self.lb_repo.create( self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, name="lb_name", description="lb_description", provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) def test_update(self): self.vrrpgroup = self.vrrp_group_repo.create( self.session, load_balancer_id=self.lb.id, vrrp_group_name='TESTVRRPGROUP', vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, vrrp_auth_pass='TESTPASS', advert_int=1) # Validate baseline old_vrrp_group = self.vrrp_group_repo.get(self.session, load_balancer_id=self.lb.id) self.assertEqual('TESTVRRPGROUP', old_vrrp_group.vrrp_group_name) self.assertEqual(constants.VRRP_AUTH_DEFAULT, old_vrrp_group.vrrp_auth_type) self.assertEqual('TESTPASS', old_vrrp_group.vrrp_auth_pass) self.assertEqual(1, old_vrrp_group.advert_int) # Test update 
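# The update below changes every mutable VRRP group field at once, so # the assertions that follow cover the full set of updatable columns.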
self.vrrp_group_repo.update(self.session, load_balancer_id=self.lb.id, vrrp_group_name='TESTVRRPGROUP2', vrrp_auth_type='AH', vrrp_auth_pass='TESTPASS2', advert_int=2) new_vrrp_group = self.vrrp_group_repo.get(self.session, load_balancer_id=self.lb.id) self.assertEqual('TESTVRRPGROUP2', new_vrrp_group.vrrp_group_name) self.assertEqual('AH', new_vrrp_group.vrrp_auth_type) self.assertEqual('TESTPASS2', new_vrrp_group.vrrp_auth_pass) self.assertEqual(2, new_vrrp_group.advert_int) class L7PolicyRepositoryTest(BaseRepositoryTest): def setUp(self): super(L7PolicyRepositoryTest, self).setUp() self.pool = self.create_pool(self.FAKE_UUID_1) self.listener = self.create_listener(self.FAKE_UUID_1, 80) def create_listener(self, listener_id, port): listener = self.listener_repo.create( self.session, id=listener_id, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", protocol=constants.PROTOCOL_HTTP, protocol_port=port, connection_limit=1, operating_status=constants.ONLINE, provisioning_status=constants.ACTIVE, enabled=True, peer_port=1025) return listener def create_pool(self, pool_id): pool = self.pool_repo.create( self.session, id=pool_id, project_id=self.FAKE_UUID_2, name="pool_test", description="pool_description", protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True, tags=['test_tag']) return pool def create_l7policy(self, l7policy_id, listener_id, position, action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=None, redirect_url=None): l7policy = self.l7policy_repo.create( self.session, id=l7policy_id, name='l7policy_test', description='l7policy_description', listener_id=listener_id, position=position, action=action, redirect_pool_id=redirect_pool_id, redirect_url=redirect_url, operating_status=constants.ONLINE, provisioning_status=constants.ACTIVE, enabled=True) return l7policy def create_l7rule(self, l7rule_id, l7policy_id, type=constants.L7RULE_TYPE_PATH, compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, key=None, value="/api", enabled=True): l7rule = self.l7rule_repo.create( self.session, id=l7rule_id, l7policy_id=l7policy_id, type=type, compare_type=compare_type, key=key, value=value, operating_status=constants.ONLINE, enabled=enabled, provisioning_status=constants.ACTIVE) return l7rule def test_get(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 999, redirect_pool_id=pool.id) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertIsInstance(new_l7policy, models.L7Policy) self.assertEqual(l7policy, new_l7policy) self.assertEqual(1, new_l7policy.position) def test_get_all(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy_a = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 1, redirect_pool_id=pool.id) l7policy_c = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 2, redirect_pool_id=pool.id) l7policy_b = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 2, redirect_pool_id=pool.id) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) new_l7policy_c = self.l7policy_repo.get(self.session, id=l7policy_c.id) self.assertEqual(1, new_l7policy_a.position) 
self.assertEqual(2, new_l7policy_b.position) self.assertEqual(3, new_l7policy_c.position) l7policy_list, _ = self.l7policy_repo.get_all( self.session, listener_id=listener.id) self.assertIsInstance(l7policy_list, list) self.assertEqual(3, len(l7policy_list)) self.assertEqual(l7policy_a.id, l7policy_list[0].id) self.assertEqual(l7policy_b.id, l7policy_list[1].id) self.assertEqual(l7policy_c.id, l7policy_list[2].id) def test_create(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy = self.create_l7policy(self.FAKE_UUID_1, listener.id, 1, redirect_pool_id=pool.id) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertEqual(self.FAKE_UUID_1, new_l7policy.id) self.assertEqual(listener.id, new_l7policy.listener_id) self.assertEqual(pool.id, new_l7policy.redirect_pool_id) self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL, new_l7policy.action) self.assertEqual(1, new_l7policy.position) self.assertIsNone(new_l7policy.redirect_url) def test_create_no_id(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) l7policy = self.l7policy_repo.create( self.session, listener_id=listener.id, action=constants.L7POLICY_ACTION_REJECT, operating_status=constants.ONLINE, provisioning_status=constants.ACTIVE, enabled=True) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertEqual(listener.id, new_l7policy.listener_id) self.assertIsNone(new_l7policy.redirect_pool_id) self.assertIsNone(new_l7policy.redirect_url) self.assertEqual(constants.L7POLICY_ACTION_REJECT, new_l7policy.action) self.assertEqual(1, new_l7policy.position) def test_l7policy_create_no_listener_id(self): self.assertRaises( db_exception.DBError, self.l7policy_repo.create, self.session, action=constants.L7POLICY_ACTION_REJECT, operating_status=constants.ONLINE, provisioning_status=constants.ACTIVE, enabled=True) def test_update(self): new_url = 'http://www.example.com/' listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 1, redirect_pool_id=pool.id) self.l7policy_repo.update( self.session, id=l7policy.id, action=constants.L7POLICY_ACTION_REDIRECT_TO_URL, redirect_url=new_url, position=l7policy.position) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) new_pool = self.pool_repo.get(self.session, id=pool.id) self.assertEqual(new_url, new_l7policy.redirect_url) self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL, new_l7policy.action) self.assertIsNone(new_l7policy.redirect_pool_id) self.assertNotIn(new_l7policy.id, [p.id for p in new_pool.l7policies]) def test_update_bad_id(self): self.assertRaises(exceptions.NotFound, self.l7policy_repo.update, self.session, id=uuidutils.generate_uuid()) def test_delete(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 1, redirect_pool_id=pool.id) self.l7policy_repo.delete(self.session, id=l7policy.id) self.assertIsNone(self.l7policy_repo.get(self.session, id=l7policy.id)) new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsNotNone(new_listener) self.assertEqual(0, len(new_listener.l7policies)) def test_delete_bad_id(self): self.assertRaises(exceptions.NotFound, self.l7policy_repo.delete, self.session, id=uuidutils.generate_uuid()) def
test_reorder_policies(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy_a = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 1, redirect_pool_id=pool.id) l7policy_b = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 2, redirect_pool_id=pool.id) l7policy_c = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 3, redirect_pool_id=pool.id) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) new_l7policy_c = self.l7policy_repo.get(self.session, id=l7policy_c.id) self.assertEqual(1, new_l7policy_a.position) self.assertEqual(2, new_l7policy_b.position) self.assertEqual(3, new_l7policy_c.position) self.l7policy_repo.update(self.session, id=l7policy_a.id, position=2) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) new_l7policy_c = self.l7policy_repo.get(self.session, id=l7policy_c.id) self.assertEqual(2, new_l7policy_a.position) self.assertEqual(1, new_l7policy_b.position) self.assertEqual(3, new_l7policy_c.position) self.l7policy_repo.update(self.session, id=l7policy_c.id, position=1) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) new_l7policy_c = self.l7policy_repo.get(self.session, id=l7policy_c.id) self.assertEqual(3, new_l7policy_a.position) self.assertEqual(2, new_l7policy_b.position) self.assertEqual(1, new_l7policy_c.position) self.l7policy_repo.update(self.session, id=l7policy_c.id, position=1) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) new_l7policy_c = self.l7policy_repo.get(self.session, id=l7policy_c.id) self.assertEqual(3, new_l7policy_a.position) self.assertEqual(2, new_l7policy_b.position) self.assertEqual(1, new_l7policy_c.position) def test_delete_forcing_reorder(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy_a = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 1, redirect_pool_id=pool.id) l7policy_b = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 2, redirect_pool_id=pool.id) l7policy_c = self.create_l7policy(uuidutils.generate_uuid(), listener.id, 999, redirect_pool_id=pool.id) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) new_l7policy_c = self.l7policy_repo.get(self.session, id=l7policy_c.id) self.assertEqual(1, new_l7policy_a.position) self.assertEqual(2, new_l7policy_b.position) self.assertEqual(3, new_l7policy_c.position) self.l7policy_repo.delete(self.session, id=l7policy_b.id) l7policy_list, _ = self.l7policy_repo.get_all( self.session, listener_id=listener.id) self.assertIsInstance(l7policy_list, list) self.assertEqual(2, len(l7policy_list)) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_c = self.l7policy_repo.get(self.session, id=l7policy_c.id) self.assertEqual(1, new_l7policy_a.position) self.assertEqual(2, new_l7policy_c.position) def test_delete_with_rule(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy = self.create_l7policy(uuidutils.generate_uuid(), 
listener.id, 1, redirect_pool_id=pool.id,) l7rule = self.create_l7rule(uuidutils.generate_uuid(), l7policy.id) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) self.assertEqual(l7policy.id, new_l7policy.id) self.assertEqual(l7rule.id, new_l7rule.id) self.l7policy_repo.delete(self.session, id=l7policy.id) self.assertIsNone(self.l7policy_repo.get(self.session, id=l7policy.id)) self.assertIsNone(self.l7rule_repo.get(self.session, id=l7rule.id)) def test_update_action_rdr_url_to_redirect_pool(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy = self.create_l7policy( uuidutils.generate_uuid(), listener.id, 1, action=constants.L7POLICY_ACTION_REDIRECT_TO_URL, redirect_url="http://www.example.com/") new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertIsNone(new_l7policy.redirect_pool_id) self.l7policy_repo.update( self.session, id=l7policy.id, redirect_pool_id=pool.id) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertEqual(pool.id, new_l7policy.redirect_pool.id) self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL, new_l7policy.action) self.assertIsNone(new_l7policy.redirect_url) def test_update_action_rdr_url_to_reject(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) l7policy = self.create_l7policy( uuidutils.generate_uuid(), listener.id, 1, action=constants.L7POLICY_ACTION_REDIRECT_TO_URL, redirect_url="http://www.example.com/") new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertIsNone(new_l7policy.redirect_pool_id) self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL, new_l7policy.action) self.l7policy_repo.update( self.session, id=l7policy.id, action=constants.L7POLICY_ACTION_REJECT) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertEqual(constants.L7POLICY_ACTION_REJECT, new_l7policy.action) self.assertIsNone(new_l7policy.redirect_url) self.assertIsNone(new_l7policy.redirect_pool_id) def test_update_action_rdr_pool_to_reject(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy = self.create_l7policy( uuidutils.generate_uuid(), listener.id, 1, action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=pool.id) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertIsNone(new_l7policy.redirect_url) self.l7policy_repo.update( self.session, id=l7policy.id, action=constants.L7POLICY_ACTION_REJECT) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertEqual(constants.L7POLICY_ACTION_REJECT, new_l7policy.action) self.assertIsNone(new_l7policy.redirect_url) self.assertIsNone(new_l7policy.redirect_pool_id) def test_update_reject_to_rdr_pool(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) pool = self.create_pool(uuidutils.generate_uuid()) l7policy = self.create_l7policy( uuidutils.generate_uuid(), listener.id, 1, action=constants.L7POLICY_ACTION_REJECT) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertIsNone(new_l7policy.redirect_url) self.assertIsNone(new_l7policy.redirect_pool_id) self.l7policy_repo.update( self.session, id=l7policy.id, redirect_pool_id=pool.id) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertEqual(pool.id, new_l7policy.redirect_pool_id) 
self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL, new_l7policy.action) self.assertIsNone(new_l7policy.redirect_url) def test_update_reject_to_rdr_url(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) l7policy = self.create_l7policy( uuidutils.generate_uuid(), listener.id, 1, action=constants.L7POLICY_ACTION_REJECT) new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertIsNone(new_l7policy.redirect_url) self.assertIsNone(new_l7policy.redirect_pool_id) self.l7policy_repo.update( self.session, id=l7policy.id, redirect_url='http://www.example.com/') new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) self.assertEqual('http://www.example.com/', new_l7policy.redirect_url) self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL, new_l7policy.action) self.assertIsNone(new_l7policy.redirect_pool_id) def test_update_position_only(self): listener = self.create_listener(uuidutils.generate_uuid(), 80) l7policy_a = self.create_l7policy( uuidutils.generate_uuid(), listener.id, 1, action=constants.L7POLICY_ACTION_REJECT) l7policy_b = self.create_l7policy( uuidutils.generate_uuid(), listener.id, 2, action=constants.L7POLICY_ACTION_REJECT) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) self.assertEqual(1, new_l7policy_a.position) self.assertEqual(2, new_l7policy_b.position) self.l7policy_repo.update( self.session, id=l7policy_a.id, position=999) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) self.assertEqual(2, new_l7policy_a.position) self.assertEqual(1, new_l7policy_b.position) self.l7policy_repo.update( self.session, id=l7policy_a.id, position=1) new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id) new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id) self.assertEqual(1, new_l7policy_a.position) self.assertEqual(2, new_l7policy_b.position) def test_create_with_invalid_redirect_pool_id(self): bad_lb = self.lb_repo.create( self.session, id=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid(), provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) bad_pool = self.pool_repo.create( self.session, id=uuidutils.generate_uuid(), project_id=bad_lb.project_id, protocol=constants.PROTOCOL_HTTP, lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, provisioning_status=constants.ACTIVE, operating_status=constants.ONLINE, enabled=True) self.assertRaises(exceptions.NotFound, self.create_l7policy, uuidutils.generate_uuid(), self.listener.id, 1, action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=bad_pool.id) def test_create_with_invalid_redirect_url(self): self.assertRaises(exceptions.InvalidURL, self.create_l7policy, uuidutils.generate_uuid(), self.listener.id, 1, action=constants.L7POLICY_ACTION_REDIRECT_TO_URL, redirect_url="This is not a URL.") class L7RuleRepositoryTest(BaseRepositoryTest): def setUp(self): super(L7RuleRepositoryTest, self).setUp() self.listener = self.listener_repo.create( self.session, id=uuidutils.generate_uuid(), project_id=self.FAKE_UUID_2, protocol=constants.PROTOCOL_HTTP, protocol_port=80, connection_limit=1, operating_status=constants.ONLINE, provisioning_status=constants.ACTIVE, enabled=True, peer_port=1025) self.l7policy = self.l7policy_repo.create( self.session, id=self.FAKE_UUID_1, name='l7policy_test', 
            description='l7policy_description', listener_id=self.listener.id,
            position=1, action=constants.L7POLICY_ACTION_REJECT,
            operating_status=constants.ONLINE,
            provisioning_status=constants.ACTIVE, enabled=True)

    def create_l7rule(self, l7rule_id, l7policy_id,
                      type=constants.L7RULE_TYPE_PATH,
                      compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
                      key=None, value="/api", invert=False, enabled=True):
        l7rule = self.l7rule_repo.create(
            self.session, id=l7rule_id, l7policy_id=l7policy_id,
            type=type, compare_type=compare_type, key=key, value=value,
            invert=invert, provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE, enabled=enabled,
            tags=['test_tag'])
        return l7rule

    def test_get(self):
        l7rule = self.create_l7rule(uuidutils.generate_uuid(),
                                    self.l7policy.id)
        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
        self.assertIsInstance(new_l7rule, models.L7Rule)
        self.assertEqual(l7rule, new_l7rule)

    def test_get_all(self):
        l7policy = self.l7policy_repo.create(
            self.session, id=uuidutils.generate_uuid(), name='l7policy_test',
            description='l7policy_description',
            listener_id=self.listener.id, position=1,
            action=constants.L7POLICY_ACTION_REJECT,
            operating_status=constants.ONLINE,
            provisioning_status=constants.ACTIVE, enabled=True)
        l7rule_a = self.create_l7rule(uuidutils.generate_uuid(), l7policy.id)
        l7rule_b = self.create_l7rule(uuidutils.generate_uuid(), l7policy.id)
        new_l7rule_a = self.l7rule_repo.get(self.session, id=l7rule_a.id)
        new_l7rule_b = self.l7rule_repo.get(self.session, id=l7rule_b.id)
        l7rule_list, _ = self.l7rule_repo.get_all(
            self.session, l7policy_id=l7policy.id)
        self.assertIsInstance(l7rule_list, list)
        self.assertEqual(2, len(l7rule_list))
        self.assertIn(new_l7rule_a.id, [r.id for r in l7rule_list])
        self.assertIn(new_l7rule_b.id, [r.id for r in l7rule_list])

    def test_create(self):
        l7rule = self.create_l7rule(self.FAKE_UUID_1, self.l7policy.id)
        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
        self.assertEqual(self.FAKE_UUID_1, new_l7rule.id)
        self.assertEqual(self.l7policy.id, new_l7rule.l7policy_id)
        self.assertEqual(constants.L7RULE_TYPE_PATH, new_l7rule.type)
        self.assertEqual(constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
                         new_l7rule.compare_type)
        self.assertIsNone(new_l7rule.key)
        self.assertEqual('/api', new_l7rule.value)
        self.assertFalse(new_l7rule.invert)

    def test_create_without_id(self):
        l7rule = self.l7rule_repo.create(
            self.session, id=None, l7policy_id=self.l7policy.id,
            type=constants.L7RULE_TYPE_PATH,
            compare_type=constants.L7RULE_COMPARE_TYPE_CONTAINS,
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            value='something', enabled=True)
        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
        self.assertIsNotNone(l7rule.id)
        self.assertEqual(self.l7policy.id, new_l7rule.l7policy_id)
        self.assertEqual(constants.L7RULE_TYPE_PATH, new_l7rule.type)
        self.assertEqual(constants.L7RULE_COMPARE_TYPE_CONTAINS,
                         new_l7rule.compare_type)
        self.assertIsNone(new_l7rule.key)
        self.assertEqual('something', new_l7rule.value)
        self.assertFalse(new_l7rule.invert)

    def test_l7rule_create_without_l7policy_id(self):
        self.assertRaises(
            db_exception.DBError, self.l7rule_repo.create,
            self.session, id=None, type=constants.L7RULE_TYPE_PATH,
            compare_type=constants.L7RULE_COMPARE_TYPE_CONTAINS,
            provisioning_status=constants.ACTIVE,
            operating_status=constants.ONLINE,
            value='something', enabled=True)

    def test_update(self):
        l7rule = self.create_l7rule(uuidutils.generate_uuid(),
                                    self.l7policy.id,
                                    type=constants.L7RULE_TYPE_HEADER,
                                    key="My-Header")
        new_l7rule =
self.l7rule_repo.get(self.session, id=l7rule.id) self.assertEqual('/api', new_l7rule.value) self.assertFalse(new_l7rule.invert) update_dict = {'type': constants.L7RULE_TYPE_PATH, 'value': '/images', 'invert': True} self.l7rule_repo.update(self.session, id=l7rule.id, **update_dict) new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) self.assertEqual(constants.L7RULE_TYPE_PATH, new_l7rule.type) self.assertEqual('/images', new_l7rule.value) self.assertIsNone(new_l7rule.key) self.assertTrue(new_l7rule.invert) def test_update_bad_id(self): self.assertRaises(exceptions.NotFound, self.l7rule_repo.update, self.session, id='bad id', value='/some/path') def test_bad_update(self): l7rule = self.create_l7rule(uuidutils.generate_uuid(), self.l7policy.id) new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) self.assertEqual('/api', new_l7rule.value) self.assertRaises(exceptions.InvalidString, self.l7rule_repo.update, self.session, id=l7rule.id, value='bad path') def test_delete(self): l7rule = self.create_l7rule(uuidutils.generate_uuid(), self.l7policy.id) self.l7rule_repo.delete(self.session, id=l7rule.id) self.assertIsNone(self.l7rule_repo.get(self.session, id=l7rule.id)) def test_create_bad_rule_type(self): self.assertRaises(exceptions.InvalidL7Rule, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type="not valid") def test_create_header_rule(self): l7rule = self.create_l7rule( uuidutils.generate_uuid(), self.l7policy.id, type=constants.L7RULE_TYPE_HEADER, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, key="Some-header", value='"some value"') new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) self.assertEqual(constants.L7RULE_TYPE_HEADER, new_l7rule.type) self.assertEqual(constants.L7RULE_COMPARE_TYPE_EQUAL_TO, new_l7rule.compare_type) self.assertEqual('Some-header', new_l7rule.key) self.assertEqual('"some value"', new_l7rule.value) def test_create_header_rule_no_key(self): self.assertRaises( exceptions.InvalidL7Rule, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_HEADER, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, value='"some value"') def test_create_header_rule_invalid_key(self): self.assertRaises( exceptions.InvalidString, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_HEADER, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, key='bad key;', value='"some value"') def test_create_header_rule_invalid_value_string(self): self.assertRaises( exceptions.InvalidString, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_HEADER, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, key='Some-header', value='\x18') def test_create_header_rule_invalid_value_regex(self): self.assertRaises( exceptions.InvalidRegex, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_HEADER, compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, key='Some-header', value='bad regex\\') def test_create_header_rule_bad_compare_type(self): self.assertRaises( exceptions.InvalidL7Rule, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_HEADER, compare_type="bad compare", key="Some-header", value='"some value"') def test_create_cookie_rule(self): l7rule = self.create_l7rule( uuidutils.generate_uuid(), self.l7policy.id, type=constants.L7RULE_TYPE_COOKIE, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, key="some_cookie", value='some-value') new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) 
self.assertEqual(constants.L7RULE_TYPE_COOKIE, new_l7rule.type) self.assertEqual(constants.L7RULE_COMPARE_TYPE_EQUAL_TO, new_l7rule.compare_type) self.assertEqual('some_cookie', new_l7rule.key) self.assertEqual('some-value', new_l7rule.value) def test_create_cookie_rule_no_key(self): self.assertRaises( exceptions.InvalidL7Rule, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_COOKIE, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, value='some-value') def test_create_cookie_rule_invalid_key(self): self.assertRaises( exceptions.InvalidString, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_COOKIE, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, key='bad key;', value='some-value') def test_create_cookie_rule_invalid_value_string(self): self.assertRaises( exceptions.InvalidString, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_COOKIE, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, key='some_cookie', value='bad value;') def test_create_cookie_rule_invalid_value_regex(self): self.assertRaises( exceptions.InvalidRegex, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_COOKIE, compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, key='some_cookie', value='bad regex\\') def test_create_cookie_rule_bad_compare_type(self): self.assertRaises( exceptions.InvalidL7Rule, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_COOKIE, compare_type="bad compare", key="some_cookie", value='some-value') def test_create_path_rule(self): l7rule = self.create_l7rule( uuidutils.generate_uuid(), self.l7policy.id, type=constants.L7RULE_TYPE_PATH, compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, value='/some/path') new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) self.assertEqual(constants.L7RULE_TYPE_PATH, new_l7rule.type) self.assertEqual(constants.L7RULE_COMPARE_TYPE_STARTS_WITH, new_l7rule.compare_type) self.assertEqual('/some/path', new_l7rule.value) def test_create_path_rule_invalid_value_string(self): self.assertRaises( exceptions.InvalidString, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_PATH, compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, value='bad path') def test_create_path_rule_invalid_value_regex(self): self.assertRaises( exceptions.InvalidRegex, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_PATH, compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, value='bad regex\\') def test_create_path_rule_bad_compare_type(self): self.assertRaises( exceptions.InvalidL7Rule, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_PATH, compare_type="bad compare", value='/some/path') def test_create_host_name_rule(self): l7rule = self.create_l7rule( uuidutils.generate_uuid(), self.l7policy.id, type=constants.L7RULE_TYPE_HOST_NAME, compare_type=constants.L7RULE_COMPARE_TYPE_ENDS_WITH, value='.example.com') new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) self.assertEqual(constants.L7RULE_TYPE_HOST_NAME, new_l7rule.type) self.assertEqual(constants.L7RULE_COMPARE_TYPE_ENDS_WITH, new_l7rule.compare_type) self.assertEqual('.example.com', new_l7rule.value) def test_create_host_name_rule_invalid_value_string(self): self.assertRaises( exceptions.InvalidString, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_HOST_NAME, compare_type=constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 
value='bad hostname') def test_create_host_name_rule_invalid_value_regex(self): self.assertRaises( exceptions.InvalidRegex, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_HOST_NAME, compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, value='bad regex\\') def test_create_host_name_rule_bad_compare_type(self): self.assertRaises( exceptions.InvalidL7Rule, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_HOST_NAME, compare_type="bad compare", value='.example.com') def test_create_file_type_rule(self): l7rule = self.create_l7rule( uuidutils.generate_uuid(), self.l7policy.id, type=constants.L7RULE_TYPE_FILE_TYPE, compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, value='png|jpg') new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) self.assertEqual(constants.L7RULE_TYPE_FILE_TYPE, new_l7rule.type) self.assertEqual(constants.L7RULE_COMPARE_TYPE_REGEX, new_l7rule.compare_type) self.assertEqual('png|jpg', new_l7rule.value) def test_create_file_type_rule_invalid_value_string(self): self.assertRaises( exceptions.InvalidString, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_FILE_TYPE, compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, value='bad file type') def test_create_file_type_rule_invalid_value_regex(self): self.assertRaises( exceptions.InvalidRegex, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_FILE_TYPE, compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, value='bad regex\\') def test_create_file_type_rule_bad_compare_type(self): self.assertRaises( exceptions.InvalidL7Rule, self.create_l7rule, self.FAKE_UUID_1, self.l7policy.id, type=constants.L7RULE_TYPE_FILE_TYPE, compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, value='png|jpg') class TestQuotasRepository(BaseRepositoryTest): def setUp(self): super(TestQuotasRepository, self).setUp() def update_quotas(self, project_id, load_balancer=20, listener=20, pool=20, health_monitor=20, member=20): quota = {'load_balancer': load_balancer, 'listener': listener, 'pool': pool, 'health_monitor': health_monitor, 'member': member} quotas = self.quota_repo.update(self.session, project_id, quota=quota) return quotas def _compare(self, expected, observed): self.assertEqual(expected.project_id, observed.project_id) self.assertEqual(expected.load_balancer, observed.load_balancer) self.assertEqual(expected.listener, observed.listener) self.assertEqual(expected.pool, observed.pool) self.assertEqual(expected.health_monitor, observed.health_monitor) self.assertEqual(expected.member, observed.member) def test_get(self): expected = self.update_quotas(self.FAKE_UUID_1) observed = self.quota_repo.get(self.session, project_id=self.FAKE_UUID_1) self.assertIsInstance(observed, models.Quotas) self._compare(expected, observed) def test_update(self): first_expected = self.update_quotas(self.FAKE_UUID_1) first_observed = self.quota_repo.get(self.session, project_id=self.FAKE_UUID_1) second_expected = self.update_quotas(self.FAKE_UUID_1, load_balancer=1) second_observed = self.quota_repo.get(self.session, project_id=self.FAKE_UUID_1) self.assertIsInstance(first_expected, models.Quotas) self._compare(first_expected, first_observed) self.assertIsInstance(second_expected, models.Quotas) self._compare(second_expected, second_observed) self.assertIsNot(first_expected.load_balancer, second_expected.load_balancer) def test_delete(self): expected = self.update_quotas(self.FAKE_UUID_1) observed = self.quota_repo.get(self.session, 
                                       project_id=self.FAKE_UUID_1)
        self.assertIsInstance(observed, models.Quotas)
        self._compare(expected, observed)
        self.quota_repo.delete(self.session, self.FAKE_UUID_1)
        observed = self.quota_repo.get(self.session,
                                       project_id=self.FAKE_UUID_1)
        self.assertIsNone(observed.health_monitor)
        self.assertIsNone(observed.load_balancer)
        self.assertIsNone(observed.listener)
        self.assertIsNone(observed.member)
        self.assertIsNone(observed.pool)

    def test_delete_non_existent(self):
        self.assertRaises(exceptions.NotFound,
                          self.quota_repo.delete,
                          self.session, 'bogus')


class FlavorProfileRepositoryTest(BaseRepositoryTest):

    def create_flavor_profile(self, fp_id):
        fp = self.flavor_profile_repo.create(
            self.session, id=fp_id, name="fp1", provider_name='pr1',
            flavor_data="{'image': 'ubuntu'}")
        return fp

    def test_get(self):
        fp = self.create_flavor_profile(fp_id=self.FAKE_UUID_1)
        new_fp = self.flavor_profile_repo.get(self.session, id=fp.id)
        self.assertIsInstance(new_fp, models.FlavorProfile)
        self.assertEqual(fp, new_fp)

    def test_get_all(self):
        fp1 = self.create_flavor_profile(fp_id=self.FAKE_UUID_1)
        fp2 = self.create_flavor_profile(fp_id=self.FAKE_UUID_2)
        fp_list, _ = self.flavor_profile_repo.get_all(
            self.session, query_options=defer('name'))
        self.assertIsInstance(fp_list, list)
        self.assertEqual(2, len(fp_list))
        self.assertEqual(fp1, fp_list[0])
        self.assertEqual(fp2, fp_list[1])

    def test_create(self):
        fp = self.create_flavor_profile(fp_id=self.FAKE_UUID_1)
        self.assertIsInstance(fp, models.FlavorProfile)
        self.assertEqual(self.FAKE_UUID_1, fp.id)
        self.assertEqual("fp1", fp.name)

    def test_delete(self):
        fp = self.create_flavor_profile(fp_id=self.FAKE_UUID_1)
        self.flavor_profile_repo.delete(self.session, id=fp.id)
        self.assertIsNone(self.flavor_profile_repo.get(
            self.session, id=fp.id))


class FlavorRepositoryTest(BaseRepositoryTest):

    PROVIDER_NAME = 'provider1'

    def create_flavor_profile(self):
        fp = self.flavor_profile_repo.create(
            self.session, id=uuidutils.generate_uuid(), name="fp1",
            provider_name=self.PROVIDER_NAME,
            flavor_data='{"image": "ubuntu"}')
        return fp

    def create_flavor(self, flavor_id, name):
        fp = self.create_flavor_profile()
        flavor = self.flavor_repo.create(
            self.session, id=flavor_id, name=name,
            flavor_profile_id=fp.id, description='test',
            enabled=True)
        return flavor

    def test_get(self):
        flavor = self.create_flavor(flavor_id=self.FAKE_UUID_2,
                                    name='flavor')
        new_flavor = self.flavor_repo.get(self.session, id=flavor.id)
        self.assertIsInstance(new_flavor, models.Flavor)
        self.assertEqual(flavor, new_flavor)

    def test_get_all(self):
        fl1 = self.create_flavor(flavor_id=self.FAKE_UUID_2, name='flavor1')
        fl2 = self.create_flavor(flavor_id=self.FAKE_UUID_3, name='flavor2')
        fl_list, _ = self.flavor_repo.get_all(self.session,
                                              query_options=defer('enabled'))
        self.assertIsInstance(fl_list, list)
        self.assertEqual(2, len(fl_list))
        self.assertEqual(fl1, fl_list[0])
        self.assertEqual(fl2, fl_list[1])

    def test_create(self):
        fl = self.create_flavor(flavor_id=self.FAKE_UUID_2, name='fl1')
        self.assertIsInstance(fl, models.Flavor)
        self.assertEqual(self.FAKE_UUID_2, fl.id)
        self.assertEqual("fl1", fl.name)

    def test_delete(self):
        fl = self.create_flavor(flavor_id=self.FAKE_UUID_2, name='fl1')
        self.flavor_repo.delete(self.session, id=fl.id)
        self.assertIsNone(self.flavor_repo.get(
            self.session, id=fl.id))

    def test_get_flavor_metadata_dict(self):
        ref_dict = {'image': 'ubuntu'}
        self.create_flavor(flavor_id=self.FAKE_UUID_2, name='fl1')
        flavor_metadata_dict = self.flavor_repo.get_flavor_metadata_dict(
            self.session, self.FAKE_UUID_2)
        self.assertEqual(ref_dict, flavor_metadata_dict)

        # Test missing flavor
        self.assertRaises(sa_exception.NoResultFound,
                          self.flavor_repo.get_flavor_metadata_dict,
                          self.session, self.FAKE_UUID_1)

    def test_get_flavor_provider(self):
        self.create_flavor(flavor_id=self.FAKE_UUID_2, name='fl1')
        provider_name = self.flavor_repo.get_flavor_provider(self.session,
                                                             self.FAKE_UUID_2)
        self.assertEqual(self.PROVIDER_NAME, provider_name)

        # Test missing flavor
        self.assertRaises(sa_exception.NoResultFound,
                          self.flavor_repo.get_flavor_provider,
                          self.session, self.FAKE_UUID_1)
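# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the octavia source): the flavor tests
# above rely on flavor_data being a JSON string that
# get_flavor_metadata_dict() decodes into a plain dict, and on
# get_flavor_provider() resolving the provider via the linked flavor
# profile.  FakeProfile/FakeFlavor below are invented stand-ins for the real
# SQLAlchemy models, used only to show the contract the tests assert.
import json


class FakeProfile(object):
    def __init__(self, provider_name, flavor_data):
        self.provider_name = provider_name
        self.flavor_data = flavor_data


class FakeFlavor(object):
    def __init__(self, profile):
        self.flavor_profile = profile


def get_flavor_metadata_dict(flavor):
    # Decode the JSON blob stored on the flavor profile row.
    return json.loads(flavor.flavor_profile.flavor_data)


def get_flavor_provider(flavor):
    return flavor.flavor_profile.provider_name


# Mirrors the assertions in test_get_flavor_metadata_dict and
# test_get_flavor_provider above.
_flavor = FakeFlavor(FakeProfile('provider1', '{"image": "ubuntu"}'))
assert get_flavor_metadata_dict(_flavor) == {'image': 'ubuntu'}
assert get_flavor_provider(_flavor) == 'provider1'
# ---------------------------------------------------------------------------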
octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py

# Copyright 2017 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
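# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the octavia source): several tests in
# this file feed canned /proc file contents through parsing helpers such as
# _get_meminfo() and _load().  The essence of those parsers fits in a few
# self-contained lines; parse_meminfo/parse_loadavg are invented names.
def parse_meminfo(text):
    """Turn '/proc/meminfo' style 'Key:  value kB' lines into {Key: int}."""
    result = {}
    for line in text.splitlines():
        key, _, rest = line.partition(':')
        # Values may or may not carry a ' kB' suffix; the number is always
        # the first token after the colon.
        result[key.strip()] = int(rest.split()[0])
    return result


def parse_loadavg(text):
    """Return the three load averages from a '/proc/loadavg' line."""
    return text.split()[:3]


assert parse_meminfo('MemTotal: 21692784 kB\nHugePages_Total: 0\n') == {
    'MemTotal': 21692784, 'HugePages_Total': 0}
assert parse_loadavg('0.09 0.11 0.10 2/630 15346') == ['0.09', '0.11', '0.10']
# ---------------------------------------------------------------------------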
import random
from unittest import mock

from oslo_utils import uuidutils

from octavia.amphorae.backends.agent import api_server
from octavia.amphorae.backends.agent.api_server import amphora_info
from octavia.amphorae.backends.agent.api_server import util
from octavia.common.jinja.haproxy.combined_listeners import jinja_cfg
from octavia.tests.common import utils as test_utils
import octavia.tests.unit.base as base
from octavia.tests.unit.common.sample_configs import sample_configs_combined


class TestAmphoraInfo(base.TestCase):

    API_VERSION = random.randrange(0, 10000)
    BASE_AMP_PATH = '/var/lib/octavia'
    BASE_CRT_PATH = BASE_AMP_PATH + '/certs'
    HAPROXY_VERSION = random.randrange(0, 10000)
    KEEPALIVED_VERSION = random.randrange(0, 10000)
    IPVSADM_VERSION = random.randrange(0, 10000)
    FAKE_LISTENER_ID_1 = uuidutils.generate_uuid()
    FAKE_LISTENER_ID_2 = uuidutils.generate_uuid()
    FAKE_LISTENER_ID_3 = uuidutils.generate_uuid()
    FAKE_LISTENER_ID_4 = uuidutils.generate_uuid()
    LB_ID_1 = uuidutils.generate_uuid()

    def setUp(self):
        super(TestAmphoraInfo, self).setUp()
        self.osutils_mock = mock.MagicMock()
        self.amp_info = amphora_info.AmphoraInfo(self.osutils_mock)
        self.udp_driver = mock.MagicMock()

        # setup a fake haproxy config file
        templater = jinja_cfg.JinjaTemplater(
            base_amp_path=self.BASE_AMP_PATH,
            base_crt_dir=self.BASE_CRT_PATH)
        tls_tuple = {'cont_id_1':
                     sample_configs_combined.sample_tls_container_tuple(
                         id='tls_container_id', certificate='imaCert1',
                         private_key='imaPrivateKey1', primary_cn='FakeCN')}
        self.rendered_haproxy_cfg = templater.render_loadbalancer_obj(
            sample_configs_combined.sample_amphora_tuple(),
            [sample_configs_combined.sample_listener_tuple(
                proto='TERMINATED_HTTPS', tls=True, sni=True)],
            tls_tuple)
        path = util.config_path(self.LB_ID_1)
        self.useFixture(test_utils.OpenFixture(path,
                                               self.rendered_haproxy_cfg))

    def _return_version(self, package_name):
        if package_name == 'ipvsadm':
            return self.IPVSADM_VERSION
        elif package_name == 'keepalived':
            return self.KEEPALIVED_VERSION
        else:
            return self.HAPROXY_VERSION

    @mock.patch.object(amphora_info, "webob")
    @mock.patch('octavia.amphorae.backends.agent.api_server.'
                'amphora_info.AmphoraInfo._get_version_of_installed_package',
                return_value=HAPROXY_VERSION)
    @mock.patch('socket.gethostname', return_value='FAKE_HOST')
    def test_compile_amphora_info(self, mock_gethostname, mock_pkg_version,
                                  mock_webob):
        original_version = api_server.VERSION
        api_server.VERSION = self.API_VERSION
        expected_dict = {'api_version': self.API_VERSION,
                         'hostname': 'FAKE_HOST',
                         'haproxy_version': self.HAPROXY_VERSION}
        self.amp_info.compile_amphora_info()
        mock_webob.Response.assert_called_once_with(json=expected_dict)
        api_server.VERSION = original_version

    @mock.patch.object(amphora_info, "webob")
    @mock.patch('octavia.amphorae.backends.agent.api_server.'
'amphora_info.AmphoraInfo._get_version_of_installed_package') @mock.patch('socket.gethostname', return_value='FAKE_HOST') def test_compile_amphora_info_for_udp(self, mock_gethostname, mock_pkg_version, mock_webob): mock_pkg_version.side_effect = self._return_version self.udp_driver.get_subscribed_amp_compile_info.side_effect = [ ['keepalived', 'ipvsadm']] original_version = api_server.VERSION api_server.VERSION = self.API_VERSION expected_dict = {'api_version': self.API_VERSION, 'hostname': 'FAKE_HOST', 'haproxy_version': self.HAPROXY_VERSION, 'keepalived_version': self.KEEPALIVED_VERSION, 'ipvsadm_version': self.IPVSADM_VERSION } self.amp_info.compile_amphora_info(extend_udp_driver=self.udp_driver) mock_webob.Response.assert_called_once_with(json=expected_dict) api_server.VERSION = original_version @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_listeners', return_value=[FAKE_LISTENER_ID_1, FAKE_LISTENER_ID_2]) @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._get_meminfo') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._cpu') @mock.patch('os.statvfs') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._get_networks') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._load') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._get_version_of_installed_package') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._count_haproxy_processes') @mock.patch('socket.gethostname', return_value='FAKE_HOST') def test_compile_amphora_details(self, mhostname, m_count, m_pkg_version, m_load, m_get_nets, m_os, m_cpu, mget_mem, mget_listener): mget_mem.return_value = {'SwapCached': 0, 'Buffers': 344792, 'MemTotal': 21692784, 'Cached': 4271856, 'Slab': 534384, 'MemFree': 12685624, 'Shmem': 9520} m_cpu.return_value = {'user': '252551', 'softirq': '8336', 'system': '52554', 'total': 7503411} m_pkg_version.side_effect = self._return_version mdisk_info = mock.MagicMock() m_os.return_value = mdisk_info mdisk_info.f_blocks = 34676992 mdisk_info.f_bfree = 28398016 mdisk_info.f_frsize = 4096 mdisk_info.f_bavail = 26630646 m_get_nets.return_value = {'eth1': {'network_rx': 996, 'network_tx': 418}, 'eth2': {'network_rx': 848, 'network_tx': 578}} m_load.return_value = ['0.09', '0.11', '0.10'] m_count.return_value = 5 original_version = api_server.VERSION api_server.VERSION = self.API_VERSION expected_dict = {u'active': True, u'api_version': self.API_VERSION, u'cpu': {u'soft_irq': u'8336', u'system': u'52554', u'total': 7503411, u'user': u'252551'}, u'disk': {u'available': 109079126016, u'used': 25718685696}, u'haproxy_count': 5, u'haproxy_version': self.HAPROXY_VERSION, u'hostname': u'FAKE_HOST', u'listeners': sorted([self.FAKE_LISTENER_ID_1, self.FAKE_LISTENER_ID_2]), u'load': [u'0.09', u'0.11', u'0.10'], u'memory': {u'buffers': 344792, u'cached': 4271856, u'free': 12685624, u'shared': 9520, u'slab': 534384, u'swap_used': 0, u'total': 21692784}, u'networks': {u'eth1': {u'network_rx': 996, u'network_tx': 418}, u'eth2': {u'network_rx': 848, u'network_tx': 578}}, u'packages': {}, u'topology': u'SINGLE', u'topology_status': u'OK'} actual = self.amp_info.compile_amphora_details() self.assertEqual(expected_dict, actual.json) api_server.VERSION = original_version @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
'get_udp_listeners', return_value=[FAKE_LISTENER_ID_3, FAKE_LISTENER_ID_4]) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_loadbalancers') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._get_meminfo') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._cpu') @mock.patch('os.statvfs') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._get_networks') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._load') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._get_version_of_installed_package') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._count_haproxy_processes') @mock.patch('socket.gethostname', return_value='FAKE_HOST') def test_compile_amphora_details_for_udp(self, mhostname, m_count, m_pkg_version, m_load, m_get_nets, m_os, m_cpu, mget_mem, mock_get_lb, mget_udp_listener): mget_mem.return_value = {'SwapCached': 0, 'Buffers': 344792, 'MemTotal': 21692784, 'Cached': 4271856, 'Slab': 534384, 'MemFree': 12685624, 'Shmem': 9520} m_cpu.return_value = {'user': '252551', 'softirq': '8336', 'system': '52554', 'total': 7503411} m_pkg_version.side_effect = self._return_version mdisk_info = mock.MagicMock() m_os.return_value = mdisk_info mdisk_info.f_blocks = 34676992 mdisk_info.f_bfree = 28398016 mdisk_info.f_frsize = 4096 mdisk_info.f_bavail = 26630646 m_get_nets.return_value = {'eth1': {'network_rx': 996, 'network_tx': 418}, 'eth2': {'network_rx': 848, 'network_tx': 578}} m_load.return_value = ['0.09', '0.11', '0.10'] m_count.return_value = 5 self.udp_driver.get_subscribed_amp_compile_info.return_value = [ 'keepalived', 'ipvsadm'] self.udp_driver.is_listener_running.side_effect = [True, False] mock_get_lb.return_value = [self.LB_ID_1] original_version = api_server.VERSION api_server.VERSION = self.API_VERSION expected_dict = {u'active': True, u'api_version': self.API_VERSION, u'cpu': {u'soft_irq': u'8336', u'system': u'52554', u'total': 7503411, u'user': u'252551'}, u'disk': {u'available': 109079126016, u'used': 25718685696}, u'haproxy_count': 5, u'haproxy_version': self.HAPROXY_VERSION, u'keepalived_version': self.KEEPALIVED_VERSION, u'ipvsadm_version': self.IPVSADM_VERSION, u'udp_listener_process_count': 1, u'hostname': u'FAKE_HOST', u'listeners': sorted(list(set( [self.FAKE_LISTENER_ID_3, self.FAKE_LISTENER_ID_4, 'sample_listener_id_1']))), u'load': [u'0.09', u'0.11', u'0.10'], u'memory': {u'buffers': 344792, u'cached': 4271856, u'free': 12685624, u'shared': 9520, u'slab': 534384, u'swap_used': 0, u'total': 21692784}, u'networks': {u'eth1': {u'network_rx': 996, u'network_tx': 418}, u'eth2': {u'network_rx': 848, u'network_tx': 578}}, u'packages': {}, u'topology': u'SINGLE', u'topology_status': u'OK'} actual = self.amp_info.compile_amphora_details(self.udp_driver) self.assertEqual(expected_dict, actual.json) api_server.VERSION = original_version @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
'is_lb_running') def test__count_haproxy_process(self, mock_is_running): # Test no listeners passed in result = self.amp_info._count_haproxy_processes([]) self.assertEqual(0, result) # Test with a listener specified mock_is_running.side_effect = [True, False] result = self.amp_info._count_haproxy_processes( [uuidutils.generate_uuid(), uuidutils.generate_uuid()]) self.assertEqual(1, result) def test__count_udp_listener_processes(self): self.udp_driver.is_listener_running.side_effect = [True, False, True] expected = 2 actual = self.amp_info._count_udp_listener_processes( self.udp_driver, [self.FAKE_LISTENER_ID_1, self.FAKE_LISTENER_ID_2, self.FAKE_LISTENER_ID_3]) self.assertEqual(expected, actual) @mock.patch('octavia.amphorae.backends.agent.api_server.' 'amphora_info.AmphoraInfo._get_version_of_installed_package') def test__get_extend_body_from_udp_driver(self, m_get_version): self.udp_driver.get_subscribed_amp_compile_info.return_value = [ 'keepalived', 'ipvsadm'] m_get_version.side_effect = self._return_version expected = { "keepalived_version": self.KEEPALIVED_VERSION, "ipvsadm_version": self.IPVSADM_VERSION } actual = self.amp_info._get_extend_body_from_udp_driver( self.udp_driver) self.assertEqual(expected, actual) def test__get_meminfo(self): # Known data test meminfo = ('MemTotal: 21692784 kB\n' 'MemFree: 12685624 kB\n' 'MemAvailable: 17384072 kB\n' 'Buffers: 344792 kB\n' 'Cached: 4271856 kB\n' 'SwapCached: 0 kB\n' 'Active: 5808816 kB\n' 'Inactive: 2445236 kB\n' 'Active(anon): 3646184 kB\n' 'Inactive(anon): 8672 kB\n' 'Active(file): 2162632 kB\n' 'Inactive(file): 2436564 kB\n' 'Unevictable: 52664 kB\n' 'Mlocked: 52664 kB\n' 'SwapTotal: 20476924 kB\n' 'SwapFree: 20476924 kB\n' 'Dirty: 92 kB\n' 'Writeback: 0 kB\n' 'AnonPages: 3690088 kB\n' 'Mapped: 108520 kB\n' 'Shmem: 9520 kB\n' 'Slab: 534384 kB\n' 'SReclaimable: 458160 kB\n' 'SUnreclaim: 76224 kB\n' 'KernelStack: 11776 kB\n' 'PageTables: 33088 kB\n' 'NFS_Unstable: 0 kB\n' 'Bounce: 0 kB\n' 'WritebackTmp: 0 kB\n' 'CommitLimit: 31323316 kB\n' 'Committed_AS: 6930732 kB\n' 'VmallocTotal: 34359738367 kB\n' 'VmallocUsed: 0 kB\n' 'VmallocChunk: 0 kB\n' 'HardwareCorrupted: 0 kB\n' 'AnonHugePages: 1400832 kB\n' 'CmaTotal: 0 kB\n' 'CmaFree: 0 kB\n' 'HugePages_Total: 0\n' 'HugePages_Free: 0\n' 'HugePages_Rsvd: 0\n' 'HugePages_Surp: 0\n' 'Hugepagesize: 2048 kB\n' 'DirectMap4k: 130880 kB\n' 'DirectMap2M: 8376320 kB\n' 'DirectMap1G: 14680064 kB\n') self.useFixture(test_utils.OpenFixture('/proc/meminfo', contents=meminfo)) expected_result = {'SwapCached': 0, 'DirectMap2M': 8376320, 'CmaTotal': 0, 'Inactive': 2445236, 'KernelStack': 11776, 'SwapTotal': 20476924, 'VmallocUsed': 0, 'Buffers': 344792, 'MemTotal': 21692784, 'Mlocked': 52664, 'Cached': 4271856, 'AnonPages': 3690088, 'Unevictable': 52664, 'SUnreclaim': 76224, 'MemFree': 12685624, 'Writeback': 0, 'NFS_Unstable': 0, 'VmallocTotal': 34359738367, 'MemAvailable': 17384072, 'CmaFree': 0, 'SwapFree': 20476924, 'AnonHugePages': 1400832, 'DirectMap1G': 14680064, 'Hugepagesize': 2048, 'Dirty': 92, 'Bounce': 0, 'PageTables': 33088, 'SReclaimable': 458160, 'Active': 5808816, 'Mapped': 108520, 'Slab': 534384, 'Active(anon)': 3646184, 'VmallocChunk': 0, 'Inactive(file)': 2436564, 'WritebackTmp': 0, 'Shmem': 9520, 'Inactive(anon)': 8672, 'HardwareCorrupted': 0, 'Active(file)': 2162632, 'DirectMap4k': 130880, 'Committed_AS': 6930732, 'CommitLimit': 31323316} result = self.amp_info._get_meminfo() self.assertEqual(expected_result, result) def test__cpu(self): sample_stat = 'cpu 252551 802 52554 7181757 7411 
0 8336 0 0 0'
        expected_result = {'user': '252551', 'iowait': '7411',
                           'nice': '802', 'softirq': '8336',
                           'idle': '7181757', 'system': '52554',
                           'total': 7503411, 'irq': '0'}
        self.useFixture(test_utils.OpenFixture('/proc/stat',
                                               contents=sample_stat))
        result = self.amp_info._cpu()
        self.assertEqual(expected_result, result)

    def test__load(self):
        sample_loadavg = '0.09 0.11 0.10 2/630 15346'
        expected_result = ['0.09', '0.11', '0.10']
        self.useFixture(test_utils.OpenFixture('/proc/loadavg',
                                               contents=sample_loadavg))
        result = self.amp_info._load()
        self.assertEqual(expected_result, result)

    @mock.patch('pyroute2.NetNS', create=True)
    def test__get_networks(self, mock_netns):
        # The output of get_links is huge, just pulling out the parts we
        # care about for this test.
        sample_get_links_minimal = [
            {'attrs': [('IFLA_IFNAME', 'lo')]},
            {'attrs': [('IFLA_IFNAME', 'eth1'),
                       ('IFLA_STATS64', {'tx_bytes': 418,
                                         'rx_bytes': 996})]},
            {'attrs': [('IFLA_IFNAME', 'eth2'),
                       ('IFLA_STATS64', {'tx_bytes': 578,
                                         'rx_bytes': 848})]},
            {'attrs': [('IFLA_IFNAME', 'eth3')]}]
        netns_handle = mock_netns.return_value.__enter__.return_value
        netns_handle.get_links.return_value = sample_get_links_minimal
        expected_result = {'eth1': {'network_rx': 996, 'network_tx': 418},
                           'eth2': {'network_rx': 848, 'network_tx': 578}}
        result = self.amp_info._get_networks()
        self.assertEqual(expected_result, result)

octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/api_server/test_haproxy_compatibility.py

# Copyright 2017 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
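# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the octavia source): the tests below feed
# 'haproxy -v' banners such as 'HA-Proxy version 1.6.3 2099/10/12' and
# 'HA-Proxy version 2.3-dev0 ...' into get_haproxy_versions() and expect a
# (major, minor) pair back.  parse_haproxy_version is an invented name
# showing one minimal way to extract that pair.
import re


def parse_haproxy_version(banner):
    # Match 'version <major>.<minor>' and ignore any '-dev0' style suffix
    # or patch level that follows the minor number.
    match = re.search(r'version\s+(\d+)\.(\d+)', banner)
    if not match:
        raise ValueError('unrecognized haproxy banner: %s' % banner)
    return int(match.group(1)), int(match.group(2))


assert parse_haproxy_version('THIS-App version 1.6.3 2099/10/12') == (1, 6)
assert parse_haproxy_version('HA-Proxy version 2.3-dev0 2019/11/25') == (2, 3)
# ---------------------------------------------------------------------------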
from unittest import mock from octavia.amphorae.backends.agent.api_server import haproxy_compatibility from octavia.common import constants import octavia.tests.unit.base as base from octavia.tests.unit.common.sample_configs import sample_configs_combined class HAProxyCompatTestCase(base.TestCase): def setUp(self): super(HAProxyCompatTestCase, self).setUp() self.old_haproxy_global = ( "# Configuration for loadbalancer sample_loadbalancer_id_1\n" "global\n" " daemon\n" " user nobody\n" " log /run/rsyslog/octavia/log local0\n" " log /run/rsyslog/octavia/log local1 notice\n" " stats socket /var/lib/octavia/sample_loadbalancer_id_1.sock" " mode 0666 level user\n" " maxconn {maxconn}\n\n" "defaults\n" " log global\n" " retries 3\n" " option redispatch\n" " option splice-request\n" " option splice-response\n" " option http-keep-alive\n\n\n\n" "frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n" " log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ %{{+Q}}r\\ %ST\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{{+Q}}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) self.backend_without_external = ( "backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31\n" " option httpchk GET /index.html\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie " "sample_member_id_2\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) self.backend_with_external = ( "backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31\n" " option httpchk GET /index.html\n" " http-check expect rstatus 418\n" " option external-check\n" " external-check command /var/lib/octavia/ping-wrapper.sh\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie " "sample_member_id_2\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) @mock.patch('subprocess.check_output') def test_get_haproxy_versions(self, mock_process): mock_process.return_value = ( b"THIS-App version 1.6.3 2099/10/12\n" b"Some other data here \n") major, minor = haproxy_compatibility.get_haproxy_versions() self.assertEqual(1, major) self.assertEqual(6, minor) @mock.patch('subprocess.check_output') def test_get_haproxy_versions_devel(self, mock_process): mock_process.return_value = ( b"HA-Proxy version 2.3-dev0 2019/11/25 - https://haproxy.org/\n" b"Some other data here \n") major, minor = haproxy_compatibility.get_haproxy_versions() self.assertEqual(2, major) self.assertEqual(3, minor) @mock.patch('octavia.amphorae.backends.agent.api_server.' 
                'haproxy_compatibility.get_haproxy_versions')
    def test_process_cfg_for_version_compat(self, mock_get_version):
        # Test 1.6 version path, no change to config expected
        mock_get_version.return_value = [1, 6]
        test_config = sample_configs_combined.sample_base_expected_config(
            backend=self.backend_with_external)
        result_config = haproxy_compatibility.process_cfg_for_version_compat(
            test_config)
        self.assertEqual(test_config, result_config)

        # Test 1.5 version path, external-check should be removed
        mock_get_version.return_value = [1, 5]
        test_config = sample_configs_combined.sample_base_expected_config(
            backend=self.backend_with_external)
        result_config = haproxy_compatibility.process_cfg_for_version_compat(
            test_config)
        expected_config = (self.old_haproxy_global +
                           self.backend_without_external)
        self.assertEqual(expected_config, result_config)

octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalived.py

# Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import subprocess
from unittest import mock

import flask

from octavia.amphorae.backends.agent.api_server import keepalived
import octavia.tests.unit.base as base


class KeepalivedTestCase(base.TestCase):

    def setUp(self):
        super(KeepalivedTestCase, self).setUp()
        self.app = flask.Flask(__name__)
        self.client = self.app.test_client()
        self._ctx = self.app.test_request_context()
        self._ctx.push()
        self.test_keepalived = keepalived.Keepalived()

    @mock.patch('subprocess.check_output')
    def test_manager_keepalived_service(self, mock_check_output):
        res = self.test_keepalived.manager_keepalived_service('start')
        cmd = ("/usr/sbin/service octavia-keepalived {action}".format(
            action='start'))
        mock_check_output.assert_called_once_with(cmd.split(),
                                                  stderr=subprocess.STDOUT)
        self.assertEqual(202, res.status_code)

        res = self.test_keepalived.manager_keepalived_service('restart')
        self.assertEqual(400, res.status_code)

        mock_check_output.side_effect = subprocess.CalledProcessError(
            1, 'blah!')
        res = self.test_keepalived.manager_keepalived_service('start')
        self.assertEqual(500, res.status_code)
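# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the octavia source): KeepalivedTestCase
# above asserts three behaviours of manager_keepalived_service(): a known
# action shells out via subprocess and yields 202, an unknown action such as
# 'restart' yields 400, and a CalledProcessError yields 500.  manage_service
# is an invented name; status codes are returned as plain ints here instead
# of HTTP responses, and the accepted-action set is an assumption.
import subprocess


def manage_service(action):
    if action not in ('start', 'stop', 'reload'):
        return 400  # invalid request
    cmd = '/usr/sbin/service octavia-keepalived {action}'.format(
        action=action)
    try:
        subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return 500  # the service command itself failed
    return 202  # accepted
# ---------------------------------------------------------------------------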
octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalivedlvs.py

# Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from oslo_utils import uuidutils

from octavia.amphorae.backends.agent.api_server import keepalivedlvs
from octavia.amphorae.backends.agent.api_server import util
from octavia.tests.unit import base


class KeepalivedLvsTestCase(base.TestCase):

    FAKE_ID = uuidutils.generate_uuid()

    def setUp(self):
        super(KeepalivedLvsTestCase, self).setUp()
        self.test_keepalivedlvs = keepalivedlvs.KeepalivedLvs()

    @mock.patch.object(keepalivedlvs, "webob")
    @mock.patch('os.path.exists')
    def test_delete_udp_listener_not_exist(self, m_exist, m_webob):
        m_exist.return_value = False
        self.test_keepalivedlvs.delete_udp_listener(self.FAKE_ID)
        calls = [
            mock.call(
                json=dict(message='UDP Listener Not Found',
                          details="No UDP listener with UUID: "
                                  "{0}".format(self.FAKE_ID)),
                status=404),
            mock.call(json={'message': 'OK'})
        ]
        m_webob.Response.assert_has_calls(calls)

    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'get_os_init_system')
    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'get_keepalivedlvs_pid')
    @mock.patch('subprocess.check_output')
    @mock.patch('os.remove')
    @mock.patch('os.path.exists')
    def test_delete_udp_listener_unsupported_sysinit(self, m_exist, m_remove,
                                                     m_check_output, mget_pid,
                                                     m_init_sys):
        m_exist.return_value = True
        mget_pid.return_value = '0'
        self.assertRaises(
            util.UnknownInitError,
            self.test_keepalivedlvs.delete_udp_listener, self.FAKE_ID)

octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
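# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the octavia source): test_get_listeners_on_lb
# below checks that listener names are recovered from the haproxy config by
# scanning for 'frontend <name>' lines, and only when the pid file and the
# matching /proc/<pid> entry both exist.  The config-scanning half reduces
# to a couple of lines; listeners_from_haproxy_cfg is an invented name.
def listeners_from_haproxy_cfg(cfg_text):
    return [line.split()[1] for line in cfg_text.splitlines()
            if line.startswith('frontend ')]


assert listeners_from_haproxy_cfg(
    'frontend list1\nbackend foo\nfrontend list2') == ['list1', 'list2']
assert listeners_from_haproxy_cfg('backend only') == []
# ---------------------------------------------------------------------------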
import subprocess from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.amphorae.backends.agent.api_server import loadbalancer from octavia.amphorae.backends.agent.api_server import util as agent_util from octavia.common import constants as consts from octavia.tests.common import utils as test_utils import octavia.tests.unit.base as base CONF = cfg.CONF LISTENER_ID1 = uuidutils.generate_uuid() LB_ID1 = uuidutils.generate_uuid() class ListenerTestCase(base.TestCase): def setUp(self): super(ListenerTestCase, self).setUp() self.mock_platform = mock.patch("distro.id").start() self.mock_platform.return_value = "ubuntu" self.test_loadbalancer = loadbalancer.Loadbalancer() @mock.patch('os.path.exists') @mock.patch('octavia.amphorae.backends.agent.api_server' + '.util.get_haproxy_pid') def test_check_haproxy_status(self, mock_pid, mock_exists): mock_pid.return_value = '1245' mock_exists.side_effect = [True, True] self.assertEqual( consts.ACTIVE, self.test_loadbalancer._check_haproxy_status(LISTENER_ID1)) mock_exists.side_effect = [True, False] self.assertEqual( consts.OFFLINE, self.test_loadbalancer._check_haproxy_status(LISTENER_ID1)) mock_exists.side_effect = [False] self.assertEqual( consts.OFFLINE, self.test_loadbalancer._check_haproxy_status(LISTENER_ID1)) @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' 'Loadbalancer._check_haproxy_status') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'vrrp_check_script_update') @mock.patch('os.path.exists') @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' 'Loadbalancer._check_lb_exists') @mock.patch('subprocess.check_output') def test_start_stop_lb(self, mock_check_output, mock_lb_exists, mock_path_exists, mock_vrrp_update, mock_check_status): listener_id = uuidutils.generate_uuid() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) mock_path_exists.side_effect = [False, True, True, False, False] mock_check_status.side_effect = ['bogus', consts.OFFLINE] # Happy path - No VRRP ref_command_split = ['/usr/sbin/service'] ref_command_split.append('haproxy-{}'.format(listener_id)) ref_command_split.append(consts.AMP_ACTION_START) result = self.test_loadbalancer.start_stop_lb( listener_id, consts.AMP_ACTION_START) mock_check_output.assert_called_once_with(ref_command_split, stderr=subprocess.STDOUT) mock_lb_exists.assert_called_once_with(listener_id) mock_vrrp_update.assert_not_called() self.assertEqual(202, result.status_code) self.assertEqual('OK', result.json['message']) ref_details = ('Configuration file is valid\n' 'haproxy daemon for {0} started'.format(listener_id)) self.assertEqual(ref_details, result.json['details']) # Happy path - VRRP - RELOAD conf.config(group="controller_worker", loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY) mock_lb_exists.reset_mock() mock_vrrp_update.reset_mock() mock_check_output.reset_mock() ref_command_split = ['/usr/sbin/service'] ref_command_split.append('haproxy-{}'.format(listener_id)) ref_command_split.append(consts.AMP_ACTION_RELOAD) result = self.test_loadbalancer.start_stop_lb( listener_id, consts.AMP_ACTION_RELOAD) mock_check_output.assert_called_once_with(ref_command_split, stderr=subprocess.STDOUT) mock_lb_exists.assert_called_once_with(listener_id) mock_vrrp_update.assert_called_once_with(listener_id, consts.AMP_ACTION_RELOAD) self.assertEqual(202, result.status_code) self.assertEqual('OK', result.json['message']) ref_details = ('Listener {0} 
{1}ed'.format(listener_id, consts.AMP_ACTION_RELOAD)) self.assertEqual(ref_details, result.json['details']) # Happy path - VRRP - RELOAD - OFFLINE mock_lb_exists.reset_mock() mock_vrrp_update.reset_mock() mock_check_output.reset_mock() ref_command_split = ['/usr/sbin/service'] ref_command_split.append('haproxy-{}'.format(listener_id)) ref_command_split.append(consts.AMP_ACTION_START) result = self.test_loadbalancer.start_stop_lb( listener_id, consts.AMP_ACTION_RELOAD) mock_check_output.assert_called_once_with(ref_command_split, stderr=subprocess.STDOUT) mock_lb_exists.assert_called_once_with(listener_id) mock_vrrp_update.assert_called_once_with(listener_id, consts.AMP_ACTION_RELOAD) self.assertEqual(202, result.status_code) self.assertEqual('OK', result.json['message']) ref_details = ('Configuration file is valid\n' 'haproxy daemon for {0} started'.format(listener_id)) self.assertEqual(ref_details, result.json['details']) # Unhappy path - Not already running conf.config(group="controller_worker", loadbalancer_topology=consts.TOPOLOGY_SINGLE) mock_lb_exists.reset_mock() mock_vrrp_update.reset_mock() mock_check_output.reset_mock() ref_command_split = ['/usr/sbin/service'] ref_command_split.append('haproxy-{}'.format(listener_id)) ref_command_split.append(consts.AMP_ACTION_START) mock_check_output.side_effect = subprocess.CalledProcessError( output=b'bogus', returncode=-2, cmd='sit') result = self.test_loadbalancer.start_stop_lb( listener_id, consts.AMP_ACTION_START) mock_check_output.assert_called_once_with(ref_command_split, stderr=subprocess.STDOUT) mock_lb_exists.assert_called_once_with(listener_id) mock_vrrp_update.assert_not_called() self.assertEqual(500, result.status_code) self.assertEqual('Error {}ing haproxy'.format(consts.AMP_ACTION_START), result.json['message']) self.assertEqual('bogus', result.json['details']) # Unhappy path - Already running mock_lb_exists.reset_mock() mock_vrrp_update.reset_mock() mock_check_output.reset_mock() ref_command_split = ['/usr/sbin/service'] ref_command_split.append('haproxy-{}'.format(listener_id)) ref_command_split.append(consts.AMP_ACTION_START) mock_check_output.side_effect = subprocess.CalledProcessError( output=b'Job is already running', returncode=-2, cmd='sit') result = self.test_loadbalancer.start_stop_lb( listener_id, consts.AMP_ACTION_START) mock_check_output.assert_called_once_with(ref_command_split, stderr=subprocess.STDOUT) mock_lb_exists.assert_called_once_with(listener_id) mock_vrrp_update.assert_not_called() self.assertEqual(202, result.status_code) self.assertEqual('OK', result.json['message']) ref_details = ('Configuration file is valid\n' 'haproxy daemon for {0} started'.format(listener_id)) self.assertEqual(ref_details, result.json['details']) # Invalid action mock_check_output.reset_mock() mock_lb_exists.reset_mock() mock_path_exists.reset_mock() mock_vrrp_update.reset_mock() result = self.test_loadbalancer.start_stop_lb(listener_id, 'bogus') self.assertEqual(400, result.status_code) self.assertEqual('Invalid Request', result.json['message']) self.assertEqual('Unknown action: bogus', result.json['details']) mock_lb_exists.assert_not_called() mock_path_exists.assert_not_called() mock_vrrp_update.assert_not_called() mock_check_output.assert_not_called() @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'config_path') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
                'get_haproxy_pid')
    @mock.patch('os.path.exists')
    def test_get_listeners_on_lb(self, mock_exists, mock_get_haproxy_pid,
                                 mock_config_path):
        fake_cfg_path = '/some/fake/cfg/file.cfg'
        mock_config_path.return_value = fake_cfg_path
        mock_get_haproxy_pid.return_value = 'fake_pid'

        # Finds two listeners
        mock_exists.side_effect = [True, True]
        fake_cfg_data = 'frontend list1\nbackend foo\nfrontend list2'
        self.useFixture(
            test_utils.OpenFixture(fake_cfg_path, fake_cfg_data)).mock_open
        result = self.test_loadbalancer._get_listeners_on_lb(LB_ID1)
        self.assertEqual(['list1', 'list2'], result)
        mock_exists.assert_has_calls([mock.call(agent_util.pid_path(LB_ID1)),
                                      mock.call('/proc/fake_pid')])

        # No PID file, no listeners
        mock_exists.reset_mock()
        mock_exists.side_effect = [False]
        result = self.test_loadbalancer._get_listeners_on_lb(LB_ID1)
        self.assertEqual([], result)
        mock_exists.assert_called_once_with(agent_util.pid_path(LB_ID1))

        # PID file, no running process, no listeners
        mock_exists.reset_mock()
        mock_exists.side_effect = [True, False]
        result = self.test_loadbalancer._get_listeners_on_lb(LB_ID1)
        self.assertEqual([], result)
        mock_exists.assert_has_calls([mock.call(agent_util.pid_path(LB_ID1)),
                                      mock.call('/proc/fake_pid')])

        # PID file, running process, no listeners
        mock_exists.reset_mock()
        mock_exists.side_effect = [True, True]
        fake_cfg_data = 'backend only'
        self.useFixture(
            test_utils.OpenFixture(fake_cfg_path, fake_cfg_data)).mock_open
        result = self.test_loadbalancer._get_listeners_on_lb(LB_ID1)
        self.assertEqual([], result)
        mock_exists.assert_has_calls([mock.call(agent_util.pid_path(LB_ID1)),
                                      mock.call('/proc/fake_pid')])


# File: octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py

# Copyright 2017 Redhat.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
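# The tests below exercise osutils.BaseOS and its distro-specific subclasses
# (Ubuntu, RH, CentOS), which decide where the amphora agent writes network
# interface, static route and rule files for a given distribution.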
import ipaddress import os import shutil from unittest import mock from oslo_config import fixture as oslo_fixture from octavia.amphorae.backends.agent.api_server import osutils from octavia.common import config from octavia.common import constants as consts from octavia.common import exceptions as octavia_exceptions from octavia.common import utils from octavia.tests.common import utils as test_utils from octavia.tests.unit import base class TestOSUtils(base.TestCase): def setUp(self): super(TestOSUtils, self).setUp() self.base_os_util = osutils.BaseOS('unknown') with mock.patch('distro.id', return_value='ubuntu'): self.ubuntu_os_util = osutils.BaseOS.get_os_util() with mock.patch('distro.id', return_value='rhel'): self.rh_os_util = osutils.BaseOS.get_os_util() with mock.patch('distro.id', return_value='centos'): with mock.patch('distro.version', return_value='8'): self.centos_os_util = osutils.BaseOS.get_os_util() with mock.patch('distro.id', return_value='centos'): with mock.patch('distro.version', return_value='7'): self.centos7_os_util = osutils.BaseOS.get_os_util() def test_get_os_util(self): with mock.patch('distro.id', return_value='ubuntu'): returned_cls = osutils.BaseOS.get_os_util() self.assertIsInstance(returned_cls, osutils.Ubuntu) with mock.patch('distro.id', return_value='fedora'): returned_cls = osutils.BaseOS.get_os_util() self.assertIsInstance(returned_cls, osutils.RH) with mock.patch('distro.id', return_value='rhel'): returned_cls = osutils.BaseOS.get_os_util() self.assertIsInstance(returned_cls, osutils.RH) with mock.patch('distro.id', return_value='centos'): returned_cls = osutils.BaseOS.get_os_util() self.assertIsInstance(returned_cls, osutils.CentOS) with mock.patch('distro.id', return_value='FakeOS'): self.assertRaises( octavia_exceptions.InvalidAmphoraOperatingSystem, osutils.BaseOS.get_os_util) def test_get_network_interface_file(self): conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) fake_agent_server_network_dir = "/path/to/interface" fake_agent_server_network_file = "/path/to/interfaces_file" base_fake_nic_path = os.path.join(fake_agent_server_network_dir, consts.NETNS_PRIMARY_INTERFACE) base_real_nic_path = os.path.join( consts.UBUNTU_AMP_NET_DIR_TEMPLATE.format( netns=consts.AMPHORA_NAMESPACE), consts.NETNS_PRIMARY_INTERFACE) rh_interface_name = 'ifcfg-{nic}'.format( nic=consts.NETNS_PRIMARY_INTERFACE) rh_fake_nic_path = os.path.join(fake_agent_server_network_dir, rh_interface_name) rh_real_nic_path = os.path.join( consts.RH_AMP_NET_DIR_TEMPLATE.format( netns=consts.AMPHORA_NAMESPACE), rh_interface_name) ubuntu_interface_name = '{nic}.cfg'.format( nic=consts.NETNS_PRIMARY_INTERFACE) ubuntu_fake_nic_path = os.path.join(fake_agent_server_network_dir, ubuntu_interface_name) ubuntu_real_nic_path = os.path.join( consts.UBUNTU_AMP_NET_DIR_TEMPLATE.format( netns=consts.AMPHORA_NAMESPACE), ubuntu_interface_name) # Check that agent_server_network_file is returned, when provided conf.config(group="amphora_agent", agent_server_network_file=fake_agent_server_network_file) base_interface_file = ( self.base_os_util. get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE)) self.assertEqual(fake_agent_server_network_file, base_interface_file) rh_interface_file = ( self.rh_os_util. get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE)) self.assertEqual(fake_agent_server_network_file, rh_interface_file) ubuntu_interface_file = ( self.ubuntu_os_util. 
            get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE))
        self.assertEqual(fake_agent_server_network_file,
                         ubuntu_interface_file)

        # Check that agent_server_network_dir is used, when provided
        conf.config(group="amphora_agent", agent_server_network_file=None)
        conf.config(group="amphora_agent",
                    agent_server_network_dir=fake_agent_server_network_dir)

        base_interface_file = (
            self.base_os_util.
            get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE))
        self.assertEqual(base_fake_nic_path, base_interface_file)

        rh_interface_file = (
            self.rh_os_util.
            get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE))
        self.assertEqual(rh_fake_nic_path, rh_interface_file)

        ubuntu_interface_file = (
            self.ubuntu_os_util.
            get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE))
        self.assertEqual(ubuntu_fake_nic_path, ubuntu_interface_file)

        # Check the case when neither agent_server_network_dir nor
        # agent_server_network_file was provided.
        conf.config(group="amphora_agent", agent_server_network_file=None)
        conf.config(group="amphora_agent", agent_server_network_dir=None)

        base_interface_file = (
            self.base_os_util.
            get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE))
        self.assertEqual(base_real_nic_path, base_interface_file)

        rh_interface_file = (
            self.rh_os_util.
            get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE))
        self.assertEqual(rh_real_nic_path, rh_interface_file)

        ubuntu_interface_file = (
            self.ubuntu_os_util.
            get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE))
        self.assertEqual(ubuntu_real_nic_path, ubuntu_interface_file)

    def _test_RH_get_static_routes_interface_file(self, version):
        conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))

        fake_agent_server_network_dir = "/path/to/interface"
        fake_agent_server_network_file = "/path/to/interfaces_file"

        route = 'route6' if version == 6 else 'route'
        rh_route_name = '{route}-{nic}'.format(
            route=route, nic=consts.NETNS_PRIMARY_INTERFACE)
        rh_fake_route_path = os.path.join(fake_agent_server_network_dir,
                                          rh_route_name)
        rh_real_route_path = os.path.join(
            consts.RH_AMP_NET_DIR_TEMPLATE.format(
                netns=consts.AMPHORA_NAMESPACE),
            rh_route_name)

        # Check that agent_server_network_file is returned, when provided
        conf.config(group="amphora_agent",
                    agent_server_network_file=fake_agent_server_network_file)

        rh_route_file = (
            self.rh_os_util.
            get_static_routes_interface_file(consts.NETNS_PRIMARY_INTERFACE,
                                             version))
        self.assertEqual(fake_agent_server_network_file, rh_route_file)

        # Check that agent_server_network_dir is used, when provided
        conf.config(group="amphora_agent", agent_server_network_file=None)
        conf.config(group="amphora_agent",
                    agent_server_network_dir=fake_agent_server_network_dir)

        rh_route_file = (
            self.rh_os_util.
            get_static_routes_interface_file(consts.NETNS_PRIMARY_INTERFACE,
                                             version))
        self.assertEqual(rh_fake_route_path, rh_route_file)

        # Check the case when neither agent_server_network_dir nor
        # agent_server_network_file was provided.
        conf.config(group="amphora_agent", agent_server_network_file=None)
        conf.config(group="amphora_agent", agent_server_network_dir=None)

        rh_route_file = (
            self.rh_os_util.
            get_static_routes_interface_file(consts.NETNS_PRIMARY_INTERFACE,
                                             version))
        self.assertEqual(rh_real_route_path, rh_route_file)

    def test_RH_get_static_routes_interface_file(self):
        self._test_RH_get_static_routes_interface_file(4)

    def test_RH_get_static_routes_interface_file_ipv6(self):
        self._test_RH_get_static_routes_interface_file(6)

    def _test_RH_get_route_rules_interface_file(self, version):
        conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))

        fake_agent_server_network_dir = "/path/to/interface"
        fake_agent_server_network_file = "/path/to/interfaces_file"

        rule = 'rule6' if version == 6 else 'rule'
        rh_route_rules_name = '{rule}-{nic}'.format(
            rule=rule, nic=consts.NETNS_PRIMARY_INTERFACE)
        rh_fake_route_rules_path = os.path.join(fake_agent_server_network_dir,
                                                rh_route_rules_name)
        rh_real_route_rules_path = os.path.join(
            consts.RH_AMP_NET_DIR_TEMPLATE.format(
                netns=consts.AMPHORA_NAMESPACE),
            rh_route_rules_name)

        # Check that agent_server_network_file is returned, when provided
        conf.config(group="amphora_agent",
                    agent_server_network_file=fake_agent_server_network_file)

        rh_route_rules_file = (
            self.rh_os_util.
            get_route_rules_interface_file(consts.NETNS_PRIMARY_INTERFACE,
                                           version))
        self.assertEqual(fake_agent_server_network_file, rh_route_rules_file)

        # Check that agent_server_network_dir is used, when provided
        conf.config(group="amphora_agent", agent_server_network_file=None)
        conf.config(group="amphora_agent",
                    agent_server_network_dir=fake_agent_server_network_dir)

        rh_route_rules_file = (
            self.rh_os_util.
            get_route_rules_interface_file(consts.NETNS_PRIMARY_INTERFACE,
                                           version))
        self.assertEqual(rh_fake_route_rules_path, rh_route_rules_file)

        # Check the case when neither agent_server_network_dir nor
        # agent_server_network_file was provided.
        conf.config(group="amphora_agent", agent_server_network_file=None)
        conf.config(group="amphora_agent", agent_server_network_dir=None)

        rh_route_rules_file = (
            self.rh_os_util.
            get_route_rules_interface_file(consts.NETNS_PRIMARY_INTERFACE,
                                           version))
        self.assertEqual(rh_real_route_rules_path, rh_route_rules_file)

    def test_RH_get_route_rules_interface_file(self):
        self._test_RH_get_route_rules_interface_file(4)

    def test_RH_get_route_rules_interface_file_ipv6(self):
        self._test_RH_get_route_rules_interface_file(6)

    def test_cmd_get_version_of_installed_package(self):
        package_name = 'foo'
        ubuntu_cmd = "dpkg-query -W -f=${{Version}} {name}".format(
            name=package_name)
        rh_cmd = "rpm -q --queryformat %{{VERSION}} {name}".format(
            name=package_name)

        returned_ubuntu_cmd = (
            self.ubuntu_os_util.cmd_get_version_of_installed_package(
                package_name))
        self.assertEqual(ubuntu_cmd, returned_ubuntu_cmd)

        returned_rh_cmd = (self.rh_os_util.
cmd_get_version_of_installed_package(package_name)) self.assertEqual(rh_cmd, returned_rh_cmd) def test_cmd_get_version_of_installed_package_mapped(self): package_name = 'haproxy' centos7_cmd = "rpm -q --queryformat %{VERSION} haproxy18" returned_centos7_cmd = ( self.centos7_os_util.cmd_get_version_of_installed_package( package_name)) self.assertEqual(centos7_cmd, returned_centos7_cmd) centos_cmd = "rpm -q --queryformat %{VERSION} haproxy" returned_centos_cmd = ( self.centos_os_util.cmd_get_version_of_installed_package( package_name)) self.assertEqual(centos_cmd, returned_centos_cmd) def test_has_ifup_all(self): self.assertTrue(self.base_os_util.has_ifup_all()) self.assertTrue(self.ubuntu_os_util.has_ifup_all()) self.assertFalse(self.rh_os_util.has_ifup_all()) def test_write_vip_interface_file(self): netns_interface = u'eth1234' FIXED_IP = u'192.0.2.2' SUBNET_CIDR = u'192.0.2.0/24' GATEWAY = u'192.51.100.1' DEST1 = u'198.51.100.0/24' DEST2 = u'203.0.113.0/24' NEXTHOP = u'192.0.2.1' MTU = 1450 FIXED_IP_IPV6 = u'2001:0db8:0000:0000:0000:0000:0000:0001' # Subnet prefix is purposefully not 32, because that coincidentally # matches the result of any arbitrary IPv4->prefixlen conversion SUBNET_CIDR_IPV6 = u'2001:db8::/70' ip = ipaddress.ip_address(FIXED_IP) network = ipaddress.ip_network(SUBNET_CIDR) broadcast = network.broadcast_address.exploded netmask = network.netmask.exploded netmask_prefix = utils.netmask_to_prefix(netmask) ipv6 = ipaddress.ip_address(FIXED_IP_IPV6) networkv6 = ipaddress.ip_network(SUBNET_CIDR_IPV6) broadcastv6 = networkv6.broadcast_address.exploded netmaskv6 = networkv6.prefixlen host_routes = [ {'gw': NEXTHOP, 'network': ipaddress.ip_network(DEST1)}, {'gw': NEXTHOP, 'network': ipaddress.ip_network(DEST2)} ] path = self.ubuntu_os_util.get_network_interface_file(netns_interface) mock_open = self.useFixture(test_utils.OpenFixture(path)).mock_open mock_template = mock.MagicMock() # Test an IPv4 VIP with mock.patch('os.open'), mock.patch.object( os, 'fdopen', mock_open): self.ubuntu_os_util.write_vip_interface_file( interface_file_path=path, primary_interface=netns_interface, vip=FIXED_IP, ip=ip, broadcast=broadcast, netmask=netmask, gateway=GATEWAY, mtu=MTU, vrrp_ip=None, vrrp_version=None, render_host_routes=host_routes, template_vip=mock_template) mock_template.render.assert_called_once_with( consts=consts, interface=netns_interface, vip=FIXED_IP, vip_ipv6=False, prefix=netmask_prefix, broadcast=broadcast, netmask=netmask, gateway=GATEWAY, network=SUBNET_CIDR, mtu=MTU, vrrp_ip=None, vrrp_ipv6=False, host_routes=host_routes, topology="SINGLE", ) # Now test with an IPv6 VIP mock_template.reset_mock() with mock.patch('os.open'), mock.patch.object( os, 'fdopen', mock_open): self.ubuntu_os_util.write_vip_interface_file( interface_file_path=path, primary_interface=netns_interface, vip=FIXED_IP_IPV6, ip=ipv6, broadcast=broadcastv6, netmask=netmaskv6, gateway=GATEWAY, mtu=MTU, vrrp_ip=None, vrrp_version=None, render_host_routes=host_routes, template_vip=mock_template) mock_template.render.assert_called_once_with( consts=consts, interface=netns_interface, vip=FIXED_IP_IPV6, vip_ipv6=True, prefix=netmaskv6, broadcast=broadcastv6, netmask=netmaskv6, gateway=GATEWAY, network=SUBNET_CIDR_IPV6, mtu=MTU, vrrp_ip=None, vrrp_ipv6=False, host_routes=host_routes, topology="SINGLE", ) def test_write_port_interface_file(self): FIXED_IP = u'192.0.2.2' NEXTHOP = u'192.0.2.1' DEST = u'198.51.100.0/24' host_routes = [ {'nexthop': NEXTHOP, 'destination': ipaddress.ip_network(DEST)} ] FIXED_IP_IPV6 = 
u'2001:db8::2' NEXTHOP_IPV6 = u'2001:db8::1' DEST_IPV6 = u'2001:db8:51:100::/64' host_routes_ipv6 = [ {'nexthop': NEXTHOP_IPV6, 'destination': ipaddress.ip_network(DEST_IPV6)} ] ip_addr = {'ip_address': FIXED_IP, 'host_routes': host_routes} ipv6_addr = {'ip_address': FIXED_IP_IPV6, 'host_routes': host_routes_ipv6} netns_interface = 'eth1234' MTU = 1450 fixed_ips = [ip_addr, ipv6_addr] path = 'mypath' mock_template = mock.MagicMock() mock_open = self.useFixture(test_utils.OpenFixture(path)).mock_open mock_gen_text = mock.MagicMock() mock_local_scripts = mock.MagicMock() mock_wr_fi = mock.MagicMock() with mock.patch('os.open'), mock.patch.object( os, 'fdopen', mock_open), mock.patch.object( osutils.BaseOS, '_generate_network_file_text', mock_gen_text): self.base_os_util.write_port_interface_file( netns_interface=netns_interface, fixed_ips=fixed_ips, mtu=MTU, interface_file_path=path, template_port=mock_template) mock_gen_text.assert_called_once_with( netns_interface, fixed_ips, MTU, mock_template) mock_gen_text.reset_mock() with mock.patch('os.open'), mock.patch.object( os, 'fdopen', mock_open), mock.patch.object( osutils.BaseOS, '_generate_network_file_text', mock_gen_text), mock.patch.object( osutils.RH, '_write_ifup_ifdown_local_scripts_if_possible', mock_local_scripts), mock.patch.object( osutils.RH, 'write_static_routes_interface_file', mock_wr_fi): self.rh_os_util.write_port_interface_file( netns_interface=netns_interface, fixed_ips=fixed_ips, mtu=MTU, interface_file_path=path, template_port=mock_template) rh_route_name = 'route-{nic}'.format(nic=netns_interface) rh_real_route_path = os.path.join( consts.RH_AMP_NET_DIR_TEMPLATE.format( netns=consts.AMPHORA_NAMESPACE), rh_route_name) rh_route_name_ipv6 = 'route6-{nic}'.format(nic=netns_interface) rh_real_route_path_ipv6 = os.path.join( consts.RH_AMP_NET_DIR_TEMPLATE.format( netns=consts.AMPHORA_NAMESPACE), rh_route_name_ipv6) exp_routes = [ {'network': ipaddress.ip_network(DEST), 'gw': NEXTHOP} ] exp_routes_ipv6 = [ {'network': ipaddress.ip_network(DEST_IPV6), 'gw': NEXTHOP_IPV6} ] expected_calls = [ mock.call(rh_real_route_path, netns_interface, exp_routes, mock.ANY, None, None, None), mock.call(rh_real_route_path_ipv6, netns_interface, exp_routes_ipv6, mock.ANY, None, None, None)] mock_gen_text.assert_called_once_with( netns_interface, fixed_ips, MTU, mock_template) self.assertEqual(2, mock_wr_fi.call_count) mock_wr_fi.assert_has_calls(expected_calls) mock_local_scripts.assert_called_once() @mock.patch('shutil.copy2') @mock.patch('os.makedirs') @mock.patch('shutil.copytree') def test_create_netns_dir(self, mock_copytree, mock_makedirs, mock_copy2): network_dir = 'foo' netns_network_dir = 'fake_netns_network' ignore = shutil.ignore_patterns('fake_eth*', 'fake_loopback*') self.rh_os_util.create_netns_dir(network_dir, netns_network_dir, ignore) mock_copytree.assert_any_call( network_dir, os.path.join('/etc/netns/', consts.AMPHORA_NAMESPACE, netns_network_dir), ignore=ignore, symlinks=True) mock_makedirs.assert_any_call(os.path.join('/etc/netns/', consts.AMPHORA_NAMESPACE)) mock_copy2.assert_any_call( '/etc/sysconfig/network', '/etc/netns/{netns}/sysconfig'.format( netns=consts.AMPHORA_NAMESPACE)) mock_copytree.reset_mock() mock_makedirs.reset_mock() mock_copy2.reset_mock() self.ubuntu_os_util.create_netns_dir(network_dir, netns_network_dir, ignore) mock_copytree.assert_any_call( network_dir, os.path.join('/etc/netns/', consts.AMPHORA_NAMESPACE, netns_network_dir), ignore=ignore, symlinks=True) 
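        # Ubuntu runs the same copytree/makedirs flow, but it must skip the
        # RH-only /etc/sysconfig/network copy, which the copy2 assertion
        # below verifies.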
        mock_makedirs.assert_any_call(os.path.join('/etc/netns/',
                                                   consts.AMPHORA_NAMESPACE))
        mock_copy2.assert_not_called()


# File: octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py

# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import subprocess
from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from werkzeug import exceptions as wz_exceptions

from octavia.amphorae.backends.agent.api_server import osutils
from octavia.amphorae.backends.agent.api_server import plug
from octavia.common import constants
import octavia.tests.unit.base as base

FAKE_CIDR_IPV4 = '10.0.0.0/24'
FAKE_GATEWAY_IPV4 = '10.0.0.1'
FAKE_IP_IPV4 = '10.0.0.2'
FAKE_CIDR_IPV6 = '2001:db8::/32'
FAKE_GATEWAY_IPV6 = '2001:db8::1'
FAKE_IP_IPV6 = '2001:db8::2'
FAKE_IP_IPV6_EXPANDED = '2001:0db8:0000:0000:0000:0000:0000:0002'
FAKE_MAC_ADDRESS = 'ab:cd:ef:00:ff:22'
FAKE_INTERFACE = 'eth33'


class TestPlug(base.TestCase):
    def setUp(self):
        super(TestPlug, self).setUp()
        self.mock_platform = mock.patch("distro.id").start()
        self.mock_platform.return_value = "ubuntu"
        self.osutil = osutils.BaseOS.get_os_util()
        self.test_plug = plug.Plug(self.osutil)
        self.addCleanup(self.mock_platform.stop)

    @mock.patch('pyroute2.IPRoute', create=True)
    def test__interface_by_mac_case_insensitive_ubuntu(self, mock_ipr):
        mock_ipr_instance = mock.MagicMock()
        mock_ipr_instance.link_lookup.return_value = [33]
        mock_ipr_instance.get_links.return_value = ({
            'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},)
        mock_ipr().__enter__.return_value = mock_ipr_instance

        interface = self.test_plug._interface_by_mac(FAKE_MAC_ADDRESS.upper())
        self.assertEqual(FAKE_INTERFACE, interface)
        mock_ipr_instance.get_links.assert_called_once_with(33)

    @mock.patch('pyroute2.IPRoute', create=True)
    def test__interface_by_mac_not_found(self, mock_ipr):
        mock_ipr_instance = mock.MagicMock()
        mock_ipr_instance.link_lookup.return_value = []
        mock_ipr().__enter__.return_value = mock_ipr_instance

        fd_mock = mock.mock_open()
        open_mock = mock.Mock()
        isfile_mock = mock.Mock()
        with mock.patch('os.open', open_mock), mock.patch.object(
                os, 'fdopen', fd_mock), mock.patch.object(
                os.path, 'isfile', isfile_mock):
            self.assertRaises(wz_exceptions.HTTPException,
                              self.test_plug._interface_by_mac,
                              FAKE_MAC_ADDRESS.upper())
        open_mock.assert_called_once_with('/sys/bus/pci/rescan', os.O_WRONLY)
        fd_mock().write.assert_called_once_with('1')

    @mock.patch('pyroute2.IPRoute', create=True)
    def test__interface_by_mac_case_insensitive_rh(self, mock_ipr):
        mock_ipr_instance = mock.MagicMock()
        mock_ipr_instance.link_lookup.return_value = [33]
        mock_ipr_instance.get_links.return_value = ({
            'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},)
        mock_ipr().__enter__.return_value = mock_ipr_instance

        with mock.patch('distro.id', return_value='centos'):
            osutil = 
osutils.BaseOS.get_os_util() self.test_plug = plug.Plug(osutil) interface = self.test_plug._interface_by_mac( FAKE_MAC_ADDRESS.upper()) self.assertEqual(FAKE_INTERFACE, interface) mock_ipr_instance.get_links.assert_called_once_with(33) @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' '_interface_by_mac', return_value=FAKE_INTERFACE) @mock.patch('pyroute2.NSPopen', create=True) @mock.patch.object(plug, "webob") @mock.patch('pyroute2.IPRoute', create=True) @mock.patch('pyroute2.netns.create', create=True) @mock.patch('pyroute2.NetNS', create=True) @mock.patch('subprocess.check_output') @mock.patch('shutil.copytree') @mock.patch('os.makedirs') def test_plug_vip_ipv4(self, mock_makedirs, mock_copytree, mock_check_output, mock_netns, mock_netns_create, mock_pyroute2, mock_webob, mock_nspopen, mock_by_mac): m = mock.mock_open() with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): self.test_plug.plug_vip( vip=FAKE_IP_IPV4, subnet_cidr=FAKE_CIDR_IPV4, gateway=FAKE_GATEWAY_IPV4, mac_address=FAKE_MAC_ADDRESS ) mock_webob.Response.assert_any_call(json={ 'message': 'OK', 'details': 'VIP {vip} plugged on interface {interface}'.format( vip=FAKE_IP_IPV4, interface='eth1') }, status=202) calls = [mock.call('amphora-haproxy', ['/sbin/sysctl', '--system'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['modprobe', 'ip_vs'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['/sbin/sysctl', '-w', 'net.ipv4.ip_forward=1'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['/sbin/sysctl', '-w', 'net.ipv4.vs.conntrack=1'], stdout=subprocess.PIPE)] mock_nspopen.assert_has_calls(calls, any_order=True) @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' '_interface_by_mac', return_value=FAKE_INTERFACE) @mock.patch('pyroute2.NSPopen', create=True) @mock.patch.object(plug, "webob") @mock.patch('pyroute2.IPRoute', create=True) @mock.patch('pyroute2.netns.create', create=True) @mock.patch('pyroute2.NetNS', create=True) @mock.patch('subprocess.check_output') @mock.patch('shutil.copytree') @mock.patch('os.makedirs') def test_plug_vip_ipv6(self, mock_makedirs, mock_copytree, mock_check_output, mock_netns, mock_netns_create, mock_pyroute2, mock_webob, mock_nspopen, mock_by_mac): conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group='controller_worker', loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) m = mock.mock_open() with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): self.test_plug.plug_vip( vip=FAKE_IP_IPV6, subnet_cidr=FAKE_CIDR_IPV6, gateway=FAKE_GATEWAY_IPV6, mac_address=FAKE_MAC_ADDRESS ) mock_webob.Response.assert_any_call(json={ 'message': 'OK', 'details': 'VIP {vip} plugged on interface {interface}'.format( vip=FAKE_IP_IPV6_EXPANDED, interface='eth1') }, status=202) calls = [mock.call('amphora-haproxy', ['/sbin/sysctl', '--system'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['modprobe', 'ip_vs'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['/sbin/sysctl', '-w', 'net.ipv6.conf.all.forwarding=1'], stdout=subprocess.PIPE), mock.call('amphora-haproxy', ['/sbin/sysctl', '-w', 'net.ipv4.vs.conntrack=1'], stdout=subprocess.PIPE)] mock_nspopen.assert_has_calls(calls, any_order=True) @mock.patch.object(plug, "webob") @mock.patch('pyroute2.IPRoute', create=True) @mock.patch('pyroute2.netns.create', create=True) @mock.patch('pyroute2.NetNS', create=True) @mock.patch('subprocess.check_output') @mock.patch('shutil.copytree') @mock.patch('os.makedirs') def test_plug_vip_bad_ip(self, mock_makedirs, 
mock_copytree, mock_check_output, mock_netns, mock_netns_create, mock_pyroute2, mock_webob): m = mock.mock_open() with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): self.test_plug.plug_vip( vip="error", subnet_cidr=FAKE_CIDR_IPV4, gateway=FAKE_GATEWAY_IPV4, mac_address=FAKE_MAC_ADDRESS ) mock_webob.Response.assert_any_call(json={'message': 'Invalid VIP'}, status=400) @mock.patch('pyroute2.NetNS', create=True) def test__netns_interface_exists(self, mock_netns): netns_handle = mock_netns.return_value.__enter__.return_value netns_handle.get_links.return_value = [{ 'attrs': [['IFLA_ADDRESS', '123']]}] # Interface is found in netns self.assertTrue(self.test_plug._netns_interface_exists('123')) # Interface is not found in netns self.assertFalse(self.test_plug._netns_interface_exists('321')) class TestPlugNetwork(base.TestCase): def setUp(self): super(TestPlugNetwork, self).setUp() self.mock_platform = mock.patch("distro.id").start() def __generate_network_file_text_static_ip(self): netns_interface = 'eth1234' FIXED_IP = '192.0.2.2' BROADCAST = '192.0.2.255' SUBNET_CIDR = '192.0.2.0/24' NETMASK = '255.255.255.0' DEST1 = '198.51.100.0/24' DEST2 = '203.0.113.0/24' NEXTHOP = '192.0.2.1' MTU = 1450 fixed_ips = [{'ip_address': FIXED_IP, 'subnet_cidr': SUBNET_CIDR, 'host_routes': [ {'destination': DEST1, 'nexthop': NEXTHOP}, {'destination': DEST2, 'nexthop': NEXTHOP} ]}] format_text = ( '\n\n# Generated by Octavia agent\n' 'auto {netns_interface}\n' 'iface {netns_interface} inet static\n' 'address {fixed_ip}\n' 'broadcast {broadcast}\n' 'netmask {netmask}\n' 'mtu {mtu}\n' 'up route add -net {dest1} gw {nexthop} dev {netns_interface}\n' 'down route del -net {dest1} gw {nexthop} dev {netns_interface}\n' 'up route add -net {dest2} gw {nexthop} dev {netns_interface}\n' 'down route del -net {dest2} gw {nexthop} dev {netns_interface}\n' 'post-up /sbin/iptables -t nat -A POSTROUTING -p udp -o ' 'eth1234 -j MASQUERADE\n' 'post-down /sbin/iptables -t nat -D POSTROUTING -p udp -o eth1234 ' '-j MASQUERADE\n') template_port = osutils.j2_env.get_template('plug_port_ethX.conf.j2') text = self.test_plug._osutils._generate_network_file_text( netns_interface, fixed_ips, MTU, template_port) expected_text = format_text.format(netns_interface=netns_interface, fixed_ip=FIXED_IP, broadcast=BROADCAST, netmask=NETMASK, mtu=MTU, dest1=DEST1, dest2=DEST2, nexthop=NEXTHOP) self.assertEqual(expected_text, text) def __generate_network_file_text_two_static_ips(self): netns_interface = 'eth1234' FIXED_IP = '192.0.2.2' BROADCAST = '192.0.2.255' SUBNET_CIDR = '192.0.2.0/24' NETMASK = '255.255.255.0' DEST1 = '198.51.100.0/24' DEST2 = '203.0.113.0/24' NEXTHOP = '192.0.2.1' MTU = 1450 FIXED_IP_IPV6 = '2001:0db8:0000:0000:0000:0000:0000:0001' BROADCAST_IPV6 = '2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff' SUBNET_CIDR_IPV6 = '2001:db8::/32' NETMASK_IPV6 = '32' fixed_ips = [{'ip_address': FIXED_IP, 'subnet_cidr': SUBNET_CIDR, 'host_routes': [ {'destination': DEST1, 'nexthop': NEXTHOP}, {'destination': DEST2, 'nexthop': NEXTHOP} ]}, {'ip_address': FIXED_IP_IPV6, 'subnet_cidr': SUBNET_CIDR_IPV6, 'host_routes': []} ] format_text = ( '\n\n# Generated by Octavia agent\n' 'auto {netns_interface}\n' 'iface {netns_interface} inet static\n' 'address {fixed_ip}\n' 'broadcast {broadcast}\n' 'netmask {netmask}\n' 'mtu {mtu}\n' 'up route add -net {dest1} gw {nexthop} dev {netns_interface}\n' 'down route del -net {dest1} gw {nexthop} dev {netns_interface}\n' 'up route add -net {dest2} gw {nexthop} dev {netns_interface}\n' 'down route del -net 
{dest2} gw {nexthop} dev {netns_interface}\n'
            'post-up /sbin/iptables -t nat -A POSTROUTING -p udp -o '
            '{netns_interface} -j MASQUERADE\n'
            'post-down /sbin/iptables -t nat -D POSTROUTING -p udp -o '
            '{netns_interface} -j MASQUERADE\n'
            '\n\n# Generated by Octavia agent\n'
            'auto {netns_interface}\n'
            'iface {netns_interface} inet6 static\n'
            'address {fixed_ip_ipv6}\n'
            'broadcast {broadcast_ipv6}\n'
            'netmask {netmask_ipv6}\n'
            'mtu {mtu}\n'
            'post-up /sbin/ip6tables -t nat -A POSTROUTING -p udp -o '
            '{netns_interface} -j MASQUERADE\n'
            'post-down /sbin/ip6tables -t nat -D POSTROUTING -p udp -o '
            '{netns_interface} -j MASQUERADE\n')

        template_port = osutils.j2_env.get_template('plug_port_ethX.conf.j2')
        text = self.test_plug._osutils._generate_network_file_text(
            netns_interface, fixed_ips, MTU, template_port)
        expected_text = format_text.format(netns_interface=netns_interface,
                                           fixed_ip=FIXED_IP,
                                           broadcast=BROADCAST,
                                           netmask=NETMASK,
                                           mtu=MTU,
                                           dest1=DEST1,
                                           dest2=DEST2,
                                           nexthop=NEXTHOP,
                                           fixed_ip_ipv6=FIXED_IP_IPV6,
                                           broadcast_ipv6=BROADCAST_IPV6,
                                           netmask_ipv6=NETMASK_IPV6)
        self.assertEqual(expected_text, text)

    def _setup(self, os):
        self.mock_platform.return_value = os
        self.osutil = osutils.BaseOS.get_os_util()
        self.test_plug = plug.Plug(self.osutil)

    def test__generate_network_file_text_static_ip_ubuntu(self):
        self._setup("ubuntu")
        self.__generate_network_file_text_static_ip()

    def test__generate_network_file_text_static_ip_centos(self):
        self._setup("centos")
        self.__generate_network_file_text_static_ip()

    def test__generate_network_file_text_two_static_ips_ubuntu(self):
        self._setup("ubuntu")
        self.__generate_network_file_text_two_static_ips()

    def test__generate_network_file_text_two_static_ips_centos(self):
        self._setup("centos")
        self.__generate_network_file_text_two_static_ips()


# File: octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py

# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
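# The TestUtil cases below cover the agent's keepalived-lvs path helpers,
# init-system script locations, haproxy.cfg parsing (listener modes, VIP
# addresses) and VRRP check-script updates.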
import os import subprocess from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.amphorae.backends.agent.api_server import util from octavia.common import constants as consts from octavia.common.jinja.haproxy.combined_listeners import jinja_cfg from octavia.tests.common import utils as test_utils import octavia.tests.unit.base as base from octavia.tests.unit.common.sample_configs import sample_configs_combined BASE_AMP_PATH = '/var/lib/octavia' BASE_CRT_PATH = BASE_AMP_PATH + '/certs' CONF = cfg.CONF LISTENER_ID1 = uuidutils.generate_uuid() LB_ID1 = uuidutils.generate_uuid() class TestUtil(base.TestCase): def setUp(self): super(TestUtil, self).setUp() self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.listener_id = uuidutils.generate_uuid() self.jinja_cfg = jinja_cfg.JinjaTemplater( base_amp_path=BASE_AMP_PATH, base_crt_dir=BASE_CRT_PATH) def test_keepalived_lvs_dir(self): fake_path = '/fake/path' self.CONF.config(group="haproxy_amphora", base_path=fake_path) result = util.keepalived_lvs_dir() fake_path = fake_path + '/lvs' self.assertEqual(fake_path, result) def test_keepalived_lvs_init_path(self): # Test systemd ref_path = (consts.SYSTEMD_DIR + '/' + consts.KEEPALIVED_SYSTEMD_PREFIX % str(self.listener_id)) result = util.keepalived_lvs_init_path(consts.INIT_SYSTEMD, self.listener_id) self.assertEqual(ref_path, result) # Test upstart ref_path = (consts.UPSTART_DIR + '/' + consts.KEEPALIVED_UPSTART_PREFIX % str(self.listener_id)) result = util.keepalived_lvs_init_path(consts.INIT_UPSTART, self.listener_id) self.assertEqual(ref_path, result) # Test sysvinit ref_path = (consts.SYSVINIT_DIR + '/' + consts.KEEPALIVED_SYSVINIT_PREFIX % str(self.listener_id)) result = util.keepalived_lvs_init_path(consts.INIT_SYSVINIT, self.listener_id) self.assertEqual(ref_path, result) # Test bad init system self.assertRaises(util.UnknownInitError, util.keepalived_lvs_init_path, 'bogus_init', self.listener_id) def test_keepalived_lvs_pids_path(self): fake_path = '/fake/path' self.CONF.config(group="haproxy_amphora", base_path=fake_path) pid_path = (fake_path + '/' + 'lvs/octavia-keepalivedlvs-' + self.listener_id + '.' + 'pid') vrrp_pid_path = (fake_path + '/' + 'lvs/octavia-keepalivedlvs-' + self.listener_id + '.' + 'vrrp.pid') check_pid_path = (fake_path + '/' + 'lvs/octavia-keepalivedlvs-' + self.listener_id + '.' + 'check.pid') result1, result2, result3 = util.keepalived_lvs_pids_path( self.listener_id) self.assertEqual(pid_path, result1) self.assertEqual(vrrp_pid_path, result2) self.assertEqual(check_pid_path, result3) def test_keepalived_lvs_cfg_path(self): fake_path = '/fake/path' self.CONF.config(group="haproxy_amphora", base_path=fake_path) ref_path = (fake_path + '/lvs/octavia-keepalivedlvs-' + self.listener_id + '.conf') result = util.keepalived_lvs_cfg_path(self.listener_id) self.assertEqual(ref_path, result) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'keepalived_lvs_pids_path') def test_get_keepalivedlvs_pid(self, mock_path): fake_path = '/fake/path' mock_path.return_value = [fake_path] self.useFixture(test_utils.OpenFixture( fake_path, ' space data ')).mock_open result = util.get_keepalivedlvs_pid(self.listener_id) self.assertEqual(' space data', result) @mock.patch('jinja2.FileSystemLoader') @mock.patch('jinja2.Environment') @mock.patch('os.path') @mock.patch('octavia.amphorae.backends.agent.api_server.osutils.' 
'BaseOS.get_os_util') def test_install_netns_systemd_service(self, mock_get_os_util, mock_os_path, mock_jinja2_env, mock_fsloader): mock_os_util = mock.MagicMock() mock_os_util.has_ifup_all.return_value = True mock_get_os_util.return_value = mock_os_util mock_os_path.realpath.return_value = '/dir/file' mock_os_path.dirname.return_value = '/dir/' mock_os_path.exists.return_value = False mock_fsloader.return_value = 'fake_loader' mock_jinja_env = mock.MagicMock() mock_jinja2_env.return_value = mock_jinja_env mock_template = mock.MagicMock() mock_template.render.return_value = 'script' mock_jinja_env.get_template.return_value = mock_template m = mock.mock_open() with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): util.install_netns_systemd_service() mock_jinja2_env.assert_called_with(autoescape=True, loader='fake_loader') mock_jinja_env.get_template.assert_called_once_with( consts.AMP_NETNS_SVC_PREFIX + '.systemd.j2') mock_template.render.assert_called_once_with( amphora_nsname=consts.AMPHORA_NAMESPACE, HasIFUPAll=True) handle = m() handle.write.assert_called_with('script') # Test file exists path we don't over write mock_jinja_env.get_template.reset_mock() mock_os_path.exists.return_value = True util.install_netns_systemd_service() self.assertFalse(mock_jinja_env.get_template.called) @mock.patch('subprocess.check_output') def test_run_systemctl_command(self, mock_check_output): util.run_systemctl_command('test', 'world') mock_check_output.assert_called_once_with( ['systemctl', 'test', 'world'], stderr=subprocess.STDOUT) mock_check_output.side_effect = subprocess.CalledProcessError(1, 'boom') util.run_systemctl_command('test', 'world') @mock.patch('octavia.amphorae.backends.agent.api_server.util.config_path') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
'keepalived_lvs_cfg_path') @mock.patch('os.path.exists') def test_get_listener_protocol(self, mock_path_exists, mock_lvs_path, mock_cfg_path): mock_lvs_path.return_value = '/here' mock_cfg_path.return_value = '/there' mock_path_exists.side_effect = [True, False, True, False, False] result = util.get_protocol_for_lb_object('1') mock_cfg_path.assert_called_once_with('1') mock_path_exists.assert_called_once_with('/there') self.assertFalse(mock_lvs_path.called) self.assertEqual(consts.PROTOCOL_TCP, result) mock_cfg_path.reset_mock() result = util.get_protocol_for_lb_object('2') mock_cfg_path.assert_called_once_with('2') mock_lvs_path.assert_called_once_with('2') self.assertEqual(consts.PROTOCOL_UDP, result) mock_cfg_path.reset_mock() mock_lvs_path.reset_mock() result = util.get_protocol_for_lb_object('3') mock_cfg_path.assert_called_once_with('3') mock_lvs_path.assert_called_once_with('3') self.assertIsNone(result) def test_parse_haproxy_config(self): self.CONF.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') FAKE_CRT_LIST_FILENAME = os.path.join( CONF.haproxy_amphora.base_cert_dir, 'sample_loadbalancer_id_1/sample_listener_id_1.pem') rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='TERMINATED_HTTPS', tls=True, sni=True)]) path = util.config_path(LISTENER_ID1) self.useFixture(test_utils.OpenFixture(path, rendered_obj)) res = util.parse_haproxy_file(LISTENER_ID1) listener_dict = res[1]['sample_listener_id_1'] # NOTE: parse_haproxy_file makes mode TERMINATED_HTTPS even though # the haproxy.cfg needs mode HTTP self.assertEqual('TERMINATED_HTTPS', listener_dict['mode']) self.assertEqual('/var/lib/octavia/sample_loadbalancer_id_1.sock', res[0]) self.assertEqual(FAKE_CRT_LIST_FILENAME, listener_dict['ssl_crt']) # render_template_tls_no_sni rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='TERMINATED_HTTPS', tls=True)]) self.useFixture(test_utils.OpenFixture(path, rendered_obj)) res = util.parse_haproxy_file(LISTENER_ID1) listener_dict = res[1]['sample_listener_id_1'] self.assertEqual('TERMINATED_HTTPS', listener_dict['mode']) self.assertEqual(BASE_AMP_PATH + '/sample_loadbalancer_id_1.sock', res[0]) self.assertEqual(FAKE_CRT_LIST_FILENAME, listener_dict['ssl_crt']) # render_template_http rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple()]) self.useFixture(test_utils.OpenFixture(path, rendered_obj)) res = util.parse_haproxy_file(LISTENER_ID1) listener_dict = res[1]['sample_listener_id_1'] self.assertEqual('HTTP', listener_dict['mode']) self.assertEqual(BASE_AMP_PATH + '/sample_loadbalancer_id_1.sock', res[0]) self.assertIsNone(listener_dict.get('ssl_crt', None)) # template_https rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple(proto='HTTPS')]) self.useFixture(test_utils.OpenFixture(path, rendered_obj)) res = util.parse_haproxy_file(LISTENER_ID1) listener_dict = res[1]['sample_listener_id_1'] self.assertEqual('TCP', listener_dict['mode']) self.assertEqual(BASE_AMP_PATH + '/sample_loadbalancer_id_1.sock', res[0]) self.assertIsNone(listener_dict.get('ssl_crt', None)) # Bogus format self.useFixture(test_utils.OpenFixture(path, 'Bogus')) try: res = 
util.parse_haproxy_file(LISTENER_ID1) self.fail("No Exception?") except util.ParsingError: pass # Bad listener mode fake_cfg = 'stats socket foo\nfrontend {}\nmode\n'.format(LISTENER_ID1) self.useFixture(test_utils.OpenFixture(path, fake_cfg)) self.assertRaises(util.ParsingError, util.parse_haproxy_file, LISTENER_ID1) @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_udp_listeners') @mock.patch('os.makedirs') @mock.patch('os.path.exists') @mock.patch('os.listdir') @mock.patch('os.path.join') @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 'get_loadbalancers') @mock.patch('octavia.amphorae.backends.agent.api_server.util' '.haproxy_sock_path') def test_vrrp_check_script_update(self, mock_sock_path, mock_get_lbs, mock_join, mock_listdir, mock_exists, mock_makedirs, mock_get_listeners): mock_get_lbs.return_value = ['abc', LB_ID1] mock_sock_path.return_value = 'listener.sock' mock_exists.side_effect = [False, False, True] mock_get_lbs.side_effect = [['abc', LB_ID1], ['abc', LB_ID1], []] mock_get_listeners.return_value = [] # Test the stop action path cmd = 'haproxy-vrrp-check ' + ' '.join(['listener.sock']) + '; exit $?' path = util.keepalived_dir() m = self.useFixture(test_utils.OpenFixture(path)).mock_open util.vrrp_check_script_update(LB_ID1, 'stop') handle = m() handle.write.assert_called_once_with(cmd) # Test the start action path cmd = ('haproxy-vrrp-check ' + ' '.join(['listener.sock', 'listener.sock']) + '; exit ' '$?') m = self.useFixture(test_utils.OpenFixture(path)).mock_open util.vrrp_check_script_update(LB_ID1, 'start') handle = m() handle.write.assert_called_once_with(cmd) # Test the path with existing keepalived directory and no LBs mock_makedirs.reset_mock() cmd = 'exit 1' m = self.useFixture(test_utils.OpenFixture(path)).mock_open util.vrrp_check_script_update(LB_ID1, 'start') handle = m() handle.write.assert_called_once_with(cmd) mock_makedirs.assert_has_calls( [mock.call(util.keepalived_dir(), exist_ok=True), mock.call(util.keepalived_check_scripts_dir(), exist_ok=True)]) @mock.patch('octavia.amphorae.backends.agent.api_server.util.config_path') def test_get_haproxy_vip_addresses(self, mock_cfg_path): FAKE_PATH = 'fake_path' mock_cfg_path.return_value = FAKE_PATH self.useFixture( test_utils.OpenFixture(FAKE_PATH, 'no match')).mock_open() # Test with no matching lines in the config file self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1)) mock_cfg_path.assert_called_once_with(LB_ID1) # Test with a matching bind line mock_cfg_path.reset_mock() test_data = 'no match\nbind 203.0.113.43:1\nbogus line' self.useFixture( test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() expected_result = ['203.0.113.43'] self.assertEqual(expected_result, util.get_haproxy_vip_addresses(LB_ID1)) mock_cfg_path.assert_called_once_with(LB_ID1) # Test with a matching bind line multiple binds mock_cfg_path.reset_mock() test_data = 'no match\nbind 203.0.113.44:1234, 203.0.113.45:4321' self.useFixture( test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() expected_result = ['203.0.113.44', '203.0.113.45'] self.assertEqual(expected_result, util.get_haproxy_vip_addresses(LB_ID1)) mock_cfg_path.assert_called_once_with(LB_ID1) # Test with a bogus bind line mock_cfg_path.reset_mock() test_data = 'no match\nbind\nbogus line' self.useFixture( test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1)) mock_cfg_path.assert_called_once_with(LB_ID1) 
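    # For context on the cases above: get_haproxy_vip_addresses() works by
    # scanning the rendered haproxy configuration for "bind" lines. A minimal
    # sketch of that parsing idea, consistent with the expectations in this
    # test (the helper name and implementation are illustrative assumptions,
    # not Octavia's actual code):
    #
    #     def bind_ips(cfg_text):
    #         ips = []
    #         for line in cfg_text.splitlines():
    #             parts = line.strip().split(' ', 1)
    #             if parts[0] == 'bind' and len(parts) == 2:
    #                 for sock in parts[1].split(','):
    #                     ips.append(sock.strip().rsplit(':', 1)[0])
    #         return ips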
    @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.'
                'send_ip_advertisement')
    @mock.patch('octavia.amphorae.backends.utils.network_utils.'
                'get_interface_name')
    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'get_haproxy_vip_addresses')
    def test_send_vip_advertisements(self, mock_get_vip_addrs,
                                     mock_get_int_name, mock_send_advert):
        mock_get_vip_addrs.side_effect = [[], ['203.0.113.46'],
                                          Exception('boom')]
        mock_get_int_name.return_value = 'fake0'

        # Test no VIPs
        util.send_vip_advertisements(LB_ID1)
        mock_get_vip_addrs.assert_called_once_with(LB_ID1)
        mock_get_int_name.assert_not_called()
        mock_send_advert.assert_not_called()

        # Test with a VIP
        mock_get_vip_addrs.reset_mock()
        mock_get_int_name.reset_mock()
        mock_send_advert.reset_mock()
        util.send_vip_advertisements(LB_ID1)
        mock_get_vip_addrs.assert_called_once_with(LB_ID1)
        mock_get_int_name.assert_called_once_with(
            '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE)
        mock_send_advert.assert_called_once_with(
            'fake0', '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE)

        # Test with an exception (should not raise)
        mock_get_vip_addrs.reset_mock()
        mock_get_int_name.reset_mock()
        mock_send_advert.reset_mock()
        util.send_vip_advertisements(LB_ID1)
        mock_get_int_name.assert_not_called()
        mock_send_advert.assert_not_called()


# File: octavia-6.2.2/octavia/tests/unit/amphorae/backends/agent/test_agent_jinja_cfg.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
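# Note: the agent jinja tests below render the complete amphora-agent.conf
# text and compare it byte-for-byte, so option names, values and ordering in
# the template all matter to these assertions.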
from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.amphorae.backends.agent import agent_jinja_cfg from octavia.common import constants import octavia.tests.unit.base as base AMP_ID = uuidutils.generate_uuid() class AgentJinjaTestCase(base.TestCase): def setUp(self): super(AgentJinjaTestCase, self).setUp() self.conf = oslo_fixture.Config(cfg.CONF) self.conf.config(debug=False) self.conf.config(group="amphora_agent", agent_server_ca='/etc/octavia/certs/client_ca.pem') self.conf.config(group="amphora_agent", agent_server_cert='/etc/octavia/certs/server.pem') self.conf.config(group="amphora_agent", agent_server_network_dir='/etc/network/interfaces.d/') self.conf.config(group='amphora_agent', amphora_udp_driver='keepalived_lvs'), self.conf.config(group="haproxy_amphora", base_cert_dir='/var/lib/octavia/certs') self.conf.config(group="haproxy_amphora", use_upstart='True') self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia') self.conf.config(group="haproxy_amphora", bind_host='0.0.0.0') self.conf.config(group="haproxy_amphora", bind_port=9443) self.conf.config(group="haproxy_amphora", haproxy_cmd='/usr/sbin/haproxy') self.conf.config(group="haproxy_amphora", respawn_count=2) self.conf.config(group="haproxy_amphora", respawn_interval=2) self.conf.config(group="health_manager", controller_ip_port_list=['192.0.2.10:5555']) self.conf.config(group="health_manager", heartbeat_interval=10) self.conf.config(group="health_manager", heartbeat_key='TEST') def test_build_agent_config(self): ajc = agent_jinja_cfg.AgentJinjaTemplater() # Test execution order could influence this with the test below self.conf.config(group='amphora_agent', agent_server_network_file=None) self.conf.config(group="amphora_agent", administrative_log_facility=1) self.conf.config(group="amphora_agent", user_log_facility=0) expected_config = ('\n[DEFAULT]\n' 'debug = False\n' 'use_syslog = True\n' 'syslog_log_facility = LOG_LOCAL1\n\n' '[haproxy_amphora]\n' 'base_cert_dir = /var/lib/octavia/certs\n' 'base_path = /var/lib/octavia\n' 'bind_host = 0.0.0.0\n' 'bind_port = 9443\n' 'haproxy_cmd = /usr/sbin/haproxy\n' 'respawn_count = 2\n' 'respawn_interval = 2\n' 'use_upstart = True\n' 'user_log_facility = 0\n' 'administrative_log_facility = 1\n\n' '[health_manager]\n' 'controller_ip_port_list = 192.0.2.10:5555\n' 'heartbeat_interval = 10\n' 'heartbeat_key = TEST\n\n' '[amphora_agent]\n' 'agent_server_ca = ' '/etc/octavia/certs/client_ca.pem\n' 'agent_server_cert = ' '/etc/octavia/certs/server.pem\n' 'agent_server_network_dir = ' '/etc/network/interfaces.d/\n' 'agent_request_read_timeout = 180\n' 'amphora_id = ' + AMP_ID + '\n' 'amphora_udp_driver = keepalived_lvs\n' 'agent_tls_protocol = TLSv1.2\n\n' '[controller_worker]\n' 'loadbalancer_topology = ' + constants.TOPOLOGY_SINGLE) agent_cfg = ajc.build_agent_config(AMP_ID, constants.TOPOLOGY_SINGLE) self.assertEqual(expected_config, agent_cfg) def test_build_agent_config_with_interfaces_file(self): ajc = agent_jinja_cfg.AgentJinjaTemplater() self.conf.config(group="amphora_agent", agent_server_network_file='/etc/network/interfaces') self.conf.config(group="haproxy_amphora", use_upstart='False') self.conf.config(group="amphora_agent", administrative_log_facility=1) self.conf.config(group="amphora_agent", user_log_facility=0) expected_config = ('\n[DEFAULT]\n' 'debug = False\n' 'use_syslog = True\n' 'syslog_log_facility = LOG_LOCAL1\n\n' '[haproxy_amphora]\n' 'base_cert_dir = /var/lib/octavia/certs\n' 
                           'base_path = /var/lib/octavia\n'
                           'bind_host = 0.0.0.0\n'
                           'bind_port = 9443\n'
                           'haproxy_cmd = /usr/sbin/haproxy\n'
                           'respawn_count = 2\n'
                           'respawn_interval = 2\n'
                           'use_upstart = False\n'
                           'user_log_facility = 0\n'
                           'administrative_log_facility = 1\n\n'
                           '[health_manager]\n'
                           'controller_ip_port_list = 192.0.2.10:5555\n'
                           'heartbeat_interval = 10\n'
                           'heartbeat_key = TEST\n\n'
                           '[amphora_agent]\n'
                           'agent_server_ca = '
                           '/etc/octavia/certs/client_ca.pem\n'
                           'agent_server_cert = '
                           '/etc/octavia/certs/server.pem\n'
                           'agent_server_network_dir = '
                           '/etc/network/interfaces.d/\n'
                           'agent_server_network_file = '
                           '/etc/network/interfaces\n'
                           'agent_request_read_timeout = 180\n'
                           'amphora_id = ' + AMP_ID + '\n'
                           'amphora_udp_driver = keepalived_lvs\n'
                           'agent_tls_protocol = TLSv1.2\n\n'
                           '[controller_worker]\n'
                           'loadbalancer_topology = ' +
                           constants.TOPOLOGY_ACTIVE_STANDBY)
        agent_cfg = ajc.build_agent_config(AMP_ID,
                                           constants.TOPOLOGY_ACTIVE_STANDBY)
        self.assertEqual(expected_config, agent_cfg)

    def test_build_agent_config_with_new_udp_driver(self):
        ajc = agent_jinja_cfg.AgentJinjaTemplater()
        self.conf.config(group='amphora_agent',
                         agent_server_network_file=None)
        self.conf.config(group="amphora_agent",
                         amphora_udp_driver='new_udp_driver')
        self.conf.config(group="amphora_agent",
                         administrative_log_facility=1)
        self.conf.config(group="amphora_agent", user_log_facility=0)
        expected_config = ('\n[DEFAULT]\n'
                           'debug = False\n'
                           'use_syslog = True\n'
                           'syslog_log_facility = LOG_LOCAL1\n\n'
                           '[haproxy_amphora]\n'
                           'base_cert_dir = /var/lib/octavia/certs\n'
                           'base_path = /var/lib/octavia\n'
                           'bind_host = 0.0.0.0\n'
                           'bind_port = 9443\n'
                           'haproxy_cmd = /usr/sbin/haproxy\n'
                           'respawn_count = 2\n'
                           'respawn_interval = 2\n'
                           'use_upstart = True\n'
                           'user_log_facility = 0\n'
                           'administrative_log_facility = 1\n\n'
                           '[health_manager]\n'
                           'controller_ip_port_list = 192.0.2.10:5555\n'
                           'heartbeat_interval = 10\n'
                           'heartbeat_key = TEST\n\n'
                           '[amphora_agent]\n'
                           'agent_server_ca = '
                           '/etc/octavia/certs/client_ca.pem\n'
                           'agent_server_cert = '
                           '/etc/octavia/certs/server.pem\n'
                           'agent_server_network_dir = '
                           '/etc/network/interfaces.d/\n'
                           'agent_request_read_timeout = 180\n'
                           'amphora_id = ' + AMP_ID + '\n'
                           'amphora_udp_driver = new_udp_driver\n'
                           'agent_tls_protocol = TLSv1.2\n\n'
                           '[controller_worker]\n'
                           'loadbalancer_topology = ' +
                           constants.TOPOLOGY_SINGLE)
        agent_cfg = ajc.build_agent_config(AMP_ID, constants.TOPOLOGY_SINGLE)
        self.assertEqual(expected_config, agent_cfg)


# File: octavia-6.2.2/octavia/tests/unit/amphorae/backends/health_daemon/__init__.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
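# The test_envelope cases that follow round-trip status messages through
# status_message.wrap_envelope()/unwrap_envelope() and expect a wrong key to
# raise InvalidHMACException. As background, a minimal sketch of the
# HMAC-envelope idea (an illustration under assumed formats, not Octavia's
# actual wire format):
#
#     import hashlib
#     import hmac
#     import json
#
#     def wrap(msg, key):
#         payload = json.dumps(msg).encode('utf-8')
#         digest = hmac.new(key.encode('utf-8'), payload,
#                           hashlib.sha256).digest()
#         return payload + digest
#
#     def unwrap(envelope, key):
#         payload, digest = envelope[:-32], envelope[-32:]
#         expected = hmac.new(key.encode('utf-8'), payload,
#                             hashlib.sha256).digest()
#         if not hmac.compare_digest(digest, expected):
#             raise ValueError('HMAC mismatch')
#         return json.loads(payload.decode('utf-8'))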
# File: octavia-6.2.2/octavia/tests/unit/amphorae/backends/health_daemon/test_envelope.py

# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from octavia.amphorae.backends.health_daemon import status_message
from octavia.common import exceptions
from octavia.tests.unit import base


class TestEnvelope(base.TestCase):
    def setUp(self):
        super(TestEnvelope, self).setUp()

    def test_message_hmac(self):
        seq = 42
        for i in range(0, 16):
            statusMsg = {'seq': seq,
                         'status': 'OK',
                         'id': str(uuid.uuid4())}
            envelope = status_message.wrap_envelope(statusMsg, 'samplekey1')
            obj = status_message.unwrap_envelope(envelope, 'samplekey1')
            self.assertEqual('OK', obj['status'])
            self.assertEqual(seq, obj['seq'])
            seq += 1
            args = (envelope, 'samplekey?')
            self.assertRaises(exceptions.InvalidHMACException,
                              status_message.unwrap_envelope, *args)

    def test_message_hmac_compatibility(self):
        seq = 42
        statusMsg = {'seq': seq,
                     'status': 'OK',
                     'id': str(uuid.uuid4())}
        envelope = status_message.wrap_envelope(statusMsg, 'samplekey1',
                                                hex=False)
        obj = status_message.unwrap_envelope(envelope, 'samplekey1')
        self.assertEqual('OK', obj['status'])
        self.assertEqual(seq, obj['seq'])
        args = (envelope, 'samplekey?')
        self.assertRaises(exceptions.InvalidHMACException,
                          status_message.unwrap_envelope, *args)


# File: octavia-6.2.2/octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
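# The health daemon tests below feed canned haproxy stats (SAMPLE_STATS) and
# pool status through build_stats_message() and drive run_sender()'s
# queue-based control loop (reload/shutdown commands, keepalived PID checks)
# without touching a real socket.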
# import queue from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.amphorae.backends.health_daemon import health_daemon from octavia.common import constants import octavia.tests.unit.base as base LISTENER_ID1 = uuidutils.generate_uuid() LISTENER_ID2 = uuidutils.generate_uuid() LISTENER_IDS = [LISTENER_ID1, LISTENER_ID2] BASE_PATH = '/tmp/test' SAMPLE_POOL_STATUS = { '432fc8b3-d446-48d4-bb64-13beb90e22bc': { 'status': 'UP', 'uuid': '432fc8b3-d446-48d4-bb64-13beb90e22bc', 'members': {'302e33d9-dee1-4de9-98d5-36329a06fb58': 'DOWN'}}, '3661ed10-99db-4d2c-bffb-99b60eb876ff': { 'status': 'UP', 'uuid': '3661ed10-99db-4d2c-bffb-99b60eb876ff', 'members': {'e657f950-a6a2-4d28-bffa-0c8a8c05f815': 'DOWN'}}} SAMPLE_BOGUS_POOL_STATUS = {LISTENER_ID1: { 'status': 'UP', 'uuid': LISTENER_ID1, 'members': { '302e33d9-dee1-4de9-98d5-36329a06fb58': 'DOWN'}}} SAMPLE_STATS = ({'': '', 'status': 'OPEN', 'lastchg': '', 'weight': '', 'slim': '2000', 'pid': '1', 'comp_byp': '0', 'lastsess': '', 'rate_lim': '0', 'check_duration': '', 'rate': '0', 'req_rate': '0', 'check_status': '', 'econ': '', 'comp_out': '0', 'wredis': '', 'dresp': '0', 'ereq': '0', 'tracked': '', 'comp_in': '0', 'pxname': LISTENER_ID1, 'dreq': '0', 'hrsp_5xx': '0', 'last_chk': '', 'check_code': '', 'sid': '0', 'bout': '0', 'hrsp_1xx': '0', 'qlimit': '', 'hrsp_other': '0', 'bin': '0', 'rtime': '', 'smax': '0', 'req_tot': '0', 'lbtot': '', 'stot': '0', 'wretr': '', 'req_rate_max': '0', 'ttime': '', 'iid': '2', 'hrsp_4xx': '0', 'chkfail': '', 'hanafail': '', 'downtime': '', 'qcur': '', 'eresp': '', 'comp_rsp': '0', 'cli_abrt': '', 'ctime': '', 'qtime': '', 'srv_abrt': '', 'throttle': '', 'last_agt': '', 'scur': '0', 'type': '0', 'bck': '', 'qmax': '', 'rate_max': '0', 'hrsp_2xx': '0', 'act': '', 'chkdown': '', 'svname': 'FRONTEND', 'hrsp_3xx': '0'}, {'': '', 'status': 'no check', 'lastchg': '', 'weight': '1', 'slim': '', 'pid': '1', 'comp_byp': '', 'lastsess': '-1', 'rate_lim': '', 'check_duration': '', 'rate': '0', 'req_rate': '', 'check_status': '', 'econ': '0', 'comp_out': '', 'wredis': '0', 'dresp': '0', 'ereq': '', 'tracked': '', 'comp_in': '', 'pxname': '432fc8b3-d446-48d4-bb64-13beb90e22bc', 'dreq': '', 'hrsp_5xx': '0', 'last_chk': '', 'check_code': '', 'sid': '1', 'bout': '0', 'hrsp_1xx': '0', 'qlimit': '', 'hrsp_other': '0', 'bin': '0', 'rtime': '0', 'smax': '0', 'req_tot': '', 'lbtot': '0', 'stot': '0', 'wretr': '0', 'req_rate_max': '', 'ttime': '0', 'iid': '3', 'hrsp_4xx': '0', 'chkfail': '', 'hanafail': '0', 'downtime': '', 'qcur': '0', 'eresp': '0', 'comp_rsp': '', 'cli_abrt': '0', 'ctime': '0', 'qtime': '0', 'srv_abrt': '0', 'throttle': '', 'last_agt': '', 'scur': '0', 'type': '2', 'bck': '0', 'qmax': '0', 'rate_max': '0', 'hrsp_2xx': '0', 'act': '1', 'chkdown': '', 'svname': '302e33d9-dee1-4de9-98d5-36329a06fb58', 'hrsp_3xx': '0'}, {'': '', 'status': 'UP', 'lastchg': '122', 'weight': '1', 'slim': '200', 'pid': '1', 'comp_byp': '0', 'lastsess': '-1', 'rate_lim': '', 'check_duration': '', 'rate': '0', 'req_rate': '', 'check_status': '', 'econ': '0', 'comp_out': '0', 'wredis': '0', 'dresp': '0', 'ereq': '', 'tracked': '', 'comp_in': '0', 'pxname': '432fc8b3-d446-48d4-bb64-13beb90e22bc', 'dreq': '0', 'hrsp_5xx': '0', 'last_chk': '', 'check_code': '', 'sid': '0', 'bout': '0', 'hrsp_1xx': '0', 'qlimit': '', 'hrsp_other': '0', 'bin': '0', 'rtime': '0', 'smax': '0', 'req_tot': '', 'lbtot': '0', 'stot': '0', 'wretr': '0', 'req_rate_max': '', 'ttime': 
'0', 'iid': '3', 'hrsp_4xx': '0', 'chkfail': '', 'hanafail': '', 'downtime': '0', 'qcur': '0', 'eresp': '0', 'comp_rsp': '0', 'cli_abrt': '0', 'ctime': '0', 'qtime': '0', 'srv_abrt': '0', 'throttle': '', 'last_agt': '', 'scur': '0', 'type': '1', 'bck': '0', 'qmax': '0', 'rate_max': '0', 'hrsp_2xx': '0', 'act': '1', 'chkdown': '0', 'svname': 'BACKEND', 'hrsp_3xx': '0'}) SAMPLE_STATS_MSG = { 'listeners': { LISTENER_ID1: { 'stats': { 'totconns': 0, 'conns': 0, 'tx': 0, 'rx': 0, 'ereq': 0}, 'status': 'OPEN'}, }, 'pools': { '432fc8b3-d446-48d4-bb64-13beb90e22bc': { 'members': {'302e33d9-dee1-4de9-98d5-36329a06fb58': 'DOWN'}, 'status': 'UP'}, '3661ed10-99db-4d2c-bffb-99b60eb876ff': { 'members': {'e657f950-a6a2-4d28-bffa-0c8a8c05f815': 'DOWN'}, 'status': 'UP'}, }, 'id': None, 'seq': 0, 'ver': health_daemon.MSG_VER } class TestHealthDaemon(base.TestCase): def setUp(self): super(TestHealthDaemon, self).setUp() conf = oslo_fixture.Config(cfg.CONF) conf.config(group="haproxy_amphora", base_path=BASE_PATH) @mock.patch('octavia.amphorae.backends.agent.' 'api_server.util.get_loadbalancers') def test_list_sock_stat_files(self, mock_get_listener): mock_get_listener.return_value = LISTENER_IDS health_daemon.list_sock_stat_files() files = health_daemon.list_sock_stat_files(BASE_PATH) expected_files = {LISTENER_ID1: BASE_PATH + '/' + LISTENER_ID1 + '.sock', LISTENER_ID2: BASE_PATH + '/' + LISTENER_ID2 + '.sock'} self.assertEqual(expected_files, files) @mock.patch('os.kill') @mock.patch('os.path.isfile') @mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.time.sleep') @mock.patch('oslo_config.cfg.CONF.reload_config_files') @mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.build_stats_message') @mock.patch('octavia.amphorae.backends.health_daemon.' 'health_sender.UDPStatusSender') def test_run_sender(self, mock_UDPStatusSender, mock_build_msg, mock_reload_cfg, mock_sleep, mock_isfile, mock_kill): sender_mock = mock.MagicMock() dosend_mock = mock.MagicMock() sender_mock.dosend = dosend_mock mock_UDPStatusSender.return_value = sender_mock mock_build_msg.side_effect = ['TEST'] mock_isfile.return_value = False test_queue = queue.Queue() with mock.patch('time.sleep') as mock_sleep: mock_sleep.side_effect = Exception('break') self.assertRaisesRegex(Exception, 'break', health_daemon.run_sender, test_queue) sender_mock.dosend.assert_called_once_with('TEST') # Test a reload event mock_build_msg.reset_mock() mock_build_msg.side_effect = ['TEST'] test_queue.put('reload') with mock.patch('time.sleep') as mock_sleep: mock_sleep.side_effect = Exception('break') self.assertRaisesRegex(Exception, 'break', health_daemon.run_sender, test_queue) mock_reload_cfg.assert_called_once_with() # Test the shutdown path sender_mock.reset_mock() dosend_mock.reset_mock() mock_build_msg.reset_mock() mock_build_msg.side_effect = ['TEST', 'TEST'] test_queue.put('shutdown') health_daemon.run_sender(test_queue) sender_mock.dosend.assert_called_once_with('TEST') # Test an unknown command mock_build_msg.reset_mock() mock_build_msg.side_effect = ['TEST'] test_queue.put('bogus') with mock.patch('time.sleep') as mock_sleep: mock_sleep.side_effect = Exception('break') self.assertRaisesRegex(Exception, 'break', health_daemon.run_sender, test_queue) # Test keepalived config, but no PID mock_build_msg.reset_mock() dosend_mock.reset_mock() mock_isfile.return_value = True with mock.patch('octavia.amphorae.backends.health_daemon.' 
'health_daemon.open', mock.mock_open()) as mock_open: mock_open.side_effect = FileNotFoundError test_queue.put('shutdown') health_daemon.run_sender(test_queue) mock_build_msg.assert_not_called() dosend_mock.assert_not_called() # Test keepalived config, but PID file error mock_build_msg.reset_mock() dosend_mock.reset_mock() mock_isfile.return_value = True with mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.open', mock.mock_open()) as mock_open: mock_open.side_effect = IOError test_queue.put('shutdown') health_daemon.run_sender(test_queue) mock_build_msg.assert_not_called() dosend_mock.assert_not_called() # Test keepalived config, but bogus PID mock_build_msg.reset_mock() dosend_mock.reset_mock() mock_isfile.return_value = True with mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.open', mock.mock_open(read_data='foo')) as mock_open: test_queue.put('shutdown') health_daemon.run_sender(test_queue) mock_build_msg.assert_not_called() dosend_mock.assert_not_called() # Test keepalived config, but not running mock_build_msg.reset_mock() dosend_mock.reset_mock() mock_isfile.return_value = True with mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.open', mock.mock_open(read_data='999999')) as mock_open: mock_kill.side_effect = ProccessNotFoundError test_queue.put('shutdown') health_daemon.run_sender(test_queue) mock_build_msg.assert_not_called() dosend_mock.assert_not_called() # Test keepalived config, but process error mock_build_msg.reset_mock() dosend_mock.reset_mock() mock_isfile.return_value = True with mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.open', mock.mock_open(read_data='999999')) as mock_open: mock_kill.side_effect = OSError test_queue.put('shutdown') health_daemon.run_sender(test_queue) mock_build_msg.assert_not_called() dosend_mock.assert_not_called() # Test with happy keepalive sender_mock.reset_mock() dosend_mock.reset_mock() mock_kill.side_effect = [True] mock_build_msg.reset_mock() mock_build_msg.side_effect = ['TEST', 'TEST'] mock_isfile.return_value = True test_queue.put('shutdown') with mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.open', mock.mock_open(read_data='999999')) as mock_open: health_daemon.run_sender(test_queue) sender_mock.dosend.assert_called_once_with('TEST') @mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery') def test_get_stats(self, mock_query): stats_query_mock = mock.MagicMock() mock_query.return_value = stats_query_mock health_daemon.get_stats('TEST') stats_query_mock.show_stat.assert_called_once_with() stats_query_mock.get_pool_status.assert_called_once_with() @mock.patch('octavia.amphorae.backends.agent.api_server.' 'util.is_lb_running') @mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.get_stats') @mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.list_sock_stat_files') def test_build_stats_message(self, mock_list_files, mock_get_stats, mock_is_running): mock_list_files.return_value = {LISTENER_ID1: 'TEST', LISTENER_ID2: 'TEST2'} mock_is_running.return_value = True mock_get_stats.return_value = SAMPLE_STATS, SAMPLE_POOL_STATUS msg = health_daemon.build_stats_message() self.assertEqual(SAMPLE_STATS_MSG, msg) mock_get_stats.assert_any_call('TEST') mock_get_stats.assert_any_call('TEST2') @mock.patch('octavia.amphorae.backends.agent.api_server.' 'util.is_lb_running') @mock.patch('octavia.amphorae.backends.health_daemon.' 
'health_daemon.get_stats') @mock.patch('octavia.amphorae.backends.health_daemon.' 'health_daemon.list_sock_stat_files') def test_build_stats_message_no_listener(self, mock_list_files, mock_get_stats, mock_is_running): mock_list_files.return_value = {LISTENER_ID1: 'TEST', LISTENER_ID2: 'TEST2'} mock_is_running.side_effect = [True, False] mock_get_stats.return_value = SAMPLE_STATS, SAMPLE_POOL_STATUS health_daemon.build_stats_message() self.assertEqual(1, mock_get_stats.call_count) @mock.patch("octavia.amphorae.backends.utils.keepalivedlvs_query." "get_udp_listener_pool_status") @mock.patch("octavia.amphorae.backends.utils.keepalivedlvs_query." "get_udp_listeners_stats") @mock.patch("octavia.amphorae.backends.agent.api_server.util." "get_udp_listeners") def test_build_stats_message_with_udp_listener( self, mock_get_udp_listeners, mock_get_listener_stats, mock_get_pool_status): udp_listener_id1 = uuidutils.generate_uuid() udp_listener_id2 = uuidutils.generate_uuid() udp_listener_id3 = uuidutils.generate_uuid() pool_id = uuidutils.generate_uuid() member_id1 = uuidutils.generate_uuid() member_id2 = uuidutils.generate_uuid() mock_get_udp_listeners.return_value = [udp_listener_id1, udp_listener_id2, udp_listener_id3] mock_get_listener_stats.return_value = { udp_listener_id1: { 'status': constants.OPEN, 'stats': {'bin': 6387472, 'stot': 5, 'bout': 7490, 'ereq': 0, 'scur': 0}}, udp_listener_id3: { 'status': constants.DOWN, 'stats': {'bin': 0, 'stot': 0, 'bout': 0, 'ereq': 0, 'scur': 0}} } udp_pool_status = { 'lvs': { 'uuid': pool_id, 'status': constants.UP, 'members': {member_id1: constants.UP, member_id2: constants.UP}}} mock_get_pool_status.side_effect = ( lambda x: udp_pool_status if x == udp_listener_id1 else {}) # the first listener can get all necessary info. # the second listener can not get listener stats, so we won't report it # the third listener can get listener stats, but can not get pool # status, so the result will just contain the listener status for it. expected = { 'listeners': { udp_listener_id1: { 'status': constants.OPEN, 'stats': {'conns': 0, 'totconns': 5, 'ereq': 0, 'rx': 6387472, 'tx': 7490}}, udp_listener_id3: { 'status': constants.DOWN, 'stats': {'conns': 0, 'totconns': 0, 'ereq': 0, 'rx': 0, 'tx': 0}}}, 'pools': { pool_id: { 'status': constants.UP, 'members': { member_id1: constants.UP, member_id2: constants.UP}}}, 'id': None, 'seq': mock.ANY, 'ver': health_daemon.MSG_VER} msg = health_daemon.build_stats_message() self.assertEqual(expected, msg) class FileNotFoundError(IOError): errno = 2 class ProccessNotFoundError(OSError): errno = 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py0000664000175000017500000001522300000000000031320 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
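# NOTE(editor): The tests in this file exercise health_sender.UDPStatusSender,
# which resolves each configured "ip:port" pair and fires a serialized status
# message at it over UDP. The helper below is a minimal, hedged sketch of that
# idea only -- it is NOT Octavia's real wire format (the production envelope
# is an HMAC-signed, zlib-compressed payload; SAMPLE_MSG_BIN further down is
# the real pre-computed binary for SAMPLE_MSG under the key 'TEST'). The name
# _send_status_sketch and its default address are illustrative assumptions,
# not part of the module under test.
import json as _json
import socket as _sketch_socket
import zlib as _zlib


def _send_status_sketch(msg, addr=('192.0.2.10', 5555)):
    """Illustrative only: compress a status dict and send it via UDP."""
    payload = _zlib.compress(_json.dumps(msg).encode('utf-8'))
    sock = _sketch_socket.socket(_sketch_socket.AF_INET,
                                 _sketch_socket.SOCK_DGRAM)
    try:
        sock.sendto(payload, addr)
    finally:
        sock.close()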
import binascii import random import socket from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from octavia.amphorae.backends.health_daemon import health_sender from octavia.tests.unit import base IP_PORT = ['192.0.2.10:5555', '192.0.2.10:5555'] KEY = 'TEST' PORT = random.randrange(1, 9000) SAMPLE_MSG = {'testkey': 'TEST'} SAMPLE_MSG_BIN = binascii.unhexlify('78daab562a492d2ec94ead54b252500a710d0e51a' 'a050041b506243538303665356331393731653739' '39353138313833393465613665373161643938396' '66639353039343566393537336634616236663833' '653235646238656437') class TestHealthSender(base.TestCase): def setUp(self): super(TestHealthSender, self).setUp() self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group="health_manager", controller_ip_port_list=IP_PORT) self.conf.config(group="health_manager", heartbeat_key=KEY) @mock.patch('socket.getaddrinfo') @mock.patch('socket.socket') def test_sender(self, mock_socket, mock_getaddrinfo): socket_mock = mock.MagicMock() mock_socket.return_value = socket_mock sendto_mock = mock.MagicMock() socket_mock.sendto = sendto_mock # Test when no addresses are returned self.conf.config(group="health_manager", controller_ip_port_list='') sender = health_sender.UDPStatusSender() sender.dosend(SAMPLE_MSG) sendto_mock.reset_mock() # Test IPv4 path self.conf.config(group="health_manager", controller_ip_port_list=['192.0.2.20:80']) mock_getaddrinfo.return_value = [(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('192.0.2.20', 80))] sender = health_sender.UDPStatusSender() sender.dosend(SAMPLE_MSG) sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN, ('192.0.2.20', 80)) sendto_mock.reset_mock() # Test IPv6 path self.conf.config(group="health_manager", controller_ip_port_list=['2001:0db8::f00d:80']) mock_getaddrinfo.return_value = [(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('2001:db8::f00d', 80, 0, 0))] sender = health_sender.UDPStatusSender() sender.dosend(SAMPLE_MSG) sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN, ('2001:db8::f00d', 80, 0, 0)) sendto_mock.reset_mock() # Test IPv6 link-local address path self.conf.config( group="health_manager", controller_ip_port_list=['fe80::00ff:fe00:cafe%eth0:80']) mock_getaddrinfo.return_value = [(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('fe80::ff:fe00:cafe', 80, 0, 2))] sender = health_sender.UDPStatusSender() sender.dosend(SAMPLE_MSG) sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN, ('fe80::ff:fe00:cafe', 80, 0, 2)) sendto_mock.reset_mock() # Test socket error self.conf.config(group="health_manager", controller_ip_port_list=['2001:0db8::f00d:80']) mock_getaddrinfo.return_value = [(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('2001:db8::f00d', 80, 0, 0))] socket_mock.sendto.side_effect = socket.error sender = health_sender.UDPStatusSender() # Should not raise an exception sender.dosend(SAMPLE_MSG) # Test a controller_ip_port_list update sendto_mock.reset_mock() mock_getaddrinfo.reset_mock() self.conf.config(group="health_manager", controller_ip_port_list=['192.0.2.20:80']) mock_getaddrinfo.return_value = [(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('192.0.2.20', 80))] sender = health_sender.UDPStatusSender() sender.dosend(SAMPLE_MSG) sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN, ('192.0.2.20', 80)) mock_getaddrinfo.assert_called_once_with('192.0.2.20', '80', 0, socket.SOCK_DGRAM) sendto_mock.reset_mock() mock_getaddrinfo.reset_mock()
self.conf.config(group="health_manager", controller_ip_port_list=['192.0.2.21:81']) mock_getaddrinfo.return_value = [(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('192.0.2.21', 81))] sender.dosend(SAMPLE_MSG) mock_getaddrinfo.assert_called_once_with('192.0.2.21', '81', 0, socket.SOCK_DGRAM) sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN, ('192.0.2.21', 81)) sendto_mock.reset_mock() mock_getaddrinfo.reset_mock() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4142168 octavia-6.2.2/octavia/tests/unit/amphorae/backends/utils/0000775000175000017500000000000000000000000023447 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/backends/utils/__init__.py0000664000175000017500000000107400000000000025562 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/backends/utils/test_haproxy_query.py0000664000175000017500000001536400000000000030010 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
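# NOTE(editor): HAProxyQuery, under test below, talks to HAProxy's local
# stats socket: connect, write a one-line command such as 'show stat' or
# 'show info', then read the CSV / key-value response until EOF. The helper
# below is a hedged, minimal sketch of that round trip, not the module's
# real implementation; the function name is an editor assumption (though the
# tests below show the real code also reads in 1024-byte chunks).
import socket as _sketch_socket


def _query_stats_socket_sketch(sock_path, command):
    """Illustrative only: one command round trip on a HAProxy stats socket."""
    sock = _sketch_socket.socket(_sketch_socket.AF_UNIX,
                                 _sketch_socket.SOCK_STREAM)
    sock.connect(sock_path)
    try:
        sock.send((command + '\n').encode('utf-8'))
        chunks = []
        while True:
            chunk = sock.recv(1024)
            if not chunk:
                break
            chunks.append(chunk)
        # For 'show stat' the first line of the response is the CSV header
        # (see STATS_SOCKET_SAMPLE below); data rows follow per pxname/svname.
        return b''.join(chunks).decode('utf-8')
    finally:
        sock.close()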
import socket from unittest import mock from octavia.amphorae.backends.utils import haproxy_query as query from octavia.common import constants from octavia.common import utils as octavia_utils import octavia.tests.unit.base as base STATS_SOCKET_SAMPLE = ( "# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq," "econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg," "downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim," "rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp" "_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot" ",cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk," "last_agt,qtime,ctime,rtime,ttime,\n" "http-servers:listener-id,id-34821,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0," "1,1,575,575,,1,3,1,,0,,2,0,,0,L4TOUT,,30001,0,0,0,0,0,0,0,,,,0,0,,,,,-1,," ",0,0,0,0,\n" "http-servers:listener-id,id-34824,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0," "1,1,567,567,,1,3,2,,0,,2,0,,0,L4TOUT,,30001,0,0,0,0,0,0,0,,,,0,0,,,,,-1,," ",0,0,0,0,\n" "http-servers:listener-id,BACKEND,0,0,0,0,200,0,0,0,0,0,,0,0,0,0,DOWN,0,0," "0,,1,567,567,,1,3,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,0,0,0,0,-1,,,0,0,0," "0,\n" "tcp-servers:listener-id,id-34833,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,1,1," "560,560,,1,5,1,,0,,2,0,,0,L4TOUT,,30000,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0," "\n" "tcp-servers:listener-id,id-34836,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,1,1," "552,552,,1,5,2,,0,,2,0,,0,L4TOUT,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0," "\n" "tcp-servers:listener-id,id-34839,0,0,0,0,,0,0,0,,0,,0,0,0,0,DRAIN,0,1,0," "0,0,552,0,,1,5,2,,0,,2,0,,0,L7OK,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0," "\n" "tcp-servers:listener-id,id-34842,0,0,0,0,,0,0,0,,0,,0,0,0,0,MAINT,0,1,0," "0,0,552,0,,1,5,2,,0,,2,0,,0,L7OK,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0," "\n" "tcp-servers:listener-id,BACKEND,0,0,0,0,200,0,0,0,0,0,,0,0,0,0,UP,0,0,0,," "1,552,552,,1,5,0,,0,,1,0,,0,,,,,,,,,,,,,,0,0,0,0,0,0,-1,,,0,0,0,0," ) INFO_SOCKET_SAMPLE = ( 'Name: HAProxy\nVersion: 1.5.3\nRelease_date: 2014/07/25\nNbproc: 1\n' 'Process_num: 1\nPid: 2238\nUptime: 0d 2h22m17s\nUptime_sec: 8537\n' 'Memmax_MB: 0\nUlimit-n: 4031\nMaxsock: 4031\nMaxconn: 2000\n' 'Hard_maxconn: 2000\nCurrConns: 0\nCumConns: 32\nCumReq: 32\n' 'MaxSslConns: 0\nCurrSslConns: 0\nCumSslConns: 0\nMaxpipes: 0\n' 'PipesUsed: 0\nPipesFree: 0\nConnRate: 0\nConnRateLimit: 0\n' 'MaxConnRate: 0\nSessRate: 0\nSessRateLimit: 0\nMaxSessRate: 0\n' 'SslRate:0\nSslRateLimit: 0\nMaxSslRate: 0\nSslFrontendKeyRate: 0\n' 'SslFrontendMaxKeyRate: 0\nSslFrontendSessionReuse_pct: 0\n' 'SslBackendKeyRate: 0\nSslBackendMaxKeyRate: 0\nSslCacheLookups: 0\n' 'SslCacheMisses: 0\nCompressBpsIn: 0\nCompressBpsOut: 0\n' 'CompressBpsRateLim: 0\nZlibMemUsage: 0\nMaxZlibMemUsage: 0\nTasks: 4\n' 'Run_queue: 1\nIdle_pct: 100\nnode: amphora-abd35de5-e377-49c5-be32\n' 'description:' ) class QueryTestCase(base.TestCase): def setUp(self): self.q = query.HAProxyQuery('') super(QueryTestCase, self).setUp() @mock.patch('socket.socket') def test_query(self, mock_socket): sock = mock.MagicMock() sock.connect.side_effect = [None, socket.error] sock.recv.side_effect = ['testdata', None] mock_socket.return_value = sock self.q._query('test') sock.connect.assert_called_once_with('') sock.send.assert_called_once_with(octavia_utils.b('test\n')) sock.recv.assert_called_with(1024) self.assertTrue(sock.close.called) self.assertRaisesRegex(Exception, 'HAProxy \'test\' query failed.', self.q._query, 'test') def test_get_pool_status(self): 
query_mock = mock.Mock() self.q._query = query_mock query_mock.return_value = STATS_SOCKET_SAMPLE self.assertEqual( {'tcp-servers:listener-id': { 'status': constants.UP, 'listener_uuid': 'listener-id', 'pool_uuid': 'tcp-servers', 'members': {'id-34833': constants.UP, 'id-34836': constants.UP, 'id-34839': constants.DRAIN, 'id-34842': constants.MAINT}}, 'http-servers:listener-id': { 'status': constants.DOWN, 'listener_uuid': 'listener-id', 'pool_uuid': 'http-servers', 'members': {'id-34821': constants.DOWN, 'id-34824': constants.DOWN}}}, self.q.get_pool_status() ) def test_show_info(self): query_mock = mock.Mock() self.q._query = query_mock query_mock.return_value = INFO_SOCKET_SAMPLE self.assertEqual( {'SslRateLimit': '0', 'SessRateLimit': '0', 'Version': '1.5.3', 'Hard_maxconn': '2000', 'Ulimit-n': '4031', 'PipesFree': '0', 'SslRate': '0', 'ZlibMemUsage': '0', 'CumConns': '32', 'ConnRate': '0', 'Memmax_MB': '0', 'CompressBpsOut': '0', 'MaxConnRate': '0', 'Uptime_sec': '8537', 'SslCacheMisses': '0', 'MaxZlibMemUsage': '0', 'SslCacheLookups': '0', 'CurrSslConns': '0', 'SslBackendKeyRate': '0', 'CompressBpsRateLim': '0', 'Run_queue': '1', 'CumReq': '32', 'SslBackendMaxKeyRate': '0', 'SslFrontendSessionReuse_pct': '0', 'Nbproc': '1', 'Tasks': '4', 'Maxpipes': '0', 'Maxconn': '2000', 'Pid': '2238', 'Maxsock': '4031', 'CurrConns': '0', 'Idle_pct': '100', 'CompressBpsIn': '0', 'SslFrontendKeyRate': '0', 'MaxSessRate': '0', 'Process_num': '1', 'Uptime': '0d 2h22m17s', 'PipesUsed': '0', 'SessRate': '0', 'MaxSslRate': '0', 'ConnRateLimit': '0', 'CumSslConns': '0', 'Name': 'HAProxy', 'SslFrontendMaxKeyRate': '0', 'MaxSslConns': '0', 'node': 'amphora-abd35de5-e377-49c5-be32', 'description': '', 'Release_date': '2014/07/25'}, self.q.show_info() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/backends/utils/test_ip_advertisement.py0000664000175000017500000002154400000000000030430 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from binascii import a2b_hex import socket from struct import pack from unittest import mock from octavia.amphorae.backends.utils import ip_advertisement from octavia.common import constants import octavia.tests.unit.base as base class TestIPAdvertisement(base.TestCase): def setUp(self): super(TestIPAdvertisement, self).setUp() @mock.patch('octavia.amphorae.backends.utils.network_namespace.' 
'NetworkNamespace') @mock.patch('socket.AF_PACKET', create=True) @mock.patch('socket.socket') def test_garp(self, mock_socket, mock_socket_packet, mock_netns): ARP_ETHERTYPE = 0x0806 EXPECTED_PACKET_DATA = (b'\xff\xff\xff\xff\xff\xff\x00\x00^\x00S3\x08' b'\x06\x00\x01\x08\x00\x06\x04\x00\x01\x00' b'\x00^\x00S3\xcb\x00q\x02\xff\xff\xff\xff' b'\xff\xff\xcb\x00q\x02') FAKE_INTERFACE = 'fake0' FAKE_MAC = '00005E005333' FAKE_NETNS = 'fake_netns' mock_garp_socket = mock.MagicMock() mock_garp_socket.getsockname.return_value = [None, None, None, None, a2b_hex(FAKE_MAC)] mock_socket.return_value = mock_garp_socket # Test with a network namespace ip_advertisement.garp(FAKE_INTERFACE, '203.0.113.2', net_ns=FAKE_NETNS) mock_netns.assert_called_once_with(FAKE_NETNS) mock_garp_socket.bind.assert_called_once_with((FAKE_INTERFACE, ARP_ETHERTYPE)) mock_garp_socket.getsockname.assert_called_once_with() mock_garp_socket.send.assert_called_once_with(EXPECTED_PACKET_DATA) mock_garp_socket.close.assert_called_once_with() # Test without a network namespace mock_netns.reset_mock() mock_garp_socket.reset_mock() ip_advertisement.garp(FAKE_INTERFACE, '203.0.113.2') mock_netns.assert_not_called() mock_garp_socket.bind.assert_called_once_with((FAKE_INTERFACE, ARP_ETHERTYPE)) mock_garp_socket.getsockname.assert_called_once_with() mock_garp_socket.send.assert_called_once_with(EXPECTED_PACKET_DATA) mock_garp_socket.close.assert_called_once_with() def test_calculate_icmpv6_checksum(self): TEST_PACKET1 = ( b'\x01\r\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003\xff\x02' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00' b'\x00\x00:\x00 \x88\x00\x00\x00 \x01\r\xb8\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x003\xff\x02\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00:\x00') TEST_PACKET2 = ( b'\x01\r\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003\xff\x02' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00' b'\x00\x00:\x00 \x88\x00\x00\x00 \x01\r\xb8\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x003\xff\x02\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00:\x00\x01') self.assertEqual( 35645, ip_advertisement.calculate_icmpv6_checksum(TEST_PACKET1)) self.assertEqual( 35389, ip_advertisement.calculate_icmpv6_checksum(TEST_PACKET2)) @mock.patch('fcntl.ioctl') @mock.patch('octavia.amphorae.backends.utils.network_namespace.' 
'NetworkNamespace') @mock.patch('socket.socket') def test_neighbor_advertisement(self, mock_socket, mock_netns, mock_ioctl): ALL_NODES_ADDR = 'ff02::1' EXPECTED_PACKET_DATA = (b'\x88\x00\x1dk\xa0\x00\x00\x00 \x01\r\xb8\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003' b'\x02\x01') FAKE_INTERFACE = 'fake0' FAKE_MAC = '00005E005333' FAKE_NETNS = 'fake_netns' ICMPV6_PROTO = socket.getprotobyname(constants.IPV6_ICMP) SIOCGIFHWADDR = 0x8927 SOURCE_IP = '2001:db8::33' mock_na_socket = mock.MagicMock() mock_socket.return_value = mock_na_socket mock_ioctl.return_value = a2b_hex(FAKE_MAC) # Test with a network namespace ip_advertisement.neighbor_advertisement(FAKE_INTERFACE, SOURCE_IP, net_ns=FAKE_NETNS) mock_netns.assert_called_once_with(FAKE_NETNS) mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_RAW, ICMPV6_PROTO) mock_na_socket.setsockopt.assert_called_once_with( socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) mock_na_socket.bind.assert_called_once_with((SOURCE_IP, 0)) mock_ioctl.assert_called_once_with( mock_na_socket.fileno(), SIOCGIFHWADDR, pack('256s', bytes(FAKE_INTERFACE, 'utf-8'))) mock_na_socket.sendto.assert_called_once_with( EXPECTED_PACKET_DATA, (ALL_NODES_ADDR, 0, 0, 0)) mock_na_socket.close.assert_called_once_with() # Test without a network namespace mock_na_socket.reset_mock() mock_netns.reset_mock() mock_ioctl.reset_mock() mock_socket.reset_mock() ip_advertisement.neighbor_advertisement(FAKE_INTERFACE, SOURCE_IP) mock_netns.assert_not_called() mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_RAW, ICMPV6_PROTO) mock_na_socket.setsockopt.assert_called_once_with( socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) mock_na_socket.bind.assert_called_once_with((SOURCE_IP, 0)) mock_ioctl.assert_called_once_with( mock_na_socket.fileno(), SIOCGIFHWADDR, pack('256s', bytes(FAKE_INTERFACE, 'utf-8'))) mock_na_socket.sendto.assert_called_once_with( EXPECTED_PACKET_DATA, (ALL_NODES_ADDR, 0, 0, 0)) mock_na_socket.close.assert_called_once_with() @mock.patch('octavia.common.utils.is_ipv6') @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.garp') @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.' 
'neighbor_advertisement') def test_send_ip_advertisement(self, mock_na, mock_garp, mock_is_ipv6): FAKE_INTERFACE = 'fake0' FAKE_NETNS = 'fake_netns' IPV4_ADDRESS = '203.0.113.9' IPV6_ADDRESS = '2001:db8::33' mock_is_ipv6.side_effect = [mock.DEFAULT, mock.DEFAULT, False] # Test IPv4 advertisement ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV4_ADDRESS) mock_garp.assert_called_once_with(FAKE_INTERFACE, IPV4_ADDRESS, None) mock_na.assert_not_called() # Test IPv4 advertisement with a network namespace mock_garp.reset_mock() mock_na.reset_mock() ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV4_ADDRESS, net_ns=FAKE_NETNS) mock_garp.assert_called_once_with(FAKE_INTERFACE, IPV4_ADDRESS, FAKE_NETNS) mock_na.assert_not_called() # Test IPv6 advertisement mock_garp.reset_mock() mock_na.reset_mock() ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS) mock_garp.assert_not_called() mock_na.assert_called_once_with(FAKE_INTERFACE, IPV6_ADDRESS, None) # Test IPv6 advertisement with a network namespace mock_garp.reset_mock() mock_na.reset_mock() ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS, net_ns=FAKE_NETNS) mock_garp.assert_not_called() mock_na.assert_called_once_with(FAKE_INTERFACE, IPV6_ADDRESS, FAKE_NETNS) # Test bogus IP mock_garp.reset_mock() mock_na.reset_mock() ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, 'not an IP') mock_garp.assert_not_called() mock_na.assert_not_called() # Test unknown IP version mock_garp.reset_mock() mock_na.reset_mock() ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS) mock_garp.assert_not_called() mock_na.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py0000664000175000017500000006053100000000000031150 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_utils import uuidutils from octavia.amphorae.backends.agent.api_server import util from octavia.amphorae.backends.utils import keepalivedlvs_query as lvs_query from octavia.common import constants from octavia.tests.common import utils as test_utils from octavia.tests.unit import base # Kernel file sample which is in /proc/net/ip_vs # The realservers and the listened ports are # 10.0.0.25:2222, 10.0.0.35:3333. # Realserver 10.0.0.45:4444 is not listed because healthcheck failed. # The virtual server and the listened port is # 10.0.0.37:7777. KERNAL_FILE_SAMPLE_V4 = ( "IP Virtual Server version 1.2.1 (size=4096)\n" "Prot LocalAddress:Port Scheduler Flags\n" " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n" "UDP 0A000025:1E61 rr\n" " -> 0A000023:0D05 Masq 2 0 0\n" " -> 0A000019:08AE Masq 3 0 0") # Kernel file sample which is in /proc/net/ip_vs # The realservers and the listened ports are # [fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222, # [fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333.
# Realserver [fd79:35e2:9963:0:f816:3eff:fe9d:8f3f]:4444 is not listed # because healthcheck failed. # The virtual server and the listened port is # [fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777. KERNAL_FILE_SAMPLE_V6 = ( "IP Virtual Server version 1.2.1 (size=4096)\n" "Prot LocalAddress:Port Scheduler Flags\n" " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n" "UDP [fd79:35e2:9963:0000:f816:3eff:fe6d:7a2a]:1E61 rr\n" " -> [fd79:35e2:9963:0000:f816:3eff:feca:b7bf]:08AE " "Masq 3 0 0\n" " -> [fd79:35e2:9963:0000:f816:3eff:fe9d:94df]:0D05 " "Masq 2 0 0\n" " -> [fd79:35e2::8f3f]:115C " "Masq 2 0 0") CFG_FILE_TEMPLATE_v4 = ( "# Configuration for Listener %(listener_id)s\n\n" "net_namespace %(ns_name)s\n\n" "virtual_server 10.0.0.37 7777 {\n" " lb_algo wrr\n" " lb_kind NAT\n" " protocol udp\n\n\n" " # Configuration for Pool %(pool_id)s\n" " # Configuration for Member %(member_id1)s\n" " real_server 10.0.0.25 2222 {\n" " weight 3\n" " persistence_timeout 5\n" " persistence_granularity 255.0.0.0\n\n" " MISC_CHECK {\n\n" " misc_path \"/usr/bin/check_script.sh\"\n\n" " misc_timeout 5\n\n" " }\n\n" " }\n\n" " # Configuration for Member %(member_id2)s\n" " real_server 10.0.0.35 3333 {\n" " weight 2\n" " persistence_timeout 5\n" " persistence_granularity 255.0.0.0\n\n" " MISC_CHECK {\n\n" " misc_path \"/usr/bin/check_script.sh\"\n\n" " misc_timeout 5\n\n" " }\n\n" " }\n\n" " # Configuration for Member %(member_id3)s\n" " real_server 10.0.0.45 4444 {\n" " weight 2\n" " persistence_timeout 5\n" " persistence_granularity 255.0.0.0\n\n" " MISC_CHECK {\n\n" " misc_path \"/usr/bin/check_script.sh\"\n\n" " misc_timeout 5\n\n" " }\n\n" " }\n\n" " # Member %(member_id4)s is disabled\n\n" "}") CFG_FILE_TEMPLATE_v6 = ( "# Configuration for Listener %(listener_id)s\n\n" "net_namespace %(ns_name)s\n\n" "virtual_server fd79:35e2:9963:0:f816:3eff:fe6d:7a2a 7777 {\n" " lb_algo wrr\n" " lb_kind NAT\n" " protocol udp\n\n\n" " # Configuration for Pool %(pool_id)s\n" " # Configuration for Member %(member_id1)s\n" " real_server fd79:35e2:9963:0:f816:3eff:feca:b7bf 2222 {\n" " weight 3\n" " MISC_CHECK {\n\n" " misc_path \"/usr/bin/check_script.sh\"\n\n" " misc_timeout 5\n\n" " }\n\n" " }\n\n" " # Configuration for Member %(member_id2)s\n" " real_server fd79:35e2:9963:0:f816:3eff:fe9d:94df 3333 {\n" " weight 2\n" " MISC_CHECK {\n\n" " misc_path \"/usr/bin/check_script.sh\"\n\n" " misc_timeout 5\n\n" " }\n\n" " }\n\n" " # Configuration for Member %(member_id3)s\n" " real_server fd79:35e2:9963:0:f816:3eff:fe9d:8f3f 4444 {\n" " weight 2\n" " MISC_CHECK {\n\n" " misc_path \"/usr/bin/check_script.sh\"\n\n" " misc_timeout 5\n\n" " }\n\n" " }\n\n" " # Member %(member_id4)s is disabled\n\n" " # Configuration for Member %(member_id5)s\n" " real_server fd79:35e2:0:0:0:0:0:8f3f 4444 {\n" " weight 2\n" " MISC_CHECK {\n\n" " misc_path \"/usr/bin/check_script.sh\"\n\n" " misc_timeout 5\n\n" " }\n\n" " }\n\n" "}") CFG_FILE_TEMPLATE_DISABLED_LISTENER = ( "# Listener %(listener_id)s is disabled \n\n" "net_namespace %(ns_name)s\n\n" ) IPVSADM_OUTPUT_TEMPLATE = ( "IP Virtual Server version 1.2.1 (size=4096)\n" "Prot LocalAddress:Port Scheduler Flags\n" " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n" "UDP %(listener_ipport)s rr\n" " -> %(member1_ipport)s Masq 3 0 0\n" " -> %(member2_ipport)s Masq 2 0 0") IPVSADM_STATS_OUTPUT_TEMPLATE = ( "IP Virtual Server version 1.2.1 (size=4096)\n" "Prot LocalAddress:Port Conns InPkts OutPkts " "InBytes OutBytes\n" " -> RemoteAddress:Port\n" "UDP %(listener_ipport)s 5 4264 5" " 
6387472 7490\n" " -> %(member1_ipport)s 2 1706 2" " 2555588 2996\n" " -> %(member2_ipport)s 3 2558 3" " 3831884 4494") class LvsQueryTestCase(base.TestCase): def setUp(self): super(LvsQueryTestCase, self).setUp() self.listener_id_v4 = uuidutils.generate_uuid() self.pool_id_v4 = uuidutils.generate_uuid() self.member_id1_v4 = uuidutils.generate_uuid() self.member_id2_v4 = uuidutils.generate_uuid() self.member_id3_v4 = uuidutils.generate_uuid() self.member_id4_v4 = uuidutils.generate_uuid() self.listener_id_v6 = uuidutils.generate_uuid() self.pool_id_v6 = uuidutils.generate_uuid() self.member_id1_v6 = uuidutils.generate_uuid() self.member_id2_v6 = uuidutils.generate_uuid() self.member_id3_v6 = uuidutils.generate_uuid() self.member_id4_v6 = uuidutils.generate_uuid() self.member_id5_v6 = uuidutils.generate_uuid() self.disabled_listener_id = uuidutils.generate_uuid() cfg_content_v4 = CFG_FILE_TEMPLATE_v4 % { 'listener_id': self.listener_id_v4, 'ns_name': constants.AMPHORA_NAMESPACE, 'pool_id': self.pool_id_v4, 'member_id1': self.member_id1_v4, 'member_id2': self.member_id2_v4, 'member_id3': self.member_id3_v4, 'member_id4': self.member_id4_v4, } cfg_content_v6 = CFG_FILE_TEMPLATE_v6 % { 'listener_id': self.listener_id_v6, 'ns_name': constants.AMPHORA_NAMESPACE, 'pool_id': self.pool_id_v6, 'member_id1': self.member_id1_v6, 'member_id2': self.member_id2_v6, 'member_id3': self.member_id3_v6, 'member_id4': self.member_id4_v6, 'member_id5': self.member_id5_v6 } cfg_content_disabled_listener = ( CFG_FILE_TEMPLATE_DISABLED_LISTENER % { 'listener_id': self.listener_id_v6, 'ns_name': constants.AMPHORA_NAMESPACE, } ) self.useFixture(test_utils.OpenFixture( util.keepalived_lvs_cfg_path(self.listener_id_v4), cfg_content_v4)) self.useFixture(test_utils.OpenFixture( util.keepalived_lvs_cfg_path(self.listener_id_v6), cfg_content_v6)) self.useFixture(test_utils.OpenFixture( util.keepalived_lvs_cfg_path(self.disabled_listener_id), cfg_content_disabled_listener)) @mock.patch('subprocess.check_output') def test_get_listener_realserver_mapping(self, mock_check_output): # Ipv4 resolver input_listener_ip_port = '10.0.0.37:7777' target_ns = constants.AMPHORA_NAMESPACE mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4 result = lvs_query.get_listener_realserver_mapping( target_ns, input_listener_ip_port, health_monitor_enabled=True) expected = {'10.0.0.25:2222': {'status': 'UP', 'Forward': 'Masq', 'Weight': '3', 'ActiveConn': '0', 'InActConn': '0'}, '10.0.0.35:3333': {'status': 'UP', 'Forward': 'Masq', 'Weight': '2', 'ActiveConn': '0', 'InActConn': '0'}} self.assertEqual((True, expected), result) # Ipv6 resolver input_listener_ip_port = '[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777' mock_check_output.return_value = KERNAL_FILE_SAMPLE_V6 result = lvs_query.get_listener_realserver_mapping( target_ns, input_listener_ip_port, health_monitor_enabled=True) expected = {'[fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222': {'status': constants.UP, 'Forward': 'Masq', 'Weight': '3', 'ActiveConn': '0', 'InActConn': '0'}, '[fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333': {'status': constants.UP, 'Forward': 'Masq', 'Weight': '2', 'ActiveConn': '0', 'InActConn': '0'}, '[fd79:35e2::8f3f]:4444': {'status': constants.UP, 'Forward': 'Masq', 'Weight': '2', 'ActiveConn': '0', 'InActConn': '0'}} self.assertEqual((True, expected), result) # negative cases mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4 for listener_ip_port in ['10.0.0.37:7776', '10.0.0.31:7777']: result = lvs_query.get_listener_realserver_mapping( target_ns, 
listener_ip_port, health_monitor_enabled=True) self.assertEqual((False, {}), result) mock_check_output.return_value = KERNAL_FILE_SAMPLE_V6 for listener_ip_port in [ '[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7776', '[fd79:35e2:9973:0:f816:3eff:fe6d:7a2a]:7777']: result = lvs_query.get_listener_realserver_mapping( target_ns, listener_ip_port, health_monitor_enabled=True) self.assertEqual((False, {}), result) def test_get_udp_listener_resource_ipports_nsname(self): # ipv4 res = lvs_query.get_udp_listener_resource_ipports_nsname( self.listener_id_v4) expected = {'Listener': {'id': self.listener_id_v4, 'ipport': '10.0.0.37:7777'}, 'Pool': {'id': self.pool_id_v4}, 'Members': [{'id': self.member_id1_v4, 'ipport': '10.0.0.25:2222'}, {'id': self.member_id2_v4, 'ipport': '10.0.0.35:3333'}, {'id': self.member_id3_v4, 'ipport': '10.0.0.45:4444'}, {'id': self.member_id4_v4, 'ipport': None}]} self.assertEqual((expected, constants.AMPHORA_NAMESPACE), res) # ipv6 res = lvs_query.get_udp_listener_resource_ipports_nsname( self.listener_id_v6) expected = {'Listener': { 'id': self.listener_id_v6, 'ipport': '[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777'}, 'Pool': {'id': self.pool_id_v6}, 'Members': [ {'id': self.member_id1_v6, 'ipport': '[fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222'}, {'id': self.member_id2_v6, 'ipport': '[fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333'}, {'id': self.member_id3_v6, 'ipport': '[fd79:35e2:9963:0:f816:3eff:fe9d:8f3f]:4444'}, {'id': self.member_id5_v6, 'ipport': '[fd79:35e2::8f3f]:4444'}, {'id': self.member_id4_v6, 'ipport': None}]} self.assertEqual((expected, constants.AMPHORA_NAMESPACE), res) # disabled res = lvs_query.get_udp_listener_resource_ipports_nsname( self.disabled_listener_id) self.assertEqual((None, constants.AMPHORA_NAMESPACE), res) @mock.patch('os.stat') @mock.patch('subprocess.check_output') def test_get_udp_listener_pool_status(self, mock_check_output, mock_os_stat): mock_os_stat.side_effect = ( mock.Mock(st_mtime=1234), mock.Mock(st_mtime=1234), ) # test with ipv4 and ipv6 mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4 res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4) expected = { 'lvs': {'uuid': self.pool_id_v4, 'status': constants.UP, 'members': {self.member_id1_v4: constants.UP, self.member_id2_v4: constants.UP, self.member_id3_v4: constants.DOWN, self.member_id4_v4: constants.MAINT}}} self.assertEqual(expected, res) mock_os_stat.side_effect = ( mock.Mock(st_mtime=1234), mock.Mock(st_mtime=1234), ) mock_check_output.return_value = KERNAL_FILE_SAMPLE_V6 res = lvs_query.get_udp_listener_pool_status(self.listener_id_v6) expected = { 'lvs': {'uuid': self.pool_id_v6, 'status': constants.UP, 'members': {self.member_id1_v6: constants.UP, self.member_id2_v6: constants.UP, self.member_id3_v6: constants.DOWN, self.member_id4_v6: constants.MAINT, self.member_id5_v6: constants.UP}}} self.assertEqual(expected, res) @mock.patch('os.stat') @mock.patch('subprocess.check_output') def test_get_udp_listener_pool_status_restarting(self, mock_check_output, mock_os_stat): mock_os_stat.side_effect = ( mock.Mock(st_mtime=1234), # config file mock.Mock(st_mtime=1220), # pid file ) # test with ipv4 and ipv6 mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4 res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4) expected = { 'lvs': {'uuid': self.pool_id_v4, 'status': constants.UP, 'members': {self.member_id1_v4: constants.UP, self.member_id2_v4: constants.UP, self.member_id3_v4: constants.RESTARTING, self.member_id4_v4: constants.MAINT}}} 
self.assertEqual(expected, res) @mock.patch('octavia.amphorae.backends.utils.keepalivedlvs_query.' 'get_udp_listener_resource_ipports_nsname') def test_get_udp_listener_pool_status_when_no_pool( self, mock_get_resource_ipports): # Just test with ipv4; the ipv6 tests are the same. # The returned resource_ipport_mapping doesn't contain the 'Pool' # resource, which means the listener doesn't have a pool and is not # usable at this moment, so the pool status will # return nothing. mock_get_resource_ipports.return_value = ( { 'Listener': { 'id': self.listener_id_v4, 'ipport': '10.0.0.37:7777'}}, constants.AMPHORA_NAMESPACE) res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4) self.assertEqual({}, res) @mock.patch('octavia.amphorae.backends.utils.keepalivedlvs_query.' 'get_udp_listener_resource_ipports_nsname') def test_get_udp_listener_pool_status_when_no_members( self, mock_get_resource_ipports): # Just test with ipv4; the ipv6 tests are the same. # The returned resource_ipport_mapping doesn't contain any 'Members' # resources, which means the listener's pool doesn't have an enabled # member, so the pool is not usable; the pool status will then # return UP with an empty member list. mock_get_resource_ipports.return_value = ( { 'Listener': {'id': self.listener_id_v4, 'ipport': '10.0.0.37:7777'}, 'Pool': {'id': self.pool_id_v4}}, constants.AMPHORA_NAMESPACE) res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4) expected = {'lvs': { 'uuid': self.pool_id_v4, 'status': constants.UP, 'members': {} }} self.assertEqual(expected, res) @mock.patch('os.stat') @mock.patch('octavia.amphorae.backends.utils.keepalivedlvs_query.' 'get_listener_realserver_mapping') def test_get_udp_listener_pool_status_when_not_get_realserver_result( self, mock_get_mapping, mock_os_stat): # This path is hit if the kernel lvs file (/proc/net/ip_vs) # loses its content. In that case, even though the pool and members # are configured in the udp keepalived config file, we have to set # the status of the pool and its members to DOWN. mock_os_stat.side_effect = ( mock.Mock(st_mtime=1234), mock.Mock(st_mtime=1234), ) mock_get_mapping.return_value = (False, {}) res = lvs_query.get_udp_listener_pool_status(self.listener_id_v4) expected = { 'lvs': {'uuid': self.pool_id_v4, 'status': constants.DOWN, 'members': {self.member_id1_v4: constants.DOWN, self.member_id2_v4: constants.DOWN, self.member_id3_v4: constants.DOWN, self.member_id4_v4: constants.MAINT}}} self.assertEqual(expected, res) @mock.patch('subprocess.check_output') def test_get_ipvsadm_info(self, mock_check_output): for ip_list in [["10.0.0.37:7777", "10.0.0.25:2222", "10.0.0.35:3333"], ["[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777", "[fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222", "[fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333"]]: mock_check_output.return_value = IPVSADM_OUTPUT_TEMPLATE % { "listener_ipport": ip_list[0], "member1_ipport": ip_list[1], "member2_ipport": ip_list[2]} res = lvs_query.get_ipvsadm_info(constants.AMPHORA_NAMESPACE) # The expected result can be cross-checked against # IPVSADM_OUTPUT_TEMPLATE; it shows the function extracts every # element of the virtual # server and the real servers. 
expected = { ip_list[0]: { 'Listener': [('Prot', 'UDP'), ('LocalAddress:Port', ip_list[0]), ('Scheduler', 'rr')], 'Members': [[('RemoteAddress:Port', ip_list[1]), ('Forward', 'Masq'), ('Weight', '3'), ('ActiveConn', '0'), ('InActConn', '0')], [('RemoteAddress:Port', ip_list[2]), ('Forward', 'Masq'), ('Weight', '2'), ('ActiveConn', '0'), ('InActConn', '0')]]}} self.assertEqual(expected, res) # ipvsadm stats mock_check_output.return_value = IPVSADM_STATS_OUTPUT_TEMPLATE % { "listener_ipport": ip_list[0], "member1_ipport": ip_list[1], "member2_ipport": ip_list[2]} res = lvs_query.get_ipvsadm_info(constants.AMPHORA_NAMESPACE, is_stats_cmd=True) expected = { ip_list[0]: {'Listener': [('Prot', 'UDP'), ('LocalAddress:Port', ip_list[0]), ('Conns', '5'), ('InPkts', '4264'), ('OutPkts', '5'), ('InBytes', '6387472'), ('OutBytes', '7490')], 'Members': [[('RemoteAddress:Port', ip_list[1]), ('Conns', '2'), ('InPkts', '1706'), ('OutPkts', '2'), ('InBytes', '2555588'), ('OutBytes', '2996')], [('RemoteAddress:Port', ip_list[2]), ('Conns', '3'), ('InPkts', '2558'), ('OutPkts', '3'), ('InBytes', '3831884'), ('OutBytes', '4494')]]}} self.assertEqual(expected, res) @mock.patch('subprocess.check_output') @mock.patch("octavia.amphorae.backends.agent.api_server.util." "is_udp_listener_running", return_value=True) @mock.patch("octavia.amphorae.backends.agent.api_server.util." "get_udp_listeners") def test_get_udp_listeners_stats( self, mock_get_listener, mock_is_running, mock_check_output): # The ipv6 test is the same as ipv4, so just test ipv4 here mock_get_listener.return_value = [self.listener_id_v4] output_list = list() output_list.append(IPVSADM_OUTPUT_TEMPLATE % { "listener_ipport": "10.0.0.37:7777", "member1_ipport": "10.0.0.25:2222", "member2_ipport": "10.0.0.35:3333"}) output_list.append(IPVSADM_STATS_OUTPUT_TEMPLATE % { "listener_ipport": "10.0.0.37:7777", "member1_ipport": "10.0.0.25:2222", "member2_ipport": "10.0.0.35:3333"}) mock_check_output.side_effect = output_list res = lvs_query.get_udp_listeners_stats() # The expected result can be checked against the stats sample; # it shows this func can compute the stats info of a single listener. expected = {self.listener_id_v4: { 'status': constants.OPEN, 'stats': {'bin': 6387472, 'stot': 5, 'bout': 7490, 'ereq': 0, 'scur': 0}}} self.assertEqual(expected, res) # If no udp listeners need to be collected, # then this function will return nothing. mock_is_running.return_value = False res = lvs_query.get_udp_listeners_stats() self.assertEqual({}, res) @mock.patch('subprocess.check_output') @mock.patch("octavia.amphorae.backends.agent.api_server.util." "is_udp_listener_running", return_value=True) @mock.patch("octavia.amphorae.backends.agent.api_server.util." "get_udp_listeners") def test_get_udp_listeners_stats_disabled_listener( self, mock_get_listener, mock_is_running, mock_check_output): mock_get_listener.return_value = [self.disabled_listener_id] res = lvs_query.get_udp_listeners_stats() self.assertEqual({}, res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/backends/utils/test_network_namespace.py0000664000175000017500000001043000000000000030563 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random from unittest import mock from octavia.amphorae.backends.utils import network_namespace from octavia.tests.common import utils as test_utils import octavia.tests.unit.base as base class TestNetworkNamespace(base.TestCase): def setUp(self): super(TestNetworkNamespace, self).setUp() @mock.patch('ctypes.get_errno') @mock.patch('ctypes.CDLL') def test_error_handler(self, mock_cdll, mock_get_errno): FAKE_NETNS = 'fake-netns' netns = network_namespace.NetworkNamespace(FAKE_NETNS) # Test result 0 netns._error_handler(0, None, None) mock_get_errno.assert_not_called() # Test result -1 mock_get_errno.reset_mock() self.assertRaises(OSError, netns._error_handler, -1, None, None) mock_get_errno.assert_called_once_with() @mock.patch('os.getpid') @mock.patch('ctypes.CDLL') def test_init(self, mock_cdll, mock_getpid): FAKE_NETNS = 'fake-netns' FAKE_PID = random.randrange(100000) mock_cdll_obj = mock.MagicMock() mock_cdll.return_value = mock_cdll_obj mock_getpid.return_value = FAKE_PID expected_current_netns = '/proc/{pid}/ns/net'.format(pid=FAKE_PID) expected_target_netns = '/var/run/netns/{netns}'.format( netns=FAKE_NETNS) netns = network_namespace.NetworkNamespace(FAKE_NETNS) self.assertEqual(expected_current_netns, netns.current_netns) self.assertEqual(expected_target_netns, netns.target_netns) self.assertEqual(mock_cdll_obj.setns, netns.set_netns) self.assertEqual(netns.set_netns.errcheck, netns._error_handler) @mock.patch('os.getpid') @mock.patch('ctypes.CDLL') def test_enter(self, mock_cdll, mock_getpid): CLONE_NEWNET = 0x40000000 FAKE_NETNS = 'fake-netns' FAKE_PID = random.randrange(100000) current_netns_fd = random.randrange(100000) target_netns_fd = random.randrange(100000) mock_getpid.return_value = FAKE_PID mock_cdll_obj = mock.MagicMock() mock_cdll.return_value = mock_cdll_obj expected_current_netns = '/proc/{pid}/ns/net'.format(pid=FAKE_PID) expected_target_netns = '/var/run/netns/{netns}'.format( netns=FAKE_NETNS) netns = network_namespace.NetworkNamespace(FAKE_NETNS) current_mock_open = self.useFixture( test_utils.OpenFixture(expected_current_netns)).mock_open current_mock_open.return_value = current_netns_fd target_mock_open = self.useFixture( test_utils.OpenFixture(expected_target_netns)).mock_open handle = target_mock_open() handle.fileno.return_value = target_netns_fd netns.__enter__() self.assertEqual(current_netns_fd, netns.current_netns_fd) netns.set_netns.assert_called_once_with(target_netns_fd, CLONE_NEWNET) @mock.patch('os.getpid') @mock.patch('ctypes.CDLL') def test_exit(self, mock_cdll, mock_getpid): CLONE_NEWNET = 0x40000000 FAKE_NETNS = 'fake-netns' FAKE_PID = random.randrange(100000) current_netns_fileno = random.randrange(100000) mock_getpid.return_value = FAKE_PID mock_cdll_obj = mock.MagicMock() mock_cdll.return_value = mock_cdll_obj mock_current_netns_fd = mock.MagicMock() mock_current_netns_fd.fileno.return_value = current_netns_fileno netns = network_namespace.NetworkNamespace(FAKE_NETNS) netns.current_netns_fd = mock_current_netns_fd netns.__exit__() netns.set_netns.assert_called_once_with(current_netns_fileno, CLONE_NEWNET) 
mock_current_netns_fd.close.assert_called_once_with() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/backends/utils/test_network_utils.py0000664000175000017500000001314300000000000027773 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from octavia.amphorae.backends.utils import network_utils from octavia.common import exceptions from octavia.tests.common import sample_network_data import octavia.tests.unit.base as base class TestNetworkUtils(base.TestCase): def setUp(self): super(TestNetworkUtils, self).setUp() def test_find_interface(self): FAKE_INTERFACE = 'fake0' IPV4_ADDRESS = '203.0.113.55' BROADCAST_ADDRESS = '203.0.113.55' IPV6_ADDRESS = '2001:db8::55' SAMPLE_IPV4_ADDR = sample_network_data.create_iproute_ipv4_address( IPV4_ADDRESS, BROADCAST_ADDRESS, FAKE_INTERFACE) SAMPLE_IPV6_ADDR = sample_network_data.create_iproute_ipv6_address( IPV6_ADDRESS, FAKE_INTERFACE) SAMPLE_INTERFACE = sample_network_data.create_iproute_interface( FAKE_INTERFACE) BROKEN_INTERFACE = [{'attrs': []}] mock_ip_addr = mock.MagicMock() mock_rtnl_api = mock.MagicMock() mock_rtnl_api.get_addr.side_effect = [[], SAMPLE_IPV4_ADDR, SAMPLE_IPV6_ADDR, SAMPLE_IPV6_ADDR] mock_rtnl_api.get_links.side_effect = [SAMPLE_INTERFACE, SAMPLE_INTERFACE, BROKEN_INTERFACE] # Test no match IPV4_ADDRESS = '203.0.113.55' mock_ip_addr.version = 4 self.assertIsNone(network_utils._find_interface(IPV4_ADDRESS, mock_rtnl_api, IPV4_ADDRESS)) # Test with IPv4 address mock_rtnl_api.reset_mock() mock_ip_addr.version = 4 result = network_utils._find_interface(IPV4_ADDRESS, mock_rtnl_api, IPV4_ADDRESS) self.assertEqual(FAKE_INTERFACE, result) mock_rtnl_api.get_addr.assert_called_once_with(address=IPV4_ADDRESS) mock_rtnl_api.get_links.assert_called_once_with(2) # Test with IPv6 address mock_rtnl_api.reset_mock() mock_ip_addr.version = 6 result = network_utils._find_interface(IPV6_ADDRESS, mock_rtnl_api, IPV6_ADDRESS) self.assertEqual(FAKE_INTERFACE, result) mock_rtnl_api.get_addr.assert_called_once_with(address=IPV6_ADDRESS) mock_rtnl_api.get_links.assert_called_once_with(2) # Test with a broken interface mock_rtnl_api.reset_mock() mock_ip_addr.version = 6 self.assertIsNone(network_utils._find_interface(IPV6_ADDRESS, mock_rtnl_api, IPV6_ADDRESS)) mock_rtnl_api.get_addr.assert_called_once_with(address=IPV6_ADDRESS) mock_rtnl_api.get_links.assert_called_once_with(2) @mock.patch('octavia.amphorae.backends.utils.network_utils.' 
'_find_interface') @mock.patch('pyroute2.IPRoute', create=True) @mock.patch('pyroute2.NetNS', create=True) def test_get_interface_name(self, mock_netns, mock_ipr, mock_find_int): FAKE_INTERFACE = 'fake0' FAKE_NETNS = 'fake-ns' IPV4_ADDRESS = '203.0.113.64' mock_ipr_enter_obj = mock.MagicMock() mock_ipr_obj = mock.MagicMock() mock_ipr_obj.__enter__.return_value = mock_ipr_enter_obj mock_ipr.return_value = mock_ipr_obj mock_netns_enter_obj = mock.MagicMock() mock_netns_obj = mock.MagicMock() mock_netns_obj.__enter__.return_value = mock_netns_enter_obj mock_netns.return_value = mock_netns_obj mock_find_int.side_effect = [FAKE_INTERFACE, FAKE_INTERFACE, None] # Test a bogus IP address self.assertRaises(exceptions.InvalidIPAddress, network_utils.get_interface_name, 'not an IP', None) # Test with no network namespace result = network_utils.get_interface_name(IPV4_ADDRESS) self.assertEqual(FAKE_INTERFACE, result) mock_ipr.assert_called_once_with() mock_find_int.assert_called_once_with(IPV4_ADDRESS, mock_ipr_enter_obj, IPV4_ADDRESS) # Test with network namespace mock_ipr.reset_mock() mock_find_int.reset_mock() result = network_utils.get_interface_name(IPV4_ADDRESS, net_ns=FAKE_NETNS) self.assertEqual(FAKE_INTERFACE, result) mock_netns.assert_called_once_with(FAKE_NETNS) mock_find_int.assert_called_once_with(IPV4_ADDRESS, mock_netns_enter_obj, IPV4_ADDRESS) # Test no interface found mock_ipr.reset_mock() mock_find_int.reset_mock() self.assertRaises( exceptions.NotFound, network_utils.get_interface_name, IPV4_ADDRESS, net_ns=FAKE_NETNS) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4142168 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/0000775000175000017500000000000000000000000022213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/__init__.py0000664000175000017500000000107400000000000024326 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4142168 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/haproxy/0000775000175000017500000000000000000000000023705 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/haproxy/__init__.py0000664000175000017500000000107400000000000026020 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# File: octavia-6.2.2/octavia/tests/unit/amphorae/drivers/haproxy/test_exceptions.py

# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock

from octavia.amphorae.drivers.haproxy import exceptions
import octavia.tests.unit.base as base


class TestHAProxyExceptions(base.TestCase):

    def setUp(self):
        super(TestHAProxyExceptions, self).setUp()

    @mock.patch('octavia.amphorae.drivers.haproxy.exceptions.LOG')
    def test_check_exception(self, mock_logger):
        response_mock = mock.MagicMock()

        # Test exception that should raise and log
        response_mock.status_code = 404

        self.assertRaises(exceptions.NotFound, exceptions.check_exception,
                          response_mock)
        mock_logger.error.assert_called_once()

        # Test exception that should raise but not log
        mock_logger.reset_mock()
        response_mock.status_code = 403

        self.assertRaises(exceptions.Forbidden, exceptions.check_exception,
                          response_mock, log_error=False)
        mock_logger.error.assert_not_called()

        # Test exception that should be ignored
        mock_logger.reset_mock()
        response_mock.status_code = 401

        result = exceptions.check_exception(response_mock, ignore=[401])

        mock_logger.error.assert_not_called()
        self.assertEqual(response_mock, result)


# File: octavia-6.2.2/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
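# NOTE(editor): this module targets the legacy 0.5 amphora agent API, which
# manages haproxy configuration per listener; test_rest_api_driver_1_0.py
# below covers the 1.0 API, which is scoped per load balancer.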
import hashlib
from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
import requests
import requests_mock

from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.amphorae.drivers.haproxy import exceptions as exc
from octavia.amphorae.drivers.haproxy import rest_api_driver as driver
from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils as octavia_utils
from octavia.db import models
from octavia.network import data_models as network_models
from octavia.tests.common import sample_certs
from octavia.tests.unit import base
from octavia.tests.unit.common.sample_configs import sample_configs_split

API_VERSION = '0.5'
FAKE_CIDR = '198.51.100.0/24'
FAKE_GATEWAY = '192.51.100.1'
FAKE_IP = '192.0.2.10'
FAKE_IPV6 = '2001:db8::cafe'
FAKE_IPV6_LLA = 'fe80::00ff:fe00:cafe'
FAKE_PEM_FILENAME = "file_name"
FAKE_UUID_1 = uuidutils.generate_uuid()
FAKE_VRRP_IP = '10.1.0.1'
FAKE_MAC_ADDRESS = '123'
FAKE_MTU = 1450
FAKE_MEMBER_IP_PORT_NAME_1 = "10.0.0.10:1003"
FAKE_MEMBER_IP_PORT_NAME_2 = "10.0.0.11:1004"


class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):

    def setUp(self):
        super(TestHaproxyAmphoraLoadBalancerDriverTest, self).setUp()

        DEST1 = '198.51.100.0/24'
        DEST2 = '203.0.113.0/24'
        NEXTHOP = '192.0.2.1'

        self.driver = driver.HaproxyAmphoraLoadBalancerDriver()

        self.driver.cert_manager = mock.MagicMock()
        self.driver.cert_parser = mock.MagicMock()
        self.driver.clients = {
            'base': mock.MagicMock(),
            API_VERSION: mock.MagicMock()}
        self.driver.clients['base'].get_api_version.return_value = {
            'api_version': API_VERSION}
        self.driver.clients[API_VERSION].get_info.return_value = {
            'haproxy_version': u'1.6.3-1ubuntu0.1',
            'api_version': API_VERSION}
        self.driver.jinja_split = mock.MagicMock()
        self.driver.udp_jinja = mock.MagicMock()

        # Build sample Listener and VIP configs
        self.sl = sample_configs_split.sample_listener_tuple(
            tls=True, sni=True, client_ca_cert=True, client_crl_cert=True,
            recursive_nest=True)
        self.sl_udp = sample_configs_split.sample_listener_tuple(
            proto=constants.PROTOCOL_UDP,
            persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
            persistence_timeout=33,
            persistence_granularity='255.255.0.0',
            monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)
        self.pool_has_cert = sample_configs_split.sample_pool_tuple(
            pool_cert=True, pool_ca_cert=True, pool_crl=True)
        self.amp = self.sl.load_balancer.amphorae[0]
        self.sv = sample_configs_split.sample_vip_tuple()
        self.lb = self.sl.load_balancer
        self.lb_udp = (
            sample_configs_split.sample_lb_with_udp_listener_tuple())
        self.fixed_ip = mock.MagicMock()
        self.fixed_ip.ip_address = '198.51.100.5'
        self.fixed_ip.subnet.cidr = '198.51.100.0/24'
        self.network = network_models.Network(mtu=FAKE_MTU)
        self.port = network_models.Port(mac_address=FAKE_MAC_ADDRESS,
                                        fixed_ips=[self.fixed_ip],
                                        network=self.network)

        self.host_routes = [network_models.HostRoute(destination=DEST1,
                                                     nexthop=NEXTHOP),
                            network_models.HostRoute(destination=DEST2,
                                                     nexthop=NEXTHOP)]
        host_routes_data = [{'destination': DEST1, 'nexthop': NEXTHOP},
                            {'destination': DEST2, 'nexthop': NEXTHOP}]
        self.subnet_info = {'subnet_cidr': FAKE_CIDR,
                            'gateway': FAKE_GATEWAY,
                            'mac_address': FAKE_MAC_ADDRESS,
                            'vrrp_ip': self.amp.vrrp_ip,
                            'mtu': FAKE_MTU,
                            'host_routes': host_routes_data}
        self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
                             constants.REQ_READ_TIMEOUT: 2,
                             constants.CONN_MAX_RETRIES: 3,
                             constants.CONN_RETRY_INTERVAL: 4}
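    # NOTE(editor): the listener update path must be a no-op both for a load
    # balancer with no listeners and for an amphora in DELETED status;
    # otherwise it renders a config, uploads it and reloads the listener.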
    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._process_secret')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_update_amphora_listeners(self, mock_load_cert, mock_secret):
        mock_amphora = mock.MagicMock()
        mock_amphora.id = 'mock_amphora_id'
        mock_amphora.api_version = API_VERSION
        mock_secret.return_value = 'filename.pem'
        mock_load_cert.return_value = {
            'tls_cert': self.sl.default_tls_container, 'sni_certs': [],
            'client_ca_cert': None}
        self.driver.jinja_split.build_config.return_value = 'the_config'

        mock_empty_lb = mock.MagicMock()
        mock_empty_lb.listeners = []
        self.driver.update_amphora_listeners(mock_empty_lb, mock_amphora,
                                             self.timeout_dict)
        mock_load_cert.assert_not_called()
        self.driver.jinja_split.build_config.assert_not_called()
        self.driver.clients[API_VERSION].upload_config.assert_not_called()
        self.driver.clients[API_VERSION].reload_listener.assert_not_called()

        self.driver.update_amphora_listeners(self.lb, mock_amphora,
                                             self.timeout_dict)

        self.driver.clients[API_VERSION].upload_config.assert_called_once_with(
            mock_amphora, self.sl.id, 'the_config',
            timeout_dict=self.timeout_dict)
        self.driver.clients[API_VERSION].reload_listener(
            mock_amphora, self.sl.id, timeout_dict=self.timeout_dict)

        mock_load_cert.reset_mock()
        self.driver.jinja_split.build_config.reset_mock()
        self.driver.clients[API_VERSION].upload_config.reset_mock()
        self.driver.clients[API_VERSION].reload_listener.reset_mock()
        mock_amphora.status = constants.DELETED
        self.driver.update_amphora_listeners(self.lb, mock_amphora,
                                             self.timeout_dict)
        mock_load_cert.assert_not_called()
        self.driver.jinja_split.build_config.assert_not_called()
        self.driver.clients[API_VERSION].upload_config.assert_not_called()
        self.driver.clients[API_VERSION].reload_listener.assert_not_called()

    @mock.patch('octavia.db.api.get_session')
    @mock.patch('octavia.db.repositories.ListenerRepository.update')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_update_amphora_listeners_bad_cert(
            self, mock_load_cert, mock_list_update, mock_get_session):
        mock_amphora = mock.MagicMock()
        mock_amphora.id = 'mock_amphora_id'
        mock_amphora.api_version = API_VERSION
        mock_get_session.return_value = 'fake_session'
        mock_load_cert.side_effect = [Exception]

        self.driver.update_amphora_listeners(self.lb, mock_amphora,
                                             self.timeout_dict)
        mock_list_update.assert_called_once_with(
            'fake_session', self.lb.listeners[0].id,
            provisioning_status=constants.ERROR,
            operating_status=constants.ERROR)
        self.driver.jinja_split.build_config.assert_not_called()
        self.driver.clients[API_VERSION].delete_listener.assert_not_called()
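    # NOTE(editor): test_update below verifies the full update flow:
    # certificate sync via MD5 comparison, a single config upload, one
    # listener reload, and secret processing for the client CA cert and CRL.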
    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._process_secret')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.get_host_names')
    def test_update(self, mock_cert, mock_load_crt, mock_secret):
        mock_cert.return_value = {'cn': sample_certs.X509_CERT_CN}
        mock_secret.side_effect = ['filename.pem', 'crl-filename.pem']
        sconts = []
        for sni_container in self.sl.sni_containers:
            sconts.append(sni_container.tls_container)
        mock_load_crt.side_effect = [
            {'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts},
            {'tls_cert': None, 'sni_certs': []}]
        self.driver.clients[API_VERSION].get_cert_md5sum.side_effect = [
            exc.NotFound, 'Fake_MD5', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
            'CA_CERT_MD5']
        self.driver.jinja_split.build_config.side_effect = ['fake_config']

        # Execute driver method
        self.driver.update(self.lb)

        # verify result
        gcm_calls = [
            mock.call(self.amp, self.sl.id,
                      self.sl.default_tls_container.id + '.pem',
                      ignore=(404,)),
            mock.call(self.amp, self.sl.id,
                      sconts[0].id + '.pem', ignore=(404,)),
            mock.call(self.amp, self.sl.id,
                      sconts[1].id + '.pem', ignore=(404,)),
        ]
        self.driver.clients[API_VERSION].get_cert_md5sum.assert_has_calls(
            gcm_calls, any_order=True)

        # this is called three times (last MD5 matches)
        fp1 = b'\n'.join([sample_certs.X509_CERT,
                          sample_certs.X509_CERT_KEY,
                          sample_certs.X509_IMDS]) + b'\n'
        fp2 = b'\n'.join([sample_certs.X509_CERT_2,
                          sample_certs.X509_CERT_KEY_2,
                          sample_certs.X509_IMDS]) + b'\n'
        fp3 = b'\n'.join([sample_certs.X509_CERT_3,
                          sample_certs.X509_CERT_KEY_3,
                          sample_certs.X509_IMDS]) + b'\n'
        ucp_calls = [
            mock.call(self.amp, self.sl.id,
                      self.sl.default_tls_container.id + '.pem', fp1),
            mock.call(self.amp, self.sl.id, sconts[0].id + '.pem', fp2),
            mock.call(self.amp, self.sl.id, sconts[1].id + '.pem', fp3),
        ]
        self.driver.clients[API_VERSION].upload_cert_pem.assert_has_calls(
            ucp_calls, any_order=True)

        # upload only one config file
        self.driver.clients[API_VERSION].upload_config.assert_called_once_with(
            self.amp, self.sl.id, 'fake_config', timeout_dict=None)
        # start should be called once
        self.driver.clients[
            API_VERSION].reload_listener.assert_called_once_with(
            self.amp, self.sl.id, timeout_dict=None)
        secret_calls = [
            mock.call(self.sl, self.sl.client_ca_tls_certificate_id,
                      self.amp, self.sl.id),
            mock.call(self.sl, self.sl.client_crl_container_id,
                      self.amp, self.sl.id)
        ]
        mock_secret.assert_has_calls(secret_calls)

    def test_udp_update(self):
        self.driver.udp_jinja.build_config.side_effect = ['fake_udp_config']

        # Execute driver method
        self.driver.update(self.lb_udp)

        # upload only one config file
        self.driver.clients[
            API_VERSION].upload_udp_config.assert_called_once_with(
            self.amp, self.sl_udp.id, 'fake_udp_config', timeout_dict=None)

        # start should be called once
        self.driver.clients[
            API_VERSION].reload_listener.assert_called_once_with(
            self.amp, self.sl_udp.id, timeout_dict=None)

    def test_upload_cert_amp(self):
        self.driver.upload_cert_amp(self.amp, octavia_utils.b('test'))
        self.driver.clients[
            API_VERSION].update_cert_for_rotation.assert_called_once_with(
            self.amp, octavia_utils.b('test'))

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test__process_tls_certificates_no_ca_cert(self, mock_load_crt):
        sample_listener = sample_configs_split.sample_listener_tuple(
            tls=True, sni=True)
        sconts = []
        for sni_container in sample_listener.sni_containers:
            sconts.append(sni_container.tls_container)
        mock_load_crt.return_value = {
            'tls_cert': self.sl.default_tls_container,
            'sni_certs': sconts
        }
        self.driver.clients[API_VERSION].get_cert_md5sum.side_effect = [
            exc.NotFound, 'Fake_MD5', 'aaaaa', 'aaaaaaaa']

        self.driver._process_tls_certificates(
            sample_listener, self.amp, sample_listener.id)

        gcm_calls = [
            mock.call(self.amp, sample_listener.id,
                      self.sl.default_tls_container.id + '.pem',
                      ignore=(404,)),
            mock.call(self.amp, sample_listener.id,
                      sconts[0].id + '.pem', ignore=(404,)),
            mock.call(self.amp, sample_listener.id,
                      sconts[1].id + '.pem', ignore=(404,))
        ]
        self.driver.clients[API_VERSION].get_cert_md5sum.assert_has_calls(
            gcm_calls, any_order=True)

        fp1 = b'\n'.join([sample_certs.X509_CERT,
                          sample_certs.X509_CERT_KEY,
                          sample_certs.X509_IMDS]) + b'\n'
        fp2 = b'\n'.join([sample_certs.X509_CERT_2,
                          sample_certs.X509_CERT_KEY_2,
                          sample_certs.X509_IMDS]) + b'\n'
        fp3 = b'\n'.join([sample_certs.X509_CERT_3,
                          sample_certs.X509_CERT_KEY_3,
                          sample_certs.X509_IMDS]) + b'\n'
        ucp_calls = [
            mock.call(self.amp, sample_listener.id,
                      self.sl.default_tls_container.id + '.pem', fp1),
            mock.call(self.amp, sample_listener.id,
                      sconts[0].id + '.pem', fp2),
            mock.call(self.amp, sample_listener.id,
                      sconts[1].id + '.pem', fp3)
        ]
        self.driver.clients[API_VERSION].upload_cert_pem.assert_has_calls(
            ucp_calls, any_order=True)
        self.assertEqual(
            4, self.driver.clients[API_VERSION].upload_cert_pem.call_count)

    @mock.patch('oslo_context.context.RequestContext')
    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._upload_cert')
    def test_process_secret(self, mock_upload_cert, mock_oslo):
        # Test bypass if no secret_ref
        sample_listener = sample_configs_split.sample_listener_tuple(
            tls=True, sni=True)

        result = self.driver._process_secret(sample_listener, None)

        self.assertIsNone(result)
        self.driver.cert_manager.get_secret.assert_not_called()

        # Test the secret process
        sample_listener = sample_configs_split.sample_listener_tuple(
            tls=True, sni=True, client_ca_cert=True)
        fake_context = 'fake context'
        fake_secret = b'fake cert'
        mock_oslo.return_value = fake_context
        self.driver.cert_manager.get_secret.reset_mock()
        self.driver.cert_manager.get_secret.return_value = fake_secret
        ref_md5 = hashlib.md5(fake_secret).hexdigest()  # nosec
        ref_id = hashlib.sha1(fake_secret).hexdigest()  # nosec
        ref_name = '{id}.pem'.format(id=ref_id)

        result = self.driver._process_secret(
            sample_listener, sample_listener.client_ca_tls_certificate_id,
            self.amp, sample_listener.id)

        mock_oslo.assert_called_once_with(
            project_id=sample_listener.project_id)
        self.driver.cert_manager.get_secret.assert_called_once_with(
            fake_context, sample_listener.client_ca_tls_certificate_id)
        mock_upload_cert.assert_called_once_with(
            self.amp, sample_listener.id, pem=fake_secret,
            md5=ref_md5, name=ref_name)
        self.assertEqual(ref_name, result)
    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._process_pool_certs')
    def test__process_listener_pool_certs(self, mock_pool_cert):
        sample_listener = sample_configs_split.sample_listener_tuple(
            l7=True)
        ref_pool_cert_1 = {'client_cert': '/some/fake/cert-1.pem'}
        ref_pool_cert_2 = {'client_cert': '/some/fake/cert-2.pem'}
        mock_pool_cert.side_effect = [ref_pool_cert_1, ref_pool_cert_2]
        ref_cert_dict = {'sample_pool_id_1': ref_pool_cert_1,
                         'sample_pool_id_2': ref_pool_cert_2}

        result = self.driver._process_listener_pool_certs(
            sample_listener, self.amp, sample_listener.id)

        pool_certs_calls = [
            mock.call(sample_listener, sample_listener.default_pool,
                      self.amp, sample_listener.id),
            mock.call(sample_listener, sample_listener.pools[1],
                      self.amp, sample_listener.id)
        ]
        mock_pool_cert.assert_has_calls(pool_certs_calls, any_order=True)
        self.assertEqual(ref_cert_dict, result)

    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._process_secret')
    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._upload_cert')
    @mock.patch('octavia.common.tls_utils.cert_parser.build_pem')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test__process_pool_certs(self, mock_load_certs, mock_build_pem,
                                 mock_upload_cert, mock_secret):
        fake_cert_dir = '/fake/cert/dir'
        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="haproxy_amphora", base_cert_dir=fake_cert_dir)
        sample_listener = sample_configs_split.sample_listener_tuple(
            pool_cert=True, pool_ca_cert=True, pool_crl=True)
        pool_cert = data_models.TLSContainer(
            id=uuidutils.generate_uuid(), certificate='pool cert')
        pool_data = {'tls_cert': pool_cert, 'sni_certs': []}
        mock_load_certs.return_value = pool_data
        fake_pem = b'fake pem'
        mock_build_pem.return_value = fake_pem
        ref_md5 = hashlib.md5(fake_pem).hexdigest()  # nosec
        ref_name = '{id}.pem'.format(id=pool_cert.id)
        ref_path = '{cert_dir}/{list_id}/{name}'.format(
            cert_dir=fake_cert_dir, list_id=sample_listener.id,
            name=ref_name)
        ref_ca_name = 'fake_ca.pem'
        ref_ca_path = '{cert_dir}/{list_id}/{name}'.format(
            cert_dir=fake_cert_dir, list_id=sample_listener.id,
            name=ref_ca_name)
        ref_crl_name = 'fake_crl.pem'
        ref_crl_path = '{cert_dir}/{list_id}/{name}'.format(
            cert_dir=fake_cert_dir, list_id=sample_listener.id,
            name=ref_crl_name)
        ref_result = {'client_cert': ref_path, 'ca_cert': ref_ca_path,
                      'crl': ref_crl_path}
        mock_secret.side_effect = [ref_ca_name, ref_crl_name]

        result = self.driver._process_pool_certs(
            sample_listener, sample_listener.default_pool, self.amp,
            sample_listener.id)

        secret_calls = [
            mock.call(sample_listener,
                      sample_listener.default_pool.ca_tls_certificate_id,
                      self.amp, sample_listener.id),
            mock.call(sample_listener,
                      sample_listener.default_pool.crl_container_id,
                      self.amp, sample_listener.id)]

        mock_build_pem.assert_called_once_with(pool_cert)
        mock_upload_cert.assert_called_once_with(
            self.amp, sample_listener.id, pem=fake_pem,
            md5=ref_md5, name=ref_name)
        mock_secret.assert_has_calls(secret_calls)
        self.assertEqual(ref_result, result)

    def test_start(self):
        amp1 = mock.MagicMock()
        amp1.api_version = API_VERSION
        amp2 = mock.MagicMock()
        amp2.api_version = API_VERSION
        amp2.status = constants.DELETED
        loadbalancer = mock.MagicMock()
        loadbalancer.id = uuidutils.generate_uuid()
        loadbalancer.amphorae = [amp1, amp2]
        loadbalancer.vip = self.sv
        listener = mock.MagicMock()
        listener.id = uuidutils.generate_uuid()
        listener.protocol = constants.PROTOCOL_HTTP
        loadbalancer.listeners = [listener]
        listener.load_balancer = loadbalancer
        self.driver.clients[
            API_VERSION].start_listener.__name__ = 'start_listener'

        # Execute driver method
        self.driver.start(loadbalancer)

        self.driver.clients[
            API_VERSION].start_listener.assert_called_once_with(
            amp1, listener.id, None)

    def test_reload(self):
        amp1 = mock.MagicMock()
        amp1.api_version = API_VERSION
        amp2 = mock.MagicMock()
        amp2.api_version = API_VERSION
        amp2.status = constants.DELETED
        loadbalancer = mock.MagicMock()
        loadbalancer.id = uuidutils.generate_uuid()
        loadbalancer.amphorae = [amp1, amp2]
        loadbalancer.vip = self.sv
        listener = mock.MagicMock()
        listener.id = uuidutils.generate_uuid()
        listener.protocol = constants.PROTOCOL_HTTP
        loadbalancer.listeners = [listener]
        listener.load_balancer = loadbalancer
        self.driver.clients[
            API_VERSION].reload_listener.__name__ = 'reload_listener'

        # Execute driver method
        self.driver.reload(loadbalancer)

        self.driver.clients[
            API_VERSION].reload_listener.assert_called_once_with(
            amp1, listener.id, None)

    def test_start_with_amphora(self):
        # Execute driver method
        amp = mock.MagicMock()
        self.driver.clients[
            API_VERSION].start_listener.__name__ = 'start_listener'
        self.driver.start(self.lb, self.amp)

        self.driver.clients[
            API_VERSION].start_listener.assert_called_once_with(
            self.amp, self.sl.id, None)

        self.driver.clients[API_VERSION].start_listener.reset_mock()
        amp.status = constants.DELETED
        self.driver.start(self.lb, amp)
        self.driver.clients[API_VERSION].start_listener.assert_not_called()

    def test_udp_start(self):
        self.driver.clients[
            API_VERSION].start_listener.__name__ = 'start_listener'
        # Execute driver method
        self.driver.start(self.lb_udp)
        self.driver.clients[
            API_VERSION].start_listener.assert_called_once_with(
            self.amp, self.sl_udp.id, None)
    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._process_secret')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.get_host_names')
    def test_delete_second_listener(self, mock_cert, mock_load_crt,
                                    mock_secret):
        self.driver.clients[
            API_VERSION].delete_listener.__name__ = 'delete_listener'
        sl = sample_configs_split.sample_listener_tuple(
            tls=True, sni=True, client_ca_cert=True, client_crl_cert=True,
            recursive_nest=True)
        sl2 = sample_configs_split.sample_listener_tuple(
            id='sample_listener_id_2')
        sl.load_balancer.listeners.append(sl2)
        mock_cert.return_value = {'cn': sample_certs.X509_CERT_CN}
        mock_secret.side_effect = ['filename.pem', 'crl-filename.pem']
        sconts = []
        for sni_container in self.sl.sni_containers:
            sconts.append(sni_container.tls_container)
        mock_load_crt.side_effect = [
            {'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts},
            {'tls_cert': None, 'sni_certs': []}]
        self.driver.jinja_split.build_config.side_effect = ['fake_config']

        # Execute driver method
        self.driver.delete(sl)

        # Now just make sure we did a delete
        self.driver.clients[
            API_VERSION].delete_listener.assert_called_once_with(
            self.amp, self.sl.id)

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_delete_last_listener(self, mock_load_crt):
        self.driver.clients[
            API_VERSION].delete_listener.__name__ = 'delete_listener'
        sl = sample_configs_split.sample_listener_tuple(
            tls=True, sni=True, client_ca_cert=True, client_crl_cert=True,
            recursive_nest=True)
        mock_load_crt.side_effect = [
            {'tls_cert': sl.default_tls_container, 'sni_certs': None}]

        # Execute driver method
        self.driver.delete(sl)

        self.driver.clients[
            API_VERSION].delete_listener.assert_called_once_with(
            self.amp, sl.id)

    def test_udp_delete(self):
        self.driver.clients[
            API_VERSION].delete_listener.__name__ = 'delete_listener'
        # Execute driver method
        self.driver.delete(self.sl_udp)
        self.driver.clients[
            API_VERSION].delete_listener.assert_called_once_with(
            self.amp, self.sl_udp.id)

    def test_get_info(self):
        expected_info = {'haproxy_version': '1.6.3-1ubuntu0.1',
                         'api_version': API_VERSION}

        result = self.driver.get_info(self.amp)

        self.assertEqual(expected_info, result)

    def test_get_diagnostics(self):
        # TODO(johnsom) Implement once this exists on the amphora agent.
        result = self.driver.get_diagnostics(self.amp)
        self.assertIsNone(result)

    def test_finalize_amphora(self):
        # TODO(johnsom) Implement once this exists on the amphora agent.
        result = self.driver.finalize_amphora(self.amp)
        self.assertIsNone(result)

    def test_post_vip_plug(self):
        amphorae_network_config = mock.MagicMock()
        amphorae_network_config.get().vip_subnet.cidr = FAKE_CIDR
        amphorae_network_config.get().vip_subnet.gateway_ip = FAKE_GATEWAY
        amphorae_network_config.get().vip_subnet.host_routes = (
            self.host_routes)
        amphorae_network_config.get().vrrp_port = self.port

        self.driver.post_vip_plug(self.amp, self.lb,
                                  amphorae_network_config)

        self.driver.clients[API_VERSION].plug_vip.assert_called_once_with(
            self.amp, self.lb.vip.ip_address, self.subnet_info)

    def test_post_network_plug(self):
        # Test dhcp path
        port = network_models.Port(mac_address=FAKE_MAC_ADDRESS,
                                   fixed_ips=[],
                                   network=self.network)

        self.driver.post_network_plug(self.amp, port)

        self.driver.clients[API_VERSION].plug_network.assert_called_once_with(
            self.amp, dict(mac_address=FAKE_MAC_ADDRESS,
                           fixed_ips=[],
                           mtu=FAKE_MTU))

        self.driver.clients[API_VERSION].plug_network.reset_mock()

        # Test fixed IP path
        self.driver.post_network_plug(self.amp, self.port)

        self.driver.clients[API_VERSION].plug_network.assert_called_once_with(
            self.amp, dict(mac_address=FAKE_MAC_ADDRESS,
                           fixed_ips=[dict(ip_address='198.51.100.5',
                                           subnet_cidr='198.51.100.0/24',
                                           host_routes=[])],
                           mtu=FAKE_MTU))

    def test_post_network_plug_with_host_routes(self):
        SUBNET_ID = 'SUBNET_ID'
        FIXED_IP1 = '192.0.2.2'
        FIXED_IP2 = '192.0.2.3'
        SUBNET_CIDR = '192.0.2.0/24'
        DEST1 = '198.51.100.0/24'
        DEST2 = '203.0.113.0/24'
        NEXTHOP = '192.0.2.1'
        host_routes = [network_models.HostRoute(destination=DEST1,
                                                nexthop=NEXTHOP),
                       network_models.HostRoute(destination=DEST2,
                                                nexthop=NEXTHOP)]
        subnet = network_models.Subnet(id=SUBNET_ID, cidr=SUBNET_CIDR,
                                       ip_version=4,
                                       host_routes=host_routes)
        fixed_ips = [
            network_models.FixedIP(subnet_id=subnet.id,
                                   ip_address=FIXED_IP1, subnet=subnet),
            network_models.FixedIP(subnet_id=subnet.id,
                                   ip_address=FIXED_IP2, subnet=subnet)
        ]
        port = network_models.Port(mac_address=FAKE_MAC_ADDRESS,
                                   fixed_ips=fixed_ips,
                                   network=self.network)

        self.driver.post_network_plug(self.amp, port)

        expected_fixed_ips = [
            {'ip_address': FIXED_IP1, 'subnet_cidr': SUBNET_CIDR,
             'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP},
                             {'destination': DEST2, 'nexthop': NEXTHOP}]},
            {'ip_address': FIXED_IP2, 'subnet_cidr': SUBNET_CIDR,
             'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP},
                             {'destination': DEST2, 'nexthop': NEXTHOP}]}
        ]
        self.driver.clients[API_VERSION].plug_network.assert_called_once_with(
            self.amp, dict(mac_address=FAKE_MAC_ADDRESS,
                           fixed_ips=expected_fixed_ips,
                           mtu=FAKE_MTU))

    def test_get_haproxy_versions(self):
        ref_haproxy_versions = ['1', '6']
        result = self.driver._get_haproxy_versions(self.amp)
        self.driver.clients[API_VERSION].get_info.assert_called_once_with(
            self.amp)
        self.assertEqual(ref_haproxy_versions, result)

    def test_populate_amphora_api_version(self):
        # Normal path, populate the version
        # clear out any previous values
        ref_haproxy_version = list(map(int, API_VERSION.split('.')))
        mock_amp = mock.MagicMock()
        mock_amp.api_version = None

        result = self.driver._populate_amphora_api_version(mock_amp)

        self.assertEqual(API_VERSION, mock_amp.api_version)
        self.assertEqual(ref_haproxy_version, result)

        # Existing version passed in
        fake_version = '9999.9999'
        ref_haproxy_version = list(map(int, fake_version.split('.')))
        mock_amp = mock.MagicMock()
        mock_amp.api_version = fake_version

        result = self.driver._populate_amphora_api_version(mock_amp)

        self.assertEqual(fake_version, mock_amp.api_version)
        self.assertEqual(ref_haproxy_version, result)
    def test_update_amphora_agent_config(self):
        self.driver.update_amphora_agent_config(
            self.amp, octavia_utils.b('test'))
        self.driver.clients[
            API_VERSION].update_agent_config.assert_called_once_with(
            self.amp, octavia_utils.b('test'), timeout_dict=None)


class TestAmphoraAPIClientTest(base.TestCase):

    def setUp(self):
        super(TestAmphoraAPIClientTest, self).setUp()
        self.driver = driver.AmphoraAPIClient0_5()
        self.base_url = "https://192.0.2.77:9443/"
        self.base_url_ver = self.base_url + API_VERSION
        self.amp = models.Amphora(lb_network_ip='192.0.2.77',
                                  compute_id='123')
        self.amp.api_version = API_VERSION
        self.port_info = dict(mac_address=FAKE_MAC_ADDRESS)
        # Override with much lower values for testing purposes..
        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="haproxy_amphora", connection_max_retries=2)

        self.subnet_info = {'subnet_cidr': FAKE_CIDR,
                            'gateway': FAKE_GATEWAY,
                            'mac_address': FAKE_MAC_ADDRESS,
                            'vrrp_ip': self.amp.vrrp_ip}
        patcher = mock.patch('time.sleep').start()
        self.addCleanup(patcher.stop)
        self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
                             constants.REQ_READ_TIMEOUT: 2,
                             constants.CONN_MAX_RETRIES: 3,
                             constants.CONN_RETRY_INTERVAL: 4}

    def test_base_url(self):
        url = self.driver._base_url(FAKE_IP)
        self.assertEqual('https://192.0.2.10:9443/', url)
        url = self.driver._base_url(FAKE_IPV6, self.amp.api_version)
        self.assertEqual('https://[2001:db8::cafe]:9443/0.5/', url)
        url = self.driver._base_url(FAKE_IPV6_LLA, self.amp.api_version)
        self.assertEqual('https://[fe80::00ff:fe00:cafe%o-hm0]:9443/0.5/',
                         url)

    @mock.patch('requests.Session.get', side_effect=requests.ConnectionError)
    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.time.sleep')
    def test_request(self, mock_sleep, mock_get):
        self.assertRaises(driver_except.TimeOutException,
                          self.driver.request, 'get', self.amp,
                          'unavailableURL', self.timeout_dict)

    @requests_mock.mock()
    def test_get_api_version(self, mock_requests):
        ref_api_version = {'api_version': '0.1'}
        mock_requests.get('{base}/'.format(base=self.base_url),
                          json=ref_api_version)
        result = self.driver.get_api_version(self.amp)
        self.assertEqual(ref_api_version, result)

    @requests_mock.mock()
    def test_get_api_version_not_found(self, mock_requests):
        mock_requests.get('{base}/'.format(base=self.base_url),
                          status_code=404)
        self.assertRaises(exc.NotFound, self.driver.get_api_version,
                          self.amp)

    @requests_mock.mock()
    def test_get_info(self, m):
        info = {"hostname": "some_hostname", "version": "some_version",
                "api_version": "0.5", "uuid": FAKE_UUID_1}
        m.get("{base}/info".format(base=self.base_url_ver),
              json=info)
        information = self.driver.get_info(self.amp)
        self.assertEqual(info, information)

    @requests_mock.mock()
    def test_get_info_unauthorized(self, m):
        m.get("{base}/info".format(base=self.base_url_ver),
              status_code=401)
        self.assertRaises(exc.Unauthorized, self.driver.get_info, self.amp)

    @requests_mock.mock()
    def test_get_info_missing(self, m):
        m.get("{base}/info".format(base=self.base_url_ver),
              status_code=404,
              headers={'content-type': 'application/json'})
        self.assertRaises(exc.NotFound, self.driver.get_info, self.amp)

    @requests_mock.mock()
    def test_get_info_server_error(self, m):
        m.get("{base}/info".format(base=self.base_url_ver),
              status_code=500)
        self.assertRaises(exc.InternalServerError, self.driver.get_info,
                          self.amp)

    @requests_mock.mock()
    def test_get_info_service_unavailable(self, m):
        m.get("{base}/info".format(base=self.base_url_ver),
              status_code=503)
        self.assertRaises(exc.ServiceUnavailable, self.driver.get_info,
                          self.amp)
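    # NOTE(editor): the remaining cases exercise the client's status-code
    # handling: 401 maps to Unauthorized, 404 to NotFound, 500 to
    # InternalServerError and 503 to ServiceUnavailable.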
    @requests_mock.mock()
    def test_get_details(self, m):
        details = {"hostname": "some_hostname", "version": "some_version",
                   "api_version": "0.5", "uuid": FAKE_UUID_1,
                   "network_tx": "some_tx", "network_rx": "some_rx",
                   "active": True, "haproxy_count": 10}
        m.get("{base}/details".format(base=self.base_url_ver),
              json=details)
        amp_details = self.driver.get_details(self.amp)
        self.assertEqual(details, amp_details)

    @requests_mock.mock()
    def test_get_details_unauthorized(self, m):
        m.get("{base}/details".format(base=self.base_url_ver),
              status_code=401)
        self.assertRaises(exc.Unauthorized, self.driver.get_details,
                          self.amp)

    @requests_mock.mock()
    def test_get_details_missing(self, m):
        m.get("{base}/details".format(base=self.base_url_ver),
              status_code=404,
              headers={'content-type': 'application/json'})
        self.assertRaises(exc.NotFound, self.driver.get_details, self.amp)

    @requests_mock.mock()
    def test_get_details_server_error(self, m):
        m.get("{base}/details".format(base=self.base_url_ver),
              status_code=500)
        self.assertRaises(exc.InternalServerError, self.driver.get_details,
                          self.amp)

    @requests_mock.mock()
    def test_get_details_service_unavailable(self, m):
        m.get("{base}/details".format(base=self.base_url_ver),
              status_code=503)
        self.assertRaises(exc.ServiceUnavailable, self.driver.get_details,
                          self.amp)

    @requests_mock.mock()
    def test_get_all_listeners(self, m):
        listeners = [{"status": "ONLINE", "provisioning_status": "ACTIVE",
                      "type": "PASSIVE", "uuid": FAKE_UUID_1}]
        m.get("{base}/listeners".format(base=self.base_url_ver),
              json=listeners)
        all_listeners = self.driver.get_all_listeners(self.amp)
        self.assertEqual(listeners, all_listeners)

    @requests_mock.mock()
    def test_get_all_listeners_unauthorized(self, m):
        m.get("{base}/listeners".format(base=self.base_url_ver),
              status_code=401)
        self.assertRaises(exc.Unauthorized, self.driver.get_all_listeners,
                          self.amp)

    @requests_mock.mock()
    def test_get_all_listeners_missing(self, m):
        m.get("{base}/listeners".format(base=self.base_url_ver),
              status_code=404,
              headers={'content-type': 'application/json'})
        self.assertRaises(exc.NotFound, self.driver.get_all_listeners,
                          self.amp)

    @requests_mock.mock()
    def test_get_all_listeners_server_error(self, m):
        m.get("{base}/listeners".format(base=self.base_url_ver),
              status_code=500)
        self.assertRaises(exc.InternalServerError,
                          self.driver.get_all_listeners, self.amp)

    @requests_mock.mock()
    def test_get_all_listeners_service_unavailable(self, m):
        m.get("{base}/listeners".format(base=self.base_url_ver),
              status_code=503)
        self.assertRaises(exc.ServiceUnavailable,
                          self.driver.get_all_listeners, self.amp)

    @requests_mock.mock()
    def test_start_listener(self, m):
        m.put("{base}/listeners/{listener_id}/start".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1))
        self.driver.start_listener(self.amp, FAKE_UUID_1)
        self.assertTrue(m.called)

    @requests_mock.mock()
    def test_start_listener_missing(self, m):
        m.put("{base}/listeners/{listener_id}/start".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1),
            status_code=404,
            headers={'content-type': 'application/json'})
        self.assertRaises(exc.NotFound, self.driver.start_listener,
                          self.amp, FAKE_UUID_1)

    @requests_mock.mock()
    def test_start_listener_unauthorized(self, m):
        m.put("{base}/listeners/{listener_id}/start".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1),
            status_code=401)
        self.assertRaises(exc.Unauthorized, self.driver.start_listener,
                          self.amp, FAKE_UUID_1)

    @requests_mock.mock()
    def test_start_listener_server_error(self, m):
        m.put("{base}/listeners/{listener_id}/start".format(
            base=self.base_url_ver,
            listener_id=FAKE_UUID_1), status_code=500)
        self.assertRaises(exc.InternalServerError,
                          self.driver.start_listener, self.amp, FAKE_UUID_1)

    @requests_mock.mock()
    def test_start_listener_service_unavailable(self, m):
        m.put("{base}/listeners/{listener_id}/start".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1),
            status_code=503)
        self.assertRaises(exc.ServiceUnavailable, self.driver.start_listener,
                          self.amp, FAKE_UUID_1)

    @requests_mock.mock()
    def test_delete_listener(self, m):
        m.delete("{base}/listeners/{listener_id}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1), json={})
        self.driver.delete_listener(self.amp, FAKE_UUID_1)
        self.assertTrue(m.called)

    @requests_mock.mock()
    def test_delete_listener_missing(self, m):
        m.delete("{base}/listeners/{listener_id}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1),
            status_code=404,
            headers={'content-type': 'application/json'})
        self.driver.delete_listener(self.amp, FAKE_UUID_1)
        self.assertTrue(m.called)

    @requests_mock.mock()
    def test_delete_listener_unauthorized(self, m):
        m.delete("{base}/listeners/{listener_id}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1),
            status_code=401)
        self.assertRaises(exc.Unauthorized, self.driver.delete_listener,
                          self.amp, FAKE_UUID_1)

    @requests_mock.mock()
    def test_delete_listener_server_error(self, m):
        m.delete("{base}/listeners/{listener_id}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1),
            status_code=500)
        self.assertRaises(exc.InternalServerError,
                          self.driver.delete_listener, self.amp,
                          FAKE_UUID_1)

    @requests_mock.mock()
    def test_delete_listener_service_unavailable(self, m):
        m.delete("{base}/listeners/{listener_id}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1),
            status_code=503)
        self.assertRaises(exc.ServiceUnavailable,
                          self.driver.delete_listener, self.amp,
                          FAKE_UUID_1)

    @requests_mock.mock()
    def test_upload_cert_pem(self, m):
        m.put("{base}/listeners/{listener_id}/certificates/{filename}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1,
            filename=FAKE_PEM_FILENAME))
        self.driver.upload_cert_pem(self.amp, FAKE_UUID_1,
                                    FAKE_PEM_FILENAME, "some_file")
        self.assertTrue(m.called)

    @requests_mock.mock()
    def test_upload_invalid_cert_pem(self, m):
        m.put("{base}/listeners/{listener_id}/certificates/{filename}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1,
            filename=FAKE_PEM_FILENAME), status_code=400)
        self.assertRaises(exc.InvalidRequest, self.driver.upload_cert_pem,
                          self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME,
                          "some_file")

    @requests_mock.mock()
    def test_upload_cert_pem_unauthorized(self, m):
        m.put("{base}/listeners/{listener_id}/certificates/{filename}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1,
            filename=FAKE_PEM_FILENAME), status_code=401)
        self.assertRaises(exc.Unauthorized, self.driver.upload_cert_pem,
                          self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME,
                          "some_file")

    @requests_mock.mock()
    def test_upload_cert_pem_server_error(self, m):
        m.put("{base}/listeners/{listener_id}/certificates/{filename}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1,
            filename=FAKE_PEM_FILENAME), status_code=500)
        self.assertRaises(exc.InternalServerError,
                          self.driver.upload_cert_pem, self.amp,
                          FAKE_UUID_1, FAKE_PEM_FILENAME, "some_file")

    @requests_mock.mock()
    def test_upload_cert_pem_service_unavailable(self, m):
        m.put("{base}/listeners/{listener_id}/certificates/{filename}".format(
            base=self.base_url_ver, listener_id=FAKE_UUID_1,
            filename=FAKE_PEM_FILENAME), status_code=503)
        self.assertRaises(exc.ServiceUnavailable,
                          self.driver.upload_cert_pem, self.amp, FAKE_UUID_1,
FAKE_PEM_FILENAME, "some_file") @requests_mock.mock() def test_update_cert_for_rotation(self, m): m.put("{base}/certificate".format(base=self.base_url_ver)) resp_body = self.driver.update_cert_for_rotation(self.amp, "some_file") self.assertEqual(200, resp_body.status_code) @requests_mock.mock() def test_update_invalid_cert_for_rotation(self, m): m.put("{base}/certificate".format(base=self.base_url_ver), status_code=400) self.assertRaises(exc.InvalidRequest, self.driver.update_cert_for_rotation, self.amp, "some_file") @requests_mock.mock() def test_update_cert_for_rotation_unauthorized(self, m): m.put("{base}/certificate".format(base=self.base_url_ver), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.update_cert_for_rotation, self.amp, "some_file") @requests_mock.mock() def test_update_cert_for_rotation_error(self, m): m.put("{base}/certificate".format(base=self.base_url_ver), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.update_cert_for_rotation, self.amp, "some_file") @requests_mock.mock() def test_update_cert_for_rotation_unavailable(self, m): m.put("{base}/certificate".format(base=self.base_url_ver), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.update_cert_for_rotation, self.amp, "some_file") @requests_mock.mock() def test_get_cert_5sum(self, m): md5sum = {"md5sum": "some_real_sum"} m.get("{base}/listeners/{listener_id}/certificates/{filename}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), json=md5sum) sum_test = self.driver.get_cert_md5sum(self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) self.assertIsNotNone(sum_test) @requests_mock.mock() def test_get_cert_5sum_missing(self, m): m.get("{base}/listeners/{listener_id}/certificates/{filename}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=404, headers={'content-type': 'application/json'}) self.assertRaises(exc.NotFound, self.driver.get_cert_md5sum, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_get_cert_5sum_unauthorized(self, m): m.get("{base}/listeners/{listener_id}/certificates/{filename}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.get_cert_md5sum, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_get_cert_5sum_server_error(self, m): m.get("{base}/listeners/{listener_id}/certificates/{filename}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.get_cert_md5sum, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_get_cert_5sum_service_unavailable(self, m): m.get("{base}/listeners/{listener_id}/certificates/{filename}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.get_cert_md5sum, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_delete_cert_pem(self, m): m.delete( "{base}/listeners/{listener_id}/certificates/{filename}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME)) self.driver.delete_cert_pem(self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) self.assertTrue(m.called) @requests_mock.mock() def test_delete_cert_pem_missing(self, m): m.delete( "{base}/listeners/{listener_id}/certificates/{filename}".format( base=self.base_url_ver, 
                listener_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME),
            status_code=404,
            headers={'content-type': 'application/json'})
        self.driver.delete_cert_pem(self.amp, FAKE_UUID_1,
                                    FAKE_PEM_FILENAME)
        self.assertTrue(m.called)

    @requests_mock.mock()
    def test_delete_cert_pem_unauthorized(self, m):
        m.delete(
            "{base}/listeners/{listener_id}/certificates/{filename}".format(
                base=self.base_url_ver, listener_id=FAKE_UUID_1,
                filename=FAKE_PEM_FILENAME), status_code=401)
        self.assertRaises(exc.Unauthorized, self.driver.delete_cert_pem,
                          self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME)

    @requests_mock.mock()
    def test_delete_cert_pem_server_error(self, m):
        m.delete(
            "{base}/listeners/{listener_id}/certificates/{filename}".format(
                base=self.base_url_ver, listener_id=FAKE_UUID_1,
                filename=FAKE_PEM_FILENAME), status_code=500)
        self.assertRaises(exc.InternalServerError,
                          self.driver.delete_cert_pem, self.amp,
                          FAKE_UUID_1, FAKE_PEM_FILENAME)

    @requests_mock.mock()
    def test_delete_cert_pem_service_unavailable(self, m):
        m.delete(
            "{base}/listeners/{listener_id}/certificates/{filename}".format(
                base=self.base_url_ver, listener_id=FAKE_UUID_1,
                filename=FAKE_PEM_FILENAME), status_code=503)
        self.assertRaises(exc.ServiceUnavailable,
                          self.driver.delete_cert_pem, self.amp,
                          FAKE_UUID_1, FAKE_PEM_FILENAME)

    @requests_mock.mock()
    def test_upload_config(self, m):
        config = {"name": "fake_config"}
        m.put(
            "{base}/listeners/{amphora_id}/{listener_id}/haproxy".format(
                amphora_id=self.amp.id, base=self.base_url_ver,
                listener_id=FAKE_UUID_1),
            json=config)
        self.driver.upload_config(self.amp, FAKE_UUID_1, config)
        self.assertTrue(m.called)

    @requests_mock.mock()
    def test_upload_invalid_config(self, m):
        config = '{"name": "bad_config"}'
        m.put(
            "{base}/listeners/{amphora_id}/{listener_id}/haproxy".format(
                amphora_id=self.amp.id, base=self.base_url_ver,
                listener_id=FAKE_UUID_1),
            status_code=400)
        self.assertRaises(exc.InvalidRequest, self.driver.upload_config,
                          self.amp, FAKE_UUID_1, config)

    @requests_mock.mock()
    def test_upload_config_unauthorized(self, m):
        config = '{"name": "bad_config"}'
        m.put(
            "{base}/listeners/{amphora_id}/{listener_id}/haproxy".format(
                amphora_id=self.amp.id, base=self.base_url_ver,
                listener_id=FAKE_UUID_1),
            status_code=401)
        self.assertRaises(exc.Unauthorized, self.driver.upload_config,
                          self.amp, FAKE_UUID_1, config)

    @requests_mock.mock()
    def test_upload_config_server_error(self, m):
        config = '{"name": "bad_config"}'
        m.put(
            "{base}/listeners/{amphora_id}/{listener_id}/haproxy".format(
                amphora_id=self.amp.id, base=self.base_url_ver,
                listener_id=FAKE_UUID_1),
            status_code=500)
        self.assertRaises(exc.InternalServerError, self.driver.upload_config,
                          self.amp, FAKE_UUID_1, config)

    @requests_mock.mock()
    def test_upload_config_service_unavailable(self, m):
        config = '{"name": "bad_config"}'
        m.put(
            "{base}/listeners/{amphora_id}/{listener_id}/haproxy".format(
                amphora_id=self.amp.id, base=self.base_url_ver,
                listener_id=FAKE_UUID_1),
            status_code=503)
        self.assertRaises(exc.ServiceUnavailable, self.driver.upload_config,
                          self.amp, FAKE_UUID_1, config)

    @requests_mock.mock()
    def test_upload_udp_config(self, m):
        config = {"name": "fake_config"}
        m.put(
            "{base}/listeners/"
            "{amphora_id}/{listener_id}/udp_listener".format(
                amphora_id=self.amp.id, base=self.base_url_ver,
                listener_id=FAKE_UUID_1),
            json=config)
        self.driver.upload_udp_config(self.amp, FAKE_UUID_1, config)
        self.assertTrue(m.called)

    @requests_mock.mock()
    def test_upload_udp_invalid_config(self, m):
        config = '{"name": "bad_config"}'
        m.put(
            "{base}/listeners/"
"{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=400) self.assertRaises(exc.InvalidRequest, self.driver.upload_udp_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_udp_config_unauthorized(self, m): config = '{"name": "bad_config"}' m.put( "{base}/listeners/" "{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.upload_udp_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_udp_config_server_error(self, m): config = '{"name": "bad_config"}' m.put( "{base}/listeners/" "{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.upload_udp_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_udp_config_service_unavailable(self, m): config = '{"name": "bad_config"}' m.put( "{base}/listeners/" "{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.upload_udp_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_plug_vip(self, m): m.post("{base}/plug/vip/{vip}".format( base=self.base_url_ver, vip=FAKE_IP) ) self.driver.plug_vip(self.amp, FAKE_IP, self.subnet_info) self.assertTrue(m.called) @requests_mock.mock() def test_plug_vip_api_not_ready(self, m): m.post("{base}/plug/vip/{vip}".format( base=self.base_url_ver, vip=FAKE_IP), status_code=404, headers={'content-type': 'text/html'} ) self.assertRaises(driver_except.TimeOutException, self.driver.plug_vip, self.amp, FAKE_IP, self.subnet_info) self.assertTrue(m.called) @requests_mock.mock() def test_plug_network(self, m): m.post("{base}/plug/network".format( base=self.base_url_ver) ) self.driver.plug_network(self.amp, self.port_info) self.assertTrue(m.called) @requests_mock.mock() def test_upload_vrrp_config(self, m): config = '{"name": "bad_config"}' m.put("{base}/vrrp/upload".format( base=self.base_url_ver) ) self.driver.upload_vrrp_config(self.amp, config) self.assertTrue(m.called) @requests_mock.mock() def test_vrrp_action(self, m): action = 'start' m.put("{base}/vrrp/{action}".format(base=self.base_url_ver, action=action)) self.driver._vrrp_action(action, self.amp) self.assertTrue(m.called) @requests_mock.mock() def test_get_interface(self, m): interface = [{"interface": "eth1"}] ip_addr = '192.51.100.1' m.get("{base}/interface/{ip_addr}".format(base=self.base_url_ver, ip_addr=ip_addr), json=interface) self.driver.get_interface(self.amp, ip_addr) self.assertTrue(m.called) m.register_uri('GET', self.base_url_ver + '/interface/' + ip_addr, status_code=500, reason='FAIL', json='FAIL') self.assertRaises(exc.InternalServerError, self.driver.get_interface, self.amp, ip_addr) @requests_mock.mock() def test_update_agent_config(self, m): m.put("{base}/config".format(base=self.base_url_ver)) resp_body = self.driver.update_agent_config(self.amp, "some_file") self.assertEqual(200, resp_body.status_code) @requests_mock.mock() def test_update_agent_config_error(self, m): m.put("{base}/config".format(base=self.base_url_ver), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.update_agent_config, self.amp, "some_file") 
# File: octavia-6.2.2/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
import requests
import requests_mock

from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.amphorae.drivers.haproxy import exceptions as exc
from octavia.amphorae.drivers.haproxy import rest_api_driver as driver
from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils as octavia_utils
from octavia.db import models
from octavia.network import data_models as network_models
from octavia.tests.common import sample_certs
from octavia.tests.unit import base
from octavia.tests.unit.common.sample_configs import sample_configs_combined

API_VERSION = '1.0'
FAKE_CIDR = '198.51.100.0/24'
FAKE_GATEWAY = '192.51.100.1'
FAKE_IP = '192.0.2.10'
FAKE_IPV6 = '2001:db8::cafe'
FAKE_IPV6_LLA = 'fe80::00ff:fe00:cafe'
FAKE_PEM_FILENAME = "file_name"
FAKE_UUID_1 = uuidutils.generate_uuid()
FAKE_VRRP_IP = '10.1.0.1'
FAKE_MAC_ADDRESS = '123'
FAKE_MTU = 1450
FAKE_MEMBER_IP_PORT_NAME_1 = "10.0.0.10:1003"
FAKE_MEMBER_IP_PORT_NAME_2 = "10.0.0.11:1004"


class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):

    def setUp(self):
        super(TestHaproxyAmphoraLoadBalancerDriverTest, self).setUp()

        DEST1 = '198.51.100.0/24'
        DEST2 = '203.0.113.0/24'
        NEXTHOP = '192.0.2.1'

        self.driver = driver.HaproxyAmphoraLoadBalancerDriver()

        self.driver.cert_manager = mock.MagicMock()
        self.driver.cert_parser = mock.MagicMock()
        self.driver.clients = {
            'base': mock.MagicMock(),
            API_VERSION: mock.MagicMock()}
        self.driver.clients['base'].get_api_version.return_value = {
            'api_version': API_VERSION}
        self.driver.clients[API_VERSION].get_info.return_value = {
            'haproxy_version': u'1.6.3-1ubuntu0.1',
            'api_version': API_VERSION}
        self.driver.jinja_combo = mock.MagicMock()
        self.driver.udp_jinja = mock.MagicMock()

        # Build sample Listener and VIP configs
        self.sl = sample_configs_combined.sample_listener_tuple(
            tls=True, sni=True, client_ca_cert=True, client_crl_cert=True,
            recursive_nest=True)
        self.sl_udp = sample_configs_combined.sample_listener_tuple(
            proto=constants.PROTOCOL_UDP,
            persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
            persistence_timeout=33,
            persistence_granularity='255.255.0.0',
            monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)
        self.pool_has_cert = sample_configs_combined.sample_pool_tuple(
            pool_cert=True, pool_ca_cert=True, pool_crl=True)
        self.amp = self.sl.load_balancer.amphorae[0]
        self.sv = sample_configs_combined.sample_vip_tuple()
        self.lb = self.sl.load_balancer
        self.lb_udp = (
            sample_configs_combined.sample_lb_with_udp_listener_tuple())
        self.fixed_ip = mock.MagicMock()
        self.fixed_ip.ip_address = '198.51.100.5'
        self.fixed_ip.subnet.cidr = '198.51.100.0/24'
        self.network = network_models.Network(mtu=FAKE_MTU)
        self.port = network_models.Port(mac_address=FAKE_MAC_ADDRESS,
                                        fixed_ips=[self.fixed_ip],
                                        network=self.network)

        self.host_routes = [network_models.HostRoute(destination=DEST1,
                                                     nexthop=NEXTHOP),
                            network_models.HostRoute(destination=DEST2,
                                                     nexthop=NEXTHOP)]
        host_routes_data = [{'destination': DEST1, 'nexthop': NEXTHOP},
                            {'destination': DEST2, 'nexthop': NEXTHOP}]
        self.subnet_info = {'subnet_cidr': FAKE_CIDR,
                            'gateway': FAKE_GATEWAY,
                            'mac_address': FAKE_MAC_ADDRESS,
                            'vrrp_ip': self.amp.vrrp_ip,
                            'mtu': FAKE_MTU,
                            'host_routes': host_routes_data}
        self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
                             constants.REQ_READ_TIMEOUT: 2,
                             constants.CONN_MAX_RETRIES: 3,
                             constants.CONN_RETRY_INTERVAL: 4}

    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._process_secret')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_update_amphora_listeners(self, mock_load_cert, mock_secret):
        mock_amphora = mock.MagicMock()
        mock_amphora.id = 'mock_amphora_id'
        mock_amphora.api_version = API_VERSION
        mock_secret.return_value = 'filename.pem'
        mock_load_cert.return_value = {
            'tls_cert': self.sl.default_tls_container, 'sni_certs': [],
            'client_ca_cert': None}
        self.driver.jinja_combo.build_config.return_value = 'the_config'

        mock_empty_lb = mock.MagicMock()
        mock_empty_lb.listeners = []
        self.driver.update_amphora_listeners(mock_empty_lb, mock_amphora,
                                             self.timeout_dict)
        mock_load_cert.assert_not_called()
        self.driver.jinja_combo.build_config.assert_not_called()
        self.driver.clients[API_VERSION].upload_config.assert_not_called()
        self.driver.clients[API_VERSION].reload_listener.assert_not_called()

        self.driver.update_amphora_listeners(self.lb, mock_amphora,
                                             self.timeout_dict)

        self.driver.clients[API_VERSION].upload_config.assert_called_once_with(
            mock_amphora, self.lb.id, 'the_config',
            timeout_dict=self.timeout_dict)
        self.driver.clients[API_VERSION].reload_listener(
            mock_amphora, self.lb.id, timeout_dict=self.timeout_dict)

        mock_load_cert.reset_mock()
        self.driver.jinja_combo.build_config.reset_mock()
        self.driver.clients[API_VERSION].upload_config.reset_mock()
        self.driver.clients[API_VERSION].reload_listener.reset_mock()
        mock_amphora.status = constants.DELETED
        self.driver.update_amphora_listeners(self.lb, mock_amphora,
                                             self.timeout_dict)
        mock_load_cert.assert_not_called()
        self.driver.jinja_combo.build_config.assert_not_called()
        self.driver.clients[API_VERSION].upload_config.assert_not_called()
        self.driver.clients[API_VERSION].reload_listener.assert_not_called()

    @mock.patch('octavia.db.api.get_session')
    @mock.patch('octavia.db.repositories.ListenerRepository.update')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_update_amphora_listeners_bad_cert(
            self, mock_load_cert, mock_list_update, mock_get_session):
        mock_amphora = mock.MagicMock()
        mock_amphora.id = 'mock_amphora_id'
        mock_amphora.api_version = API_VERSION
        mock_get_session.return_value = 'fake_session'
        mock_load_cert.side_effect = [Exception]

        self.driver.update_amphora_listeners(self.lb, mock_amphora,
                                             self.timeout_dict)
        mock_list_update.assert_called_once_with(
            'fake_session', self.lb.listeners[0].id,
            provisioning_status=constants.ERROR,
            operating_status=constants.ERROR)
        self.driver.jinja_combo.build_config.assert_not_called()
        (self.driver.clients[API_VERSION].delete_listener.
         assert_called_once_with)(mock_amphora, self.lb.id)

    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver._process_secret')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.get_host_names')
    def test_update(self, mock_cert, mock_load_crt, mock_secret):
        mock_cert.return_value = {'cn': sample_certs.X509_CERT_CN}
        mock_secret.side_effect = ['filename.pem', 'crl-filename.pem']
        sconts = []
        for sni_container in self.sl.sni_containers:
            sconts.append(sni_container.tls_container)
        mock_load_crt.side_effect = [
            {'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts},
            {'tls_cert': None, 'sni_certs': []}]
        self.driver.clients[API_VERSION].get_cert_md5sum.side_effect = [
            exc.NotFound, 'Fake_MD5', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
            'CA_CERT_MD5']
        self.driver.jinja_combo.build_config.side_effect = ['fake_config']

        # Execute driver method
        self.driver.update(self.lb)

        # verify result
        gcm_calls = [
            mock.call(self.amp, self.lb.id,
                      self.sl.default_tls_container.id + '.pem',
                      ignore=(404,)),
            mock.call(self.amp, self.lb.id,
                      sconts[0].id + '.pem', ignore=(404,)),
            mock.call(self.amp, self.lb.id,
                      sconts[1].id + '.pem', ignore=(404,)),
        ]
        self.driver.clients[API_VERSION].get_cert_md5sum.assert_has_calls(
            gcm_calls, any_order=True)

        # this is called three times (last MD5 matches)
        fp1 = b'\n'.join([sample_certs.X509_CERT,
                          sample_certs.X509_CERT_KEY,
                          sample_certs.X509_IMDS]) + b'\n'
        fp2 = b'\n'.join([sample_certs.X509_CERT_2,
                          sample_certs.X509_CERT_KEY_2,
                          sample_certs.X509_IMDS]) + b'\n'
        fp3 = b'\n'.join([sample_certs.X509_CERT_3,
                          sample_certs.X509_CERT_KEY_3,
                          sample_certs.X509_IMDS]) + b'\n'
        ucp_calls = [
            mock.call(self.amp, self.lb.id,
                      self.sl.default_tls_container.id + '.pem', fp1),
            mock.call(self.amp, self.lb.id, sconts[0].id + '.pem', fp2),
            mock.call(self.amp, self.lb.id, sconts[1].id + '.pem', fp3),
        ]
        self.driver.clients[API_VERSION].upload_cert_pem.assert_has_calls(
            ucp_calls, any_order=True)

        # upload only one config file
        self.driver.clients[API_VERSION].upload_config.assert_called_once_with(
            self.amp, self.lb.id, 'fake_config', timeout_dict=None)
        # start should be called once
        self.driver.clients[
            API_VERSION].reload_listener.assert_called_once_with(
            self.amp, self.lb.id, timeout_dict=None)
        secret_calls = [
            mock.call(self.sl, self.sl.client_ca_tls_certificate_id,
                      self.amp, self.lb.id),
            mock.call(self.sl, self.sl.client_crl_container_id,
                      self.amp, self.lb.id)
        ]
        mock_secret.assert_has_calls(secret_calls)

    def test_udp_update(self):
        self.driver.udp_jinja.build_config.side_effect = ['fake_udp_config']

        # Execute driver method
        self.driver.update(self.lb_udp)

        # upload only one config file
        self.driver.clients[
            API_VERSION].upload_udp_config.assert_called_once_with(
            self.amp, self.sl_udp.id, 'fake_udp_config', timeout_dict=None)

        # start should be called once
        self.driver.clients[
            API_VERSION].reload_listener.assert_called_once_with(
            self.amp, self.sl_udp.id, timeout_dict=None)

    def test_upload_cert_amp(self):
        self.driver.upload_cert_amp(self.amp, octavia_utils.b('test'))
        self.driver.clients[
            API_VERSION].update_cert_for_rotation.assert_called_once_with(
            self.amp, octavia_utils.b('test'))

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test__process_tls_certificates_no_ca_cert(self, mock_load_crt):
        sample_listener = sample_configs_combined.sample_listener_tuple(
            tls=True, sni=True)
        sconts = []
sample_listener.sni_containers: sconts.append(sni_container.tls_container) mock_load_crt.return_value = { 'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts } self.driver.clients[API_VERSION].get_cert_md5sum.side_effect = [ exc.NotFound, 'Fake_MD5', 'aaaaa', 'aaaaa'] self.driver._process_tls_certificates( sample_listener, self.amp, sample_listener.load_balancer.id) gcm_calls = [ mock.call(self.amp, self.lb.id, self.sl.default_tls_container.id + '.pem', ignore=(404,)), mock.call(self.amp, self.lb.id, sconts[0].id + '.pem', ignore=(404,)), mock.call(self.amp, self.lb.id, sconts[1].id + '.pem', ignore=(404,)) ] self.driver.clients[API_VERSION].get_cert_md5sum.assert_has_calls( gcm_calls, any_order=True) fp1 = b'\n'.join([sample_certs.X509_CERT, sample_certs.X509_CERT_KEY, sample_certs.X509_IMDS]) + b'\n' fp2 = b'\n'.join([sample_certs.X509_CERT_2, sample_certs.X509_CERT_KEY_2, sample_certs.X509_IMDS]) + b'\n' fp3 = b'\n'.join([sample_certs.X509_CERT_3, sample_certs.X509_CERT_KEY_3, sample_certs.X509_IMDS]) + b'\n' ucp_calls = [ mock.call(self.amp, self.lb.id, self.sl.default_tls_container.id + '.pem', fp1), mock.call(self.amp, self.lb.id, sconts[0].id + '.pem', fp2), mock.call(self.amp, self.lb.id, sconts[1].id + '.pem', fp3) ] self.driver.clients[API_VERSION].upload_cert_pem.assert_has_calls( ucp_calls, any_order=True) self.assertEqual( 4, self.driver.clients[API_VERSION].upload_cert_pem.call_count) @mock.patch('oslo_context.context.RequestContext') @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 'HaproxyAmphoraLoadBalancerDriver._upload_cert') def test_process_secret(self, mock_upload_cert, mock_oslo): # Test bypass if no secret_ref sample_listener = sample_configs_combined.sample_listener_tuple( tls=True, sni=True) result = self.driver._process_secret(sample_listener, None) self.assertIsNone(result) self.driver.cert_manager.get_secret.assert_not_called() # Test the secret process sample_listener = sample_configs_combined.sample_listener_tuple( tls=True, sni=True, client_ca_cert=True) fake_context = 'fake context' fake_secret = b'fake cert' mock_oslo.return_value = fake_context self.driver.cert_manager.get_secret.reset_mock() self.driver.cert_manager.get_secret.return_value = fake_secret ref_md5 = hashlib.md5(fake_secret).hexdigest() # nosec ref_id = hashlib.sha1(fake_secret).hexdigest() # nosec ref_name = '{id}.pem'.format(id=ref_id) result = self.driver._process_secret( sample_listener, sample_listener.client_ca_tls_certificate_id, self.amp, sample_listener.id) mock_oslo.assert_called_once_with( project_id=sample_listener.project_id) self.driver.cert_manager.get_secret.assert_called_once_with( fake_context, sample_listener.client_ca_tls_certificate_id) mock_upload_cert.assert_called_once_with( self.amp, sample_listener.id, pem=fake_secret, md5=ref_md5, name=ref_name) self.assertEqual(ref_name, result) @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
'HaproxyAmphoraLoadBalancerDriver._process_pool_certs') def test__process_listener_pool_certs(self, mock_pool_cert): sample_listener = sample_configs_combined.sample_listener_tuple( l7=True) ref_pool_cert_1 = {'client_cert': '/some/fake/cert-1.pem'} ref_pool_cert_2 = {'client_cert': '/some/fake/cert-2.pem'} mock_pool_cert.side_effect = [ref_pool_cert_1, ref_pool_cert_2] ref_cert_dict = {'sample_pool_id_1': ref_pool_cert_1, 'sample_pool_id_2': ref_pool_cert_2} result = self.driver._process_listener_pool_certs( sample_listener, self.amp, sample_listener.load_balancer.id) pool_certs_calls = [ mock.call(sample_listener, sample_listener.default_pool, self.amp, sample_listener.load_balancer.id), mock.call(sample_listener, sample_listener.pools[1], self.amp, sample_listener.load_balancer.id) ] mock_pool_cert.assert_has_calls(pool_certs_calls, any_order=True) self.assertEqual(ref_cert_dict, result) @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 'HaproxyAmphoraLoadBalancerDriver._process_secret') @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 'HaproxyAmphoraLoadBalancerDriver._upload_cert') @mock.patch('octavia.common.tls_utils.cert_parser.build_pem') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test__process_pool_certs(self, mock_load_certs, mock_build_pem, mock_upload_cert, mock_secret): fake_cert_dir = '/fake/cert/dir' conf = oslo_fixture.Config(cfg.CONF) conf.config(group="haproxy_amphora", base_cert_dir=fake_cert_dir) sample_listener = sample_configs_combined.sample_listener_tuple( pool_cert=True, pool_ca_cert=True, pool_crl=True) pool_cert = data_models.TLSContainer( id=uuidutils.generate_uuid(), certificate='pool cert') pool_data = {'tls_cert': pool_cert, 'sni_certs': []} mock_load_certs.return_value = pool_data fake_pem = b'fake pem' mock_build_pem.return_value = fake_pem ref_md5 = hashlib.md5(fake_pem).hexdigest() # nosec ref_name = '{id}.pem'.format(id=pool_cert.id) ref_path = '{cert_dir}/{lb_id}/{name}'.format( cert_dir=fake_cert_dir, lb_id=sample_listener.load_balancer.id, name=ref_name) ref_ca_name = 'fake_ca.pem' ref_ca_path = '{cert_dir}/{lb_id}/{name}'.format( cert_dir=fake_cert_dir, lb_id=sample_listener.load_balancer.id, name=ref_ca_name) ref_crl_name = 'fake_crl.pem' ref_crl_path = '{cert_dir}/{lb_id}/{name}'.format( cert_dir=fake_cert_dir, lb_id=sample_listener.load_balancer.id, name=ref_crl_name) ref_result = {'client_cert': ref_path, 'ca_cert': ref_ca_path, 'crl': ref_crl_path} mock_secret.side_effect = [ref_ca_name, ref_crl_name] result = self.driver._process_pool_certs( sample_listener, sample_listener.default_pool, self.amp, sample_listener.load_balancer.id) secret_calls = [ mock.call(sample_listener, sample_listener.default_pool.ca_tls_certificate_id, self.amp, sample_listener.load_balancer.id), mock.call(sample_listener, sample_listener.default_pool.crl_container_id, self.amp, sample_listener.load_balancer.id)] mock_build_pem.assert_called_once_with(pool_cert) mock_upload_cert.assert_called_once_with( self.amp, sample_listener.load_balancer.id, pem=fake_pem, md5=ref_md5, name=ref_name) mock_secret.assert_has_calls(secret_calls) self.assertEqual(ref_result, result) def test_start(self): amp1 = mock.MagicMock() amp1.api_version = API_VERSION amp2 = mock.MagicMock() amp2.api_version = API_VERSION amp2.status = constants.DELETED loadbalancer = mock.MagicMock() loadbalancer.id = uuidutils.generate_uuid() loadbalancer.amphorae = [amp1, amp2] loadbalancer.vip = self.sv listener = mock.MagicMock() listener.id = 
uuidutils.generate_uuid() listener.protocol = constants.PROTOCOL_HTTP loadbalancer.listeners = [listener] listener.load_balancer = loadbalancer self.driver.clients[ API_VERSION].start_listener.__name__ = 'start_listener' # Execute driver method self.driver.start(loadbalancer) self.driver.clients[ API_VERSION].start_listener.assert_called_once_with( amp1, loadbalancer.id, None) def test_reload(self): amp1 = mock.MagicMock() amp1.api_version = API_VERSION amp2 = mock.MagicMock() amp2.api_version = API_VERSION amp2.status = constants.DELETED loadbalancer = mock.MagicMock() loadbalancer.id = uuidutils.generate_uuid() loadbalancer.amphorae = [amp1, amp2] loadbalancer.vip = self.sv listener = mock.MagicMock() listener.id = uuidutils.generate_uuid() listener.protocol = constants.PROTOCOL_HTTP loadbalancer.listeners = [listener] listener.load_balancer = loadbalancer self.driver.clients[ API_VERSION].reload_listener.__name__ = 'reload_listener' # Execute driver method self.driver.reload(loadbalancer) self.driver.clients[ API_VERSION].reload_listener.assert_called_once_with( amp1, loadbalancer.id, None) def test_start_with_amphora(self): # Execute driver method amp = mock.MagicMock() self.driver.clients[ API_VERSION].start_listener.__name__ = 'start_listener' self.driver.start(self.lb, self.amp) self.driver.clients[ API_VERSION].start_listener.assert_called_once_with( self.amp, self.lb.id, None) self.driver.clients[API_VERSION].start_listener.reset_mock() amp.status = constants.DELETED self.driver.start(self.lb, amp) self.driver.clients[API_VERSION].start_listener.assert_not_called() def test_udp_start(self): self.driver.clients[ API_VERSION].start_listener.__name__ = 'start_listener' # Execute driver method self.driver.start(self.lb_udp) self.driver.clients[ API_VERSION].start_listener.assert_called_once_with( self.amp, self.sl_udp.id, None) @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
'HaproxyAmphoraLoadBalancerDriver._process_secret') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') @mock.patch('octavia.common.tls_utils.cert_parser.get_host_names') def test_delete_second_listener(self, mock_cert, mock_load_crt, mock_secret): self.driver.clients[ API_VERSION].delete_listener.__name__ = 'delete_listener' sl = sample_configs_combined.sample_listener_tuple( tls=True, sni=True, client_ca_cert=True, client_crl_cert=True, recursive_nest=True) sl2 = sample_configs_combined.sample_listener_tuple( id='sample_listener_id_2') sl.load_balancer.listeners.append(sl2) mock_cert.return_value = {'cn': sample_certs.X509_CERT_CN} mock_secret.side_effect = ['filename.pem', 'crl-filename.pem'] sconts = [] for sni_container in self.sl.sni_containers: sconts.append(sni_container.tls_container) mock_load_crt.side_effect = [{ 'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts}, {'tls_cert': None, 'sni_certs': []}] self.driver.jinja_combo.build_config.side_effect = ['fake_config'] # Execute driver method self.driver.delete(sl) # All of the pem files should be removed dcp_calls = [ mock.call(self.amp, sl.load_balancer.id, self.sl.default_tls_container.id + '.pem'), mock.call(self.amp, sl.load_balancer.id, sconts[0].id + '.pem'), mock.call(self.amp, sl.load_balancer.id, sconts[1].id + '.pem'), ] self.driver.clients[API_VERSION].delete_cert_pem.assert_has_calls( dcp_calls, any_order=True) # Now just make sure we did an update and not a delete self.driver.clients[API_VERSION].delete_listener.assert_not_called() self.driver.clients[API_VERSION].upload_config.assert_called_once_with( self.amp, sl.load_balancer.id, 'fake_config', timeout_dict=None) # start should be called once self.driver.clients[ API_VERSION].reload_listener.assert_called_once_with( self.amp, sl.load_balancer.id, timeout_dict=None) @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
'HaproxyAmphoraLoadBalancerDriver._process_secret') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') @mock.patch('octavia.common.tls_utils.cert_parser.get_host_names') def test_delete_second_listener_active_standby(self, mock_cert, mock_load_crt, mock_secret): self.driver.clients[ API_VERSION].delete_listener.__name__ = 'delete_listener' sl = sample_configs_combined.sample_listener_tuple( tls=True, sni=True, client_ca_cert=True, client_crl_cert=True, recursive_nest=True, topology=constants.TOPOLOGY_ACTIVE_STANDBY) sl2 = sample_configs_combined.sample_listener_tuple( id='sample_listener_id_2', topology=constants.TOPOLOGY_ACTIVE_STANDBY) sl.load_balancer.listeners.append(sl2) mock_cert.return_value = {'cn': sample_certs.X509_CERT_CN} mock_secret.side_effect = ['filename.pem', 'crl-filename.pem', 'filename.pem', 'crl-filename.pem'] sconts = [] for sni_container in self.sl.sni_containers: sconts.append(sni_container.tls_container) mock_load_crt.side_effect = [{ 'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts}, {'tls_cert': None, 'sni_certs': []}, {'tls_cert': None, 'sni_certs': []}, {'tls_cert': None, 'sni_certs': []}] self.driver.jinja_combo.build_config.side_effect = [ 'fake_config', 'fake_config'] # Execute driver method self.driver.delete(sl) amp1 = sl.load_balancer.amphorae[0] amp2 = sl.load_balancer.amphorae[1] # All of the pem files should be removed (using amp1 or amp2) dcp_calls_list = [ [ mock.call(amp1, sl.load_balancer.id, sl.default_tls_container.id + '.pem'), mock.call(amp2, sl.load_balancer.id, sl.default_tls_container.id + '.pem') ], [ mock.call(amp1, sl.load_balancer.id, sconts[0].id + '.pem'), mock.call(amp2, sl.load_balancer.id, sconts[0].id + '.pem') ], [ mock.call(amp1, sl.load_balancer.id, sconts[1].id + '.pem'), mock.call(amp2, sl.load_balancer.id, sconts[1].id + '.pem') ] ] mock_calls = ( self.driver.clients[API_VERSION].delete_cert_pem.mock_calls) for dcp_calls in dcp_calls_list: # Ensure that at least one call in each pair has been seen if (dcp_calls[0] not in mock_calls and dcp_calls[1] not in mock_calls): raise Exception("%s not found in %s" % (dcp_calls, mock_calls)) # Now just make sure we did an update and not a delete self.driver.clients[API_VERSION].delete_listener.assert_not_called() upload_config_calls = [ mock.call(amp1, sl.load_balancer.id, 'fake_config', timeout_dict=None), mock.call(amp2, sl.load_balancer.id, 'fake_config', timeout_dict=None) ] self.driver.clients[API_VERSION].upload_config.assert_has_calls( upload_config_calls, any_order=True) # start should be called once per amp reload_listener_calls = [ mock.call(amp1, sl.load_balancer.id, timeout_dict=None), mock.call(amp2, sl.load_balancer.id, timeout_dict=None) ] self.driver.clients[ API_VERSION].reload_listener.assert_has_calls( reload_listener_calls, any_order=True) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_delete_last_listener(self, mock_load_crt): self.driver.clients[ API_VERSION].delete_listener.__name__ = 'delete_listener' sl = sample_configs_combined.sample_listener_tuple( tls=True, sni=True, client_ca_cert=True, client_crl_cert=True, recursive_nest=True) mock_load_crt.side_effect = [{ 'tls_cert': sl.default_tls_container, 'sni_certs': None}] # Execute driver method self.driver.delete(sl) self.driver.clients[ API_VERSION].delete_listener.assert_called_once_with( self.amp, sl.load_balancer.id) def test_udp_delete(self): self.driver.clients[ API_VERSION].delete_listener.__name__ = 'delete_listener' # Execute 
driver method self.driver.delete(self.sl_udp) self.driver.clients[ API_VERSION].delete_listener.assert_called_once_with( self.amp, self.sl_udp.id) def test_get_info(self): expected_info = {'haproxy_version': '1.6.3-1ubuntu0.1', 'api_version': '1.0'} result = self.driver.get_info(self.amp) self.assertEqual(expected_info, result) def test_get_diagnostics(self): # TODO(johnsom) Implement once this exists on the amphora agent. result = self.driver.get_diagnostics(self.amp) self.assertIsNone(result) def test_finalize_amphora(self): # TODO(johnsom) Implement once this exists on the amphora agent. result = self.driver.finalize_amphora(self.amp) self.assertIsNone(result) def test_post_vip_plug(self): amphorae_network_config = mock.MagicMock() amphorae_network_config.get().vip_subnet.cidr = FAKE_CIDR amphorae_network_config.get().vip_subnet.gateway_ip = FAKE_GATEWAY amphorae_network_config.get().vip_subnet.host_routes = self.host_routes amphorae_network_config.get().vrrp_port = self.port self.driver.post_vip_plug(self.amp, self.lb, amphorae_network_config) self.driver.clients[API_VERSION].plug_vip.assert_called_once_with( self.amp, self.lb.vip.ip_address, self.subnet_info) def test_post_network_plug(self): # Test dhcp path port = network_models.Port(mac_address=FAKE_MAC_ADDRESS, fixed_ips=[], network=self.network) self.driver.post_network_plug(self.amp, port) self.driver.clients[API_VERSION].plug_network.assert_called_once_with( self.amp, dict(mac_address=FAKE_MAC_ADDRESS, fixed_ips=[], mtu=FAKE_MTU)) self.driver.clients[API_VERSION].plug_network.reset_mock() # Test fixed IP path self.driver.post_network_plug(self.amp, self.port) self.driver.clients[API_VERSION].plug_network.assert_called_once_with( self.amp, dict(mac_address=FAKE_MAC_ADDRESS, fixed_ips=[dict(ip_address='198.51.100.5', subnet_cidr='198.51.100.0/24', host_routes=[])], mtu=FAKE_MTU)) def test_post_network_plug_with_host_routes(self): SUBNET_ID = 'SUBNET_ID' FIXED_IP1 = '192.0.2.2' FIXED_IP2 = '192.0.2.3' SUBNET_CIDR = '192.0.2.0/24' DEST1 = '198.51.100.0/24' DEST2 = '203.0.113.0/24' NEXTHOP = '192.0.2.1' host_routes = [network_models.HostRoute(destination=DEST1, nexthop=NEXTHOP), network_models.HostRoute(destination=DEST2, nexthop=NEXTHOP)] subnet = network_models.Subnet(id=SUBNET_ID, cidr=SUBNET_CIDR, ip_version=4, host_routes=host_routes) fixed_ips = [ network_models.FixedIP(subnet_id=subnet.id, ip_address=FIXED_IP1, subnet=subnet), network_models.FixedIP(subnet_id=subnet.id, ip_address=FIXED_IP2, subnet=subnet) ] port = network_models.Port(mac_address=FAKE_MAC_ADDRESS, fixed_ips=fixed_ips, network=self.network) self.driver.post_network_plug(self.amp, port) expected_fixed_ips = [ {'ip_address': FIXED_IP1, 'subnet_cidr': SUBNET_CIDR, 'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP}, {'destination': DEST2, 'nexthop': NEXTHOP}]}, {'ip_address': FIXED_IP2, 'subnet_cidr': SUBNET_CIDR, 'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP}, {'destination': DEST2, 'nexthop': NEXTHOP}]} ] self.driver.clients[API_VERSION].plug_network.assert_called_once_with( self.amp, dict(mac_address=FAKE_MAC_ADDRESS, fixed_ips=expected_fixed_ips, mtu=FAKE_MTU)) def test_get_haproxy_versions(self): ref_haproxy_versions = ['1', '6'] result = self.driver._get_haproxy_versions(self.amp) self.driver.clients[API_VERSION].get_info.assert_called_once_with( self.amp) self.assertEqual(ref_haproxy_versions, result) def test_populate_amphora_api_version(self): # Normal path, populate the version # clear out any previous values ref_haproxy_version = 
list(map(int, API_VERSION.split('.'))) mock_amp = mock.MagicMock() mock_amp.api_version = None result = self.driver._populate_amphora_api_version(mock_amp) self.assertEqual(API_VERSION, mock_amp.api_version) self.assertEqual(ref_haproxy_version, result) # Existing version passed in fake_version = '9999.9999' ref_haproxy_version = list(map(int, fake_version.split('.'))) mock_amp = mock.MagicMock() mock_amp.api_version = fake_version result = self.driver._populate_amphora_api_version(mock_amp) self.assertEqual(fake_version, mock_amp.api_version) self.assertEqual(ref_haproxy_version, result) def test_update_amphora_agent_config(self): self.driver.update_amphora_agent_config( self.amp, octavia_utils.b('test')) self.driver.clients[ API_VERSION].update_agent_config.assert_called_once_with( self.amp, octavia_utils.b('test'), timeout_dict=None) class TestAmphoraAPIClientTest(base.TestCase): def setUp(self): super(TestAmphoraAPIClientTest, self).setUp() self.driver = driver.AmphoraAPIClient1_0() self.base_url = "https://192.0.2.77:9443/" self.base_url_ver = self.base_url + "1.0" self.amp = models.Amphora(lb_network_ip='192.0.2.77', compute_id='123') self.amp.api_version = API_VERSION self.port_info = dict(mac_address=FAKE_MAC_ADDRESS) # Override with much lower values for testing purposes.. conf = oslo_fixture.Config(cfg.CONF) conf.config(group="haproxy_amphora", connection_max_retries=2) self.subnet_info = {'subnet_cidr': FAKE_CIDR, 'gateway': FAKE_GATEWAY, 'mac_address': FAKE_MAC_ADDRESS, 'vrrp_ip': self.amp.vrrp_ip} patcher = mock.patch('time.sleep').start() self.addCleanup(patcher.stop) self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1, constants.REQ_READ_TIMEOUT: 2, constants.CONN_MAX_RETRIES: 3, constants.CONN_RETRY_INTERVAL: 4} def test_base_url(self): url = self.driver._base_url(FAKE_IP) self.assertEqual('https://192.0.2.10:9443/', url) url = self.driver._base_url(FAKE_IPV6, self.amp.api_version) self.assertEqual('https://[2001:db8::cafe]:9443/1.0/', url) url = self.driver._base_url(FAKE_IPV6_LLA, self.amp.api_version) self.assertEqual('https://[fe80::00ff:fe00:cafe%o-hm0]:9443/1.0/', url) @mock.patch('requests.Session.get', side_effect=requests.ConnectionError) @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.time.sleep') def test_request(self, mock_sleep, mock_get): self.assertRaises(driver_except.TimeOutException, self.driver.request, 'get', self.amp, 'unavailableURL', self.timeout_dict) @requests_mock.mock() def test_get_api_version(self, mock_requests): ref_api_version = {'api_version': '0.1'} mock_requests.get('{base}/'.format(base=self.base_url), json=ref_api_version) result = self.driver.get_api_version(self.amp) self.assertEqual(ref_api_version, result) @requests_mock.mock() def test_get_info(self, m): info = {"hostname": "some_hostname", "version": "some_version", "api_version": "0.5", "uuid": FAKE_UUID_1} m.get("{base}/info".format(base=self.base_url_ver), json=info) information = self.driver.get_info(self.amp) self.assertEqual(info, information) @requests_mock.mock() def test_get_info_unauthorized(self, m): m.get("{base}/info".format(base=self.base_url_ver), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.get_info, self.amp) @requests_mock.mock() def test_get_info_missing(self, m): m.get("{base}/info".format(base=self.base_url_ver), status_code=404, headers={'content-type': 'application/json'}) self.assertRaises(exc.NotFound, self.driver.get_info, self.amp) @requests_mock.mock() def test_get_info_server_error(self, m): 
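        # A 500 from the amphora agent API should surface as
        # exc.InternalServerError, matching the 401/404 cases above and the
        # 503 case below.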
m.get("{base}/info".format(base=self.base_url_ver), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.get_info, self.amp) @requests_mock.mock() def test_get_info_service_unavailable(self, m): m.get("{base}/info".format(base=self.base_url_ver), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.get_info, self.amp) @requests_mock.mock() def test_get_details(self, m): details = {"hostname": "some_hostname", "version": "some_version", "api_version": "0.5", "uuid": FAKE_UUID_1, "network_tx": "some_tx", "network_rx": "some_rx", "active": True, "haproxy_count": 10} m.get("{base}/details".format(base=self.base_url_ver), json=details) amp_details = self.driver.get_details(self.amp) self.assertEqual(details, amp_details) @requests_mock.mock() def test_get_details_unauthorized(self, m): m.get("{base}/details".format(base=self.base_url_ver), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.get_details, self.amp) @requests_mock.mock() def test_get_details_missing(self, m): m.get("{base}/details".format(base=self.base_url_ver), status_code=404, headers={'content-type': 'application/json'}) self.assertRaises(exc.NotFound, self.driver.get_details, self.amp) @requests_mock.mock() def test_get_details_server_error(self, m): m.get("{base}/details".format(base=self.base_url_ver), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.get_details, self.amp) @requests_mock.mock() def test_get_details_service_unavailable(self, m): m.get("{base}/details".format(base=self.base_url_ver), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.get_details, self.amp) @requests_mock.mock() def test_get_all_listeners(self, m): listeners = [{"status": "ONLINE", "provisioning_status": "ACTIVE", "type": "PASSIVE", "uuid": FAKE_UUID_1}] m.get("{base}/listeners".format(base=self.base_url_ver), json=listeners) all_listeners = self.driver.get_all_listeners(self.amp) self.assertEqual(listeners, all_listeners) @requests_mock.mock() def test_get_all_listeners_unauthorized(self, m): m.get("{base}/listeners".format(base=self.base_url_ver), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.get_all_listeners, self.amp) @requests_mock.mock() def test_get_all_listeners_missing(self, m): m.get("{base}/listeners".format(base=self.base_url_ver), status_code=404, headers={'content-type': 'application/json'}) self.assertRaises(exc.NotFound, self.driver.get_all_listeners, self.amp) @requests_mock.mock() def test_get_all_listeners_server_error(self, m): m.get("{base}/listeners".format(base=self.base_url_ver), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.get_all_listeners, self.amp) @requests_mock.mock() def test_get_all_listeners_service_unavailable(self, m): m.get("{base}/listeners".format(base=self.base_url_ver), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.get_all_listeners, self.amp) @requests_mock.mock() def test_start_loadbalancer(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/start".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1)) self.driver.start_listener(self.amp, FAKE_UUID_1) self.assertTrue(m.called) @requests_mock.mock() def test_start_loadbalancer_missing(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/start".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), status_code=404, headers={'content-type': 'application/json'}) self.assertRaises(exc.NotFound, self.driver.start_listener, self.amp, FAKE_UUID_1) @requests_mock.mock() def 
test_start_loadbalancer_unauthorized(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/start".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.start_listener, self.amp, FAKE_UUID_1) @requests_mock.mock() def test_start_loadbalancer_server_error(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/start".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.start_listener, self.amp, FAKE_UUID_1) @requests_mock.mock() def test_start_loadbalancer_service_unavailable(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/start".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.start_listener, self.amp, FAKE_UUID_1) @requests_mock.mock() def test_delete_listener(self, m): m.delete("{base}/listeners/{listener_id}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1), json={}) self.driver.delete_listener(self.amp, FAKE_UUID_1) self.assertTrue(m.called) @requests_mock.mock() def test_delete_listener_missing(self, m): m.delete("{base}/listeners/{listener_id}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=404, headers={'content-type': 'application/json'}) self.driver.delete_listener(self.amp, FAKE_UUID_1) self.assertTrue(m.called) @requests_mock.mock() def test_delete_listener_unauthorized(self, m): m.delete("{base}/listeners/{listener_id}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.delete_listener, self.amp, FAKE_UUID_1) @requests_mock.mock() def test_delete_listener_server_error(self, m): m.delete("{base}/listeners/{listener_id}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.delete_listener, self.amp, FAKE_UUID_1) @requests_mock.mock() def test_delete_listener_service_unavailable(self, m): m.delete("{base}/listeners/{listener_id}".format( base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.delete_listener, self.amp, FAKE_UUID_1) @requests_mock.mock() def test_upload_cert_pem(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME)) self.driver.upload_cert_pem(self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, "some_file") self.assertTrue(m.called) @requests_mock.mock() def test_upload_invalid_cert_pem(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=400) self.assertRaises(exc.InvalidRequest, self.driver.upload_cert_pem, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, "some_file") @requests_mock.mock() def test_upload_cert_pem_unauthorized(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.upload_cert_pem, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, "some_file") @requests_mock.mock() def test_upload_cert_pem_server_error(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, 
filename=FAKE_PEM_FILENAME), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.upload_cert_pem, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, "some_file") @requests_mock.mock() def test_upload_cert_pem_service_unavailable(self, m): m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.upload_cert_pem, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, "some_file") @requests_mock.mock() def test_update_cert_for_rotation(self, m): m.put("{base}/certificate".format(base=self.base_url_ver)) resp_body = self.driver.update_cert_for_rotation(self.amp, "some_file") self.assertEqual(200, resp_body.status_code) @requests_mock.mock() def test_update_invalid_cert_for_rotation(self, m): m.put("{base}/certificate".format(base=self.base_url_ver), status_code=400) self.assertRaises(exc.InvalidRequest, self.driver.update_cert_for_rotation, self.amp, "some_file") @requests_mock.mock() def test_update_cert_for_rotation_unauthorized(self, m): m.put("{base}/certificate".format(base=self.base_url_ver), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.update_cert_for_rotation, self.amp, "some_file") @requests_mock.mock() def test_update_cert_for_rotation_error(self, m): m.put("{base}/certificate".format(base=self.base_url_ver), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.update_cert_for_rotation, self.amp, "some_file") @requests_mock.mock() def test_update_cert_for_rotation_unavailable(self, m): m.put("{base}/certificate".format(base=self.base_url_ver), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.update_cert_for_rotation, self.amp, "some_file") @requests_mock.mock() def test_get_cert_5sum(self, m): md5sum = {"md5sum": "some_real_sum"} m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), json=md5sum) sum_test = self.driver.get_cert_md5sum(self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) self.assertIsNotNone(sum_test) @requests_mock.mock() def test_get_cert_5sum_missing(self, m): m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=404, headers={'content-type': 'application/json'}) self.assertRaises(exc.NotFound, self.driver.get_cert_md5sum, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_get_cert_5sum_unauthorized(self, m): m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.get_cert_md5sum, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_get_cert_5sum_server_error(self, m): m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.get_cert_md5sum, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_get_cert_5sum_service_unavailable(self, m): m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=503) 
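        # The client request helper maps amphora agent HTTP status codes to
        # driver exceptions; the pattern exercised throughout this class is
        # roughly: 400 -> InvalidRequest, 401 -> Unauthorized,
        # 404 -> NotFound, 500 -> InternalServerError,
        # 503 -> ServiceUnavailable.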
self.assertRaises(exc.ServiceUnavailable, self.driver.get_cert_md5sum, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_delete_cert_pem(self, m): m.delete( "{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME)) self.driver.delete_cert_pem(self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) self.assertTrue(m.called) @requests_mock.mock() def test_delete_cert_pem_missing(self, m): m.delete( "{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=404, headers={'content-type': 'application/json'}) self.driver.delete_cert_pem(self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) self.assertTrue(m.called) @requests_mock.mock() def test_delete_cert_pem_unauthorized(self, m): m.delete( "{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.delete_cert_pem, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_delete_cert_pem_server_error(self, m): m.delete( "{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.delete_cert_pem, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_delete_cert_pem_service_unavailable(self, m): m.delete( "{base}/loadbalancer/{loadbalancer_id}/certificates/" "{filename}".format( base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, filename=FAKE_PEM_FILENAME), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.delete_cert_pem, self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) @requests_mock.mock() def test_upload_config(self, m): config = {"name": "fake_config"} m.put( "{base}/loadbalancer/{" "amphora_id}/{loadbalancer_id}/haproxy".format( amphora_id=self.amp.id, base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), json=config) self.driver.upload_config(self.amp, FAKE_UUID_1, config) self.assertTrue(m.called) @requests_mock.mock() def test_upload_invalid_config(self, m): config = '{"name": "bad_config"}' m.put( "{base}/loadbalancer/{" "amphora_id}/{loadbalancer_id}/haproxy".format( amphora_id=self.amp.id, base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), status_code=400) self.assertRaises(exc.InvalidRequest, self.driver.upload_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_config_unauthorized(self, m): config = '{"name": "bad_config"}' m.put( "{base}/loadbalancer/{" "amphora_id}/{loadbalancer_id}/haproxy".format( amphora_id=self.amp.id, base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.upload_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_config_server_error(self, m): config = '{"name": "bad_config"}' m.put( "{base}/loadbalancer/{" "amphora_id}/{loadbalancer_id}/haproxy".format( amphora_id=self.amp.id, base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.upload_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_config_service_unavailable(self, m): config = '{"name": "bad_config"}' m.put( "{base}/loadbalancer/{" 
"amphora_id}/{loadbalancer_id}/haproxy".format( amphora_id=self.amp.id, base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.upload_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_udp_config(self, m): config = {"name": "fake_config"} m.put( "{base}/listeners/" "{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), json=config) self.driver.upload_udp_config(self.amp, FAKE_UUID_1, config) self.assertTrue(m.called) @requests_mock.mock() def test_upload_udp_invalid_config(self, m): config = '{"name": "bad_config"}' m.put( "{base}/listeners/" "{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=400) self.assertRaises(exc.InvalidRequest, self.driver.upload_udp_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_udp_config_unauthorized(self, m): config = '{"name": "bad_config"}' m.put( "{base}/listeners/" "{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=401) self.assertRaises(exc.Unauthorized, self.driver.upload_udp_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_udp_config_server_error(self, m): config = '{"name": "bad_config"}' m.put( "{base}/listeners/" "{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=500) self.assertRaises(exc.InternalServerError, self.driver.upload_udp_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_upload_udp_config_service_unavailable(self, m): config = '{"name": "bad_config"}' m.put( "{base}/listeners/" "{amphora_id}/{listener_id}/udp_listener".format( amphora_id=self.amp.id, base=self.base_url_ver, listener_id=FAKE_UUID_1), status_code=503) self.assertRaises(exc.ServiceUnavailable, self.driver.upload_udp_config, self.amp, FAKE_UUID_1, config) @requests_mock.mock() def test_plug_vip(self, m): m.post("{base}/plug/vip/{vip}".format( base=self.base_url_ver, vip=FAKE_IP) ) self.driver.plug_vip(self.amp, FAKE_IP, self.subnet_info) self.assertTrue(m.called) @requests_mock.mock() def test_plug_vip_api_not_ready(self, m): m.post("{base}/plug/vip/{vip}".format( base=self.base_url_ver, vip=FAKE_IP), status_code=404, headers={'content-type': 'text/html'} ) self.assertRaises(driver_except.TimeOutException, self.driver.plug_vip, self.amp, FAKE_IP, self.subnet_info) self.assertTrue(m.called) @requests_mock.mock() def test_plug_network(self, m): m.post("{base}/plug/network".format( base=self.base_url_ver) ) self.driver.plug_network(self.amp, self.port_info) self.assertTrue(m.called) @requests_mock.mock() def test_upload_vrrp_config(self, m): config = '{"name": "bad_config"}' m.put("{base}/vrrp/upload".format( base=self.base_url_ver) ) self.driver.upload_vrrp_config(self.amp, config) self.assertTrue(m.called) @requests_mock.mock() def test_vrrp_action(self, m): action = 'start' m.put("{base}/vrrp/{action}".format(base=self.base_url_ver, action=action)) self.driver._vrrp_action(action, self.amp) self.assertTrue(m.called) @requests_mock.mock() def test_get_interface(self, m): interface = [{"interface": "eth1"}] ip_addr = '192.51.100.1' m.get("{base}/interface/{ip_addr}".format(base=self.base_url_ver, ip_addr=ip_addr), json=interface) self.driver.get_interface(self.amp, ip_addr) 
        self.assertTrue(m.called)
        m.register_uri('GET', self.base_url_ver + '/interface/' + ip_addr,
                       status_code=500, reason='FAIL', json='FAIL')
        self.assertRaises(exc.InternalServerError,
                          self.driver.get_interface,
                          self.amp, ip_addr)

    @requests_mock.mock()
    def test_update_agent_config(self, m):
        m.put("{base}/config".format(base=self.base_url_ver))
        resp_body = self.driver.update_agent_config(self.amp, "some_file")
        self.assertEqual(200, resp_body.status_code)

    @requests_mock.mock()
    def test_update_agent_config_error(self, m):
        m.put("{base}/config".format(base=self.base_url_ver),
              status_code=500)
        self.assertRaises(exc.InternalServerError,
                          self.driver.update_agent_config,
                          self.amp, "some_file")

octavia-6.2.2/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_common.py

# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from octavia.amphorae.drivers.haproxy import exceptions as exc
from octavia.amphorae.drivers.haproxy import rest_api_driver
import octavia.tests.unit.base as base


class TestHAProxyAmphoraDriver(base.TestCase):
    def setUp(self):
        super(TestHAProxyAmphoraDriver, self).setUp()
        self.driver = rest_api_driver.HaproxyAmphoraLoadBalancerDriver()

    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
                'HaproxyAmphoraLoadBalancerDriver.'
'_populate_amphora_api_version') def test_get_interface_from_ip(self, mock_api_version): FAKE_INTERFACE = 'fake0' IP_ADDRESS = '203.0.113.42' TIMEOUT_DICT = {'outa': 'time'} amphora_mock = mock.MagicMock() amphora_mock.api_version = '0' client_mock = mock.MagicMock() client_mock.get_interface.side_effect = [ {'interface': FAKE_INTERFACE}, {'interface': FAKE_INTERFACE}, {}, exc.NotFound] self.driver.clients['0'] = client_mock # Test interface found no timeout result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) self.assertEqual(FAKE_INTERFACE, result) mock_api_version.assert_called_once_with(amphora_mock, None) client_mock.get_interface.assert_called_once_with( amphora_mock, IP_ADDRESS, None, log_error=False) # Test interface found with timeout mock_api_version.reset_mock() client_mock.reset_mock() result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS, timeout_dict=TIMEOUT_DICT) self.assertEqual(FAKE_INTERFACE, result) mock_api_version.assert_called_once_with(amphora_mock, TIMEOUT_DICT) client_mock.get_interface.assert_called_once_with( amphora_mock, IP_ADDRESS, TIMEOUT_DICT, log_error=False) # Test no interface data mock_api_version.reset_mock() client_mock.reset_mock() result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) self.assertIsNone(result) mock_api_version.assert_called_once_with(amphora_mock, None) client_mock.get_interface.assert_called_once_with( amphora_mock, IP_ADDRESS, None, log_error=False) # Test NotFound mock_api_version.reset_mock() client_mock.reset_mock() result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) self.assertIsNone(result) mock_api_version.assert_called_once_with(amphora_mock, None) client_mock.get_interface.assert_called_once_with( amphora_mock, IP_ADDRESS, None, log_error=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4142168 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/health/0000775000175000017500000000000000000000000023460 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/health/__init__.py0000664000175000017500000000107400000000000025573 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py0000664000175000017500000001651000000000000027703 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import binascii import random import socket from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from octavia.amphorae.drivers.health import heartbeat_udp from octavia.common import exceptions from octavia.tests.unit import base FAKE_ID = 1 KEY = 'TEST' IP = '192.0.2.10' PORT = random.randrange(1, 9000) RLIMIT = random.randrange(1, 100) FAKE_ADDRINFO = ( socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', (IP, PORT) ) HEALTH_DRIVER = 'health_logger' STATS_DRIVER = 'stats_logger' class TestHeartbeatUDP(base.TestCase): def setUp(self): super(TestHeartbeatUDP, self).setUp() self.health_update = mock.Mock() self.stats_update = mock.Mock() self.conf = oslo_fixture.Config(cfg.CONF) self.conf.config(group="health_manager", heartbeat_key=KEY) self.conf.config(group="health_manager", bind_ip=IP) self.conf.config(group="health_manager", bind_port=PORT) self.conf.config(group="health_manager", sock_rlimit=0) self.conf.config(group="health_manager", health_update_driver=HEALTH_DRIVER) self.conf.config(group="health_manager", stats_update_driver=STATS_DRIVER) @mock.patch('stevedore.driver.DriverManager') def test_update_health_func(self, driver_manager): obj = {'id': 1} heartbeat_udp.update_health(obj, '192.0.2.1') driver_manager.assert_called_once_with( invoke_on_load=True, name='health_logger', namespace='octavia.amphora.health_update_drivers' ) driver_manager().driver.update_health.assert_called_once_with( obj, '192.0.2.1') @mock.patch('stevedore.driver.DriverManager') def test_update_stats_func(self, driver_manager): obj = {'id': 1} heartbeat_udp.update_stats(obj, '192.0.2.1') driver_manager.assert_called_once_with( invoke_on_load=True, name='stats_logger', namespace='octavia.amphora.stats_update_drivers' ) driver_manager().driver.update_stats.assert_called_once_with( obj, '192.0.2.1') @mock.patch('socket.getaddrinfo') @mock.patch('socket.socket') def test_update(self, mock_socket, mock_getaddrinfo): socket_mock = mock.MagicMock() mock_socket.return_value = socket_mock mock_getaddrinfo.return_value = [FAKE_ADDRINFO] bind_mock = mock.MagicMock() socket_mock.bind = bind_mock getter = heartbeat_udp.UDPStatusGetter() mock_getaddrinfo.assert_called_with(IP, PORT, 0, socket.SOCK_DGRAM) self.assertEqual((IP, PORT), getter.sockaddr) mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM) bind_mock.assert_called_once_with((IP, PORT)) self.conf.config(group="health_manager", sock_rlimit=RLIMIT) mock_getaddrinfo.return_value = [FAKE_ADDRINFO, FAKE_ADDRINFO] getter.update(KEY, IP, PORT) @mock.patch('socket.getaddrinfo') @mock.patch('socket.socket') def test_dorecv(self, mock_socket, mock_getaddrinfo): socket_mock = mock.MagicMock() mock_socket.return_value = socket_mock mock_getaddrinfo.return_value = [range(1, 6)] recvfrom = mock.MagicMock() socket_mock.recvfrom = recvfrom getter = heartbeat_udp.UDPStatusGetter() # key = 'TEST' msg = {"testkey": "TEST"} sample_msg = ('78daab562a492d2ec94ead54b252500a710d0e5' '1aa050041b506245806e5c1971e79951818394e' 'a6e71ad989ff950945f9573f4ab6f83e25db8ed7') bin_msg = binascii.unhexlify(sample_msg) 
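        # sample_msg is a pre-built heartbeat envelope: a zlib-compressed
        # JSON payload (note the leading 78da zlib header bytes) carrying an
        # HMAC digest derived from the 'TEST' key configured in setUp.
        # dorecv() should unwrap it back into the original
        # {"testkey": "TEST"} dict.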
recvfrom.return_value = bin_msg, ('192.0.2.1', 2) (obj, srcaddr) = getter.dorecv() self.assertEqual('192.0.2.1', srcaddr) self.assertIsNotNone(obj.pop('recv_time')) self.assertEqual({"testkey": "TEST"}, obj) @mock.patch('octavia.amphorae.backends.health_daemon.status_message.' 'unwrap_envelope') @mock.patch('socket.getaddrinfo') @mock.patch('socket.socket') def test_dorecv_bad_packet(self, mock_socket, mock_getaddrinfo, mock_unwrap): socket_mock = mock.MagicMock() mock_socket.return_value = socket_mock mock_unwrap.side_effect = Exception('boom') mock_getaddrinfo.return_value = [range(1, 6)] recvfrom = mock.MagicMock() socket_mock.recvfrom = recvfrom getter = heartbeat_udp.UDPStatusGetter() # key = 'TEST' msg = {"testkey": "TEST"} sample_msg = ('78daab562a492d2ec94ead54b252500a710d0e5' '1aa050041b506245806e5c1971e79951818394e' 'a6e71ad989ff950945f9573f4ab6f83e25db8ed7') bin_msg = binascii.unhexlify(sample_msg) recvfrom.return_value = bin_msg, 2 self.assertRaises(exceptions.InvalidHMACException, getter.dorecv) @mock.patch('socket.getaddrinfo') @mock.patch('socket.socket') def test_check(self, mock_socket, mock_getaddrinfo): socket_mock = mock.MagicMock() mock_socket.return_value = socket_mock mock_getaddrinfo.return_value = [range(1, 6)] mock_dorecv = mock.Mock() mock_health_executor = mock.Mock() mock_stats_executor = mock.Mock() getter = heartbeat_udp.UDPStatusGetter() getter.dorecv = mock_dorecv mock_dorecv.side_effect = [(dict(id=FAKE_ID), 2)] getter.health_executor = mock_health_executor getter.stats_executor = mock_stats_executor getter.check() getter.health_executor.shutdown() getter.stats_executor.shutdown() mock_health_executor.submit.assert_has_calls( [mock.call(heartbeat_udp.update_health, {'id': 1}, 2)]) mock_stats_executor.submit.assert_has_calls( [mock.call(heartbeat_udp.update_stats, {'id': 1}, 2)]) @mock.patch('socket.getaddrinfo') @mock.patch('socket.socket') def test_socket_except(self, mock_socket, mock_getaddrinfo): self.assertRaises(exceptions.NetworkConfig, heartbeat_udp.UDPStatusGetter) @mock.patch('concurrent.futures.ThreadPoolExecutor.submit') @mock.patch('socket.getaddrinfo') @mock.patch('socket.socket') def test_check_exception(self, mock_socket, mock_getaddrinfo, mock_submit): self.mock_socket = mock_socket self.mock_getaddrinfo = mock_getaddrinfo self.mock_getaddrinfo.return_value = [range(1, 6)] mock_dorecv = mock.Mock() getter = heartbeat_udp.UDPStatusGetter() getter.dorecv = mock_dorecv mock_dorecv.side_effect = exceptions.InvalidHMACException getter.check() self.assertFalse(mock_submit.called) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4142168 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/keepalived/0000775000175000017500000000000000000000000024324 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/keepalived/__init__.py0000664000175000017500000000107400000000000026437 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4142168 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/keepalived/jinja/0000775000175000017500000000000000000000000025417 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/keepalived/jinja/__init__.py0000664000175000017500000000107400000000000027532 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py0000664000175000017500000001576000000000000030753 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
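# The tests below render the keepalived VRRP jinja template for a
# MASTER/BACKUP amphora pair and compare the output character-for-character
# against the reference IPv4 and IPv6 configurations built in setUp().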
# import copy from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg from octavia.common import constants import octavia.tests.unit.base as base class TestVRRPRestDriver(base.TestCase): def setUp(self): super(TestVRRPRestDriver, self).setUp() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(group="haproxy_amphora", base_path='/tmp/test') conf.config(group="keepalived_vrrp", vrrp_garp_refresh_interval=5) conf.config(group="keepalived_vrrp", vrrp_garp_refresh_count=2) conf.config(group="keepalived_vrrp", vrrp_check_interval=5) conf.config(group="keepalived_vrrp", vrrp_fail_count=2) conf.config(group="keepalived_vrrp", vrrp_success_count=2) self.templater = jinja_cfg.KeepalivedJinjaTemplater() self.amphora1 = mock.MagicMock() self.amphora1.status = constants.AMPHORA_ALLOCATED self.amphora1.vrrp_ip = '10.0.0.1' self.amphora1.role = constants.ROLE_MASTER self.amphora1.vrrp_interface = 'eth1' self.amphora1.vrrp_id = 1 self.amphora1.vrrp_priority = 100 self.amphora2 = mock.MagicMock() self.amphora2.status = constants.AMPHORA_ALLOCATED self.amphora2.vrrp_ip = '10.0.0.2' self.amphora2.role = constants.ROLE_BACKUP self.amphora2.vrrp_interface = 'eth1' self.amphora2.vrrp_id = 1 self.amphora2.vrrp_priority = 90 self.lb = mock.MagicMock() self.lb.amphorae = [self.amphora1, self.amphora2] self.lb.vrrp_group.vrrp_group_name = 'TESTGROUP' self.lb.vrrp_group.vrrp_auth_type = constants.VRRP_AUTH_DEFAULT self.lb.vrrp_group.vrrp_auth_pass = 'TESTPASSWORD' self.lb.vip.ip_address = '10.1.0.5' self.lb.vrrp_group.advert_int = 10 self.ref_conf = ("vrrp_script check_script {\n" " script /tmp/test/vrrp/check_script.sh\n" " interval 5\n" " fall 2\n" " rise 2\n" "}\n" "\n" "vrrp_instance TESTGROUP {\n" " state MASTER\n" " interface eth1\n" " virtual_router_id 1\n" " priority 100\n" " nopreempt\n" " accept\n" " garp_master_refresh 5\n" " garp_master_refresh_repeat 2\n" " advert_int 10\n" " authentication {\n" " auth_type PASS\n" " auth_pass TESTPASSWORD\n" " }\n" "\n" " unicast_src_ip 10.0.0.1\n" " unicast_peer {\n" " 10.0.0.2\n" " }\n" "\n" " virtual_ipaddress {\n" " 10.1.0.5\n" " }\n\n" " virtual_routes {\n" " 10.1.0.0/24 dev eth1 src 10.1.0.5 scope link " "table 1\n" " }\n\n" " virtual_rules {\n" " from 10.1.0.5/32 table 1 priority 100\n" " }\n\n" " track_script {\n" " check_script\n" " }\n" "}") self.amphora1v6 = copy.deepcopy(self.amphora1) self.amphora1v6.vrrp_ip = '2001:db8::10' self.amphora2v6 = copy.deepcopy(self.amphora2) self.amphora2v6.vrrp_ip = '2001:db8::11' self.lbv6 = copy.deepcopy(self.lb) self.lbv6.amphorae = [self.amphora1v6, self.amphora2v6] self.lbv6.vip.ip_address = '2001:db8::15' self.ref_v6_conf = ("vrrp_script check_script {\n" " script /tmp/test/vrrp/check_script.sh\n" " interval 5\n" " fall 2\n" " rise 2\n" "}\n" "\n" "vrrp_instance TESTGROUP {\n" " state MASTER\n" " interface eth1\n" " virtual_router_id 1\n" " priority 100\n" " nopreempt\n" " accept\n" " garp_master_refresh 5\n" " garp_master_refresh_repeat 2\n" " advert_int 10\n" " authentication {\n" " auth_type PASS\n" " auth_pass TESTPASSWORD\n" " }\n" "\n" " unicast_src_ip 2001:db8::10\n" " unicast_peer {\n" " 2001:db8::11\n" " }\n" "\n" " virtual_ipaddress {\n" " 2001:db8::15\n" " }\n\n" " virtual_routes {\n" " 2001:db8::/64 dev eth1 src " "2001:db8::15 scope link table 1\n" " }\n\n" " virtual_rules {\n" " from 2001:db8::15/128 table 1 " "priority 100\n" " }\n\n" " track_script {\n" " check_script\n" " }\n" 
"}") def test_build_keepalived_config(self): config = self.templater.build_keepalived_config( self.lb, self.amphora1, '10.1.0.0/24') self.assertEqual(self.ref_conf, config) def test_build_keepalived_ipv6_config(self): config = self.templater.build_keepalived_config( self.lbv6, self.amphora1v6, '2001:db8::/64') self.assertEqual(self.ref_v6_conf, config) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py0000664000175000017500000001124500000000000031341 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from unittest import mock from oslo_utils import uuidutils from octavia.amphorae.drivers.keepalived import vrrp_rest_driver from octavia.common import constants import octavia.tests.unit.base as base # Version 1.0 is functionally identical to all versions before it API_VERSION = '1.0' class TestVRRPRestDriver(base.TestCase): def setUp(self): self.keepalived_mixin = vrrp_rest_driver.KeepalivedAmphoraDriverMixin() self.keepalived_mixin.clients = { 'base': mock.MagicMock(), API_VERSION: mock.MagicMock()} self.keepalived_mixin._populate_amphora_api_version = mock.MagicMock() self.clients = self.keepalived_mixin.clients self.FAKE_CONFIG = 'FAKE CONFIG' self.lb_mock = mock.MagicMock() self.amphora_mock = mock.MagicMock() self.amphora_mock.id = uuidutils.generate_uuid() self.amphora_mock.status = constants.AMPHORA_ALLOCATED self.amphora_mock.api_version = API_VERSION self.lb_mock.amphorae = [self.amphora_mock] self.amphorae_network_config = {} vip_subnet = mock.MagicMock() vip_subnet.cidr = '192.0.2.0/24' self.amphorae_network_config[self.amphora_mock.id] = vip_subnet super(TestVRRPRestDriver, self).setUp() @mock.patch('octavia.amphorae.drivers.keepalived.jinja.' 
'jinja_cfg.KeepalivedJinjaTemplater.build_keepalived_config') def test_update_vrrp_conf(self, mock_templater): mock_templater.return_value = self.FAKE_CONFIG self.keepalived_mixin.update_vrrp_conf( self.lb_mock, self.amphorae_network_config, self.amphora_mock) self.clients[API_VERSION].upload_vrrp_config.assert_called_once_with( self.amphora_mock, self.FAKE_CONFIG) # Test with amphorav2 amphorae_network_config list of dicts mock_templater.reset_mock() self.clients[API_VERSION].upload_vrrp_config.reset_mock() v2_amphorae_network_config = {} vip_subnet_dict = { constants.VIP_SUBNET: {constants.CIDR: '192.0.2.0/24'}} v2_amphorae_network_config[self.amphora_mock.id] = vip_subnet_dict self.keepalived_mixin.update_vrrp_conf( self.lb_mock, v2_amphorae_network_config, self.amphora_mock) self.clients[API_VERSION].upload_vrrp_config.assert_called_once_with( self.amphora_mock, self.FAKE_CONFIG) # Test amphora not in AMPHORA_ALLOCATED state mock_templater.reset_mock() self.clients[API_VERSION].upload_vrrp_config.reset_mock() ready_amphora_mock = mock.MagicMock() ready_amphora_mock.id = uuidutils.generate_uuid() ready_amphora_mock.status = constants.AMPHORA_READY ready_amphora_mock.api_version = API_VERSION self.keepalived_mixin.update_vrrp_conf( self.lb_mock, self.amphorae_network_config, ready_amphora_mock) mock_templater.assert_not_called() self.clients[API_VERSION].upload_vrrp_config.assert_not_called() def test_stop_vrrp_service(self): self.keepalived_mixin.stop_vrrp_service(self.lb_mock) self.clients[API_VERSION].stop_vrrp.assert_called_once_with( self.amphora_mock) def test_start_vrrp_service(self): self.keepalived_mixin.start_vrrp_service(self.amphora_mock) self.clients[API_VERSION].start_vrrp.assert_called_once_with( self.amphora_mock, timeout_dict=None) # Test amphora not in AMPHORA_ALLOCATED state self.clients[API_VERSION].start_vrrp.reset_mock() ready_amphora_mock = mock.MagicMock() ready_amphora_mock.id = uuidutils.generate_uuid() ready_amphora_mock.status = constants.AMPHORA_READY ready_amphora_mock.api_version = API_VERSION self.keepalived_mixin.start_vrrp_service(ready_amphora_mock) self.clients[API_VERSION].start_vrrp.assert_not_called() def test_reload_vrrp_service(self): self.keepalived_mixin.reload_vrrp_service(self.lb_mock) self.clients[API_VERSION].reload_vrrp.assert_called_once_with( self.amphora_mock) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py0000664000175000017500000001630000000000000032211 0ustar00zuulzuul00000000000000# Copyright 2014, Author: Min Wang,German Eichberger # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
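# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Octavia source tree): the VRRP REST
# driver tests above pin down a small contract -- update_vrrp_conf() renders
# a keepalived config with the Jinja templater and uploads it, but only for
# amphorae in the AMPHORA_ALLOCATED state. A minimal standalone helper that
# mirrors that contract might look like the following; the helper name and
# the vip_cidrs mapping (amphora id -> CIDR string) are assumptions made for
# illustration only.
# ---------------------------------------------------------------------------
from octavia.common import constants as _consts


def _upload_vrrp_configs_sketch(clients, templater, loadbalancer, vip_cidrs):
    """Render and upload a keepalived config per allocated amphora."""
    for amp in loadbalancer.amphorae:
        if amp.status != _consts.AMPHORA_ALLOCATED:
            # e.g. AMPHORA_READY amphorae are skipped entirely, as the
            # ready_amphora_mock cases above assert
            continue
        config = templater.build_keepalived_config(
            loadbalancer, amp, vip_cidrs[amp.id])
        clients[amp.api_version].upload_vrrp_config(amp, config)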
from unittest import mock from oslo_utils import uuidutils from octavia.amphorae.drivers.noop_driver import driver from octavia.common import constants from octavia.common import data_models from octavia.network import data_models as network_models from octavia.tests.unit import base FAKE_UUID_1 = uuidutils.generate_uuid() class TestLoggingUpdate(base.TestCase): def setUp(self): super(TestLoggingUpdate, self).setUp() self.mixin = driver.LoggingUpdate() def test_update_stats(self): self.mixin.update_stats('test update stats') self.assertEqual('test update stats', self.mixin.stats) def test_update_health(self): self.mixin.update_health('test update health') self.assertEqual('test update health', self.mixin.health) class TestNoopAmphoraLoadBalancerDriver(base.TestCase): FAKE_UUID_1 = uuidutils.generate_uuid() def setUp(self): super(TestNoopAmphoraLoadBalancerDriver, self).setUp() self.driver = driver.NoopAmphoraLoadBalancerDriver() self.listener = data_models.Listener() self.listener.id = uuidutils.generate_uuid() self.listener.protocol_port = 80 self.vip = data_models.Vip() self.vip.ip_address = "192.51.100.1" self.amphora = data_models.Amphora() self.amphora.id = self.FAKE_UUID_1 self.load_balancer = data_models.LoadBalancer( id=FAKE_UUID_1, amphorae=[self.amphora], vip=self.vip, listeners=[self.listener]) self.listener.load_balancer = self.load_balancer self.network = network_models.Network(id=self.FAKE_UUID_1) self.port = network_models.Port(id=uuidutils.generate_uuid()) self.amphorae_net_configs = { self.amphora.id: network_models.AmphoraNetworkConfig( amphora=self.amphora, vip_subnet=network_models.Subnet(id=self.FAKE_UUID_1)) } self.pem_file = 'test_pem_file' self.agent_config = 'test agent config' self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1, constants.REQ_READ_TIMEOUT: 2, constants.CONN_MAX_RETRIES: 3, constants.CONN_RETRY_INTERVAL: 4} def test_update_amphora_listeners(self): self.driver.update_amphora_listeners(self.load_balancer, self.amphora, self.timeout_dict) self.assertEqual((self.listener, self.amphora.id, self.timeout_dict, 'update_amp'), self.driver.driver.amphoraconfig[( self.listener.id, self.amphora.id)]) def test_update(self): self.driver.update(self.load_balancer) self.assertEqual(([self.listener], self.vip, 'active'), self.driver.driver.amphoraconfig[( (self.listener.protocol_port,), self.vip.ip_address)]) def test_start(self): mock_amphora = mock.MagicMock() mock_amphora.id = '321' self.driver.start(self.load_balancer, amphora=mock_amphora) self.assertEqual((self.load_balancer, mock_amphora, 'start'), self.driver.driver.amphoraconfig[( self.load_balancer.id, '321')]) def test_reload(self): mock_amphora = mock.MagicMock() mock_amphora.id = '321' self.driver.reload(self.load_balancer, amphora=mock_amphora) self.assertEqual((self.load_balancer, mock_amphora, 'reload'), self.driver.driver.amphoraconfig[( self.load_balancer.id, '321')]) def test_delete(self): self.driver.delete(self.listener) self.assertEqual((self.listener, self.vip, 'delete'), self.driver.driver.amphoraconfig[( self.listener.protocol_port, self.vip.ip_address)]) def test_get_info(self): self.driver.get_info(self.amphora) self.assertEqual((self.amphora.id, 'get_info'), self.driver.driver.amphoraconfig[ self.amphora.id]) def test_get_diagnostics(self): self.driver.get_diagnostics(self.amphora) self.assertEqual((self.amphora.id, 'get_diagnostics'), self.driver.driver.amphoraconfig[ self.amphora.id]) def test_finalize_amphora(self): self.driver.finalize_amphora(self.amphora) 
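        # The inner noop driver records every call it receives in its
        # amphoraconfig map, keyed here by the amphora id; the assertion
        # below simply reads that record back.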
self.assertEqual((self.amphora.id, 'finalize amphora'), self.driver.driver.amphoraconfig[ self.amphora.id]) def test_post_network_plug(self): self.driver.post_network_plug(self.amphora, self.port) self.assertEqual((self.amphora.id, self.port.id, 'post_network_plug'), self.driver.driver.amphoraconfig[( self.amphora.id, self.port.id)]) def test_post_vip_plug(self): self.driver.post_vip_plug(self.amphora, self.load_balancer, self.amphorae_net_configs) expected_method_and_args = (self.load_balancer.id, self.amphorae_net_configs, 'post_vip_plug') actual_method_and_args = self.driver.driver.amphoraconfig[( self.load_balancer.id, id(self.amphorae_net_configs) )] self.assertEqual(expected_method_and_args, actual_method_and_args) def test_upload_cert_amp(self): self.driver.upload_cert_amp(self.amphora, self.pem_file) self.assertEqual( (self.amphora.id, self.pem_file, 'update_amp_cert_file'), self.driver.driver.amphoraconfig[( self.amphora.id, self.pem_file)]) def test_update_agent_config(self): self.driver.update_amphora_agent_config(self.amphora, self.agent_config) self.assertEqual( (self.amphora.id, self.agent_config, 'update_amphora_agent_config'), self.driver.driver.amphoraconfig[( self.amphora.id, self.agent_config)]) def test_get_interface_from_ip(self): result = self.driver.get_interface_from_ip(self.amphora, '198.51.100.99') self.assertEqual('noop0', result) result = self.driver.get_interface_from_ip(self.amphora, '198.51.100.9') self.assertIsNone(result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4142168 octavia-6.2.2/octavia/tests/unit/api/0000775000175000017500000000000000000000000017512 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/__init__.py0000664000175000017500000000107400000000000021625 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4142168 octavia-6.2.2/octavia/tests/unit/api/common/0000775000175000017500000000000000000000000021002 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/common/__init__.py0000664000175000017500000000107400000000000023115 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
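# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Octavia source tree): the noop
# amphora-driver tests above build a timeout_dict from four well-known
# constants before handing it to update_amphora_listeners(). Any caller can
# assemble one the same way; the numeric values here are arbitrary and the
# inline comments reflect the usual meaning of these options.
# ---------------------------------------------------------------------------
from octavia.common import constants as _consts

EXAMPLE_TIMEOUT_DICT = {
    _consts.REQ_CONN_TIMEOUT: 1,     # seconds to establish a connection
    _consts.REQ_READ_TIMEOUT: 2,     # seconds to wait for a response
    _consts.CONN_MAX_RETRIES: 3,     # how many times to retry a request
    _consts.CONN_RETRY_INTERVAL: 4,  # seconds to pause between retries
}
# Usage mirrors test_update_amphora_listeners above:
#     driver.update_amphora_listeners(load_balancer, amphora,
#                                     EXAMPLE_TIMEOUT_DICT)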
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/common/base.py0000664000175000017500000002034600000000000022273 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from wsme import exc from wsme.rest import json as wsme_json from octavia.api.common import types as base_type from octavia.common import constants from octavia.tests.unit import base def build_body(mandatory_fields, extra_attributes): body = {} for key in mandatory_fields: body[key] = mandatory_fields[key] for key in extra_attributes: body[key] = extra_attributes[key] return body class BaseTypesTest(base.TestCase): _type = base_type.BaseType _mandatory_fields = {} class BaseTestUuid(base.TestCase): def assert_uuid_attr(self, attr): kwargs = {attr: uuidutils.generate_uuid()} self._type(**kwargs) def assert_uuid_attr_fail_with_integer(self, attr): kwargs = {attr: 1} self.assertRaises(exc.InvalidInput, self._type, **kwargs) def assert_uuid_attr_fail_with_short_str(self, attr): kwargs = {attr: '12345'} self.assertRaises(exc.InvalidInput, self._type, **kwargs) def assert_uuid_attr_fail_with_shorter_than_uuid(self, attr): kwargs = {attr: uuidutils.generate_uuid()[1:]} self.assertRaises(exc.InvalidInput, self._type, **kwargs) def assert_uuid_attr_fail_with_longer_than_uuid(self, attr): kwargs = {attr: uuidutils.generate_uuid() + "0"} self.assertRaises(exc.InvalidInput, self._type, **kwargs) class BaseTestString(base.TestCase): def _default_min_max_lengths(self, min_length=None, max_length=None): if max_length is None: if min_length is None: max_length = 255 min_length = 2 else: max_length = min_length + 1 else: if min_length is None: min_length = max_length - 1 return min_length, max_length def assert_string_attr(self, attr, min_length=None, max_length=None): min_length, max_length = self._default_min_max_lengths(min_length, max_length) string_val = 'a' * (max_length - 1) kwargs = {attr: string_val} self._type(**kwargs) def assert_string_attr_min_length(self, attr, min_length): min_length, max_length = self._default_min_max_lengths(min_length) string_val = 'a' * (min_length - 1) kwargs = {attr: string_val} # No point in testing if min_length is <= 0 if min_length > 0: self.assertRaises(exc.InvalidInput, self._type, **kwargs) def assert_string_attr_max_length(self, attr, max_length=None): min_length, max_length = self._default_min_max_lengths(max_length) string_val = 'a' * (max_length + 1) kwargs = {attr: string_val} self.assertRaises(exc.InvalidInput, self._type, **kwargs) class BaseTestBool(base.TestCase): def assert_bool_attr(self, attr): kwargs = {attr: True} self.assertIsNotNone(self._type(**kwargs)) kwargs = {attr: False} self.assertIsNotNone(self._type(**kwargs)) def assert_bool_attr_non_bool(self, attr): kwargs = {attr: 'test'} self.assertRaises(exc.InvalidInput, self._type, **kwargs) class TestIdMixin(BaseTestUuid): id_attr = 'id' def test_id(self): 
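        # assert_uuid_attr() checks that a well-formed UUID is accepted; the
        # fail_with_* helpers defined above then verify that integers, short
        # strings, and strings one character shorter or longer than a UUID
        # all raise InvalidInput.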
self.assert_uuid_attr(self.id_attr) self.assert_uuid_attr_fail_with_integer(self.id_attr) self.assert_uuid_attr_fail_with_short_str(self.id_attr) self.assert_uuid_attr_fail_with_shorter_than_uuid(self.id_attr) self.assert_uuid_attr_fail_with_longer_than_uuid(self.id_attr) def test_id_readonly(self): body = build_body(self._mandatory_fields, {self.id_attr: uuidutils.generate_uuid()}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestProjectIdMixin(BaseTestUuid): project_id_attr = 'project_id' def test_project_id(self): self.assert_uuid_attr(self.project_id_attr) self.assert_uuid_attr_fail_with_integer(self.project_id_attr) self.assert_uuid_attr_fail_with_short_str(self.project_id_attr) self.assert_uuid_attr_fail_with_shorter_than_uuid(self.project_id_attr) self.assert_uuid_attr_fail_with_longer_than_uuid(self.project_id_attr) def test_project_id_readonly(self): body = build_body(self._mandatory_fields, {self.project_id_attr: uuidutils.generate_uuid()}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestNameMixin(BaseTestString): name_attr = 'name' def test_name(self): self.assert_string_attr(self.name_attr, min_length=0, max_length=255) self.assert_string_attr_min_length(self.name_attr, 0) self.assert_string_attr_max_length(self.name_attr, 255) def test_editable_name(self): name = "Name" body = build_body(self._mandatory_fields, {self.name_attr: name}) type_instance = wsme_json.fromjson(self._type, body) self.assertEqual(name, type_instance.name) class TestDescriptionMixin(BaseTestString): description_attr = 'description' def test_description(self): self.assert_string_attr(self.description_attr, min_length=0, max_length=255) self.assert_string_attr_min_length(self.description_attr, 0) self.assert_string_attr_max_length(self.description_attr, 255) def test_editable_description(self): description = "Description" body = build_body(self._mandatory_fields, {self.description_attr: description}) type_instance = wsme_json.fromjson(self._type, body) self.assertEqual(description, type_instance.description) class TestEnabledMixin(BaseTestBool): enabled_attr = 'enabled' def test_enabled(self): self.assert_bool_attr(self.enabled_attr) self.assert_bool_attr_non_bool(self.enabled_attr) def test_default_enabled_true(self): body = build_body(self._mandatory_fields, {}) type_instance = wsme_json.fromjson(self._type, body) self.assertTrue(type_instance.enabled) def test_editable_enabled(self): body = build_body(self._mandatory_fields, {"enabled": False}) type_instance = wsme_json.fromjson(self._type, body) self.assertFalse(type_instance.enabled) class TestProvisioningStatusMixin(BaseTestString): provisioning_attr = 'provisioning_status' def test_provisioning_status(self): self.assert_string_attr(self.provisioning_attr, min_length=0, max_length=16) self.assert_string_attr_min_length(self.provisioning_attr, 0) self.assert_string_attr_max_length(self.provisioning_attr, 16) def test_provisioning_status_readonly(self): status = constants.ACTIVE body = build_body(self._mandatory_fields, {self.provisioning_attr: status}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestOperatingStatusMixin(BaseTestString): operating_attr = 'operating_status' def test_operating_status(self): self.assert_string_attr(self.operating_attr, min_length=0, max_length=16) self.assert_string_attr_min_length(self.operating_attr, 0) self.assert_string_attr_max_length(self.operating_attr, 16) def test_operating_status_readonly(self): status = 
constants.ONLINE body = build_body(self._mandatory_fields, {self.operating_attr: status}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/common/test_types.py0000664000175000017500000001561200000000000023564 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from wsme import types as wtypes from octavia.api.common import types from octavia.common import data_models from octavia.tests.unit import base class TestTypeRename(types.BaseType): _type_to_model_map = {'renamed': 'original', 'child_one': 'child.one', 'child_two': 'child.two', 'admin_state_up': 'enabled'} id = wtypes.wsattr(wtypes.StringType()) renamed = wtypes.wsattr(wtypes.StringType()) child_one = wtypes.wsattr(wtypes.StringType()) child_two = wtypes.wsattr(wtypes.StringType()) admin_state_up = wtypes.wsattr(bool) class TestTypeRenameSubset(types.BaseType): _type_to_model_map = {'renamed': 'original', 'child_one': 'child.one', 'child_two': 'child.two'} id = wtypes.wsattr(wtypes.StringType()) renamed = wtypes.wsattr(wtypes.StringType()) class TestTypeTenantProject(types.BaseType): tenant_id = wtypes.wsattr(wtypes.StringType()) project_id = wtypes.wsattr(wtypes.StringType()) class ChildTestModel(data_models.BaseDataModel): def __init__(self, one=None, two=None): self.one = one self.two = two class TestModel(data_models.BaseDataModel): def __init__(self, id=None, original=None, child=None, enabled=None): self.id = id self.original = original self.child = child self.enabled = enabled def to_dict(self): result = super(TestModel, self).to_dict() result['child'] = self.child.to_dict() return result class TestTypeDataModelRenames(base.TestCase): def setUp(self): super(TestTypeDataModelRenames, self).setUp() child_model = ChildTestModel(one='baby_turtle_one', two='baby_turtle_two') self.model = TestModel(id='1234', original='turtles', child=child_model) def test_model_to_type(self): new_type = TestTypeRename.from_data_model(self.model) self.assertEqual(self.model.original, new_type.renamed) self.assertEqual(self.model.child.one, new_type.child_one) self.assertEqual(self.model.child.two, new_type.child_two) self.assertEqual(self.model.id, new_type.id) def test_model_to_type_with_subset_of_fields(self): new_type = TestTypeRenameSubset.from_data_model(self.model) self.assertEqual(self.model.original, new_type.renamed) self.assertEqual(self.model.id, new_type.id) self.assertFalse(hasattr(new_type, 'child_one')) self.assertFalse(hasattr(new_type, 'child_two')) def test_type_to_dict(self): new_type = TestTypeRename(id='1234', renamed='turtles', child_one='baby_turtle_one', child_two='baby_turtle_two') type_dict = new_type.to_dict() self.assertEqual(new_type.id, type_dict.get('id')) self.assertEqual(new_type.renamed, type_dict.get('original')) self.assertIn('child', type_dict) child_dict = type_dict.pop('child') self.assertEqual(new_type.child_one, 
child_dict.get('one')) self.assertEqual(new_type.child_two, child_dict.get('two')) def test_translate_dict_keys_to_data_model(self): new_type = TestTypeRename.from_data_model(self.model) new_type_vars = { k: getattr(new_type, k) for k in dir(new_type) if not ( callable(getattr(new_type, k)) or k.startswith('_')) } self.assertEqual( set(vars(self.model)), set(new_type.translate_dict_keys_to_data_model(new_type_vars)), ) def test_type_to_dict_with_tenant_id(self): type_dict = TestTypeTenantProject(tenant_id='1234').to_dict() self.assertEqual('1234', type_dict['project_id']) self.assertNotIn('tenant_id', type_dict) def test_type_to_dict_when_admin_state_up_is_null(self): rtype = TestTypeRename(id='1234', renamed='turtles', child_one='baby_turtle_one', child_two='baby_turtle_two', admin_state_up=None) rtype_dict = rtype.to_dict() self.assertFalse(rtype_dict['enabled']) class TestToDictModel(data_models.BaseDataModel): def __init__(self, text, parent=None): self.parent = parent self.child = None self.children = None self.text = text def set_children(self, children): self.children = children def set_child(self, child): self.child = child def set_parent(self, parent): self.parent = parent class TestDataModelToDict(base.TestCase): RECURSED_RESULT = {'parent': None, 'text': 'parent_text', 'child': {'parent': None, 'text': 'child_text', 'child': None, 'children': None}, 'children': [ {'parent': None, 'text': 'child1_text', 'child': None, 'children': None}, {'parent': None, 'text': 'child2_text', 'child': None, 'children': None}]} NO_RECURSE_RESULT = {'parent': None, 'text': 'parent_text', 'child': None, 'children': []} def setUp(self): super(TestDataModelToDict, self).setUp() self.model = TestToDictModel('parent_text') self.model.set_child(TestToDictModel('child_text', self.model)) self.model.set_children([TestToDictModel('child1_text', self.model), TestToDictModel('child2_text', self.model)]) def test_to_dict_no_recurse(self): self.assertEqual(self.model.to_dict(), self.NO_RECURSE_RESULT) def test_to_dict_recurse(self): self.assertEqual(self.model.to_dict(recurse=True), self.RECURSED_RESULT) def test_type_to_dict_with_project_id(self): type_dict = TestTypeTenantProject(project_id='1234').to_dict() self.assertEqual('1234', type_dict['project_id']) self.assertNotIn('tenant_id', type_dict) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/api/drivers/0000775000175000017500000000000000000000000021170 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/__init__.py0000664000175000017500000000107400000000000023303 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
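# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Octavia source tree): the type tests
# above exercise BaseType's rename machinery. A minimal wire-facing type that
# maps a renamed attribute onto a data-model attribute, round-tripped the way
# test_model_to_type and test_type_to_dict do, could look like this;
# FakeModel and FakeType are invented names.
# ---------------------------------------------------------------------------
from wsme import types as _wtypes

from octavia.api.common import types as _api_types
from octavia.common import data_models as _data_models


class _FakeModel(_data_models.BaseDataModel):
    def __init__(self, original=None, enabled=None):
        self.original = original
        self.enabled = enabled


class _FakeType(_api_types.BaseType):
    # wire-facing name -> model name ('.' descends into a child model, as
    # the child_one/child_two cases above demonstrate)
    _type_to_model_map = {'renamed': 'original',
                          'admin_state_up': 'enabled'}
    renamed = _wtypes.wsattr(_wtypes.StringType())
    admin_state_up = _wtypes.wsattr(bool)


def _round_trip_sketch():
    # from_data_model() pulls model.original into .renamed; to_dict() maps
    # it back to the model's key, exactly as the tests above assert.
    fake = _FakeType.from_data_model(_FakeModel(original='turtles',
                                                enabled=True))
    return fake.renamed, fake.to_dict()['original']  # ('turtles', 'turtles')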
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/api/drivers/amphora_driver/0000775000175000017500000000000000000000000024172 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/amphora_driver/__init__.py0000664000175000017500000000107400000000000026305 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/api/drivers/amphora_driver/v1/0000775000175000017500000000000000000000000024520 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py0000664000175000017500000000107400000000000026633 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py0000664000175000017500000010364500000000000031144 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
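# ---------------------------------------------------------------------------
# Reading aid (not part of the Octavia source tree): nearly every test in
# this file follows one recipe -- patch oslo_messaging.RPCClient.cast,
# invoke a driver method, then assert on the resulting cast. Schematically
# (the names below are placeholders, not real Octavia identifiers):
#
#     @mock.patch('oslo_messaging.RPCClient.cast')
#     def test_something(self, mock_cast):
#         self.amp_driver.something(provider_object)
#         payload = {consts.SOMETHING_ID: provider_object.something_id}
#         mock_cast.assert_called_with({}, 'something', **payload)
#
# The v1 driver casts bare IDs in its payloads; the v2 tests further on cast
# full provider-object dicts instead.
# ---------------------------------------------------------------------------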
from unittest import mock from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions from oslo_utils import uuidutils from octavia.api.drivers.amphora_driver.v1 import driver from octavia.common import constants as consts from octavia.network import base as network_base from octavia.tests.common import sample_data_models from octavia.tests.unit import base class TestAmphoraDriver(base.TestRpc): def setUp(self): super(TestAmphoraDriver, self).setUp() self.amp_driver = driver.AmphoraProviderDriver() self.sample_data = sample_data_models.SampleDriverDataModels() @mock.patch('octavia.common.utils.get_network_driver') def test_create_vip_port(self, mock_get_net_driver): mock_net_driver = mock.MagicMock() mock_get_net_driver.return_value = mock_net_driver mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip provider_vip_dict = self.amp_driver.create_vip_port( self.sample_data.lb_id, self.sample_data.project_id, self.sample_data.provider_vip_dict) self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict) @mock.patch('octavia.common.utils.get_network_driver') def test_create_vip_port_without_port_security_enabled( self, mock_get_net_driver): mock_net_driver = mock.MagicMock() mock_get_net_driver.return_value = mock_net_driver network = mock.MagicMock() network.port_security_enabled = False mock_net_driver.get_network.return_value = network mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip self.assertRaises(exceptions.DriverError, self.amp_driver.create_vip_port, self.sample_data.lb_id, self.sample_data.project_id, self.sample_data.provider_vip_dict) @mock.patch('octavia.common.utils.get_network_driver') def test_create_vip_port_failed(self, mock_get_net_driver): mock_net_driver = mock.MagicMock() mock_get_net_driver.return_value = mock_net_driver mock_net_driver.allocate_vip.side_effect = ( network_base.AllocateVIPException()) self.assertRaises(exceptions.DriverError, self.amp_driver.create_vip_port, self.sample_data.lb_id, self.sample_data.project_id, self.sample_data.provider_vip_dict) # Load Balancer @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_create(self, mock_cast): provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) self.amp_driver.loadbalancer_create(provider_lb) payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, consts.FLAVOR: None, consts.AVAILABILITY_ZONE: None} mock_cast.assert_called_with({}, 'create_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_delete(self, mock_cast): provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) self.amp_driver.loadbalancer_delete(provider_lb) payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, 'cascade': False} mock_cast.assert_called_with({}, 'delete_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_failover(self, mock_cast): self.amp_driver.loadbalancer_failover(self.sample_data.lb_id) payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id} mock_cast.assert_called_with({}, 'failover_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_update(self, mock_cast): old_provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id, admin_state_up=True) lb_dict = {'enabled': True} self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) payload = 
{consts.LOAD_BALANCER_ID: self.sample_data.lb_id, consts.LOAD_BALANCER_UPDATES: lb_dict} mock_cast.assert_called_with({}, 'update_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_update_name(self, mock_cast): old_provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id, name='Great LB') lb_dict = {'name': 'Great LB'} self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, consts.LOAD_BALANCER_UPDATES: lb_dict} mock_cast.assert_called_with({}, 'update_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_update_qos(self, mock_cast): qos_policy_id = uuidutils.generate_uuid() old_provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id, vip_qos_policy_id=qos_policy_id) lb_dict = {'vip': {'qos_policy_id': qos_policy_id}} self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, consts.LOAD_BALANCER_UPDATES: lb_dict} mock_cast.assert_called_with({}, 'update_load_balancer', **payload) # Listener @mock.patch('oslo_messaging.RPCClient.cast') def test_listener_create(self, mock_cast): provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id) self.amp_driver.listener_create(provider_listener) payload = {consts.LISTENER_ID: self.sample_data.listener1_id} mock_cast.assert_called_with({}, 'create_listener', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_listener_delete(self, mock_cast): provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id) self.amp_driver.listener_delete(provider_listener) payload = {consts.LISTENER_ID: self.sample_data.listener1_id} mock_cast.assert_called_with({}, 'delete_listener', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_listener_update(self, mock_cast): old_provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id) provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id, admin_state_up=False) listener_dict = {'enabled': False} self.amp_driver.listener_update(old_provider_listener, provider_listener) payload = {consts.LISTENER_ID: self.sample_data.listener1_id, consts.LISTENER_UPDATES: listener_dict} mock_cast.assert_called_with({}, 'update_listener', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_listener_update_name(self, mock_cast): old_provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id) provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id, name='Great Listener') listener_dict = {'name': 'Great Listener'} self.amp_driver.listener_update(old_provider_listener, provider_listener) payload = {consts.LISTENER_ID: self.sample_data.listener1_id, consts.LISTENER_UPDATES: listener_dict} mock_cast.assert_called_with({}, 'update_listener', **payload) # Pool @mock.patch('oslo_messaging.RPCClient.cast') def test_pool_create(self, mock_cast): provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id, lb_algorithm=consts.LB_ALGORITHM_ROUND_ROBIN) self.amp_driver.pool_create(provider_pool) payload = {consts.POOL_ID: self.sample_data.pool1_id} mock_cast.assert_called_with({}, 'create_pool', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def 
test_pool_create_unsupported_algorithm(self, mock_cast): provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id) provider_pool.lb_algorithm = 'foo' self.assertRaises( exceptions.UnsupportedOptionError, self.amp_driver.pool_create, provider_pool) mock_cast.assert_not_called() @mock.patch('oslo_messaging.RPCClient.cast') def test_pool_delete(self, mock_cast): provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id) self.amp_driver.pool_delete(provider_pool) payload = {consts.POOL_ID: self.sample_data.pool1_id} mock_cast.assert_called_with({}, 'delete_pool', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_pool_update(self, mock_cast): old_provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id) provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id, admin_state_up=True, ca_tls_container_data='CA DATA', ca_tls_container_ref='CA REF', crl_container_data='CRL DATA', crl_container_ref='CRL REF', description='TEST DESCRIPTION', name='TEST NAME', lb_algorithm=consts.LB_ALGORITHM_SOURCE_IP, session_persistence='FAKE SP', tls_container_data='TLS DATA', tls_container_ref='TLS REF', tls_enabled=False) pool_dict = {'description': 'TEST DESCRIPTION', 'lb_algorithm': 'SOURCE_IP', 'name': 'TEST NAME', 'session_persistence': 'FAKE SP', 'tls_enabled': False, 'enabled': True, 'tls_certificate_id': 'TLS REF', 'ca_tls_certificate_id': 'CA REF', 'crl_container_id': 'CRL REF'} self.amp_driver.pool_update(old_provider_pool, provider_pool) payload = {consts.POOL_ID: self.sample_data.pool1_id, consts.POOL_UPDATES: pool_dict} mock_cast.assert_called_with({}, 'update_pool', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_pool_update_name(self, mock_cast): old_provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id) provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id, name='Great pool', admin_state_up=True, tls_enabled=True) pool_dict = {'name': 'Great pool', 'enabled': True, 'tls_enabled': True} self.amp_driver.pool_update(old_provider_pool, provider_pool) payload = {consts.POOL_ID: self.sample_data.pool1_id, consts.POOL_UPDATES: pool_dict} mock_cast.assert_called_with({}, 'update_pool', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_pool_update_unsupported_algorithm(self, mock_cast): old_provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id) provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id) provider_pool.lb_algorithm = 'foo' self.assertRaises( exceptions.UnsupportedOptionError, self.amp_driver.pool_update, old_provider_pool, provider_pool) mock_cast.assert_not_called() # Member @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_create(self, mock_cast, mock_pool_get, mock_session): provider_member = driver_dm.Member( member_id=self.sample_data.member1_id) self.amp_driver.member_create(provider_member) payload = {consts.MEMBER_ID: self.sample_data.member1_id} mock_cast.assert_called_with({}, 'create_member', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get, mock_session): mock_lb = mock.MagicMock() mock_lb.vip = mock.MagicMock() mock_lb.vip.ip_address = "192.0.1.1" mock_listener = mock.MagicMock() mock_listener.load_balancer = mock_lb mock_pool = mock.MagicMock() mock_pool.protocol = 
consts.PROTOCOL_UDP mock_pool.listeners = [mock_listener] mock_pool_get.return_value = mock_pool provider_member = driver_dm.Member( member_id=self.sample_data.member1_id, address="192.0.2.1") self.amp_driver.member_create(provider_member) payload = {consts.MEMBER_ID: self.sample_data.member1_id} mock_cast.assert_called_with({}, 'create_member', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get, mock_session): mock_lb = mock.MagicMock() mock_lb.vip = mock.MagicMock() mock_lb.vip.ip_address = "fe80::1" mock_listener = mock.MagicMock() mock_listener.load_balancer = mock_lb mock_pool = mock.MagicMock() mock_pool.protocol = consts.PROTOCOL_UDP mock_pool.listeners = [mock_listener] mock_pool_get.return_value = mock_pool provider_member = driver_dm.Member( member_id=self.sample_data.member1_id, address="192.0.2.1") self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.member_create, provider_member) @mock.patch('oslo_messaging.RPCClient.cast') def test_member_delete(self, mock_cast): provider_member = driver_dm.Member( member_id=self.sample_data.member1_id) self.amp_driver.member_delete(provider_member) payload = {consts.MEMBER_ID: self.sample_data.member1_id} mock_cast.assert_called_with({}, 'delete_member', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_member_update(self, mock_cast): old_provider_member = driver_dm.Member( member_id=self.sample_data.member1_id) provider_member = driver_dm.Member( member_id=self.sample_data.member1_id, admin_state_up=True) member_dict = {'enabled': True} self.amp_driver.member_update(old_provider_member, provider_member) payload = {consts.MEMBER_ID: self.sample_data.member1_id, consts.MEMBER_UPDATES: member_dict} mock_cast.assert_called_with({}, 'update_member', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_member_update_name(self, mock_cast): old_provider_member = driver_dm.Member( member_id=self.sample_data.member1_id) provider_member = driver_dm.Member( member_id=self.sample_data.member1_id, name='Great member') member_dict = {'name': 'Great member'} self.amp_driver.member_update(old_provider_member, provider_member) payload = {consts.MEMBER_ID: self.sample_data.member1_id, consts.MEMBER_UPDATES: member_dict} mock_cast.assert_called_with({}, 'update_member', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session): mock_pool = mock.MagicMock() mock_pool.members = self.sample_data.db_pool1_members mock_pool_get.return_value = mock_pool prov_mem_update = driver_dm.Member( member_id=self.sample_data.member2_id, pool_id=self.sample_data.pool1_id, admin_state_up=False, address='192.0.2.17', monitor_address='192.0.2.77', protocol_port=80, name='updated-member2') prov_new_member = driver_dm.Member( member_id=self.sample_data.member3_id, pool_id=self.sample_data.pool1_id, address='192.0.2.18', monitor_address='192.0.2.28', protocol_port=80, name='member3') prov_members = [prov_mem_update, prov_new_member] update_mem_dict = {'ip_address': '192.0.2.17', 'name': 'updated-member2', 'monitor_address': '192.0.2.77', 'id': self.sample_data.member2_id, 'enabled': False, 'protocol_port': 80, 'pool_id': self.sample_data.pool1_id} self.amp_driver.member_batch_update( 
self.sample_data.pool1_id, prov_members) payload = {'old_member_ids': [self.sample_data.member1_id], 'new_member_ids': [self.sample_data.member3_id], 'updated_members': [update_mem_dict]} mock_cast.assert_called_with({}, 'batch_update_members', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update_no_admin_addr(self, mock_cast, mock_pool_get, mock_session): mock_pool = mock.MagicMock() mock_pool.members = self.sample_data.db_pool1_members mock_pool_get.return_value = mock_pool prov_mem_update = driver_dm.Member( member_id=self.sample_data.member2_id, pool_id=self.sample_data.pool1_id, monitor_address='192.0.2.77', protocol_port=80, name='updated-member2') prov_new_member = driver_dm.Member( member_id=self.sample_data.member3_id, pool_id=self.sample_data.pool1_id, address='192.0.2.18', monitor_address='192.0.2.28', protocol_port=80, name='member3') prov_members = [prov_mem_update, prov_new_member] update_mem_dict = {'name': 'updated-member2', 'monitor_address': '192.0.2.77', 'id': self.sample_data.member2_id, 'protocol_port': 80, 'pool_id': self.sample_data.pool1_id} self.amp_driver.member_batch_update( self.sample_data.pool1_id, prov_members) payload = {'old_member_ids': [self.sample_data.member1_id], 'new_member_ids': [self.sample_data.member3_id], 'updated_members': [update_mem_dict]} mock_cast.assert_called_with({}, 'batch_update_members', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update_clear_already_empty( self, mock_cast, mock_pool_get, mock_session): """Expect that we will pass an empty payload if directed. Logic for whether or not to attempt this will be done above the driver layer, so our driver is responsible to forward the request even if it is a perceived no-op. 
""" mock_pool = mock.MagicMock() mock_pool_get.return_value = mock_pool self.amp_driver.member_batch_update( self.sample_data.pool1_id, []) payload = {'old_member_ids': [], 'new_member_ids': [], 'updated_members': []} mock_cast.assert_called_with({}, 'batch_update_members', **payload) # Health Monitor @mock.patch('oslo_messaging.RPCClient.cast') def test_health_monitor_create(self, mock_cast): provider_HM = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id) self.amp_driver.health_monitor_create(provider_HM) payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id} mock_cast.assert_called_with({}, 'create_health_monitor', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_health_monitor_delete(self, mock_cast): provider_HM = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id) self.amp_driver.health_monitor_delete(provider_HM) payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id} mock_cast.assert_called_with({}, 'delete_health_monitor', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get, mock_session): mock_lb = mock.MagicMock() mock_lb.vip = mock.MagicMock() mock_lb.vip.ip_address = "192.0.1.1" mock_listener = mock.MagicMock() mock_listener.load_balancer = mock_lb mock_pool = mock.MagicMock() mock_pool.protocol = consts.PROTOCOL_UDP mock_pool.listeners = [mock_listener] mock_pool.members = self.sample_data.db_pool1_members mock_pool_get.return_value = mock_pool prov_mem_update = driver_dm.Member( member_id=self.sample_data.member2_id, pool_id=self.sample_data.pool1_id, admin_state_up=False, address='192.0.2.17', monitor_address='192.0.2.77', protocol_port=80, name='updated-member2') prov_new_member = driver_dm.Member( member_id=self.sample_data.member3_id, pool_id=self.sample_data.pool1_id, address='192.0.2.18', monitor_address='192.0.2.28', protocol_port=80, name='member3') prov_members = [prov_mem_update, prov_new_member] update_mem_dict = {'ip_address': '192.0.2.17', 'name': 'updated-member2', 'monitor_address': '192.0.2.77', 'id': self.sample_data.member2_id, 'enabled': False, 'protocol_port': 80, 'pool_id': self.sample_data.pool1_id} self.amp_driver.member_batch_update( self.sample_data.pool1_id, prov_members) payload = {'old_member_ids': [self.sample_data.member1_id], 'new_member_ids': [self.sample_data.member3_id], 'updated_members': [update_mem_dict]} mock_cast.assert_called_with({}, 'batch_update_members', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast, mock_pool_get, mock_session): mock_lb = mock.MagicMock() mock_lb.vip = mock.MagicMock() mock_lb.vip.ip_address = "192.0.1.1" mock_listener = mock.MagicMock() mock_listener.load_balancer = mock_lb mock_pool = mock.MagicMock() mock_pool.protocol = consts.PROTOCOL_UDP mock_pool.listeners = [mock_listener] mock_pool.members = self.sample_data.db_pool1_members mock_pool_get.return_value = mock_pool prov_mem_update = driver_dm.Member( member_id=self.sample_data.member2_id, pool_id=self.sample_data.pool1_id, admin_state_up=False, address='fe80::1', monitor_address='fe80::2', protocol_port=80, name='updated-member2') prov_new_member = driver_dm.Member( member_id=self.sample_data.member3_id, pool_id=self.sample_data.pool1_id, address='192.0.2.18', 
monitor_address='192.0.2.28', protocol_port=80, name='member3') prov_members = [prov_mem_update, prov_new_member] self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.member_batch_update, self.sample_data.pool1_id, prov_members) @mock.patch('oslo_messaging.RPCClient.cast') def test_health_monitor_update(self, mock_cast): old_provider_hm = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id) provider_hm = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True, max_retries=1, max_retries_down=2) hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2} self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id, consts.HEALTH_MONITOR_UPDATES: hm_dict} mock_cast.assert_called_with({}, 'update_health_monitor', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_health_monitor_update_name(self, mock_cast): old_provider_hm = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id) provider_hm = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id, name='Great HM') hm_dict = {'name': 'Great HM'} self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id, consts.HEALTH_MONITOR_UPDATES: hm_dict} mock_cast.assert_called_with({}, 'update_health_monitor', **payload) # L7 Policy @mock.patch('oslo_messaging.RPCClient.cast') def test_l7policy_create(self, mock_cast): provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id) self.amp_driver.l7policy_create(provider_l7policy) payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id} mock_cast.assert_called_with({}, 'create_l7policy', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7policy_delete(self, mock_cast): provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id) self.amp_driver.l7policy_delete(provider_l7policy) payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id} mock_cast.assert_called_with({}, 'delete_l7policy', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7policy_update(self, mock_cast): old_provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id) provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True) l7policy_dict = {'enabled': True} self.amp_driver.l7policy_update(old_provider_l7policy, provider_l7policy) payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id, consts.L7POLICY_UPDATES: l7policy_dict} mock_cast.assert_called_with({}, 'update_l7policy', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7policy_update_name(self, mock_cast): old_provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id) provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy') l7policy_dict = {'name': 'Great L7Policy'} self.amp_driver.l7policy_update(old_provider_l7policy, provider_l7policy) payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id, consts.L7POLICY_UPDATES: l7policy_dict} mock_cast.assert_called_with({}, 'update_l7policy', **payload) # L7 Rules @mock.patch('oslo_messaging.RPCClient.cast') def test_l7rule_create(self, mock_cast): provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id) self.amp_driver.l7rule_create(provider_l7rule) payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id} 
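        # As with the other v1 tests, the payload carries only the object's
        # ID; the v2 driver tests cast the full provider dict instead.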
mock_cast.assert_called_with({}, 'create_l7rule', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7rule_delete(self, mock_cast): provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id) self.amp_driver.l7rule_delete(provider_l7rule) payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id} mock_cast.assert_called_with({}, 'delete_l7rule', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7rule_update(self, mock_cast): old_provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id) provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True) l7rule_dict = {'enabled': True} self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id, consts.L7RULE_UPDATES: l7rule_dict} mock_cast.assert_called_with({}, 'update_l7rule', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7rule_update_invert(self, mock_cast): old_provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id) provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id, invert=True) l7rule_dict = {'invert': True} self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id, consts.L7RULE_UPDATES: l7rule_dict} mock_cast.assert_called_with({}, 'update_l7rule', **payload) # Flavor def test_get_supported_flavor_metadata(self): test_schema = { "properties": { "test_name": {"description": "Test description"}, "test_name2": {"description": "Another description"}}} ref_dict = {"test_name": "Test description", "test_name2": "Another description"} # mock out the supported_flavor_metadata with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' 'SUPPORTED_FLAVOR_SCHEMA', test_schema): result = self.amp_driver.get_supported_flavor_metadata() self.assertEqual(ref_dict, result) # Test for bad schema with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): self.assertRaises(exceptions.DriverError, self.amp_driver.get_supported_flavor_metadata) def test_validate_flavor(self): ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE} self.amp_driver.validate_flavor(ref_dict) # Test bad flavor metadata value is bad ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'} self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.validate_flavor, ref_dict) # Test bad flavor metadata key ref_dict = {'bogus': 'bogus'} self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.validate_flavor, ref_dict) # Test for bad schema with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): self.assertRaises(exceptions.DriverError, self.amp_driver.validate_flavor, 'bogus') # Availability Zone def test_get_supported_availability_zone_metadata(self): test_schema = { "properties": { "test_name": {"description": "Test description"}, "test_name2": {"description": "Another description"}}} ref_dict = {"test_name": "Test description", "test_name2": "Another description"} # mock out the supported_availability_zone_metadata with mock.patch('octavia.api.drivers.amphora_driver.' 'availability_zone_schema.' 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', test_schema): result = self.amp_driver.get_supported_availability_zone_metadata() self.assertEqual(ref_dict, result) # Test for bad schema with mock.patch('octavia.api.drivers.amphora_driver.' 'availability_zone_schema.' 
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'): self.assertRaises( exceptions.DriverError, self.amp_driver.get_supported_availability_zone_metadata) def test_validate_availability_zone(self): ref_dict = {consts.COMPUTE_ZONE: 'my_compute_zone'} self.amp_driver.validate_availability_zone(ref_dict) # Test bad availability zone metadata key ref_dict = {'bogus': 'bogus'} self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.validate_availability_zone, ref_dict) # Test for bad schema with mock.patch('octavia.api.drivers.amphora_driver.' 'availability_zone_schema.' 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'): self.assertRaises(exceptions.DriverError, self.amp_driver.validate_availability_zone, 'bogus') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/api/drivers/amphora_driver/v2/0000775000175000017500000000000000000000000024521 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py0000664000175000017500000000107400000000000026634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py0000664000175000017500000010775700000000000031155 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
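# ---------------------------------------------------------------------------
# Reading aid (not part of the Octavia source tree): the v2 driver tests in
# this file mirror the v1 tests above almost method-for-method; the key
# difference is the RPC payload shape. Copied from the two load-balancer
# create tests:
#
#     # v1
#     payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
#                consts.FLAVOR: None, consts.AVAILABILITY_ZONE: None}
#     # v2
#     payload = {consts.LOADBALANCER: provider_lb.to_dict(),
#                consts.FLAVOR: None, consts.AVAILABILITY_ZONE: None}
#
# Both then assert mock_cast.assert_called_with({}, 'create_load_balancer',
# **payload); only the serialized object versus bare ID changes.
# ---------------------------------------------------------------------------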
from unittest import mock from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions from oslo_utils import uuidutils from octavia.api.drivers.amphora_driver.v2 import driver from octavia.common import constants as consts from octavia.network import base as network_base from octavia.tests.common import sample_data_models from octavia.tests.unit import base class TestAmphoraDriver(base.TestRpc): def setUp(self): super(TestAmphoraDriver, self).setUp() self.amp_driver = driver.AmphoraProviderDriver() self.sample_data = sample_data_models.SampleDriverDataModels() @mock.patch('octavia.common.utils.get_network_driver') def test_create_vip_port(self, mock_get_net_driver): mock_net_driver = mock.MagicMock() mock_get_net_driver.return_value = mock_net_driver mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip provider_vip_dict = self.amp_driver.create_vip_port( self.sample_data.lb_id, self.sample_data.project_id, self.sample_data.provider_vip_dict) self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict) @mock.patch('octavia.common.utils.get_network_driver') def test_create_vip_port_without_port_security_enabled( self, mock_get_net_driver): mock_net_driver = mock.MagicMock() mock_get_net_driver.return_value = mock_net_driver network = mock.MagicMock() network.port_security_enabled = False mock_net_driver.get_network.return_value = network mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip self.assertRaises(exceptions.DriverError, self.amp_driver.create_vip_port, self.sample_data.lb_id, self.sample_data.project_id, self.sample_data.provider_vip_dict) @mock.patch('octavia.common.utils.get_network_driver') def test_create_vip_port_failed(self, mock_get_net_driver): mock_net_driver = mock.MagicMock() mock_get_net_driver.return_value = mock_net_driver mock_net_driver.allocate_vip.side_effect = ( network_base.AllocateVIPException()) self.assertRaises(exceptions.DriverError, self.amp_driver.create_vip_port, self.sample_data.lb_id, self.sample_data.project_id, self.sample_data.provider_vip_dict) # Load Balancer @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_create(self, mock_cast): provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) self.amp_driver.loadbalancer_create(provider_lb) payload = {consts.LOADBALANCER: provider_lb.to_dict(), consts.FLAVOR: None, consts.AVAILABILITY_ZONE: None} mock_cast.assert_called_with({}, 'create_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_delete(self, mock_cast): provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) self.amp_driver.loadbalancer_delete(provider_lb) payload = {consts.LOADBALANCER: provider_lb.to_dict(), 'cascade': False} mock_cast.assert_called_with({}, 'delete_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_failover(self, mock_cast): self.amp_driver.loadbalancer_failover(self.sample_data.lb_id) payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id} mock_cast.assert_called_with({}, 'failover_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_update(self, mock_cast): old_provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id, admin_state_up=True) lb_dict = {'enabled': True} self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) payload = 
{consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(), consts.LOAD_BALANCER_UPDATES: lb_dict} mock_cast.assert_called_with({}, 'update_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_update_name(self, mock_cast): old_provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id, name='Great LB') lb_dict = {'name': 'Great LB'} self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) payload = {consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(), consts.LOAD_BALANCER_UPDATES: lb_dict} mock_cast.assert_called_with({}, 'update_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_loadbalancer_update_qos(self, mock_cast): qos_policy_id = uuidutils.generate_uuid() old_provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id) provider_lb = driver_dm.LoadBalancer( loadbalancer_id=self.sample_data.lb_id, vip_qos_policy_id=qos_policy_id) lb_dict = {'vip': {'qos_policy_id': qos_policy_id}} self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) payload = {consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(), consts.LOAD_BALANCER_UPDATES: lb_dict} mock_cast.assert_called_with({}, 'update_load_balancer', **payload) # Listener @mock.patch('oslo_messaging.RPCClient.cast') def test_listener_create(self, mock_cast): provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id) self.amp_driver.listener_create(provider_listener) payload = {consts.LISTENER: provider_listener.to_dict()} mock_cast.assert_called_with({}, 'create_listener', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_listener_delete(self, mock_cast): provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id) self.amp_driver.listener_delete(provider_listener) payload = {consts.LISTENER: provider_listener.to_dict()} mock_cast.assert_called_with({}, 'delete_listener', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_listener_update(self, mock_cast): old_provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id) provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id, admin_state_up=False) listener_dict = provider_listener.to_dict() listener_dict['admin_state_up'] = False self.amp_driver.listener_update(old_provider_listener, provider_listener) payload = {consts.ORIGINAL_LISTENER: old_provider_listener.to_dict(), consts.LISTENER_UPDATES: listener_dict} mock_cast.assert_called_with({}, 'update_listener', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_listener_update_name(self, mock_cast): old_provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id) provider_listener = driver_dm.Listener( listener_id=self.sample_data.listener1_id, name='Great Listener') listener_dict = provider_listener.to_dict() listener_dict['name'] = 'Great Listener' self.amp_driver.listener_update(old_provider_listener, provider_listener) payload = {consts.ORIGINAL_LISTENER: old_provider_listener.to_dict(), consts.LISTENER_UPDATES: listener_dict} mock_cast.assert_called_with({}, 'update_listener', **payload) # Pool @mock.patch('oslo_messaging.RPCClient.cast') def test_pool_create(self, mock_cast): provider_pool = driver_dm.Pool( pool_id=self.sample_data.pool1_id, lb_algorithm=consts.LB_ALGORITHM_ROUND_ROBIN) self.amp_driver.pool_create(provider_pool) payload = {consts.POOL: provider_pool.to_dict()} 
        mock_cast.assert_called_with({}, 'create_pool', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_create_unsupported_algorithm(self, mock_cast):
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        provider_pool.lb_algorithm = 'foo'
        self.assertRaises(
            exceptions.UnsupportedOptionError,
            self.amp_driver.pool_create,
            provider_pool)
        mock_cast.assert_not_called()

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_delete(self, mock_cast):
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        self.amp_driver.pool_delete(provider_pool)
        payload = {consts.POOL: provider_pool.to_dict()}
        mock_cast.assert_called_with({}, 'delete_pool', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_update(self, mock_cast):
        old_provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id, admin_state_up=True,
            ca_tls_container_data='CA DATA', ca_tls_container_ref='CA REF',
            crl_container_data='CRL DATA', crl_container_ref='CRL REF',
            description='TEST DESCRIPTION', name='TEST NAME',
            lb_algorithm=consts.LB_ALGORITHM_SOURCE_IP,
            session_persistence='FAKE SP', tls_container_data='TLS DATA',
            tls_container_ref='TLS REF', tls_enabled=False)
        pool_dict = {'description': 'TEST DESCRIPTION',
                     'lb_algorithm': 'SOURCE_IP', 'name': 'TEST NAME',
                     'session_persistence': 'FAKE SP', 'tls_enabled': False,
                     'enabled': True, 'tls_certificate_id': 'TLS REF',
                     'ca_tls_certificate_id': 'CA REF',
                     'crl_container_id': 'CRL REF'}
        self.amp_driver.pool_update(old_provider_pool, provider_pool)
        payload = {consts.ORIGINAL_POOL: old_provider_pool.to_dict(),
                   consts.POOL_UPDATES: pool_dict}
        mock_cast.assert_called_with({}, 'update_pool', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_update_name(self, mock_cast):
        old_provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id, name='Great pool',
            admin_state_up=True, tls_enabled=True)
        pool_dict = {'name': 'Great pool', 'enabled': True,
                     'tls_enabled': True}
        self.amp_driver.pool_update(old_provider_pool, provider_pool)
        payload = {consts.ORIGINAL_POOL: old_provider_pool.to_dict(),
                   consts.POOL_UPDATES: pool_dict}
        mock_cast.assert_called_with({}, 'update_pool', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_update_unsupported_algorithm(self, mock_cast):
        old_provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        provider_pool.lb_algorithm = 'foo'
        self.assertRaises(
            exceptions.UnsupportedOptionError,
            self.amp_driver.pool_update,
            old_provider_pool,
            provider_pool)
        mock_cast.assert_not_called()

    # Member
    @mock.patch('octavia.db.api.get_session')
    @mock.patch('octavia.db.repositories.PoolRepository.get')
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_member_create(self, mock_cast, mock_pool_get, mock_session):
        provider_member = driver_dm.Member(
            member_id=self.sample_data.member1_id)
        self.amp_driver.member_create(provider_member)
        payload = {consts.MEMBER: provider_member.to_dict()}
        mock_cast.assert_called_with({}, 'create_member', **payload)

    @mock.patch('octavia.db.api.get_session')
    @mock.patch('octavia.db.repositories.PoolRepository.get')
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get,
                                    mock_session):
        mock_lb = mock.MagicMock()
        mock_lb.vip = mock.MagicMock()
        mock_lb.vip.ip_address = "192.0.1.1"
        mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb mock_pool = mock.MagicMock() mock_pool.protocol = consts.PROTOCOL_UDP mock_pool.listeners = [mock_listener] mock_pool_get.return_value = mock_pool provider_member = driver_dm.Member( member_id=self.sample_data.member1_id, address="192.0.2.1") self.amp_driver.member_create(provider_member) payload = {consts.MEMBER: provider_member.to_dict()} mock_cast.assert_called_with({}, 'create_member', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get, mock_session): mock_lb = mock.MagicMock() mock_lb.vip = mock.MagicMock() mock_lb.vip.ip_address = "fe80::1" mock_listener = mock.MagicMock() mock_listener.load_balancer = mock_lb mock_pool = mock.MagicMock() mock_pool.protocol = consts.PROTOCOL_UDP mock_pool.listeners = [mock_listener] mock_pool_get.return_value = mock_pool provider_member = driver_dm.Member( member_id=self.sample_data.member1_id, address="192.0.2.1") self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.member_create, provider_member) @mock.patch('oslo_messaging.RPCClient.cast') def test_member_delete(self, mock_cast): provider_member = driver_dm.Member( member_id=self.sample_data.member1_id) self.amp_driver.member_delete(provider_member) payload = {consts.MEMBER: provider_member.to_dict()} mock_cast.assert_called_with({}, 'delete_member', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_member_update(self, mock_cast): old_provider_member = driver_dm.Member( member_id=self.sample_data.member1_id) provider_member = driver_dm.Member( member_id=self.sample_data.member1_id, admin_state_up=True) member_dict = provider_member.to_dict() member_dict.pop(consts.MEMBER_ID) member_dict['enabled'] = member_dict.pop('admin_state_up') self.amp_driver.member_update(old_provider_member, provider_member) payload = {consts.ORIGINAL_MEMBER: old_provider_member.to_dict(), consts.MEMBER_UPDATES: member_dict} mock_cast.assert_called_with({}, 'update_member', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_member_update_name(self, mock_cast): old_provider_member = driver_dm.Member( member_id=self.sample_data.member1_id) provider_member = driver_dm.Member( member_id=self.sample_data.member1_id, name='Great member') member_dict = provider_member.to_dict() member_dict.pop(consts.MEMBER_ID) self.amp_driver.member_update(old_provider_member, provider_member) payload = {consts.ORIGINAL_MEMBER: old_provider_member.to_dict(), consts.MEMBER_UPDATES: member_dict} mock_cast.assert_called_with({}, 'update_member', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session): mock_pool = mock.MagicMock() mock_pool.members = self.sample_data.db_pool1_members mock_pool_get.return_value = mock_pool prov_mem_update = driver_dm.Member( member_id=self.sample_data.member2_id, pool_id=self.sample_data.pool1_id, admin_state_up=False, address='192.0.2.17', monitor_address='192.0.2.77', protocol_port=80, name='updated-member2') prov_new_member = driver_dm.Member( member_id=self.sample_data.member3_id, pool_id=self.sample_data.pool1_id, address='192.0.2.18', monitor_address='192.0.2.28', protocol_port=80, name='member3') prov_members = [prov_mem_update, prov_new_member] update_mem_dict = {'ip_address': 
'192.0.2.17', 'name': 'updated-member2', 'monitor_address': '192.0.2.77', 'id': self.sample_data.member2_id, 'enabled': False, 'protocol_port': 80, 'pool_id': self.sample_data.pool1_id} self.amp_driver.member_batch_update( self.sample_data.pool1_id, prov_members) payload = { 'old_members': [self.sample_data.db_pool1_members[0].to_dict()], 'new_members': [prov_new_member.to_dict()], 'updated_members': [update_mem_dict]} mock_cast.assert_called_with({}, 'batch_update_members', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update_no_admin_addr(self, mock_cast, mock_pool_get, mock_session): mock_pool = mock.MagicMock() mock_pool.members = self.sample_data.db_pool1_members mock_pool_get.return_value = mock_pool prov_mem_update = driver_dm.Member( member_id=self.sample_data.member2_id, pool_id=self.sample_data.pool1_id, monitor_address='192.0.2.77', protocol_port=80, name='updated-member2') prov_new_member = driver_dm.Member( member_id=self.sample_data.member3_id, pool_id=self.sample_data.pool1_id, address='192.0.2.18', monitor_address='192.0.2.28', protocol_port=80, name='member3') prov_members = [prov_mem_update, prov_new_member] update_mem_dict = {'name': 'updated-member2', 'monitor_address': '192.0.2.77', 'id': self.sample_data.member2_id, 'protocol_port': 80, 'pool_id': self.sample_data.pool1_id} self.amp_driver.member_batch_update( self.sample_data.pool1_id, prov_members) payload = { 'old_members': [self.sample_data.db_pool1_members[0].to_dict()], 'new_members': [prov_new_member.to_dict()], 'updated_members': [update_mem_dict]} mock_cast.assert_called_with({}, 'batch_update_members', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update_clear_already_empty( self, mock_cast, mock_pool_get, mock_session): """Expect that we will pass an empty payload if directed. Logic for whether or not to attempt this will be done above the driver layer, so our driver is responsible to forward the request even if it is a perceived no-op. 
""" mock_pool = mock.MagicMock() mock_pool_get.return_value = mock_pool self.amp_driver.member_batch_update( self.sample_data.pool1_id, []) payload = {'old_members': [], 'new_members': [], 'updated_members': []} mock_cast.assert_called_with({}, 'batch_update_members', **payload) # Health Monitor @mock.patch('oslo_messaging.RPCClient.cast') def test_health_monitor_create(self, mock_cast): provider_HM = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id) self.amp_driver.health_monitor_create(provider_HM) payload = {consts.HEALTH_MONITOR: provider_HM.to_dict()} mock_cast.assert_called_with({}, 'create_health_monitor', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_health_monitor_delete(self, mock_cast): provider_HM = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id) self.amp_driver.health_monitor_delete(provider_HM) payload = {consts.HEALTH_MONITOR: provider_HM.to_dict()} mock_cast.assert_called_with({}, 'delete_health_monitor', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get, mock_session): mock_lb = mock.MagicMock() mock_lb.vip = mock.MagicMock() mock_lb.vip.ip_address = "192.0.1.1" mock_listener = mock.MagicMock() mock_listener.load_balancer = mock_lb mock_pool = mock.MagicMock() mock_pool.protocol = consts.PROTOCOL_UDP mock_pool.listeners = [mock_listener] mock_pool.members = self.sample_data.db_pool1_members mock_pool_get.return_value = mock_pool prov_mem_update = driver_dm.Member( member_id=self.sample_data.member2_id, pool_id=self.sample_data.pool1_id, admin_state_up=False, address='192.0.2.17', monitor_address='192.0.2.77', protocol_port=80, name='updated-member2') prov_new_member = driver_dm.Member( member_id=self.sample_data.member3_id, pool_id=self.sample_data.pool1_id, address='192.0.2.18', monitor_address='192.0.2.28', protocol_port=80, name='member3') prov_members = [prov_mem_update, prov_new_member] update_mem_dict = {'ip_address': '192.0.2.17', 'name': 'updated-member2', 'monitor_address': '192.0.2.77', 'id': self.sample_data.member2_id, 'enabled': False, 'protocol_port': 80, 'pool_id': self.sample_data.pool1_id} self.amp_driver.member_batch_update( self.sample_data.pool1_id, prov_members) payload = {'old_members': [self.sample_data.db_pool1_members[0].to_dict()], 'new_members': [prov_new_member.to_dict()], 'updated_members': [update_mem_dict]} mock_cast.assert_called_with({}, 'batch_update_members', **payload) @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.db.repositories.PoolRepository.get') @mock.patch('oslo_messaging.RPCClient.cast') def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast, mock_pool_get, mock_session): mock_lb = mock.MagicMock() mock_lb.vip = mock.MagicMock() mock_lb.vip.ip_address = "192.0.1.1" mock_listener = mock.MagicMock() mock_listener.load_balancer = mock_lb mock_pool = mock.MagicMock() mock_pool.protocol = consts.PROTOCOL_UDP mock_pool.listeners = [mock_listener] mock_pool.members = self.sample_data.db_pool1_members mock_pool_get.return_value = mock_pool prov_mem_update = driver_dm.Member( member_id=self.sample_data.member2_id, pool_id=self.sample_data.pool1_id, admin_state_up=False, address='fe80::1', monitor_address='fe80::2', protocol_port=80, name='updated-member2') prov_new_member = driver_dm.Member( member_id=self.sample_data.member3_id, pool_id=self.sample_data.pool1_id, address='192.0.2.18', 
monitor_address='192.0.2.28', protocol_port=80, name='member3') prov_members = [prov_mem_update, prov_new_member] self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.member_batch_update, self.sample_data.pool1_id, prov_members) @mock.patch('oslo_messaging.RPCClient.cast') def test_health_monitor_update(self, mock_cast): old_provider_hm = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id) provider_hm = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True, max_retries=1, max_retries_down=2) hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2} self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) payload = {consts.ORIGINAL_HEALTH_MONITOR: old_provider_hm.to_dict(), consts.HEALTH_MONITOR_UPDATES: hm_dict} mock_cast.assert_called_with({}, 'update_health_monitor', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_health_monitor_update_name(self, mock_cast): old_provider_hm = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id) provider_hm = driver_dm.HealthMonitor( healthmonitor_id=self.sample_data.hm1_id, name='Great HM') hm_dict = {'name': 'Great HM'} self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) payload = {consts.ORIGINAL_HEALTH_MONITOR: old_provider_hm.to_dict(), consts.HEALTH_MONITOR_UPDATES: hm_dict} mock_cast.assert_called_with({}, 'update_health_monitor', **payload) # L7 Policy @mock.patch('oslo_messaging.RPCClient.cast') def test_l7policy_create(self, mock_cast): provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id) self.amp_driver.l7policy_create(provider_l7policy) payload = {consts.L7POLICY: provider_l7policy.to_dict()} mock_cast.assert_called_with({}, 'create_l7policy', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7policy_delete(self, mock_cast): provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id) self.amp_driver.l7policy_delete(provider_l7policy) payload = {consts.L7POLICY: provider_l7policy.to_dict()} mock_cast.assert_called_with({}, 'delete_l7policy', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7policy_update(self, mock_cast): old_provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id) provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True) l7policy_dict = {'enabled': True} self.amp_driver.l7policy_update(old_provider_l7policy, provider_l7policy) payload = {consts.ORIGINAL_L7POLICY: old_provider_l7policy.to_dict(), consts.L7POLICY_UPDATES: l7policy_dict} mock_cast.assert_called_with({}, 'update_l7policy', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7policy_update_name(self, mock_cast): old_provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id) provider_l7policy = driver_dm.L7Policy( l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy') l7policy_dict = {'name': 'Great L7Policy'} self.amp_driver.l7policy_update(old_provider_l7policy, provider_l7policy) payload = {consts.ORIGINAL_L7POLICY: old_provider_l7policy.to_dict(), consts.L7POLICY_UPDATES: l7policy_dict} mock_cast.assert_called_with({}, 'update_l7policy', **payload) # L7 Rules @mock.patch('oslo_messaging.RPCClient.cast') def test_l7rule_create(self, mock_cast): provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id) self.amp_driver.l7rule_create(provider_l7rule) payload = {consts.L7RULE: provider_l7rule.to_dict()} 
mock_cast.assert_called_with({}, 'create_l7rule', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7rule_delete(self, mock_cast): provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id) self.amp_driver.l7rule_delete(provider_l7rule) payload = {consts.L7RULE: provider_l7rule.to_dict()} mock_cast.assert_called_with({}, 'delete_l7rule', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7rule_update(self, mock_cast): old_provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id) provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True) l7rule_dict = {'enabled': True} self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) payload = {consts.ORIGINAL_L7RULE: old_provider_l7rule.to_dict(), consts.L7RULE_UPDATES: l7rule_dict} mock_cast.assert_called_with({}, 'update_l7rule', **payload) @mock.patch('oslo_messaging.RPCClient.cast') def test_l7rule_update_invert(self, mock_cast): old_provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id) provider_l7rule = driver_dm.L7Rule( l7rule_id=self.sample_data.l7rule1_id, invert=True) l7rule_dict = {'invert': True} self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) payload = {consts.ORIGINAL_L7RULE: old_provider_l7rule.to_dict(), consts.L7RULE_UPDATES: l7rule_dict} mock_cast.assert_called_with({}, 'update_l7rule', **payload) # Flavor def test_get_supported_flavor_metadata(self): test_schema = { "properties": { "test_name": {"description": "Test description"}, "test_name2": {"description": "Another description"}}} ref_dict = {"test_name": "Test description", "test_name2": "Another description"} # mock out the supported_flavor_metadata with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' 'SUPPORTED_FLAVOR_SCHEMA', test_schema): result = self.amp_driver.get_supported_flavor_metadata() self.assertEqual(ref_dict, result) # Test for bad schema with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): self.assertRaises(exceptions.DriverError, self.amp_driver.get_supported_flavor_metadata) def test_validate_flavor(self): ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE} self.amp_driver.validate_flavor(ref_dict) # Test bad flavor metadata value is bad ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'} self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.validate_flavor, ref_dict) # Test bad flavor metadata key ref_dict = {'bogus': 'bogus'} self.assertRaises(exceptions.UnsupportedOptionError, self.amp_driver.validate_flavor, ref_dict) # Test for bad schema with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): self.assertRaises(exceptions.DriverError, self.amp_driver.validate_flavor, 'bogus') # Availability Zone def test_get_supported_availability_zone_metadata(self): test_schema = { "properties": { "test_name": {"description": "Test description"}, "test_name2": {"description": "Another description"}}} ref_dict = {"test_name": "Test description", "test_name2": "Another description"} # mock out the supported_availability_zone_metadata with mock.patch('octavia.api.drivers.amphora_driver.' 'availability_zone_schema.' 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', test_schema): result = self.amp_driver.get_supported_availability_zone_metadata() self.assertEqual(ref_dict, result) # Test for bad schema with mock.patch('octavia.api.drivers.amphora_driver.' 'availability_zone_schema.' 
                        'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
            self.assertRaises(
                exceptions.DriverError,
                self.amp_driver.get_supported_availability_zone_metadata)

    def test_validate_availability_zone(self):
        # Test compute zone
        ref_dict = {consts.COMPUTE_ZONE: 'my_compute_zone'}
        self.amp_driver.validate_availability_zone(ref_dict)

        # Test vip networks
        ref_dict = {consts.VALID_VIP_NETWORKS: ['my_vip_net']}
        self.amp_driver.validate_availability_zone(ref_dict)

        # Test management network
        ref_dict = {consts.MANAGEMENT_NETWORK: 'my_management_net'}
        self.amp_driver.validate_availability_zone(ref_dict)

        # Test bad availability zone metadata key
        ref_dict = {'bogus': 'bogus'}
        self.assertRaises(exceptions.UnsupportedOptionError,
                          self.amp_driver.validate_availability_zone,
                          ref_dict)

        # Test for bad schema
        with mock.patch('octavia.api.drivers.amphora_driver.'
                        'availability_zone_schema.'
                        'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
            self.assertRaises(exceptions.DriverError,
                              self.amp_driver.validate_availability_zone,
                              'bogus')

    @mock.patch('cryptography.fernet.Fernet')
    def test_encrypt_listener_dict(self, mock_fernet):
        mock_fern = mock.MagicMock()
        mock_fernet.return_value = mock_fern
        TEST_DATA = {'cert': b'some data'}
        TEST_DATA2 = {'test': 'more data'}
        FAKE_ENCRYPTED_DATA = b'alqwkhjetrhth'
        mock_fern.encrypt.return_value = FAKE_ENCRYPTED_DATA

        # We need a class instance with the mock
        amp_driver = driver.AmphoraProviderDriver()

        # Test just default_tls_container_data
        list_dict = {consts.DEFAULT_TLS_CONTAINER_DATA: TEST_DATA}
        amp_driver._encrypt_listener_dict(list_dict)
        mock_fern.encrypt.assert_called_once_with(b'some data')
        self.assertEqual({'cert': FAKE_ENCRYPTED_DATA},
                         list_dict[consts.DEFAULT_TLS_CONTAINER_DATA])

        mock_fern.reset_mock()

        # Test just sni_container_data
        TEST_DATA = {'cert': b'some data'}
        sni_dict = {consts.SNI_CONTAINER_DATA: [TEST_DATA, TEST_DATA2]}
        amp_driver._encrypt_listener_dict(sni_dict)
        mock_fern.encrypt.assert_called_once_with(b'some data')
        encrypted_sni = [{'cert': FAKE_ENCRYPTED_DATA}, TEST_DATA2]
        self.assertEqual(encrypted_sni, sni_dict[consts.SNI_CONTAINER_DATA])

octavia-6.2.2/octavia/tests/unit/api/drivers/driver_agent/
octavia-6.2.2/octavia/tests/unit/api/drivers/driver_agent/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

octavia-6.2.2/octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py
# Copyright 2019 Red Hat, Inc. All rights reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from octavia_lib.common import constants as lib_consts from oslo_utils import uuidutils from octavia.api.drivers.driver_agent import driver_get from octavia.common import constants import octavia.tests.unit.base as base class TestDriverGet(base.TestCase): @mock.patch('octavia.db.api.get_session') def _test_process_get_object(self, object_name, mock_object_repo, mock_object_to_provider, mock_get_session): mock_get_session.return_value = 'bogus_session' object_repo_mock = mock.MagicMock() mock_object_repo.return_value = object_repo_mock db_object_mock = mock.MagicMock() object_repo_mock.get.return_value = db_object_mock mock_prov_object = mock.MagicMock() mock_object_to_provider.return_value = mock_prov_object ref_prov_dict = mock_prov_object.to_dict(recurse=True, render_unsets=True) object_id = uuidutils.generate_uuid() data = {constants.OBJECT: object_name, lib_consts.ID: object_id} # Happy path result = driver_get.process_get(data) mock_object_repo.assert_called_once_with() object_repo_mock.get.assert_called_once_with( 'bogus_session', id=object_id, show_deleted=False) mock_object_to_provider.assert_called_once_with(db_object_mock) self.assertEqual(ref_prov_dict, result) # No matching listener mock_object_repo.reset_mock() mock_object_to_provider.reset_mock() object_repo_mock.get.return_value = None result = driver_get.process_get(data) mock_object_repo.assert_called_once_with() object_repo_mock.get.assert_called_once_with( 'bogus_session', id=object_id, show_deleted=False) mock_object_to_provider.assert_not_called() self.assertEqual({}, result) @mock.patch('octavia.api.drivers.utils.' 
'db_loadbalancer_to_provider_loadbalancer') @mock.patch('octavia.db.repositories.LoadBalancerRepository') def test_process_get_loadbalancer(self, mock_lb_repo, mock_lb_to_provider): self._test_process_get_object( lib_consts.LOADBALANCERS, mock_lb_repo, mock_lb_to_provider) @mock.patch('octavia.api.drivers.utils.db_listener_to_provider_listener') @mock.patch('octavia.db.repositories.ListenerRepository') def test_process_get_listener(self, mock_listener_repo, mock_listener_to_provider): self._test_process_get_object(lib_consts.LISTENERS, mock_listener_repo, mock_listener_to_provider) @mock.patch('octavia.api.drivers.utils.db_pool_to_provider_pool') @mock.patch('octavia.db.repositories.PoolRepository') def test_process_get_pool(self, mock_pool_repo, mock_pool_to_provider): self._test_process_get_object(lib_consts.POOLS, mock_pool_repo, mock_pool_to_provider) @mock.patch('octavia.api.drivers.utils.db_member_to_provider_member') @mock.patch('octavia.db.repositories.MemberRepository') def test_process_get_member(self, mock_member_repo, mock_member_to_provider): self._test_process_get_object(lib_consts.MEMBERS, mock_member_repo, mock_member_to_provider) @mock.patch('octavia.api.drivers.utils.db_HM_to_provider_HM') @mock.patch('octavia.db.repositories.HealthMonitorRepository') def test_process_get_healthmonitor(self, mock_hm_repo, mock_hm_to_provider): self._test_process_get_object(lib_consts.HEALTHMONITORS, mock_hm_repo, mock_hm_to_provider) @mock.patch('octavia.api.drivers.utils.db_l7policy_to_provider_l7policy') @mock.patch('octavia.db.repositories.L7PolicyRepository') def test_process_get_l7policy(self, mock_l7policy_repo, mock_l7policy_to_provider): self._test_process_get_object(lib_consts.L7POLICIES, mock_l7policy_repo, mock_l7policy_to_provider) @mock.patch('octavia.api.drivers.utils.db_l7rule_to_provider_l7rule') @mock.patch('octavia.db.repositories.L7RuleRepository') def test_process_get_l7rule(self, mock_l7rule_repo, mock_l7rule_to_provider): self._test_process_get_object(lib_consts.L7RULES, mock_l7rule_repo, mock_l7rule_to_provider) @mock.patch('octavia.db.api.get_session') def test_process_get_bogus_object(self, mock_get_session): data = {constants.OBJECT: 'bogus', lib_consts.ID: 'bad ID'} result = driver_get.process_get(data) self.assertEqual({}, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/driver_agent/test_driver_listener.py0000664000175000017500000002132700000000000030457 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
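# The handler tests below exercise a simple length-prefixed JSON protocol
# over a Unix domain socket: the sender first transmits the payload length
# as ASCII digits terminated by b'\n', then the JSON payload itself (e.g.
# b'15\n' followed by b'{"test": "msg"}'). A minimal sketch of a compatible
# sender, assuming an already-connected socket; the function name is
# illustrative, not part of the module under test:
#
#     from oslo_serialization import jsonutils
#
#     def send_framed(sock, obj):
#         data = jsonutils.dump_as_bytes(obj)
#         sock.send(str(len(data)).encode('utf-8') + b'\n')
#         sock.sendall(data)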
import errno from unittest import mock from oslo_config import cfg from oslo_serialization import jsonutils from octavia.api.drivers.driver_agent import driver_listener import octavia.tests.unit.base as base CONF = cfg.CONF class TestDriverListener(base.TestCase): def setUp(self): super(TestDriverListener, self).setUp() @mock.patch('octavia.api.drivers.driver_agent.driver_listener.memoryview') def test_recv(self, mock_memoryview): # TEST_STRING len() is 15 TEST_STRING = '{"test": "msg"}' ref_object = jsonutils.loads(TEST_STRING) mock_recv_socket = mock.MagicMock() mock_recv = mock.MagicMock() mock_recv.side_effect = [b'1', b'5', b'\n'] mock_recv_socket.recv = mock_recv mock_recv_socket.recv_into.return_value = 15 mock_mv_buffer = mock.MagicMock() mock_tobytes = mock.MagicMock() mock_tobytes.return_value = TEST_STRING mock_mv_buffer.tobytes = mock_tobytes mock_memoryview.return_value = mock_mv_buffer result = driver_listener._recv(mock_recv_socket) self.assertEqual(ref_object, result) calls = [mock.call(1), mock.call(1), mock.call(1)] mock_recv.assert_has_calls(calls) mock_memoryview.assert_called_once_with(bytearray(15)) mock_recv_socket.recv_into.assert_called_once_with(mock_mv_buffer[0:], 15) @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' 'DriverUpdater') @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') def test_StatusRequestHandler_handle(self, mock_recv, mock_driverupdater): TEST_OBJECT = {"test": "msg"} mock_recv.return_value = 'bogus' mock_updater = mock.MagicMock() mock_update_loadbalancer_status = mock.MagicMock() mock_update_loadbalancer_status.return_value = TEST_OBJECT mock_updater.update_loadbalancer_status = ( mock_update_loadbalancer_status) mock_driverupdater.return_value = mock_updater mock_request = mock.MagicMock() mock_send = mock.MagicMock() mock_sendall = mock.MagicMock() mock_request.send = mock_send mock_request.sendall = mock_sendall StatusRequestHandler = driver_listener.StatusRequestHandler( mock_request, 'bogus', 'bogus') StatusRequestHandler.handle() mock_recv.assert_called_with(mock_request) mock_update_loadbalancer_status.assert_called_with('bogus') mock_send.assert_called_with(b'15\n') mock_sendall.assert_called_with( jsonutils.dumps(TEST_OBJECT).encode('utf-8')) @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' 'DriverUpdater') @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') def test_StatsRequestHandler_handle(self, mock_recv, mock_driverupdater): TEST_OBJECT = {"test": "msg"} mock_recv.return_value = 'bogus' mock_updater = mock.MagicMock() mock_update_listener_stats = mock.MagicMock() mock_update_listener_stats.return_value = TEST_OBJECT mock_updater.update_listener_statistics = (mock_update_listener_stats) mock_driverupdater.return_value = mock_updater mock_request = mock.MagicMock() mock_send = mock.MagicMock() mock_sendall = mock.MagicMock() mock_request.send = mock_send mock_request.sendall = mock_sendall StatsRequestHandler = driver_listener.StatsRequestHandler( mock_request, 'bogus', 'bogus') StatsRequestHandler.handle() mock_recv.assert_called_with(mock_request) mock_update_listener_stats.assert_called_with('bogus') mock_send.assert_called_with(b'15\n') mock_sendall.assert_called_with(jsonutils.dump_as_bytes(TEST_OBJECT)) @mock.patch('octavia.api.drivers.driver_agent.driver_get.' 
'process_get') @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') def test_GetRequestHandler_handle(self, mock_recv, mock_process_get): TEST_OBJECT = {"test": "msg"} mock_recv.return_value = 'bogus' mock_process_get.return_value = TEST_OBJECT mock_request = mock.MagicMock() mock_send = mock.MagicMock() mock_sendall = mock.MagicMock() mock_request.send = mock_send mock_request.sendall = mock_sendall GetRequestHandler = driver_listener.GetRequestHandler( mock_request, 'bogus', 'bogus') GetRequestHandler.handle() mock_recv.assert_called_with(mock_request) mock_process_get.assert_called_with('bogus') mock_send.assert_called_with(b'15\n') mock_sendall.assert_called_with(jsonutils.dump_as_bytes(TEST_OBJECT)) @mock.patch('os.remove') def test_cleanup_socket_file(self, mock_remove): mock_remove.side_effect = [mock.DEFAULT, OSError, OSError(errno.ENOENT, 'no_file')] driver_listener._cleanup_socket_file('fake_filename') mock_remove.assert_called_once_with('fake_filename') self.assertRaises(OSError, driver_listener._cleanup_socket_file, 'fake_filename') # Make sure we just pass if the file was not found driver_listener._cleanup_socket_file('fake_filename') @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' '_cleanup_socket_file') @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' 'ForkingUDSServer') def test_status_listener(self, mock_forking_server, mock_cleanup): mock_server = mock.MagicMock() mock_active_children = mock.PropertyMock( side_effect=['a', 'a', 'a', 'a' * CONF.driver_agent.status_max_processes, 'a', 'a' * 1000, '']) type(mock_server).active_children = mock_active_children mock_forking_server.return_value = mock_server mock_exit_event = mock.MagicMock() mock_exit_event.is_set.side_effect = [False, False, False, False, True] driver_listener.status_listener(mock_exit_event) mock_server.handle_request.assert_called() mock_server.server_close.assert_called_once() self.assertEqual(2, mock_cleanup.call_count) @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' '_cleanup_socket_file') @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' 'ForkingUDSServer') def test_stats_listener(self, mock_forking_server, mock_cleanup): mock_server = mock.MagicMock() mock_active_children = mock.PropertyMock( side_effect=['a', 'a', 'a', 'a' * CONF.driver_agent.status_max_processes, 'a', 'a' * 1000, '']) type(mock_server).active_children = mock_active_children mock_forking_server.return_value = mock_server mock_exit_event = mock.MagicMock() mock_exit_event.is_set.side_effect = [False, False, False, False, True] driver_listener.stats_listener(mock_exit_event) mock_server.handle_request.assert_called() mock_server.server_close.assert_called_once() @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' '_cleanup_socket_file') @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' 
'ForkingUDSServer') def test_get_listener(self, mock_forking_server, mock_cleanup): mock_server = mock.MagicMock() mock_active_children = mock.PropertyMock( side_effect=['a', 'a', 'a', 'a' * CONF.driver_agent.status_max_processes, 'a', 'a' * 1000, '']) type(mock_server).active_children = mock_active_children mock_forking_server.return_value = mock_server mock_exit_event = mock.MagicMock() mock_exit_event.is_set.side_effect = [False, False, False, False, True] driver_listener.get_listener(mock_exit_event) mock_server.handle_request.assert_called() mock_server.server_close.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py0000664000175000017500000004172200000000000030277 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from mock import call from octavia_lib.api.drivers import exceptions as driver_exceptions from octavia_lib.common import constants as lib_consts from oslo_utils import uuidutils from octavia.api.drivers.driver_agent import driver_updater from octavia.common import exceptions import octavia.tests.unit.base as base class TestDriverUpdater(base.TestCase): @mock.patch('octavia.db.repositories.LoadBalancerRepository') @mock.patch('octavia.db.repositories.ListenerRepository') @mock.patch('octavia.db.repositories.L7PolicyRepository') @mock.patch('octavia.db.repositories.L7RuleRepository') @mock.patch('octavia.db.repositories.PoolRepository') @mock.patch('octavia.db.repositories.HealthMonitorRepository') @mock.patch('octavia.db.repositories.MemberRepository') @mock.patch('octavia.db.api.get_session') def setUp(self, mock_get_session, mock_member_repo, mock_health_repo, mock_pool_repo, mock_l7r_repo, mock_l7p_repo, mock_list_repo, mock_lb_repo): super(TestDriverUpdater, self).setUp() self.mock_session = "FAKE_DB_SESSION" mock_get_session.return_value = self.mock_session member_mock = mock.MagicMock() mock_member_repo.return_value = member_mock self.mock_member_repo = member_mock health_mock = mock.MagicMock() mock_health_repo.return_value = health_mock self.mock_health_repo = health_mock pool_mock = mock.MagicMock() mock_pool_repo.return_value = pool_mock self.mock_pool_repo = pool_mock l7r_mock = mock.MagicMock() mock_l7r_repo.return_value = l7r_mock self.mock_l7r_repo = l7r_mock l7p_mock = mock.MagicMock() mock_l7p_repo.return_value = l7p_mock self.mock_l7p_repo = l7p_mock list_mock = mock.MagicMock() mock_list_repo.return_value = list_mock self.mock_list_repo = list_mock lb_mock = mock.MagicMock() mock_lb_repo.return_value = lb_mock self.mock_lb_repo = lb_mock self.driver_updater = driver_updater.DriverUpdater() self.ref_ok_response = {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_OK} mock_lb = mock.MagicMock() self.lb_id = uuidutils.generate_uuid() self.lb_project_id = uuidutils.generate_uuid() mock_lb.id = self.lb_id 
mock_lb.project_id = self.lb_project_id mock_lb.provisioning_status = lib_consts.ACTIVE self.lb_data_model = 'FakeLBDataModel' self.mock_lb_repo.model_class.__data_model__ = self.lb_data_model self.mock_lb_repo.get.return_value = mock_lb @mock.patch('octavia.common.utils.get_network_driver') def test_check_for_lb_vip_deallocate(self, mock_get_net_drvr): mock_repo = mock.MagicMock() mock_lb = mock.MagicMock() mock_vip = mock.MagicMock() mock_octavia_owned = mock.PropertyMock(side_effect=[True, False]) type(mock_vip).octavia_owned = mock_octavia_owned mock_lb.vip = mock_vip mock_repo.get.return_value = mock_lb mock_net_drvr = mock.MagicMock() mock_get_net_drvr.return_value = mock_net_drvr self.driver_updater._check_for_lb_vip_deallocate(mock_repo, 'bogus_id') mock_net_drvr.deallocate_vip.assert_called_once_with(mock_vip) mock_net_drvr.reset_mock() self.driver_updater._check_for_lb_vip_deallocate(mock_repo, 'bogus_id') mock_net_drvr.deallocate_vip.assert_not_called() @mock.patch('octavia.db.repositories.Repositories.decrement_quota') @mock.patch('octavia.db.api.get_session') def test_decrement_quota(self, mock_get_session, mock_dec_quota): mock_session = mock.MagicMock() mock_get_session.return_value = mock_session mock_dec_quota.side_effect = [mock.DEFAULT, exceptions.OctaviaException('Boom')] self.driver_updater._decrement_quota(self.mock_lb_repo, 'FakeName', self.lb_id) mock_dec_quota.assert_called_once_with( mock_session, self.mock_lb_repo.model_class.__data_model__, self.lb_project_id) mock_session.commit.assert_called_once() mock_session.rollback.assert_not_called() # Test exception path mock_dec_quota.reset_mock() mock_session.reset_mock() self.assertRaises(exceptions.OctaviaException, self.driver_updater._decrement_quota, self.mock_lb_repo, 'FakeName', self.lb_id) mock_dec_quota.assert_called_once_with( mock_session, self.mock_lb_repo.model_class.__data_model__, self.lb_project_id) mock_session.commit.assert_not_called() mock_session.rollback.assert_called_once() # Test already deleted path mock_dec_quota.reset_mock() mock_session.reset_mock() # Create a local mock LB and LB_repo for this test mock_lb = mock.MagicMock() mock_lb.id = self.lb_id mock_lb.provisioning_status = lib_consts.DELETED mock_lb_repo = mock.MagicMock() mock_lb_repo.model_class.__data_model__ = self.lb_data_model mock_lb_repo.get.return_value = mock_lb self.driver_updater._decrement_quota(mock_lb_repo, 'FakeName', self.lb_id) mock_dec_quota.assert_not_called() mock_session.commit.assert_not_called() mock_session.rollback.assert_called_once() @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' 'DriverUpdater._decrement_quota') @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' 
'DriverUpdater._check_for_lb_vip_deallocate') def test_process_status_update(self, mock_deallocate, mock_decrement_quota): mock_repo = mock.MagicMock() list_dict = {"id": 2, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} list_prov_dict = {"id": 2, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE} list_oper_dict = {"id": 2, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} list_deleted_dict = { "id": 2, lib_consts.PROVISIONING_STATUS: lib_consts.DELETED, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} # Test with full record self.driver_updater._process_status_update(mock_repo, 'FakeName', list_dict) mock_repo.update.assert_called_once_with( self.mock_session, 2, provisioning_status=lib_consts.ACTIVE, operating_status=lib_consts.ONLINE) mock_repo.delete.assert_not_called() # Test with only provisioning status record mock_repo.reset_mock() self.driver_updater._process_status_update(mock_repo, 'FakeName', list_prov_dict) mock_repo.update.assert_called_once_with( self.mock_session, 2, provisioning_status=lib_consts.ACTIVE) mock_repo.delete.assert_not_called() # Test with only operating status record mock_repo.reset_mock() self.driver_updater._process_status_update(mock_repo, 'FakeName', list_oper_dict) mock_repo.update.assert_called_once_with( self.mock_session, 2, operating_status=lib_consts.ONLINE) mock_repo.delete.assert_not_called() # Test with deleted but delete_record False mock_repo.reset_mock() self.driver_updater._process_status_update(mock_repo, 'FakeName', list_deleted_dict) mock_repo.update.assert_called_once_with( self.mock_session, 2, provisioning_status=lib_consts.DELETED, operating_status=lib_consts.ONLINE) mock_repo.delete.assert_not_called() mock_decrement_quota.assert_called_once_with(mock_repo, 'FakeName', 2) # Test with an empty update mock_repo.reset_mock() self.driver_updater._process_status_update(mock_repo, 'FakeName', {"id": 2}) mock_repo.update.assert_not_called() mock_repo.delete.assert_not_called() # Test with deleted and delete_record True mock_decrement_quota.reset_mock() mock_repo.reset_mock() self.driver_updater._process_status_update( mock_repo, 'FakeName', list_deleted_dict, delete_record=True) mock_repo.delete.assert_called_once_with(self.mock_session, id=2) mock_repo.update.assert_not_called() mock_decrement_quota.assert_called_once_with(mock_repo, 'FakeName', 2) # Test with LB Delete mock_decrement_quota.reset_mock() mock_repo.reset_mock() self.driver_updater._process_status_update( mock_repo, lib_consts.LOADBALANCERS, list_deleted_dict) mock_deallocate.assert_called_once_with(mock_repo, 2) mock_decrement_quota.assert_called_once_with( mock_repo, lib_consts.LOADBALANCERS, 2) # Test with an exception mock_repo.reset_mock() mock_repo.update.side_effect = Exception('boom') self.assertRaises(driver_exceptions.UpdateStatusError, self.driver_updater._process_status_update, mock_repo, 'FakeName', list_dict) # Test with no ID record mock_repo.reset_mock() self.assertRaises(driver_exceptions.UpdateStatusError, self.driver_updater._process_status_update, mock_repo, 'FakeName', {"fake": "data"}) @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' 
'DriverUpdater._process_status_update') def test_update_loadbalancer_status(self, mock_status_update): mock_status_update.side_effect = [ mock.DEFAULT, mock.DEFAULT, mock.DEFAULT, mock.DEFAULT, mock.DEFAULT, mock.DEFAULT, mock.DEFAULT, driver_exceptions.UpdateStatusError( fault_string='boom', status_object='fruit', status_object_id='1', status_record='grape'), Exception('boom')] lb_dict = {"id": 1, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} list_dict = {"id": 2, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} pool_dict = {"id": 3, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} member_dict = {"id": 4, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} hm_dict = {"id": 5, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} l7p_dict = {"id": 6, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} l7r_dict = {"id": 7, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, lib_consts.OPERATING_STATUS: lib_consts.ONLINE} status_dict = {lib_consts.LOADBALANCERS: [lb_dict], lib_consts.LISTENERS: [list_dict], lib_consts.POOLS: [pool_dict], lib_consts.MEMBERS: [member_dict], lib_consts.HEALTHMONITORS: [hm_dict], lib_consts.L7POLICIES: [l7p_dict], lib_consts.L7RULES: [l7r_dict]} result = self.driver_updater.update_loadbalancer_status( copy.deepcopy(status_dict)) calls = [call(self.mock_member_repo, lib_consts.MEMBERS, member_dict, delete_record=True), call(self.mock_health_repo, lib_consts.HEALTHMONITORS, hm_dict, delete_record=True), call(self.mock_pool_repo, lib_consts.POOLS, pool_dict, delete_record=True), call(self.mock_l7r_repo, lib_consts.L7RULES, l7r_dict, delete_record=True), call(self.mock_l7p_repo, lib_consts.L7POLICIES, l7p_dict, delete_record=True), call(self.mock_list_repo, lib_consts.LISTENERS, list_dict, delete_record=True), call(self.mock_lb_repo, lib_consts.LOADBALANCERS, lb_dict)] mock_status_update.assert_has_calls(calls) self.assertEqual(self.ref_ok_response, result) # Test empty status updates mock_status_update.reset_mock() result = self.driver_updater.update_loadbalancer_status({}) mock_status_update.assert_not_called() self.assertEqual(self.ref_ok_response, result) # Test UpdateStatusError case ref_update_status_error = { lib_consts.FAULT_STRING: 'boom', lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, lib_consts.STATUS_OBJECT: 'fruit', lib_consts.STATUS_OBJECT_ID: '1'} result = self.driver_updater.update_loadbalancer_status( copy.deepcopy(status_dict)) self.assertEqual(ref_update_status_error, result) # Test general exceptions result = self.driver_updater.update_loadbalancer_status( copy.deepcopy(status_dict)) self.assertEqual({ lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, lib_consts.FAULT_STRING: 'boom'}, result) @mock.patch('octavia.db.repositories.ListenerStatisticsRepository.replace') def test_update_listener_statistics(self, mock_replace): listener_stats_list = [{"id": 1, "active_connections": 10, "bytes_in": 20, "bytes_out": 30, "request_errors": 40, "total_connections": 50}, {"id": 2, "active_connections": 60, "bytes_in": 70, "bytes_out": 80, "request_errors": 90, "total_connections": 100}] listener_stats_dict = {"listeners": listener_stats_list} mock_replace.side_effect = [mock.DEFAULT, mock.DEFAULT, Exception('boom')] result = 
self.driver_updater.update_listener_statistics( copy.deepcopy(listener_stats_dict)) calls = [call(self.mock_session, 1, 1, active_connections=10, bytes_in=20, bytes_out=30, request_errors=40, total_connections=50), call(self.mock_session, 2, 2, active_connections=60, bytes_in=70, bytes_out=80, request_errors=90, total_connections=100)] mock_replace.assert_has_calls(calls) self.assertEqual(self.ref_ok_response, result) # Test empty stats updates mock_replace.reset_mock() result = self.driver_updater.update_listener_statistics({}) mock_replace.assert_not_called() self.assertEqual(self.ref_ok_response, result) # Test missing ID bad_id_dict = {"listeners": [{"notID": "one"}]} result = self.driver_updater.update_listener_statistics(bad_id_dict) ref_update_listener_stats_error = { lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, lib_consts.STATS_OBJECT: lib_consts.LISTENERS, lib_consts.FAULT_STRING: "'id'"} self.assertEqual(ref_update_listener_stats_error, result) # Test for replace exception result = self.driver_updater.update_listener_statistics( copy.deepcopy(listener_stats_dict)) ref_update_listener_stats_error = { lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, lib_consts.STATS_OBJECT: lib_consts.LISTENERS, lib_consts.FAULT_STRING: 'boom', lib_consts.STATS_OBJECT_ID: 1} self.assertEqual(ref_update_listener_stats_error, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/test_driver_factory.py0000664000175000017500000000333400000000000025626 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from octavia.api.drivers import driver_factory from octavia.common import exceptions import octavia.tests.unit.base as base class TestDriverFactory(base.TestCase): def setUp(self): super(TestDriverFactory, self).setUp() @mock.patch('stevedore.driver.DriverManager') def test_driver_factory_no_provider(self, mock_drivermgr): mock_mgr = mock.MagicMock() mock_drivermgr.return_value = mock_mgr driver = driver_factory.get_driver(None) self.assertEqual(mock_mgr.driver, driver) @mock.patch('stevedore.driver.DriverManager') def test_driver_factory_failed_to_load_driver(self, mock_drivermgr): mock_drivermgr.side_effect = Exception('boom') self.assertRaises(exceptions.ProviderNotFound, driver_factory.get_driver, None) @mock.patch('stevedore.driver.DriverManager') def test_driver_factory_not_enabled(self, mock_drivermgr): self.assertRaises(exceptions.ProviderNotEnabled, driver_factory.get_driver, 'dont-enable-this-fake-driver-name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/test_driver_lib.py0000664000175000017500000000350100000000000024721 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import octavia_lib.api.drivers.driver_lib as lib_driver_lib

from octavia.api.drivers import driver_lib
import octavia.tests.unit.base as base


class TestDriverLib(base.TestCase):
    def setUp(self):
        super(TestDriverLib, self).setUp()

    # Silly test to check that debtcollector moves is working
    @mock.patch('octavia_lib.api.drivers.driver_lib.DriverLibrary.'
                '_check_for_socket_ready')
    def test_driver_lib_exists(self, mock_ready):
        driver_lib_class = driver_lib.DriverLibrary()
        self.assertIsInstance(driver_lib_class, lib_driver_lib.DriverLibrary)

octavia-6.2.2/octavia/tests/unit/api/drivers/test_provider_base.py
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
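# Like TestDriverLib above, this module is an existence test for the
# debtcollector moves: the real class now lives in octavia-lib and the
# octavia name is kept as a thin compatibility subclass. A minimal sketch of
# the shim pattern being verified (hypothetical, not the actual module
# contents):
#
#     import octavia_lib.api.drivers.provider_base as lib_provider_base
#
#     class ProviderDriver(lib_provider_base.ProviderDriver):
#         """Kept for backward compatibility after the move to octavia-lib."""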
import octavia_lib.api.drivers.provider_base as lib_provider_base

from octavia.api.drivers import provider_base
import octavia.tests.unit.base as base


class TestProviderBase(base.TestCase):

    def setUp(self):
        super(TestProviderBase, self).setUp()

    # Silly test to check that debtcollector moves is working
    def test_provider_base_exists(self):
        provider_base_class = provider_base.ProviderDriver()
        self.assertIsInstance(provider_base_class,
                              lib_provider_base.ProviderDriver)

==> octavia-6.2.2/octavia/tests/unit/api/drivers/test_provider_noop_agent.py <==
# Copyright 2019 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from octavia.api.drivers.noop_driver import agent
import octavia.tests.unit.base as base


class TestNoopProviderAgent(base.TestCase):

    def setUp(self):
        super(TestNoopProviderAgent, self).setUp()

    @mock.patch('time.sleep')
    def test_noop_provider_agent(self, mock_sleep):
        mock_exit_event = mock.MagicMock()
        mock_exit_event.is_set.side_effect = [False, True]

        agent.noop_provider_agent(mock_exit_event)

        mock_sleep.assert_called_once_with(1)

==> octavia-6.2.2/octavia/tests/unit/api/drivers/test_provider_noop_driver.py <==
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
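
# NOTE: Hedged sketch of the bookkeeping the assertions below depend on.
# The real no-op driver lives in octavia/api/drivers/noop_driver/driver.py;
# what these tests rely on is that every call is recorded in a
# 'driverconfig' dict, keyed by the id (or hash) of the object involved,
# as a tuple that ends with the method name, for example:
#
#     class NoopManager(object):
#         def __init__(self):
#             self.driverconfig = {}
#
#         def loadbalancer_create(self, loadbalancer):
#             self.driverconfig[loadbalancer.loadbalancer_id] = (
#                 loadbalancer, 'loadbalancer_create')
#
#         def loadbalancer_delete(self, loadbalancer, cascade=False):
#             self.driverconfig[loadbalancer.loadbalancer_id] = (
#                 loadbalancer.loadbalancer_id, cascade,
#                 'loadbalancer_delete')
#
# Each test below drives one provider API and checks the recorded tuple.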
from oslo_utils import uuidutils from octavia.api.drivers import data_models from octavia.api.drivers.noop_driver import driver import octavia.tests.unit.base as base class TestNoopProviderDriver(base.TestCase): def setUp(self): super(TestNoopProviderDriver, self).setUp() self.driver = driver.NoopProviderDriver() self.loadbalancer_id = uuidutils.generate_uuid() self.vip_address = '192.0.2.10' self.vip_network_id = uuidutils.generate_uuid() self.vip_port_id = uuidutils.generate_uuid() self.vip_subnet_id = uuidutils.generate_uuid() self.listener_id = uuidutils.generate_uuid() self.pool_id = uuidutils.generate_uuid() self.member_id = uuidutils.generate_uuid() self.member_subnet_id = uuidutils.generate_uuid() self.healthmonitor_id = uuidutils.generate_uuid() self.l7policy_id = uuidutils.generate_uuid() self.l7rule_id = uuidutils.generate_uuid() self.project_id = uuidutils.generate_uuid() self.default_tls_container_ref = uuidutils.generate_uuid() self.sni_container_ref_1 = uuidutils.generate_uuid() self.sni_container_ref_2 = uuidutils.generate_uuid() self.ref_vip = data_models.VIP( vip_address=self.vip_address, vip_network_id=self.vip_network_id, vip_port_id=self.vip_port_id, vip_subnet_id=self.vip_subnet_id) self.ref_member = data_models.Member( address='198.51.100.4', admin_state_up=True, member_id=self.member_id, monitor_address='203.0.113.2', monitor_port=66, name='jacket', pool_id=self.pool_id, protocol_port=99, subnet_id=self.member_subnet_id, weight=55) self.ref_healthmonitor = data_models.HealthMonitor( admin_state_up=False, delay=2, expected_codes="500", healthmonitor_id=self.healthmonitor_id, http_method='TRACE', max_retries=1, max_retries_down=0, name='doc', pool_id=self.pool_id, timeout=3, type='PHD', url_path='/index.html') self.ref_pool = data_models.Pool( admin_state_up=True, description='Olympic swimming pool', healthmonitor=self.ref_healthmonitor, lb_algorithm='A_Fast_One', loadbalancer_id=self.loadbalancer_id, listener_id=self.listener_id, members=[self.ref_member], name='Osborn', pool_id=self.pool_id, protocol='avian', session_persistence={'type': 'glue'}) self.ref_l7rule = data_models.L7Rule( admin_state_up=True, compare_type='store_brand', invert=True, key='board', l7policy_id=self.l7policy_id, l7rule_id=self.l7rule_id, type='strict', value='gold') self.ref_l7policy = data_models.L7Policy( action='packed', admin_state_up=False, description='Corporate policy', l7policy_id=self.l7policy_id, listener_id=self.listener_id, name='more_policy', position=1, redirect_pool_id=self.pool_id, redirect_url='/hr', rules=[self.ref_l7rule]) self.ref_listener = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, default_tls_container_data='default_cert_data', default_tls_container_ref=self.default_tls_container_ref, description='The listener', insert_headers={'X-Forwarded-For': 'true'}, l7policies=[self.ref_l7policy], listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='super_listener', protocol='avian', protocol_port=42, sni_container_data=['sni_cert_data_1', 'sni_cert_data_2'], sni_container_refs=[self.sni_container_ref_1, self.sni_container_ref_2]) self.ref_lb = data_models.LoadBalancer( admin_state_up=False, description='One great load balancer', flavor={'cake': 'chocolate'}, listeners=[self.ref_listener], loadbalancer_id=self.loadbalancer_id, name='favorite_lb', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id, vip_port_id=self.vip_port_id, 
vip_subnet_id=self.vip_subnet_id) self.ref_flavor_metadata = {"amp_image_tag": "The glance image tag " "to use for this load balancer."} self.ref_availability_zone_metadata = { "compute_zone": "The compute availability zone to use for this " "loadbalancer."} def test_create_vip_port(self): vip_dict = self.driver.create_vip_port(self.loadbalancer_id, self.project_id, self.ref_vip.to_dict()) self.assertEqual(self.ref_vip.to_dict(), vip_dict) def test_loadbalancer_create(self): self.driver.loadbalancer_create(self.ref_lb) self.assertEqual((self.ref_lb, 'loadbalancer_create'), self.driver.driver.driverconfig[self.loadbalancer_id]) def test_loadbalancer_delete(self): self.driver.loadbalancer_delete(self.ref_lb, cascade=True) self.assertEqual((self.loadbalancer_id, True, 'loadbalancer_delete'), self.driver.driver.driverconfig[self.loadbalancer_id]) def test_loadbalancer_failover(self): self.driver.loadbalancer_failover(self.loadbalancer_id) self.assertEqual((self.loadbalancer_id, 'loadbalancer_failover'), self.driver.driver.driverconfig[self.loadbalancer_id]) def test_loadbalancer_update(self): self.driver.loadbalancer_update(self.ref_lb, self.ref_lb) self.assertEqual((self.ref_lb, 'loadbalancer_update'), self.driver.driver.driverconfig[self.loadbalancer_id]) def test_listener_create(self): self.driver.listener_create(self.ref_listener) self.assertEqual((self.ref_listener, 'listener_create'), self.driver.driver.driverconfig[self.listener_id]) def test_listener_delete(self): self.driver.listener_delete(self.ref_listener) self.assertEqual((self.listener_id, 'listener_delete'), self.driver.driver.driverconfig[self.listener_id]) def test_listener_update(self): self.driver.listener_update(self.ref_listener, self.ref_listener) self.assertEqual((self.ref_listener, 'listener_update'), self.driver.driver.driverconfig[self.listener_id]) def test_pool_create(self): self.driver.pool_create(self.ref_pool) self.assertEqual((self.ref_pool, 'pool_create'), self.driver.driver.driverconfig[self.pool_id]) def test_pool_delete(self): self.driver.pool_delete(self.ref_pool) self.assertEqual((self.pool_id, 'pool_delete'), self.driver.driver.driverconfig[self.pool_id]) def test_pool_update(self): self.driver.pool_update(self.ref_pool, self.ref_pool) self.assertEqual((self.ref_pool, 'pool_update'), self.driver.driver.driverconfig[self.pool_id]) def test_member_create(self): self.driver.member_create(self.ref_member) self.assertEqual((self.ref_member, 'member_create'), self.driver.driver.driverconfig[self.member_id]) def test_member_delete(self): self.driver.member_delete(self.ref_member) self.assertEqual((self.member_id, 'member_delete'), self.driver.driver.driverconfig[self.member_id]) def test_member_update(self): self.driver.member_update(self.ref_member, self.ref_member) self.assertEqual((self.ref_member, 'member_update'), self.driver.driver.driverconfig[self.member_id]) def test_member_batch_update(self): self.driver.member_batch_update(self.pool_id, [self.ref_member]) self.assertEqual((self.ref_member, 'member_batch_update'), self.driver.driver.driverconfig[self.member_id]) def test_health_monitor_create(self): self.driver.health_monitor_create(self.ref_healthmonitor) self.assertEqual( (self.ref_healthmonitor, 'health_monitor_create'), self.driver.driver.driverconfig[self.healthmonitor_id]) def test_health_monitor_delete(self): self.driver.health_monitor_delete(self.ref_healthmonitor) self.assertEqual( (self.healthmonitor_id, 'health_monitor_delete'), self.driver.driver.driverconfig[self.healthmonitor_id]) def 
test_health_monitor_update(self): self.driver.health_monitor_update(self.ref_healthmonitor, self.ref_healthmonitor) self.assertEqual( (self.ref_healthmonitor, 'health_monitor_update'), self.driver.driver.driverconfig[self.healthmonitor_id]) def test_l7policy_create(self): self.driver.l7policy_create(self.ref_l7policy) self.assertEqual((self.ref_l7policy, 'l7policy_create'), self.driver.driver.driverconfig[self.l7policy_id]) def test_l7policy_delete(self): self.driver.l7policy_delete(self.ref_l7policy) self.assertEqual((self.l7policy_id, 'l7policy_delete'), self.driver.driver.driverconfig[self.l7policy_id]) def test_l7policy_update(self): self.driver.l7policy_update(self.ref_l7policy, self.ref_l7policy) self.assertEqual((self.ref_l7policy, 'l7policy_update'), self.driver.driver.driverconfig[self.l7policy_id]) def test_l7rule_create(self): self.driver.l7rule_create(self.ref_l7rule) self.assertEqual((self.ref_l7rule, 'l7rule_create'), self.driver.driver.driverconfig[self.l7rule_id]) def test_l7rule_delete(self): self.driver.l7rule_delete(self.ref_l7rule) self.assertEqual((self.l7rule_id, 'l7rule_delete'), self.driver.driver.driverconfig[self.l7rule_id]) def test_l7rule_update(self): self.driver.l7rule_update(self.ref_l7rule, self.ref_l7rule) self.assertEqual((self.ref_l7rule, 'l7rule_update'), self.driver.driver.driverconfig[self.l7rule_id]) def test_get_supported_flavor_metadata(self): metadata = self.driver.get_supported_flavor_metadata() self.assertEqual(self.ref_flavor_metadata, metadata) def test_validate_flavor(self): self.driver.validate_flavor(self.ref_flavor_metadata) flavor_hash = hash(frozenset(self.ref_flavor_metadata)) self.assertEqual((self.ref_flavor_metadata, 'validate_flavor'), self.driver.driver.driverconfig[flavor_hash]) def test_get_supported_availability_zone_metadata(self): metadata = self.driver.get_supported_availability_zone_metadata() self.assertEqual(self.ref_availability_zone_metadata, metadata) def test_validate_availability_zone(self): self.driver.validate_availability_zone( self.ref_availability_zone_metadata) az_hash = hash(frozenset(self.ref_availability_zone_metadata)) self.assertEqual((self.ref_availability_zone_metadata, 'validate_availability_zone'), self.driver.driver.driverconfig[az_hash]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/drivers/test_utils.py0000664000175000017500000006121300000000000023744 0ustar00zuulzuul00000000000000# Copyright 2018 Rackspace, US Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
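
# NOTE: Hedged summary of the exception translation that test_call_provider
# below exercises. call_provider() wraps a provider driver call and maps
# octavia-lib driver exceptions onto Octavia API exceptions, roughly:
#
#     try:
#         return driver_method(*args, **kwargs)
#     except lib_exceptions.DriverError:
#         raise exceptions.ProviderDriverError(...)
#     except (NotImplementedError, lib_exceptions.NotImplementedError):
#         raise exceptions.ProviderNotImplementedError(...)
#     except lib_exceptions.UnsupportedOptionError:
#         raise exceptions.ProviderUnsupportedOptionError(...)
#     except Exception:
#         raise exceptions.ProviderDriverError(...)
#
# Constructor arguments are elided here; the real implementation is in
# octavia/api/drivers/utils.py.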
import copy from unittest import mock from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions as lib_exceptions from octavia_lib.common import constants as lib_constants from octavia.api.drivers import utils from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions from octavia.tests.common import sample_data_models from octavia.tests.unit import base class TestUtils(base.TestCase): def setUp(self): super(TestUtils, self).setUp() self.sample_data = sample_data_models.SampleDriverDataModels() def test_call_provider(self): mock_driver_method = mock.MagicMock() # Test happy path utils.call_provider("provider_name", mock_driver_method, "arg1", foo="arg2") mock_driver_method.assert_called_with("arg1", foo="arg2") # Test driver raising DriverError mock_driver_method.side_effect = lib_exceptions.DriverError self.assertRaises(exceptions.ProviderDriverError, utils.call_provider, "provider_name", mock_driver_method) # Test driver raising different types of NotImplementedError mock_driver_method.side_effect = NotImplementedError self.assertRaises(exceptions.ProviderNotImplementedError, utils.call_provider, "provider_name", mock_driver_method) mock_driver_method.side_effect = lib_exceptions.NotImplementedError self.assertRaises(exceptions.ProviderNotImplementedError, utils.call_provider, "provider_name", mock_driver_method) # Test driver raising UnsupportedOptionError mock_driver_method.side_effect = ( lib_exceptions.UnsupportedOptionError) self.assertRaises(exceptions.ProviderUnsupportedOptionError, utils.call_provider, "provider_name", mock_driver_method) # Test driver raising ProviderDriverError mock_driver_method.side_effect = Exception self.assertRaises(exceptions.ProviderDriverError, utils.call_provider, "provider_name", mock_driver_method) def test_base_to_provider_dict(self): test_dict = {'provisioning_status': constants.ACTIVE, 'operating_status': constants.ONLINE, 'provider': 'octavia', 'created_at': 'now', 'updated_at': 'then', 'enabled': True, 'project_id': 1} result_dict = utils._base_to_provider_dict(test_dict, include_project_id=True) self.assertEqual({'admin_state_up': True, 'project_id': 1}, result_dict) result_dict = utils._base_to_provider_dict(test_dict, include_project_id=False) self.assertEqual({'admin_state_up': True}, result_dict) @mock.patch('octavia.db.repositories.FlavorRepository.' 
'get_flavor_metadata_dict') @mock.patch('octavia.db.api.get_session') @mock.patch('octavia.api.drivers.utils._get_secret_data') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_lb_dict_to_provider_dict(self, mock_load_cert, mock_secret, mock_get_session, mock_get_flavor): cert1 = data_models.TLSContainer(certificate='cert 1') cert2 = data_models.TLSContainer(certificate='cert 2') cert3 = data_models.TLSContainer(certificate='cert 3') mock_secret.side_effect = ['X509 POOL CA CERT FILE', 'X509 POOL CRL FILE', 'ca cert', 'X509 CRL FILE', 'ca cert', 'X509 CRL FILE', 'X509 POOL CA CERT FILE', 'X509 CRL FILE'] listener_certs = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} pool_cert = data_models.TLSContainer(certificate='pool cert') pool_certs = {'tls_cert': pool_cert, 'sni_certs': []} mock_load_cert.side_effect = [pool_certs, listener_certs, listener_certs, listener_certs, listener_certs] mock_get_flavor.return_value = {'shaved_ice': 'cherry'} test_lb_dict = {'name': 'lb1', 'project_id': self.sample_data.project_id, 'vip_subnet_id': self.sample_data.subnet_id, 'vip_port_id': self.sample_data.port_id, 'vip_address': self.sample_data.ip_address, 'vip_network_id': self.sample_data.network_id, 'vip_qos_policy_id': self.sample_data.qos_policy_id, 'id': self.sample_data.lb_id, 'listeners': [], 'pools': [], 'description': '', 'admin_state_up': True, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, 'flavor_id': 'flavor_id', 'provider': 'noop_driver'} ref_listeners = copy.deepcopy(self.sample_data.provider_listeners) # TODO(johnsom) Remove when versions implemented for listener in ref_listeners: delattr(listener, lib_constants.TLS_VERSIONS) expect_pools = copy.deepcopy(self.sample_data.provider_pools,) for pool in expect_pools: delattr(pool, lib_constants.TLS_VERSIONS) ref_prov_lb_dict = { 'vip_address': self.sample_data.ip_address, 'admin_state_up': True, 'loadbalancer_id': self.sample_data.lb_id, 'vip_subnet_id': self.sample_data.subnet_id, 'listeners': ref_listeners, 'description': '', 'project_id': self.sample_data.project_id, 'vip_port_id': self.sample_data.port_id, 'vip_qos_policy_id': self.sample_data.qos_policy_id, 'vip_network_id': self.sample_data.network_id, 'pools': expect_pools, 'flavor': {'shaved_ice': 'cherry'}, 'name': 'lb1'} vip = data_models.Vip(ip_address=self.sample_data.ip_address, network_id=self.sample_data.network_id, port_id=self.sample_data.port_id, subnet_id=self.sample_data.subnet_id, qos_policy_id=self.sample_data.qos_policy_id) provider_lb_dict = utils.lb_dict_to_provider_dict( test_lb_dict, vip=vip, db_pools=self.sample_data.test_db_pools, db_listeners=self.sample_data.test_db_listeners) self.assertEqual(ref_prov_lb_dict, provider_lb_dict) @mock.patch('octavia.db.repositories.FlavorRepository.' 
            'get_flavor_metadata_dict')
    @mock.patch('octavia.db.api.get_session')
    def test_db_loadbalancer_to_provider_loadbalancer(self,
                                                      mock_get_session,
                                                      mock_get_flavor):
        mock_get_flavor.return_value = {'shaved_ice': 'cherry'}
        vip = data_models.Vip(ip_address=self.sample_data.ip_address,
                              network_id=self.sample_data.network_id,
                              port_id=self.sample_data.port_id,
                              subnet_id=self.sample_data.subnet_id)
        test_db_lb = data_models.LoadBalancer(id=1, flavor_id='2', vip=vip)

        provider_lb = utils.db_loadbalancer_to_provider_loadbalancer(
            test_db_lb)

        ref_provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=1,
            flavor={'shaved_ice': 'cherry'},
            vip_address=self.sample_data.ip_address,
            vip_network_id=self.sample_data.network_id,
            vip_port_id=self.sample_data.port_id,
            vip_subnet_id=self.sample_data.subnet_id)
        self.assertEqual(ref_provider_lb.to_dict(render_unsets=True),
                         provider_lb.to_dict(render_unsets=True))

    def test_db_listener_to_provider_listener(self):
        test_db_list = data_models.Listener(id=1)
        provider_list = utils.db_listener_to_provider_listener(test_db_list)
        ref_provider_list = driver_dm.Listener(listener_id=1,
                                               insert_headers={})
        self.assertEqual(ref_provider_list.to_dict(render_unsets=True),
                         provider_list.to_dict(render_unsets=True))

    @mock.patch('octavia.api.drivers.utils._get_secret_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_db_listeners_to_provider_listeners(self, mock_load_cert,
                                                mock_secret):
        mock_secret.side_effect = ['ca cert', 'X509 CRL FILE',
                                   'ca cert', 'X509 CRL FILE',
                                   'ca cert', 'X509 CRL FILE']
        cert1 = data_models.TLSContainer(certificate='cert 1')
        cert2 = data_models.TLSContainer(certificate='cert 2')
        cert3 = data_models.TLSContainer(certificate='cert 3')
        mock_load_cert.return_value = {'tls_cert': cert1,
                                       'sni_certs': [cert2, cert3]}
        provider_listeners = utils.db_listeners_to_provider_listeners(
            self.sample_data.test_db_listeners)
        ref_listeners = copy.deepcopy(self.sample_data.provider_listeners)
        # TODO(johnsom) Remove when versions implemented
        for listener in ref_listeners:
            delattr(listener, lib_constants.TLS_VERSIONS)
        self.assertEqual(ref_listeners, provider_listeners)

    @mock.patch('oslo_context.context.RequestContext', return_value=None)
    def test_get_secret_data_errors(self, mock_context):
        mock_cert_mngr = mock.MagicMock()
        mock_cert_mngr.get_secret.side_effect = [Exception, Exception]

        # Test for_delete == False path
        self.assertRaises(exceptions.CertificateRetrievalException,
                          utils._get_secret_data, mock_cert_mngr,
                          'fake_project_id', 1)

        # Test for_delete == True path
        self.assertIsNone(
            utils._get_secret_data(mock_cert_mngr, 'fake_project_id', 2,
                                   for_delete=True))

    @mock.patch('octavia.api.drivers.utils._get_secret_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_listener_dict_to_provider_dict(self, mock_load_cert,
                                            mock_secret):
        mock_secret.side_effect = ['ca cert', 'X509 CRL FILE',
                                   'X509 POOL CA CERT FILE',
                                   'X509 POOL CRL FILE']
        cert1 = data_models.TLSContainer(certificate='cert 1')
        cert2 = data_models.TLSContainer(certificate='cert 2')
        cert3 = data_models.TLSContainer(certificate='cert 3')
        listener_certs = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]}
        pool_cert = data_models.TLSContainer(certificate='pool cert')
        pool_certs = {'tls_cert': pool_cert, 'sni_certs': []}
        mock_load_cert.side_effect = [listener_certs, pool_certs]
        # Two data sources feed this code path before it reaches the
        # function under test: the db_dict from the database layer and the
        # model_dict from the API layer, and they contain different fields.
        # That is why test_listener1_dict in the sample data only carries
        # client_ca_tls_certificate_id for the client certificate and none
        # of the other related fields, and why those fields are deleted
        # from the expected result below.
        expect_prov = copy.deepcopy(self.sample_data.provider_listener1_dict)
        expect_pool_prov = copy.deepcopy(self.sample_data.provider_pool1_dict)
        # TODO(johnsom) Remove when versions and ciphers are implemented
        expect_pool_prov.pop(lib_constants.TLS_VERSIONS)
        expect_prov.pop(lib_constants.TLS_VERSIONS)
        expect_prov['default_pool'] = expect_pool_prov
        provider_listener = utils.listener_dict_to_provider_dict(
            self.sample_data.test_listener1_dict)
        self.assertEqual(expect_prov, provider_listener)

    @mock.patch('octavia.api.drivers.utils._get_secret_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_listener_dict_to_provider_dict_load_cert_error(
            self, mock_load_cert, mock_secret):
        mock_secret.side_effect = ['ca cert', 'X509 CRL FILE',
                                   'X509 POOL CA CERT FILE',
                                   'X509 POOL CRL FILE']
        mock_load_cert.side_effect = [exceptions.OctaviaException, Exception]

        # Test load_cert exception for_delete == False path
        self.assertRaises(exceptions.OctaviaException,
                          utils.listener_dict_to_provider_dict,
                          self.sample_data.test_listener1_dict)

    @mock.patch('octavia.api.drivers.utils._get_secret_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_listener_dict_to_provider_dict_load_cert_error_for_delete(
            self, mock_load_cert, mock_secret):
        mock_secret.side_effect = ['ca cert', 'X509 CRL FILE',
                                   'X509 POOL CA CERT FILE',
                                   'X509 POOL CRL FILE']
        mock_load_cert.side_effect = [Exception]

        # Test load_cert exception for_delete == True path
        expect_prov = copy.deepcopy(self.sample_data.provider_listener1_dict)
        expect_pool_prov = copy.deepcopy(self.sample_data.provider_pool1_dict)
        del expect_pool_prov['tls_container_data']
        # TODO(johnsom) Remove when versions and ciphers are implemented
        expect_pool_prov.pop(lib_constants.TLS_VERSIONS)
        expect_prov.pop(lib_constants.TLS_VERSIONS)
        expect_prov['default_pool'] = expect_pool_prov
        del expect_prov['default_tls_container_data']
        del expect_prov['sni_container_data']
        provider_listener = utils.listener_dict_to_provider_dict(
            self.sample_data.test_listener1_dict, for_delete=True)
        self.assertEqual(expect_prov, provider_listener)

    @mock.patch('octavia.api.drivers.utils._get_secret_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_listener_dict_to_provider_dict_SNI(self, mock_load_cert,
                                                mock_secret):
        mock_secret.return_value = 'ca cert'
        cert1 = data_models.TLSContainer(certificate='cert 1')
        cert2 = data_models.TLSContainer(certificate='cert 2')
        cert3 = data_models.TLSContainer(certificate='cert 3')
        mock_load_cert.return_value = {'tls_cert': cert1,
                                       'sni_certs': [cert2, cert3]}

        # Test with bad SNI content
        test_listener = copy.deepcopy(self.sample_data.test_listener1_dict)
        test_listener['sni_containers'] = [()]

        self.assertRaises(exceptions.ValidationException,
                          utils.listener_dict_to_provider_dict,
                          test_listener)

    @mock.patch('octavia.api.drivers.utils._get_secret_data')
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_db_pool_to_provider_pool(self, mock_load_cert, mock_secret):
        pool_cert = data_models.TLSContainer(certificate='pool cert')
        mock_load_cert.return_value = {'tls_cert': pool_cert,
                                       'sni_certs': None,
                                       'client_ca_cert': None}
        mock_secret.side_effect = ['X509 POOL CA CERT FILE',
                                   'X509 POOL CRL FILE']
        provider_pool =
utils.db_pool_to_provider_pool( self.sample_data.db_pool1) # TODO(johnsom) Remove when versions and ciphers are implemented expect_prov_pool = copy.deepcopy(self.sample_data.provider_pool1) delattr(expect_prov_pool, lib_constants.TLS_VERSIONS) self.assertEqual(expect_prov_pool, provider_pool) @mock.patch('octavia.api.drivers.utils._get_secret_data') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_db_pool_to_provider_pool_partial(self, mock_load_cert, mock_secret): pool_cert = data_models.TLSContainer(certificate='pool cert') mock_load_cert.return_value = {'tls_cert': pool_cert, 'sni_certs': None, 'client_ca_cert': None} mock_secret.side_effect = ['X509 POOL CA CERT FILE', 'X509 POOL CRL FILE'] test_db_pool = self.sample_data.db_pool1 test_db_pool.members = [self.sample_data.db_member1] provider_pool = utils.db_pool_to_provider_pool(test_db_pool) # TODO(johnsom) Remove when versions and ciphers are implemented expect_prov_pool = copy.deepcopy(self.sample_data.provider_pool1) delattr(expect_prov_pool, lib_constants.TLS_VERSIONS) self.assertEqual(expect_prov_pool, provider_pool) @mock.patch('octavia.api.drivers.utils._get_secret_data') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_db_pools_to_provider_pools(self, mock_load_cert, mock_secret): pool_cert = data_models.TLSContainer(certificate='pool cert') mock_load_cert.return_value = {'tls_cert': pool_cert, 'sni_certs': None, 'client_ca_cert': None} mock_secret.side_effect = ['X509 POOL CA CERT FILE', 'X509 POOL CRL FILE'] provider_pools = utils.db_pools_to_provider_pools( self.sample_data.test_db_pools) # TODO(johnsom) Remove when versions and ciphers are implemented expect_prov_pools = copy.deepcopy(self.sample_data.provider_pools) for prov_pool in expect_prov_pools: delattr(prov_pool, lib_constants.TLS_VERSIONS) self.assertEqual(expect_prov_pools, provider_pools) @mock.patch('octavia.api.drivers.utils._get_secret_data') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_pool_dict_to_provider_dict(self, mock_load_cert, mock_secret): pool_cert = data_models.TLSContainer(certificate='pool cert') mock_load_cert.return_value = {'tls_cert': pool_cert, 'sni_certs': None, 'client_ca_cert': None} mock_secret.side_effect = ['X509 POOL CA CERT FILE', 'X509 POOL CRL FILE'] expect_prov = copy.deepcopy(self.sample_data.provider_pool1_dict) expect_prov.pop('crl_container_ref') provider_pool_dict = utils.pool_dict_to_provider_dict( self.sample_data.test_pool1_dict) provider_pool_dict.pop('crl_container_ref') # TODO(johnsom) Remove when versions and ciphers are implemented expect_prov.pop(lib_constants.TLS_VERSIONS) self.assertEqual(expect_prov, provider_pool_dict) @mock.patch('octavia.api.drivers.utils._get_secret_data') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_pool_dict_to_provider_dict_load_cert_error( self, mock_load_cert, mock_secret): mock_load_cert.side_effect = [exceptions.OctaviaException, Exception] # Test load_cert exception for_delete == False path self.assertRaises(exceptions.OctaviaException, utils.pool_dict_to_provider_dict, self.sample_data.test_pool1_dict) @mock.patch('octavia.api.drivers.utils._get_secret_data') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_pool_dict_to_provider_dict_load_cert_error_for_delete( self, mock_load_cert, mock_secret): mock_load_cert.side_effect = [Exception] # Test load_cert exception for_delete == True path 
mock_secret.side_effect = ['X509 POOL CA CERT FILE', 'X509 POOL CRL FILE'] expect_prov = copy.deepcopy(self.sample_data.provider_pool1_dict) expect_prov.pop('crl_container_ref') del expect_prov['tls_container_data'] provider_pool_dict = utils.pool_dict_to_provider_dict( self.sample_data.test_pool1_dict, for_delete=True) provider_pool_dict.pop('crl_container_ref') # TODO(johnsom) Remove when versions and ciphers are implemented expect_prov.pop(lib_constants.TLS_VERSIONS) self.assertEqual(expect_prov, provider_pool_dict) def test_db_HM_to_provider_HM(self): provider_hm = utils.db_HM_to_provider_HM(self.sample_data.db_hm1) self.assertEqual(self.sample_data.provider_hm1, provider_hm) def test_hm_dict_to_provider_dict(self): provider_hm_dict = utils.hm_dict_to_provider_dict( self.sample_data.test_hm1_dict) self.assertEqual(self.sample_data.provider_hm1_dict, provider_hm_dict) def test_HM_to_provider_HM_with_http_version_and_domain_name(self): provider_hm = utils.db_HM_to_provider_HM(self.sample_data.db_hm2) self.assertEqual(self.sample_data.provider_hm2, provider_hm) provider_hm_dict = utils.hm_dict_to_provider_dict( self.sample_data.test_hm2_dict) self.assertEqual(self.sample_data.provider_hm2_dict, provider_hm_dict) def test_hm_dict_to_provider_dict_partial(self): provider_hm_dict = utils.hm_dict_to_provider_dict({'id': 1}) self.assertEqual({'healthmonitor_id': 1}, provider_hm_dict) def test_db_members_to_provider_members(self): provider_members = utils.db_members_to_provider_members( self.sample_data.db_pool1_members) self.assertEqual(self.sample_data.provider_pool1_members, provider_members) def test_member_dict_to_provider_dict(self): provider_member_dict = utils.member_dict_to_provider_dict( self.sample_data.test_member1_dict) self.assertEqual(self.sample_data.provider_member1_dict, provider_member_dict) def test_db_l7policies_to_provider_l7policies(self): provider_rules = utils.db_l7policies_to_provider_l7policies( self.sample_data.db_l7policies) self.assertEqual(self.sample_data.provider_l7policies, provider_rules) def test_l7policy_dict_to_provider_dict(self): provider_l7policy_dict = utils.l7policy_dict_to_provider_dict( self.sample_data.test_l7policy1_dict) self.assertEqual(self.sample_data.provider_l7policy1_dict, provider_l7policy_dict) def test_db_l7rules_to_provider_l7rules(self): provider_rules = utils.db_l7rules_to_provider_l7rules( self.sample_data.db_l7Rules) self.assertEqual(self.sample_data.provider_rules, provider_rules) def test_l7rule_dict_to_provider_dict(self): provider_rules_dict = utils.l7rule_dict_to_provider_dict( self.sample_data.test_l7rule1_dict) self.assertEqual(self.sample_data.provider_l7rule1_dict, provider_rules_dict) def test_vip_dict_to_provider_dict(self): new_vip_dict = utils.vip_dict_to_provider_dict( self.sample_data.test_vip_dict) self.assertEqual(self.sample_data.provider_vip_dict, new_vip_dict) def test_vip_dict_to_provider_dict_partial(self): new_vip_dict = utils.vip_dict_to_provider_dict( {'ip_address': '192.0.2.44'}) self.assertEqual({'vip_address': '192.0.2.44'}, new_vip_dict) def test_provider_vip_dict_to_vip_obj(self): new_provider_vip = utils.provider_vip_dict_to_vip_obj( self.sample_data.provider_vip_dict) self.assertEqual(self.sample_data.db_vip, new_provider_vip) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/api/hooks/0000775000175000017500000000000000000000000020635 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/hooks/__init__.py0000664000175000017500000000107400000000000022750 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/hooks/test_query_parameters.py0000664000175000017500000002161500000000000025643 0ustar00zuulzuul00000000000000# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils from octavia.api.common import pagination from octavia.common import exceptions from octavia.db import models from octavia.tests.unit import base DEFAULT_SORTS = [('created_at', 'asc'), ('id', 'asc')] class TestPaginationHelper(base.TestCase): @mock.patch('octavia.api.common.pagination.request') def test_no_params(self, request_mock): params = {} helper = pagination.PaginationHelper(params) query_mock = mock.MagicMock() helper.apply(query_mock, models.LoadBalancer) self.assertEqual(DEFAULT_SORTS, helper.sort_keys) self.assertIsNone(helper.marker) self.assertEqual(1000, helper.limit) query_mock.order_by().order_by().limit.assert_called_with( 1000) def test_sort_empty(self): sort_params = "" params = {'sort': sort_params} act_params = pagination.PaginationHelper( params).sort_keys self.assertEqual([], act_params) def test_sort_none(self): sort_params = None params = {'sort': sort_params} act_params = pagination.PaginationHelper( params).sort_keys self.assertEqual([], act_params) def test_sort_key_dir(self): sort_keys = "key1,key2,key3" sort_dirs = "asc,desc" ref_sort_keys = [('key1', 'asc'), ('key2', 'desc'), ('key3', 'asc')] params = {'sort_key': sort_keys, 'sort_dir': sort_dirs} helper = pagination.PaginationHelper(params) self.assertEqual(ref_sort_keys, helper.sort_keys) def test_invalid_sorts(self): sort_params = "shoud_fail_exception:cause:of:this" params = {'sort': sort_params} self.assertRaises(exceptions.InvalidSortKey, pagination.PaginationHelper, params) sort_params = "ke1:asc,key2:InvalidDir,key3" params = {'sort': sort_params} self.assertRaises(exceptions.InvalidSortDirection, pagination.PaginationHelper, params) def test_marker(self): marker = 'random_uuid' params = {'marker': marker} helper = pagination.PaginationHelper(params) 
self.assertEqual(marker, helper.marker) @mock.patch('octavia.api.common.pagination.request') def test_limit(self, request_mock): limit = 100 params = {'limit': limit} helper = pagination.PaginationHelper(params) query_mock = mock.MagicMock() helper.apply(query_mock, models.LoadBalancer) query_mock.order_by().order_by().limit.assert_called_with( limit) @mock.patch('octavia.api.common.pagination.request') def test_filter_correct_params(self, request_mock): params = {'id': 'fake_id'} helper = pagination.PaginationHelper(params) query_mock = mock.MagicMock() helper.apply(query_mock, models.LoadBalancer) self.assertEqual(params, helper.filters) @mock.patch('octavia.api.common.pagination.request') def test_filter_mismatched_params(self, request_mock): params = { 'id': 'fake_id', 'fields': 'field', 'limit': '10', 'sort': None, } filters = {'id': 'fake_id'} helper = pagination.PaginationHelper(params) query_mock = mock.MagicMock() helper.apply(query_mock, models.LoadBalancer) self.assertEqual(filters, helper.filters) helper.apply(query_mock, models.LoadBalancer, enforce_valid_params=True) self.assertEqual(filters, helper.filters) @mock.patch('octavia.api.common.pagination.request') def test_filter_with_invalid_params(self, request_mock): params = {'id': 'fake_id', 'no_such_param': 'id'} filters = {'id': 'fake_id'} helper = pagination.PaginationHelper(params) query_mock = mock.MagicMock() helper.apply(query_mock, models.LoadBalancer, # silently ignore invalid parameter enforce_valid_params=False) self.assertEqual(filters, helper.filters) self.assertRaises( exceptions.InvalidFilterArgument, pagination.PaginationHelper.apply, helper, query_mock, models.Amphora, ) @mock.patch('octavia.api.common.pagination.request') def test_duplicate_argument(self, request_mock): params = {'loadbalacer_id': 'id1', 'load_balacer_id': 'id2'} query_mock = mock.MagicMock() helper = pagination.PaginationHelper(params) self.assertRaises( exceptions.InvalidFilterArgument, pagination.PaginationHelper.apply, helper, query_mock, models.Amphora, ) @mock.patch('octavia.api.common.pagination.request') def test_fields_not_passed(self, request_mock): params = {'fields': 'id'} helper = pagination.PaginationHelper(params) query_mock = mock.MagicMock() helper.apply(query_mock, models.LoadBalancer) self.assertEqual({}, helper.filters) @mock.patch('octavia.api.common.pagination.request') def test_make_links_next(self, request_mock): request_mock.path = "/lbaas/v2/pools/1/members" request_mock.path_url = "http://localhost" + request_mock.path member1 = models.Member() member1.id = uuidutils.generate_uuid() model_list = [member1] params = {'limit': 1} helper = pagination.PaginationHelper(params) links = helper._make_links(model_list) self.assertEqual(links[0].rel, "next") self.assertEqual( links[0].href, "{path_url}?limit={limit}&marker={marker}".format( path_url=request_mock.path_url, limit=params['limit'], marker=member1.id )) @mock.patch('octavia.api.common.pagination.request') def test_make_links_prev(self, request_mock): request_mock.path = "/lbaas/v2/pools/1/members" request_mock.path_url = "http://localhost" + request_mock.path member1 = models.Member() member1.id = uuidutils.generate_uuid() model_list = [member1] params = {'limit': 1, 'marker': member1.id} helper = pagination.PaginationHelper(params) links = helper._make_links(model_list) self.assertEqual(links[0].rel, "previous") self.assertEqual( links[1].href, "{path_url}?limit={limit}&marker={marker}".format( path_url=request_mock.path_url, limit=params['limit'], 
                marker=member1.id))
        self.assertEqual(links[1].rel, "next")
        self.assertEqual(
            links[1].href,
            "{path_url}?limit={limit}&marker={marker}".format(
                path_url=request_mock.path_url,
                limit=params['limit'],
                marker=member1.id))

    @mock.patch('octavia.api.common.pagination.request')
    def test_make_links_with_configured_url(self, request_mock):
        request_mock.path = "/lbaas/v2/pools/1/members"
        request_mock.path_url = "http://localhost" + request_mock.path
        api_base_uri = "https://127.0.0.1"
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(group='api_settings', api_base_uri=api_base_uri)
        member1 = models.Member()
        member1.id = uuidutils.generate_uuid()
        model_list = [member1]
        params = {'limit': 1, 'marker': member1.id}
        helper = pagination.PaginationHelper(params)

        links = helper._make_links(model_list)

        self.assertEqual(links[0].rel, "previous")
        self.assertEqual(
            links[1].href,
            "{base_uri}{path}?limit={limit}&marker={marker}".format(
                base_uri=api_base_uri,
                path=request_mock.path,
                limit=params['limit'],
                marker=member1.id
            ))
        self.assertEqual(links[1].rel, "next")
        self.assertEqual(
            links[1].href,
            "{base_uri}{path}?limit={limit}&marker={marker}".format(
                base_uri=api_base_uri,
                path=request_mock.path,
                limit=params['limit'],
                marker=member1.id))

==> octavia-6.2.2/octavia/tests/unit/api/v2/__init__.py <==
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

==> octavia-6.2.2/octavia/tests/unit/api/v2/types/__init__.py <==
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
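
# NOTE: The v2 type tests that follow all use the same WSME round-trip
# pattern: build a request body as a plain dict, deserialize it with
# wsme_json.fromjson(), and expect wsme.exc.InvalidInput (or ValueError)
# for invalid input. A minimal sketch, mirroring the tests themselves:
#
#     from oslo_utils import uuidutils
#     from wsme.rest import json as wsme_json
#
#     from octavia.api.v2.types import flavors as flavor_type
#
#     body = {"name": "test_name",
#             "flavor_profile_id": uuidutils.generate_uuid()}
#     flavor = wsme_json.fromjson(flavor_type.FlavorPOST, body)
#     assert flavor.enabled  # POST types default 'enabled' to True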
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/v2/types/base.py0000664000175000017500000002034600000000000022476 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from wsme import exc from wsme.rest import json as wsme_json from octavia.api.common import types as base_type from octavia.common import constants from octavia.tests.unit import base def build_body(mandatory_fields, extra_attributes): body = {} for key in mandatory_fields: body[key] = mandatory_fields[key] for key in extra_attributes: body[key] = extra_attributes[key] return body class BaseTypesTest(base.TestCase): _type = base_type.BaseType _mandatory_fields = {} class BaseTestUuid(base.TestCase): def assert_uuid_attr(self, attr): kwargs = {attr: uuidutils.generate_uuid()} self._type(**kwargs) def assert_uuid_attr_fail_with_integer(self, attr): kwargs = {attr: 1} self.assertRaises(exc.InvalidInput, self._type, **kwargs) def assert_uuid_attr_fail_with_short_str(self, attr): kwargs = {attr: '12345'} self.assertRaises(exc.InvalidInput, self._type, **kwargs) def assert_uuid_attr_fail_with_shorter_than_uuid(self, attr): kwargs = {attr: uuidutils.generate_uuid()[1:]} self.assertRaises(exc.InvalidInput, self._type, **kwargs) def assert_uuid_attr_fail_with_longer_than_uuid(self, attr): kwargs = {attr: uuidutils.generate_uuid() + "0"} self.assertRaises(exc.InvalidInput, self._type, **kwargs) class BaseTestString(base.TestCase): def _default_min_max_lengths(self, min_length=None, max_length=None): if max_length is None: if min_length is None: max_length = 255 min_length = 2 else: max_length = min_length + 1 else: if min_length is None: min_length = max_length - 1 return min_length, max_length def assert_string_attr(self, attr, min_length=None, max_length=None): min_length, max_length = self._default_min_max_lengths(min_length, max_length) string_val = 'a' * (max_length - 1) kwargs = {attr: string_val} self._type(**kwargs) def assert_string_attr_min_length(self, attr, min_length): min_length, max_length = self._default_min_max_lengths(min_length) string_val = 'a' * (min_length - 1) kwargs = {attr: string_val} # No point in testing if min_length is <= 0 if min_length > 0: self.assertRaises(exc.InvalidInput, self._type, **kwargs) def assert_string_attr_max_length(self, attr, max_length=None): min_length, max_length = self._default_min_max_lengths(max_length) string_val = 'a' * (max_length + 1) kwargs = {attr: string_val} self.assertRaises(exc.InvalidInput, self._type, **kwargs) class BaseTestBool(base.TestCase): def assert_bool_attr(self, attr): kwargs = {attr: True} self.assertIsNotNone(self._type(**kwargs)) kwargs = {attr: False} self.assertIsNotNone(self._type(**kwargs)) def assert_bool_attr_non_bool(self, attr): kwargs = {attr: 'test'} self.assertRaises(exc.InvalidInput, self._type, **kwargs) class TestIdMixin(BaseTestUuid): id_attr = 'id' def test_id(self): 
self.assert_uuid_attr(self.id_attr) self.assert_uuid_attr_fail_with_integer(self.id_attr) self.assert_uuid_attr_fail_with_short_str(self.id_attr) self.assert_uuid_attr_fail_with_shorter_than_uuid(self.id_attr) self.assert_uuid_attr_fail_with_longer_than_uuid(self.id_attr) def test_id_readonly(self): body = build_body(self._mandatory_fields, {self.id_attr: uuidutils.generate_uuid()}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestProjectIdMixin(BaseTestUuid): project_id_attr = 'project_id' def test_project_id(self): self.assert_uuid_attr(self.project_id_attr) self.assert_uuid_attr_fail_with_integer(self.project_id_attr) self.assert_uuid_attr_fail_with_short_str(self.project_id_attr) self.assert_uuid_attr_fail_with_shorter_than_uuid(self.project_id_attr) self.assert_uuid_attr_fail_with_longer_than_uuid(self.project_id_attr) def test_project_id_readonly(self): body = build_body(self._mandatory_fields, {self.project_id_attr: uuidutils.generate_uuid()}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestNameMixin(BaseTestString): name_attr = 'name' def test_name(self): self.assert_string_attr(self.name_attr, min_length=0, max_length=255) self.assert_string_attr_min_length(self.name_attr, 0) self.assert_string_attr_max_length(self.name_attr, 255) def test_editable_name(self): name = "Name" body = build_body(self._mandatory_fields, {self.name_attr: name}) type_instance = wsme_json.fromjson(self._type, body) self.assertEqual(name, type_instance.name) class TestDescriptionMixin(BaseTestString): description_attr = 'description' def test_description(self): self.assert_string_attr(self.description_attr, min_length=0, max_length=255) self.assert_string_attr_min_length(self.description_attr, 0) self.assert_string_attr_max_length(self.description_attr, 255) def test_editable_description(self): description = "Description" body = build_body(self._mandatory_fields, {self.description_attr: description}) type_instance = wsme_json.fromjson(self._type, body) self.assertEqual(description, type_instance.description) class TestEnabledMixin(BaseTestBool): enabled_attr = 'enabled' def test_enabled(self): self.assert_bool_attr(self.enabled_attr) self.assert_bool_attr_non_bool(self.enabled_attr) def test_default_enabled_true(self): body = build_body(self._mandatory_fields, {}) type_instance = wsme_json.fromjson(self._type, body) self.assertTrue(type_instance.enabled) def test_editable_enabled(self): body = build_body(self._mandatory_fields, {"enabled": False}) type_instance = wsme_json.fromjson(self._type, body) self.assertFalse(type_instance.enabled) class TestProvisioningStatusMixin(BaseTestString): provisioning_attr = 'provisioning_status' def test_provisioning_status(self): self.assert_string_attr(self.provisioning_attr, min_length=0, max_length=16) self.assert_string_attr_min_length(self.provisioning_attr, 0) self.assert_string_attr_max_length(self.provisioning_attr, 16) def test_provisioning_status_readonly(self): status = constants.ACTIVE body = build_body(self._mandatory_fields, {self.provisioning_attr: status}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestOperatingStatusMixin(BaseTestString): operating_attr = 'operating_status' def test_operating_status(self): self.assert_string_attr(self.operating_attr, min_length=0, max_length=16) self.assert_string_attr_min_length(self.operating_attr, 0) self.assert_string_attr_max_length(self.operating_attr, 16) def test_operating_status_readonly(self): status = 
constants.ONLINE
        body = build_body(self._mandatory_fields,
                          {self.operating_attr: status})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

==> octavia-6.2.2/octavia/tests/unit/api/v2/types/test_availability_zone_profiles.py <==
# Copyright 2019 Verizon Media
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from wsme import exc
from wsme.rest import json as wsme_json

from octavia.api.v2.types import availability_zone_profile as azp_type
from octavia.common import constants
from octavia.tests.unit.api.common import base


class TestAvailabilityZoneProfile(object):

    _type = None

    def test_availability_zone_profile(self):
        body = {"name": "test_name", "provider_name": "test1",
                constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
        availability_zone = wsme_json.fromjson(self._type, body)
        self.assertEqual(availability_zone.name, body["name"])

    def test_invalid_name(self):
        body = {"name": 0}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_name_length(self):
        body = {"name": "x" * 256}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_provider_name_length(self):
        body = {"name": "x" * 250, "provider_name": "X" * 256}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_name_mandatory(self):
        body = {"provider_name": "test1",
                constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_provider_name_mandatory(self):
        body = {"name": "test_name",
                constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_meta_mandatory(self):
        body = {"name": "test_name", "provider_name": "test1"}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)


class TestAvailabilityZoneProfilePOST(base.BaseTypesTest,
                                      TestAvailabilityZoneProfile):

    _type = azp_type.AvailabilityZoneProfilePOST

==> octavia-6.2.2/octavia/tests/unit/api/v2/types/test_availability_zones.py <==
# Copyright 2017 Walmart Stores Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
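
# NOTE: Hedged sketch of why the *_mandatory tests in this module raise
# exc.InvalidInput: the POST types declare required attributes through
# WSME. The real definitions live in
# octavia/api/v2/types/availability_zones.py and may differ, but
# conceptually:
#
#     from wsme import types as wtypes
#
#     class AvailabilityZonePOST(base.BaseType):
#         name = wtypes.wsattr(wtypes.StringType(max_length=255),
#                              mandatory=True)
#         description = wtypes.wsattr(wtypes.StringType(max_length=255))
#         enabled = wtypes.wsattr(bool, default=True)
#         availability_zone_profile_id = wtypes.wsattr(
#             wtypes.UuidType(), mandatory=True)
#
# wsme_json.fromjson() rejects a body that omits a mandatory attribute.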
from oslo_utils import uuidutils from wsme import exc from wsme.rest import json as wsme_json from octavia.api.v2.types import availability_zones as availability_zone_type from octavia.tests.unit.api.common import base class TestAvailabilityZone(object): _type = None def test_availability_zone(self): body = {"name": "test_name", "description": "test_description", "availability_zone_profile_id": uuidutils.generate_uuid()} availability_zone = wsme_json.fromjson(self._type, body) self.assertTrue(availability_zone.enabled) def test_invalid_name(self): body = {"name": 0, "availability_zone_profile_id": uuidutils.generate_uuid()} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_name_length(self): body = {"name": "x" * 256, "availability_zone_profile_id": uuidutils.generate_uuid()} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_description(self): body = {"availability_zone_profile_id": uuidutils.generate_uuid(), "description": 0, "name": "test"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_description_length(self): body = {"name": "x" * 250, "availability_zone_profile_id": uuidutils.generate_uuid(), "description": "0" * 256} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_enabled(self): body = {"name": "test_name", "availability_zone_profile_id": uuidutils.generate_uuid(), "enabled": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_name_mandatory(self): body = {"description": "xyz", "availability_zone_profile_id": uuidutils.generate_uuid(), "enabled": True} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_availability_zone_profile_id_mandatory(self): body = {"name": "test_name"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestAvailabilityZonePOST(base.BaseTypesTest, TestAvailabilityZone): _type = availability_zone_type.AvailabilityZonePOST def test_non_uuid_project_id(self): body = {"name": "test_name", "description": "test_description", "availability_zone_profile_id": uuidutils.generate_uuid()} lb = wsme_json.fromjson(self._type, body) self.assertEqual(lb.availability_zone_profile_id, body['availability_zone_profile_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/v2/types/test_flavor_profiles.py0000664000175000017500000000473700000000000026025 0ustar00zuulzuul00000000000000# Copyright 2017 Walmart Stores Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from wsme import exc from wsme.rest import json as wsme_json from octavia.api.v2.types import flavor_profile as fp_type from octavia.common import constants from octavia.tests.unit.api.common import base class TestFlavorProfile(object): _type = None def test_flavor_profile(self): body = {"name": "test_name", "provider_name": "test1", constants.FLAVOR_DATA: '{"hello": "world"}'} flavor = wsme_json.fromjson(self._type, body) self.assertEqual(flavor.name, body["name"]) def test_invalid_name(self): body = {"name": 0} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_name_length(self): body = {"name": "x" * 256} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_provider_name_length(self): body = {"name": "x" * 250, "provider_name": "X" * 256} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_name_mandatory(self): body = {"provider_name": "test1", constants.FLAVOR_DATA: '{"hello": "world"}'} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_provider_name_mandatory(self): body = {"name": "test_name", constants.FLAVOR_DATA: '{"hello": "world"}'} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_meta_mandatory(self): body = {"name": "test_name", "provider_name": "test1"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestFlavorProfilePOST(base.BaseTypesTest, TestFlavorProfile): _type = fp_type.FlavorProfilePOST ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/v2/types/test_flavors.py0000664000175000017500000000630700000000000024300 0ustar00zuulzuul00000000000000# Copyright 2017 Walmart Stores Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import uuidutils from wsme import exc from wsme.rest import json as wsme_json from octavia.api.v2.types import flavors as flavor_type from octavia.tests.unit.api.common import base class TestFlavor(object): _type = None def test_flavor(self): body = {"name": "test_name", "description": "test_description", "flavor_profile_id": uuidutils.generate_uuid()} flavor = wsme_json.fromjson(self._type, body) self.assertTrue(flavor.enabled) def test_invalid_name(self): body = {"name": 0, "flavor_profile_id": uuidutils.generate_uuid()} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_name_length(self): body = {"name": "x" * 256, "flavor_profile_id": uuidutils.generate_uuid()} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_description(self): body = {"flavor_profile_id": uuidutils.generate_uuid(), "description": 0, "name": "test"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_description_length(self): body = {"name": "x" * 250, "flavor_profile_id": uuidutils.generate_uuid(), "description": "0" * 256} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_enabled(self): body = {"name": "test_name", "flavor_profile_id": uuidutils.generate_uuid(), "enabled": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_name_mandatory(self): body = {"description": "xyz", "flavor_profile_id": uuidutils.generate_uuid(), "enabled": True} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_flavor_profile_id_mandatory(self): body = {"name": "test_name"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestFlavorPOST(base.BaseTypesTest, TestFlavor): _type = flavor_type.FlavorPOST def test_non_uuid_project_id(self): body = {"name": "test_name", "description": "test_description", "flavor_profile_id": uuidutils.generate_uuid()} lb = wsme_json.fromjson(self._type, body) self.assertEqual(lb.flavor_profile_id, body['flavor_profile_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/v2/types/test_health_monitors.py0000664000175000017500000002224400000000000026021 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
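# A minimal illustrative sketch (not from the upstream module), assuming
# octavia and wsme are importable: FlavorPOST mirrors the availability-zone
# type exercised earlier -- only name and flavor_profile_id are required,
# and "enabled" defaults on when omitted.
if __name__ == '__main__':
    from oslo_utils import uuidutils as _uuidutils
    from wsme.rest import json as _wsme_json
    from octavia.api.v2.types import flavors as _flavor_type

    _flavor = _wsme_json.fromjson(
        _flavor_type.FlavorPOST,
        {"name": "demo", "flavor_profile_id": _uuidutils.generate_uuid()})
    assert _flavor.enabled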
from oslo_utils import uuidutils
from wsme import exc
from wsme.rest import json as wsme_json
from wsme import types as wsme_types

from octavia.api.v2.types import health_monitor as hm_type
from octavia.common import constants
from octavia.tests.unit.api.v2.types import base


class TestHealthMonitor(object):

    _type = None

    def test_invalid_type(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": 1,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_delay(self):
        body = {"delay": "one", "timeout": 1, "max_retries": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)

    def test_invalid_timeout(self):
        body = {"delay": 1, "timeout": "one", "max_retries": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)

    def test_invalid_max_retries_down(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1,
                "max_retries_down": "one"}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)

    def test_invalid_max_retries(self):
        body = {"delay": 1, "timeout": 1, "max_retries": "one"}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)

    def test_invalid_http_method(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1,
                "http_method": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_url_path(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1, "url_path": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_url_path_with_url(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1,
                "url_path": 'https://www.openstack.org'}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_url_path_no_leading_slash(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1,
                "url_path": 'blah'}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_expected_codes(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1,
                "expected_codes": "lol"}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_tags(self):
        body = {"tags": "invalid_tag"}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid(),
                         "delay": 1, "timeout": 1, "max_retries": 1})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
        body = {"tags": [1, 2]}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid(),
                         "delay": 1, "timeout": 1, "max_retries": 1})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)


class TestHealthMonitorPOST(base.BaseTypesTest, TestHealthMonitor):

    _type = hm_type.HealthMonitorPOST

    def test_health_monitor(self):
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "max_retries_down": 1, "max_retries": 1,
                "pool_id": uuidutils.generate_uuid(), "tags": ['test_tag']}
        hm = wsme_json.fromjson(self._type, body)
        self.assertTrue(hm.admin_state_up)

    def test_type_mandatory(self):
        body = {"delay": 80, "timeout": 1, "max_retries": 1,
                "pool_id": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_delay_mandatory(self):
        body = {"type": constants.HEALTH_MONITOR_HTTP, "timeout": 1,
                "max_retries": 1, "pool_id": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_timeout_mandatory(self):
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "max_retries": 1, "pool_id": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_max_retries_mandatory(self):
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "pool_id": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_default_health_monitor_values(self):
        # http_method = 'GET'
        # url_path = '/'
        # expected_codes = '200'
        # max_retries_down = 3
        # admin_state_up = True
        # The above are not required but should have the above example
        # defaults
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "max_retries": 1,
                "pool_id": uuidutils.generate_uuid()}
        hmpost = wsme_json.fromjson(self._type, body)
        self.assertEqual(wsme_types.Unset, hmpost.http_method)
        self.assertEqual(wsme_types.Unset, hmpost.url_path)
        self.assertEqual(wsme_types.Unset, hmpost.expected_codes)
        self.assertEqual(3, hmpost.max_retries_down)
        self.assertTrue(hmpost.admin_state_up)

    def test_url_path_with_query_and_fragment(self):
        url_path = "/v2/index?a=12,b=34#123dd"
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "max_retries": 1,
                "pool_id": uuidutils.generate_uuid(),
                "url_path": url_path}
        hmpost = wsme_json.fromjson(self._type, body)
        self.assertEqual(wsme_types.Unset, hmpost.http_method)
        self.assertEqual(url_path, hmpost.url_path)
        self.assertEqual(wsme_types.Unset, hmpost.expected_codes)
        self.assertEqual(3, hmpost.max_retries_down)
        self.assertTrue(hmpost.admin_state_up)

    def test_non_uuid_project_id(self):
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "max_retries_down": 1, "max_retries": 1,
                "project_id": "non-uuid",
                "pool_id": uuidutils.generate_uuid()}
        hm = wsme_json.fromjson(self._type, body)
        self.assertEqual(hm.project_id, body['project_id'])


class TestHealthMonitorPUT(base.BaseTypesTest, TestHealthMonitor):

    _type = hm_type.HealthMonitorPUT

    def test_health_monitor(self):
        body = {"http_method": constants.HEALTH_MONITOR_HTTP_METHOD_HEAD,
                "tags": ['test_tag']}
        hm = wsme_json.fromjson(self._type, body)
        self.assertEqual(wsme_types.Unset, hm.admin_state_up)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/octavia/tests/unit/api/v2/types/test_l7policies.py0000664000175000017500000001633400000000000024677 0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from wsme import exc from wsme.rest import json as wsme_json from wsme import types as wsme_types from octavia.api.v2.types import l7policy as l7policy_type from octavia.common import constants from octavia.tests.unit.api.common import base class TestL7PolicyPOST(base.BaseTypesTest): _type = l7policy_type.L7PolicyPOST def setUp(self): super(TestL7PolicyPOST, self).setUp() self.listener_id = uuidutils.generate_uuid() def test_l7policy(self): body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "tags": ['test_tag']} l7policy = wsme_json.fromjson(self._type, body) self.assertEqual(self.listener_id, l7policy.listener_id) self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position) self.assertEqual(wsme_types.Unset, l7policy.redirect_url) self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id) self.assertTrue(l7policy.admin_state_up) def test_action_mandatory(self): body = {"listener_id": self.listener_id} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_listener_id_mandatory(self): body = {"action": constants.L7POLICY_ACTION_REJECT} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_action(self): body = {"listener_id": self.listener_id, "action": "test"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_with_redirect_url(self): url = "http://www.example.com/" body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REDIRECT_TO_URL, "redirect_url": url} l7policy = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position) self.assertEqual(url, l7policy.redirect_url) self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id) def test_invalid_position(self): body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "position": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_tags(self): body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "tags": "invalid_tag"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "tags": [1, 2]} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_l7policy_min_position(self): body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "position": constants.MIN_POLICY_POSITION - 1} self.assertRaises( exc.InvalidInput, wsme_json.fromjson, self._type, body) body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "position": constants.MIN_POLICY_POSITION} l7policy = wsme_json.fromjson(self._type, body) 
self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position) def test_l7policy_max_position(self): body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "position": constants.MAX_POLICY_POSITION + 1} self.assertRaises( exc.InvalidInput, wsme_json.fromjson, self._type, body) body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "position": constants.MAX_POLICY_POSITION} l7policy = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position) def test_invalid_admin_state_up(self): body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REJECT, "admin_state_up": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_url(self): body = {"listener_id": self.listener_id, "action": constants.L7POLICY_ACTION_REDIRECT_TO_URL, "redirect_url": "notvalid"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestL7PolicyPUT(base.BaseTypesTest): _type = l7policy_type.L7PolicyPUT def test_l7policy(self): body = {"action": constants.L7POLICY_ACTION_REJECT, "position": constants.MIN_POLICY_POSITION, "tags": ['test_tag']} l7policy = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position) self.assertEqual(wsme_types.Unset, l7policy.redirect_url) self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id) def test_l7policy_min_position(self): body = {"position": constants.MIN_POLICY_POSITION - 1} self.assertRaises( exc.InvalidInput, wsme_json.fromjson, self._type, body) body = {"position": constants.MIN_POLICY_POSITION} l7policy = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position) def test_l7policy_max_position(self): body = {"position": constants.MAX_POLICY_POSITION + 1} self.assertRaises( exc.InvalidInput, wsme_json.fromjson, self._type, body) body = {"position": constants.MAX_POLICY_POSITION} l7policy = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position) def test_invalid_position(self): body = {"position": "test"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_action(self): body = {"action": "test"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_tags(self): body = {"tags": "invalid_tag"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"tags": [1, 2]} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/v2/types/test_l7rules.py0000664000175000017500000001445000000000000024217 0ustar00zuulzuul00000000000000# Copyright 2016 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
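# A minimal illustrative sketch (not from the upstream module), assuming
# octavia and wsme are importable: the position tests above show the field
# is range-checked against MIN_POLICY_POSITION/MAX_POLICY_POSITION rather
# than clamped -- in-range values deserialize, out-of-range values raise.
if __name__ == '__main__':
    from wsme import exc as _exc
    from wsme.rest import json as _wsme_json
    from octavia.api.v2.types import l7policy as _l7policy_type
    from octavia.common import constants as _constants

    _ok = _wsme_json.fromjson(_l7policy_type.L7PolicyPUT,
                              {"position": _constants.MIN_POLICY_POSITION})
    assert _ok.position == _constants.MIN_POLICY_POSITION
    try:
        _wsme_json.fromjson(
            _l7policy_type.L7PolicyPUT,
            {"position": _constants.MAX_POLICY_POSITION + 1})
        raise AssertionError("out-of-range position was accepted")
    except _exc.InvalidInput:
        pass  # rejected, matching test_l7policy_max_position above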
from wsme import exc from wsme.rest import json as wsme_json from wsme import types as wsme_types from octavia.api.v2.types import l7rule as l7rule_type from octavia.common import constants from octavia.tests.unit.api.common import base class TestL7RulePOST(base.BaseTypesTest): _type = l7rule_type.L7RulePOST def test_l7rule(self): body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api", "tags": ['test_tag']} l7rule = wsme_json.fromjson(self._type, body) self.assertEqual(wsme_types.Unset, l7rule.key) self.assertFalse(l7rule.invert) self.assertTrue(l7rule.admin_state_up) def test_type_mandatory(self): body = {"compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_compare_type_mandatory(self): body = {"type": constants.L7RULE_TYPE_PATH, "value": "/api"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_value_mandatory(self): body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_type(self): body = {"type": "notvalid", "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_compare_type(self): body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": "notvalid", "value": "/api"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_value(self): body = {"type": "notvalid", "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": 123} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_invert(self): body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api", "invert": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_admin_state_up(self): body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api", "admin_state_up": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_key(self): body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api", "key": 123} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_tags(self): body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api", "tags": "invalid_tag"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api", "tags": [1, 2]} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestL7RulePUT(base.BaseTypesTest): _type = l7rule_type.L7RulePUT def test_l7rule(self): body = {"type": constants.L7RULE_TYPE_PATH, "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, "value": "/api", "tags": ['test_tag']} l7rule = wsme_json.fromjson(self._type, body) self.assertEqual(wsme_types.Unset, l7rule.key) self.assertFalse(l7rule.invert) def test_invalid_type(self): body = {"type": "notvalid"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_compare_type(self): body = 
{"compare_type": "notvalid"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_value(self): body = {"value": 123} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_invert(self): body = {"invert": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_admin_state_up(self): body = {"admin_state_up": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_key(self): body = {"key": 123} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_tags(self): body = {"tags": "invalid_tag"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"tags": [1, 2]} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/v2/types/test_listeners.py0000664000175000017500000001507100000000000024632 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from wsme import exc from wsme.rest import json as wsme_json from wsme import types as wsme_types from octavia.api.v2.types import listener as lis_type from octavia.common import constants from octavia.tests.unit.api.common import base class TestListener(object): _type = None def test_invalid_name(self): body = {"name": 0} if self._type is lis_type.ListenerPOST: body.update({"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, "loadbalancer_id": uuidutils.generate_uuid()}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_description(self): body = {"description": 0} if self._type is lis_type.ListenerPOST: body.update({"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, "loadbalancer_id": uuidutils.generate_uuid()}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_admin_state_up(self): body = {"admin_state_up": "notvalid"} if self._type is lis_type.ListenerPOST: body.update({"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, "loadbalancer_id": uuidutils.generate_uuid()}) self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_connection_limit(self): body = {"connection_limit": "test"} if self._type is lis_type.ListenerPOST: body.update({"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, "loadbalancer_id": uuidutils.generate_uuid()}) self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_tags(self): body = {"tags": "invalid_tag"} if self._type is lis_type.ListenerPOST: body.update({"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, "loadbalancer_id": uuidutils.generate_uuid()}) self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"tags": [1, 2]} if self._type is lis_type.ListenerPOST: body.update({"protocol": 
constants.PROTOCOL_HTTP, "protocol_port": 80, "loadbalancer_id": uuidutils.generate_uuid()}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestListenerPOST(base.BaseTypesTest, TestListener): _type = lis_type.ListenerPOST def test_listener(self): body = {"name": "test", "description": "test", "connection_limit": 10, "protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, "default_pool_id": uuidutils.generate_uuid(), "loadbalancer_id": uuidutils.generate_uuid(), "tags": ['test_tag']} listener = wsme_json.fromjson(self._type, body) self.assertTrue(listener.admin_state_up) def test_protocol_mandatory(self): body = {"protocol_port": 80, "loadbalancer_id": uuidutils.generate_uuid()} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_protocol_port_mandatory(self): body = {"protocol": constants.PROTOCOL_HTTP, "loadbalancer_id": uuidutils.generate_uuid()} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_protocol(self): body = {"protocol": "http", "protocol_port": 80} if self._type is lis_type.ListenerPOST: body.update({"loadbalancer_id": uuidutils.generate_uuid()}) self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_protocol_port(self): body = {"protocol": constants.PROTOCOL_HTTP, "protocol_port": "test"} if self._type is lis_type.ListenerPOST: body.update({"loadbalancer_id": uuidutils.generate_uuid()}) self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_loadbalancer_id_mandatory(self): body = {"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_loadbalancer_id(self): body = {"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, "loadbalancer_id": "a"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_non_uuid_project_id(self): body = {"name": "test", "description": "test", "connection_limit": 10, "protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, "default_pool_id": uuidutils.generate_uuid(), "loadbalancer_id": uuidutils.generate_uuid(), "project_id": "non-uuid"} listener = wsme_json.fromjson(self._type, body) self.assertEqual(listener.project_id, body['project_id']) class TestListenerPUT(base.BaseTypesTest, TestListener): _type = lis_type.ListenerPUT def test_listener(self): body = {"name": "test", "description": "test", "connection_limit": 10, "default_tls_container_ref": uuidutils.generate_uuid(), "sni_container_refs": [uuidutils.generate_uuid(), uuidutils.generate_uuid()], "default_pool_id": uuidutils.generate_uuid(), "insert_headers": {"a": "1", "b": "2"}, "tags": ['test_tag']} listener = wsme_json.fromjson(self._type, body) self.assertEqual(wsme_types.Unset, listener.admin_state_up) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/v2/types/test_load_balancers.py0000664000175000017500000001022100000000000025543 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_utils import uuidutils
from wsme import exc
from wsme.rest import json as wsme_json
from wsme import types as wsme_types

from octavia.api.v2.types import load_balancer as lb_type
from octavia.tests.unit.api.common import base


class TestLoadBalancer(object):

    _type = None

    def test_load_balancer(self):
        body = {"name": "test_name", "description": "test_description",
                "vip_subnet_id": uuidutils.generate_uuid(),
                "tags": ['test']}
        lb = wsme_json.fromjson(self._type, body)
        self.assertTrue(lb.admin_state_up)

    def test_invalid_name(self):
        body = {"name": 0}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_name_length(self):
        body = {"name": "x" * 256}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_description(self):
        body = {"description": 0}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_description_length(self):
        body = {"description": "x" * 256}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_enabled(self):
        body = {"admin_state_up": "notvalid"}
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)

    def test_invalid_qos_policy_id(self):
        body = {"vip_qos_policy_id": "invalid_uuid"}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_tags(self):
        body = {"tags": "invalid_tag"}
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
        body = {"tags": [1, 2]}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)


class TestLoadBalancerPOST(base.BaseTypesTest, TestLoadBalancer):

    _type = lb_type.LoadBalancerPOST

    def test_non_uuid_project_id(self):
        body = {"name": "test_name", "description": "test_description",
                "vip_subnet_id": uuidutils.generate_uuid(),
                "project_id": "non-uuid"}
        lb = wsme_json.fromjson(self._type, body)
        self.assertEqual(lb.project_id, body['project_id'])

    def test_vip(self):
        body = {"vip_subnet_id": uuidutils.generate_uuid(),
                "vip_port_id": uuidutils.generate_uuid(),
                "vip_qos_policy_id": uuidutils.generate_uuid()}
        wsme_json.fromjson(self._type, body)

    def test_invalid_ip_address(self):
        body = {"vip_address": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_port_id(self):
        body = {"vip_port_id": "invalid_uuid"}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)

    def test_invalid_subnet_id(self):
        body = {"vip_subnet_id": "invalid_uuid"}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)


class TestLoadBalancerPUT(base.BaseTypesTest, TestLoadBalancer):

    _type = lb_type.LoadBalancerPUT

    def test_load_balancer(self):
        body = {"name": "test_name", "description": "test_description",
                "tags": ['test_tag']}
        lb = wsme_json.fromjson(self._type, body)
        self.assertEqual(wsme_types.Unset, lb.admin_state_up)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/octavia/tests/unit/api/v2/types/test_members.py0000664000175000017500000001432200000000000024252 0ustar00zuulzuul00000000000000# Copyright 2014
Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from wsme import exc from wsme.rest import json as wsme_json from wsme import types as wsme_types from octavia.api.v2.types import member as member_type from octavia.common import constants from octavia.tests.unit.api.v2.types import base class TestMemberPOST(base.BaseTypesTest): _type = member_type.MemberPOST def test_member(self): body = {"name": "member1", "address": "10.0.0.1", "protocol_port": 80, "tags": ['test_tag']} member = wsme_json.fromjson(self._type, body) self.assertTrue(member.admin_state_up) self.assertEqual(1, member.weight) self.assertEqual(wsme_types.Unset, member.subnet_id) def test_address_mandatory(self): body = {} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_protocol_mandatory(self): body = {} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_address(self): body = {"address": "test", "protocol_port": 443} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_subnet_id(self): body = {"address": "10.0.0.1", "protocol_port": 443, "subnet_id": "invalid_uuid"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_admin_state_up(self): body = {"address": "10.0.0.1", "protocol_port": 443, "admin_state_up": "notvalid"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_protocol_port(self): body = {"address": "10.0.0.1", "protocol_port": "test"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_weight(self): body = {"address": "10.0.0.1", "protocol_port": 443, "weight": "test"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_tags(self): body = {"address": "10.0.0.1", "protocol_port": 443, "tags": "invalid_tag"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"address": "10.0.0.1", "protocol_port": 443, "tags": [1, 2]} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_min_weight(self): body = {"address": "10.0.0.1", "protocol_port": 443, "weight": constants.MIN_WEIGHT - 1} self.assertRaises( exc.InvalidInput, wsme_json.fromjson, self._type, body) body = {"address": "10.0.0.1", "protocol_port": 443, "weight": constants.MIN_WEIGHT} member = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MIN_WEIGHT, member.weight) def test_max_weight(self): body = {"address": "10.0.0.1", "protocol_port": 443, "weight": constants.MAX_WEIGHT + 1} self.assertRaises( exc.InvalidInput, wsme_json.fromjson, self._type, body) body = {"address": "10.0.0.1", "protocol_port": 443, "weight": constants.MAX_WEIGHT} member = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MAX_WEIGHT, member.weight) def test_non_uuid_project_id(self): body = {"address": "10.0.0.1", "protocol_port": 80, "project_id": "non-uuid"} member = wsme_json.fromjson(self._type, body) self.assertEqual(member.project_id, body['project_id']) class 
TestMemberPUT(base.BaseTypesTest): _type = member_type.MemberPUT def test_member(self): body = {"name": "new_name", "tags": ['new_tag']} member = wsme_json.fromjson(self._type, body) self.assertEqual(wsme_types.Unset, member.weight) self.assertEqual(wsme_types.Unset, member.admin_state_up) def test_member_full(self): name = "new_name" weight = 1 admin_state = True body = {"name": name, "weight": weight, "admin_state_up": admin_state} member = wsme_json.fromjson(self._type, body) self.assertEqual(name, member.name) self.assertEqual(weight, member.weight) self.assertEqual(admin_state, member.admin_state_up) def test_invalid_admin_state(self): body = {"admin_state_up": "test"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_weight(self): body = {"weight": "test"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) def test_invalid_tags(self): body = {"tags": "invalid_tag"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"tags": [1, 2]} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_min_weight(self): body = {"weight": constants.MIN_WEIGHT - 1} self.assertRaises( exc.InvalidInput, wsme_json.fromjson, self._type, body) body = {"weight": constants.MIN_WEIGHT} member = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MIN_WEIGHT, member.weight) def test_max_weight(self): body = {"weight": constants.MAX_WEIGHT + 1} self.assertRaises( exc.InvalidInput, wsme_json.fromjson, self._type, body) body = {"weight": constants.MAX_WEIGHT} member = wsme_json.fromjson(self._type, body) self.assertEqual(constants.MAX_WEIGHT, member.weight) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/api/v2/types/test_pools.py0000664000175000017500000001740100000000000023755 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
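# A minimal illustrative sketch (not from the upstream module), assuming
# octavia and wsme are importable: MemberPOST applies the same
# bounded-integer pattern to "weight" (MIN_WEIGHT..MAX_WEIGHT) and, as
# test_member above asserts, defaults it to 1 when the body omits it.
if __name__ == '__main__':
    from wsme.rest import json as _wsme_json
    from octavia.api.v2.types import member as _member_type

    _member = _wsme_json.fromjson(
        _member_type.MemberPOST,
        {"address": "10.0.0.1", "protocol_port": 80})
    assert _member.weight == 1  # default weight when not supplied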
from oslo_utils import uuidutils from wsme import exc from wsme.rest import json as wsme_json from wsme import types as wsme_types from octavia.api.v2.types import pool as pool_type from octavia.common import constants from octavia.tests.unit.api.common import base class TestSessionPersistence(object): _type = None def test_session_persistence(self): body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE} sp = wsme_json.fromjson(self._type, body) self.assertIsNotNone(sp.type) def test_invalid_type(self): body = {"type": "source_ip"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_cookie_name(self): body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, "cookie_name": 10} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestPoolPOST(base.BaseTypesTest): _type = pool_type.PoolPOST def test_pool(self): body = { "loadbalancer_id": uuidutils.generate_uuid(), "listener_id": uuidutils.generate_uuid(), "protocol": constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN, "tags": ['test_tag']} pool = wsme_json.fromjson(self._type, body) self.assertTrue(pool.admin_state_up) def test_load_balancer_mandatory(self): body = {"loadbalancer_id": uuidutils.generate_uuid()} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_protocol_mandatory(self): body = {"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_lb_algorithm_mandatory(self): body = {"protocol": constants.PROTOCOL_HTTP} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_name(self): body = {"name": 10, "loadbalancer_id": uuidutils.generate_uuid(), "protocol": constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_description(self): body = {"description": 10, "loadbalancer_id": uuidutils.generate_uuid(), "protocol": constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_too_long_name(self): body = {"name": "n" * 256, "loadbalancer_id": uuidutils.generate_uuid(), "protocol": constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_too_long_description(self): body = {"description": "d" * 256, "loadbalancer_id": uuidutils.generate_uuid(), "protocol": constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_load_balacer_id(self): body = {"loadbalancer_id": 10, "protocol": constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_protocol(self): body = {"loadbalancer_id": uuidutils.generate_uuid(), "protocol": "http", "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_lb_algorithm(self): body = {"loadbalancer_id": uuidutils.generate_uuid(), "protocol": constants.PROTOCOL_HTTP, "lb_algorithm": "source_ip"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_non_uuid_project_id(self): body = {"loadbalancer_id": uuidutils.generate_uuid(), "protocol": 
constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN, "project_id": "non-uuid"} pool = wsme_json.fromjson(self._type, body) self.assertEqual(pool.project_id, body['project_id']) def test_invalid_tags(self): body = {"protocol": constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN, "tags": "invalid_tag"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"protocol": constants.PROTOCOL_HTTP, "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN, "tags": [1, 2]} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestPoolPUT(base.BaseTypesTest): _type = pool_type.PoolPUT def test_pool(self): body = {"name": "test_name", "tags": ['new_tag']} pool = wsme_json.fromjson(self._type, body) self.assertEqual(wsme_types.Unset, pool.admin_state_up) def test_invalid_name(self): body = {"name": 10} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_too_long_name(self): body = {"name": "n" * 256} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_too_long_description(self): body = {"description": "d" * 256} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_description(self): body = {"description": 10} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_lb_algorithm(self): body = {"lb_algorithm": "source_ip"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) def test_invalid_tags(self): body = {"tags": "invalid_tag"} self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) body = {"tags": [1, 2]} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestSessionPersistencePOST(base.BaseTypesTest, TestSessionPersistence): _type = pool_type.SessionPersistencePOST def test_type_mandatory(self): body = {"cookie_name": "test_name"} self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, body) class TestSessionPersistencePUT(base.BaseTypesTest, TestSessionPersistence): _type = pool_type.SessionPersistencePUT ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/base.py0000664000175000017500000000431300000000000020226 0ustar00zuulzuul00000000000000# Copyright 2014, Doug Wiegley, A10 Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
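# A short illustrative sketch (not part of the archive's source), restating
# the session-persistence contract tested above: "type" is the only
# mandatory field of SessionPersistencePOST, so a bare type deserializes
# while a cookie_name alone is rejected.
#
#     from wsme.rest import json as wsme_json
#     from octavia.api.v2.types import pool as pool_type
#     from octavia.common import constants
#
#     body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE}
#     sp = wsme_json.fromjson(pool_type.SessionPersistencePOST, body)
#     assert sp.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE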
from unittest import mock import fixtures from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging import conffixture as messaging_conffixture import testtools from octavia.common import clients from octavia.common import rpc # needed for tests to function when run independently: from octavia.common import config # noqa: F401 class TestCase(testtools.TestCase): def setUp(self): super(TestCase, self).setUp() config.register_cli_opts() self.addCleanup(mock.patch.stopall) self.addCleanup(self.clean_caches) def clean_caches(self): clients.NovaAuth.nova_client = None clients.NeutronAuth.neutron_client = None class TestRpc(testtools.TestCase): def __init__(self, *args, **kwargs): super(TestRpc, self).__init__(*args, **kwargs) self._buses = {} def _fake_create_transport(self, url): if url not in self._buses: self._buses[url] = messaging.get_rpc_transport( cfg.CONF, url=url) return self._buses[url] def setUp(self): super(TestRpc, self).setUp() self.addCleanup(rpc.cleanup) self.messaging_conf = messaging_conffixture.ConfFixture(cfg.CONF) self.messaging_conf.transport_url = 'fake:/' self.useFixture(self.messaging_conf) self.useFixture(fixtures.MonkeyPatch( 'octavia.common.rpc.create_transport', self._fake_create_transport)) with mock.patch('octavia.common.rpc.get_transport_url') as mock_gtu: mock_gtu.return_value = None rpc.init() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/certificates/0000775000175000017500000000000000000000000021406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/certificates/__init__.py0000664000175000017500000000107400000000000023521 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/certificates/common/0000775000175000017500000000000000000000000022676 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/certificates/common/__init__.py0000664000175000017500000000107400000000000025011 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
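# A short illustrative note (not part of the archive's source): the TestRpc
# base class above swaps octavia.common.rpc.create_transport for a per-URL
# cache over oslo.messaging's in-memory 'fake:/' driver, so RPC-heavy tests
# can subclass it and never touch a real message bus, roughly:
#
#     class MyRpcTest(base.TestRpc):
#         def test_something(self):
#             # octavia.common.rpc now publishes to the cached fake bus
#             ...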
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/certificates/common/auth/0000775000175000017500000000000000000000000023637 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/certificates/common/auth/__init__.py0000664000175000017500000000107400000000000025752 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/certificates/common/auth/test_barbican_acl.py0000664000175000017500000001011500000000000027626 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from barbicanclient.v1 import acls from oslo_config import cfg from oslo_config import fixture as oslo_fixture import octavia.certificates.common.auth.barbican_acl as barbican_acl import octavia.certificates.manager.barbican as barbican_cert_mgr from octavia.common import keystone import octavia.tests.unit.base as base CONF = cfg.CONF class TestBarbicanACLAuth(base.TestCase): def setUp(self): super(TestBarbicanACLAuth, self).setUp() # Reset the client keystone._SESSION = None self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) self.conf.config(group="certificates", region_name='RegionOne') self.conf.config(group="certificates", endpoint_type='publicURL') @mock.patch('keystoneauth1.session.Session', mock.Mock()) def test_get_barbican_client(self): # Mock out the keystone session and get the client acl_auth_object = barbican_acl.BarbicanACLAuth() bc1 = acl_auth_object.get_barbican_client() # Our returned object should have elements that proves it is a real # Barbican client object. We shouldn't use `isinstance` because that's # an evil pattern, instead we can check for very unique things in the # stable client API like "register_consumer", since this should fairly # reliably prove we're dealing with a Barbican client. 
self.assertTrue(hasattr(bc1, 'containers') and hasattr(bc1.containers, 'register_consumer')) # Getting the session again with new class should get the same object acl_auth_object2 = barbican_acl.BarbicanACLAuth() bc2 = acl_auth_object2.get_barbican_client() self.assertIs(bc1, bc2) def test_load_auth_driver(self): bcm = barbican_cert_mgr.BarbicanCertManager() self.assertIsInstance(bcm.auth, barbican_acl.BarbicanACLAuth) @mock.patch('barbicanclient.v1.acls.ACLManager.get') @mock.patch('octavia.common.keystone.KeystoneSession') def test_ensure_secret_access(self, mock_ksession, mock_aclm): acl = mock.MagicMock(spec=acls.SecretACL) mock_aclm.return_value = acl acl_auth_object = barbican_acl.BarbicanACLAuth() acl_auth_object.ensure_secret_access(mock.Mock(), mock.Mock()) acl.submit.assert_called_once() @mock.patch('barbicanclient.v1.acls.ACLManager.get') @mock.patch('octavia.common.keystone.KeystoneSession') def test_revoke_secret_access(self, mock_ksession, mock_aclm): service_user_id = 'uuid1' mock_ksession().get_service_user_id.return_value = service_user_id acl = mock.MagicMock(spec=acls.SecretACL) poacl = mock.MagicMock(spec=acls._PerOperationACL) type(poacl).users = mock.PropertyMock(return_value=[service_user_id]) acl.get.return_value = poacl mock_aclm.return_value = acl acl_auth_object = barbican_acl.BarbicanACLAuth() acl_auth_object.revoke_secret_access(mock.Mock(), mock.Mock()) acl.submit.assert_called_once() @mock.patch('octavia.common.keystone.KeystoneSession') def test_get_barbican_client_user_auth(self, mock_ksession): acl_auth_object = barbican_acl.BarbicanACLAuth() bc = acl_auth_object.get_barbican_client_user_auth(mock.Mock()) self.assertTrue(hasattr(bc, 'containers') and hasattr(bc.containers, 'register_consumer')) self.assertEqual('publicURL', bc.client.interface) self.assertEqual('RegionOne', bc.client.region_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/certificates/common/test_barbican.py0000664000175000017500000000755700000000000026066 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
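# A short illustrative sketch (not part of the archive's source): the ACL
# tests above deliberately duck-type the returned Barbican client instead
# of using isinstance, and check that the client is shared across
# BarbicanACLAuth instances. Under the same keystone-session mock:
#
#     bc = barbican_acl.BarbicanACLAuth().get_barbican_client()
#     assert hasattr(bc, 'containers')
#     assert hasattr(bc.containers, 'register_consumer')
#     assert bc is barbican_acl.BarbicanACLAuth().get_barbican_client()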
from unittest import mock from barbicanclient.v1 import containers from barbicanclient.v1 import secrets import octavia.certificates.common.barbican as barbican_common from octavia.common import utils as octavia_utils import octavia.tests.common.sample_certs as sample import octavia.tests.unit.base as base class TestBarbicanCert(base.TestCase): def _prepare(self): self.certificate_secret = secrets.Secret( api=mock.MagicMock(), payload=self.certificate ) self.intermediates_secret = secrets.Secret( api=mock.MagicMock(), payload=sample.X509_IMDS ) self.private_key_secret = secrets.Secret( api=mock.MagicMock(), payload=self.private_key ) self.private_key_passphrase_secret = secrets.Secret( api=mock.MagicMock(), payload=self.private_key_passphrase ) def test_barbican_cert(self): # Certificate data self.certificate = bytes(sample.X509_CERT) self.intermediates = sample.X509_IMDS_LIST self.private_key = bytes(sample.X509_CERT_KEY_ENCRYPTED) self.private_key_passphrase = sample.X509_CERT_KEY_PASSPHRASE self._prepare() container = containers.CertificateContainer( api=mock.MagicMock(), certificate=self.certificate_secret, intermediates=self.intermediates_secret, private_key=self.private_key_secret, private_key_passphrase=self.private_key_passphrase_secret ) # Create a cert cert = barbican_common.BarbicanCert( cert_container=container ) # Validate the cert functions self.assertEqual(cert.get_certificate(), sample.X509_CERT) self.assertEqual(cert.get_intermediates(), sample.X509_IMDS_LIST) self.assertEqual(cert.get_private_key(), sample.X509_CERT_KEY_ENCRYPTED) self.assertEqual(cert.get_private_key_passphrase(), octavia_utils.b(sample.X509_CERT_KEY_PASSPHRASE)) def test_barbican_cert_text(self): # Certificate data self.certificate = str(sample.X509_CERT) self.intermediates = str(sample.X509_IMDS_LIST) self.private_key = str(sample.X509_CERT_KEY_ENCRYPTED) self.private_key_passphrase = str(sample.X509_CERT_KEY_PASSPHRASE) self._prepare() container = containers.CertificateContainer( api=mock.MagicMock(), certificate=self.certificate_secret, intermediates=self.intermediates_secret, private_key=self.private_key_secret, private_key_passphrase=self.private_key_passphrase_secret ) # Create a cert cert = barbican_common.BarbicanCert( cert_container=container ) # Validate the cert functions self.assertEqual(cert.get_certificate(), octavia_utils.b(str(sample.X509_CERT))) self.assertEqual(cert.get_intermediates(), sample.X509_IMDS_LIST) self.assertEqual(cert.get_private_key(), octavia_utils.b(str( sample.X509_CERT_KEY_ENCRYPTED))) self.assertEqual(cert.get_private_key_passphrase(), octavia_utils.b(sample.X509_CERT_KEY_PASSPHRASE)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/certificates/common/test_local.py0000664000175000017500000000320300000000000025377 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace US, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
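# A short illustrative note (not part of the archive's source): the
# TestBarbicanCert cases above run the same certificate material through
# twice, once as bytes and once as str, because BarbicanCert normalizes
# text payloads with octavia.common.utils.b() before returning them,
# roughly:
#
#     cert = barbican_common.BarbicanCert(cert_container=container)
#     cert.get_certificate()        # bytes payloads pass through
#     octavia_utils.b(str_payload)  # str payloads are encoded to bytes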
import octavia.certificates.common.local as local_cert import octavia.tests.unit.base as base class TestLocalCommon(base.TestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" super(TestLocalCommon, self).setUp() def test_local_cert(self): # Create a cert cert = local_cert.LocalCert( certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4182167 octavia-6.2.2/octavia/tests/unit/certificates/generator/0000775000175000017500000000000000000000000023374 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/certificates/generator/__init__.py0000664000175000017500000000107400000000000025507 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/octavia/tests/unit/certificates/generator/local_csr.py0000664000175000017500000001021100000000000025702 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
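# A minimal illustrative sketch (not from the upstream module), assuming
# octavia is importable: LocalCert, exercised in common/test_local.py
# above, is a plain in-memory holder whose getters echo back exactly what
# was stored.
if __name__ == '__main__':
    import octavia.certificates.common.local as _local_cert

    _cert = _local_cert.LocalCert(
        certificate="My Certificate", intermediates="My Intermediates",
        private_key="My Private Key",
        private_key_passphrase="My Private Key Passphrase")
    assert _cert.get_certificate() == "My Certificate"
    assert _cert.get_private_key_passphrase() == "My Private Key Passphrase"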
octavia-6.2.2/octavia/tests/unit/certificates/generator/local_csr.py

# Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509

import octavia.tests.unit.base as base


class BaseLocalCSRTestCase(base.TestCase):

    def setUp(self):
        self.signing_digest = "sha256"

        # Set up CSR data
        csr_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=backends.default_backend()
        )
        csr = x509.CertificateSigningRequestBuilder().subject_name(
            x509.Name([
                x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, u"test"),
            ])).sign(csr_key, hashes.SHA256(), backends.default_backend())
        self.certificate_signing_request = csr.public_bytes(
            serialization.Encoding.PEM)

        # Set up keys
        self.ca_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=backends.default_backend()
        )

        self.ca_private_key_passphrase = b"Testing"
        self.ca_private_key = self.ca_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.BestAvailableEncryption(
                self.ca_private_key_passphrase),
        )

        super(BaseLocalCSRTestCase, self).setUp()

    def test_generate_csr(self):
        cn = 'testCN'
        # Attempt to generate a CSR
        csr = self.cert_generator._generate_csr(
            cn=cn,
            private_key=self.ca_private_key,
            passphrase=self.ca_private_key_passphrase
        )
        # Attempt to load the generated CSR
        csro = x509.load_pem_x509_csr(data=csr,
                                      backend=backends.default_backend())
        # Make sure the CN is correct
        self.assertEqual(cn, csro.subject.get_attributes_for_oid(
            x509.oid.NameOID.COMMON_NAME)[0].value)

    def test_generate_private_key(self):
        bit_length = 1024
        # Attempt to generate a private key
        pk = self.cert_generator._generate_private_key(
            bit_length=bit_length
        )
        # Attempt to load the generated private key
        pko = serialization.load_pem_private_key(
            data=pk, password=None, backend=backends.default_backend())
        # Make sure the bit_length is what we set
        self.assertEqual(pko.key_size, bit_length)

    def test_generate_private_key_with_passphrase(self):
        bit_length = 2048
        # Attempt to generate a private key
        pk = self.cert_generator._generate_private_key(
            bit_length=bit_length,
            passphrase=self.ca_private_key_passphrase
        )
        # Attempt to load the generated private key
        pko = serialization.load_pem_private_key(
            data=pk, password=self.ca_private_key_passphrase,
            backend=backends.default_backend())
        # Make sure the bit_length is what we set
        self.assertEqual(pko.key_size, bit_length)

    def test_generate_cert_key_pair_mock(self):
        cn = 'testCN'
        with mock.patch.object(self.cert_generator, 'sign_cert') as m:
            # Attempt to generate a cert/key pair
            self.cert_generator.generate_cert_key_pair(
                cn=cn,
                validity=2 * 365 * 24 * 60 * 60,
            )
            self.assertTrue(m.called)

octavia-6.2.2/octavia/tests/unit/certificates/generator/test_local.py

# Copyright 2014 Rackspace US, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

from cryptography import exceptions as crypto_exceptions
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509

import octavia.certificates.generator.local as local_cert_gen
from octavia.tests.unit.certificates.generator import local_csr


class TestLocalGenerator(local_csr.BaseLocalCSRTestCase):

    def setUp(self):
        super(TestLocalGenerator, self).setUp()
        self.signing_digest = "sha256"

        # Setup CA data
        ca_cert = x509.CertificateBuilder()
        valid_from_datetime = datetime.datetime.utcnow()
        valid_until_datetime = (datetime.datetime.utcnow() +
                                datetime.timedelta(
                                    seconds=2 * 365 * 24 * 60 * 60))
        ca_cert = ca_cert.not_valid_before(valid_from_datetime)
        ca_cert = ca_cert.not_valid_after(valid_until_datetime)
        ca_cert = ca_cert.serial_number(1)
        subject_name = x509.Name([
            x509.NameAttribute(x509.oid.NameOID.COUNTRY_NAME, u"US"),
            x509.NameAttribute(x509.oid.NameOID.STATE_OR_PROVINCE_NAME,
                               u"Oregon"),
            x509.NameAttribute(x509.oid.NameOID.LOCALITY_NAME,
                               u"Springfield"),
            x509.NameAttribute(x509.oid.NameOID.ORGANIZATION_NAME,
                               u"Springfield Nuclear Power Plant"),
            x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, u"maggie1"),
        ])
        ca_cert = ca_cert.subject_name(subject_name)
        ca_cert = ca_cert.issuer_name(subject_name)
        ca_cert = ca_cert.public_key(self.ca_key.public_key())
        signed_cert = ca_cert.sign(private_key=self.ca_key,
                                   algorithm=hashes.SHA256(),
                                   backend=backends.default_backend())
        self.ca_certificate = signed_cert.public_bytes(
            encoding=serialization.Encoding.PEM)

        self.cert_generator = local_cert_gen.LocalCertGenerator

    def test_sign_cert(self):
        # Attempt to sign a cert
        signed_cert = self.cert_generator.sign_cert(
            csr=self.certificate_signing_request,
            validity=2 * 365 * 24 * 60 * 60,
            ca_cert=self.ca_certificate,
            ca_key=self.ca_private_key,
            ca_key_pass=self.ca_private_key_passphrase,
            ca_digest=self.signing_digest
        )

        self.assertIn("-----BEGIN CERTIFICATE-----",
                      signed_cert.decode('ascii'))

        # Load the cert for specific tests
        cert = x509.load_pem_x509_certificate(
            data=signed_cert, backend=backends.default_backend())

        # Make sure expiry time is accurate
        should_expire = (datetime.datetime.utcnow() +
                         datetime.timedelta(seconds=2 * 365 * 24 * 60 * 60))
        diff = should_expire - cert.not_valid_after
        self.assertLess(diff, datetime.timedelta(seconds=10))

        # Make sure this is a version 3 X509.
        self.assertEqual('v3', cert.version.name)

        # Make sure this cert is marked as Server and Client Cert via the
        # extended Key Usage extension
        self.assertIn(x509.oid.ExtendedKeyUsageOID.SERVER_AUTH,
                      cert.extensions.get_extension_for_class(
                          x509.ExtendedKeyUsage).value._usages)
        self.assertIn(x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH,
                      cert.extensions.get_extension_for_class(
                          x509.ExtendedKeyUsage).value._usages)

        # Make sure this cert can't sign other certs
        self.assertFalse(cert.extensions.get_extension_for_class(
            x509.BasicConstraints).value.ca)

    def test_sign_cert_passphrase_none(self):
        # Attempt to sign a cert
        ca_private_key = self.ca_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()
        )
        signed_cert = self.cert_generator.sign_cert(
            csr=self.certificate_signing_request,
            validity=2 * 365 * 24 * 60 * 60,
            ca_cert=self.ca_certificate,
            ca_key=ca_private_key,
            ca_key_pass=None,
            ca_digest=self.signing_digest
        )

        self.assertIn("-----BEGIN CERTIFICATE-----",
                      signed_cert.decode('ascii'))

        # Load the cert for specific tests
        cert = x509.load_pem_x509_certificate(
            data=signed_cert, backend=backends.default_backend())

        # Make sure expiry time is accurate
        should_expire = (datetime.datetime.utcnow() +
                         datetime.timedelta(seconds=2 * 365 * 24 * 60 * 60))
        diff = should_expire - cert.not_valid_after
        self.assertLess(diff, datetime.timedelta(seconds=10))

        # Make sure this is a version 3 X509.
        self.assertEqual('v3', cert.version.name)

        # Make sure this cert is marked as Server and Client Cert via the
        # extended Key Usage extension
        self.assertIn(x509.oid.ExtendedKeyUsageOID.SERVER_AUTH,
                      cert.extensions.get_extension_for_class(
                          x509.ExtendedKeyUsage).value._usages)
        self.assertIn(x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH,
                      cert.extensions.get_extension_for_class(
                          x509.ExtendedKeyUsage).value._usages)

        # Make sure this cert can't sign other certs
        self.assertFalse(cert.extensions.get_extension_for_class(
            x509.BasicConstraints).value.ca)

    def test_sign_cert_invalid_algorithm(self):
        self.assertRaises(
            crypto_exceptions.UnsupportedAlgorithm,
            self.cert_generator.sign_cert,
            csr=self.certificate_signing_request,
            validity=2 * 365 * 24 * 60 * 60,
            ca_cert=self.ca_certificate,
            ca_key=self.ca_private_key,
            ca_key_pass=self.ca_private_key_passphrase,
            ca_digest='not_an_algorithm'
        )

    def test_generate_cert_key_pair(self):
        cn = 'testCN'
        bit_length = 512
        # Attempt to generate a cert/key pair
        cert_object = self.cert_generator.generate_cert_key_pair(
            cn=cn,
            validity=2 * 365 * 24 * 60 * 60,
            bit_length=bit_length,
            passphrase=self.ca_private_key_passphrase,
            ca_cert=self.ca_certificate,
            ca_key=self.ca_private_key,
            ca_key_pass=self.ca_private_key_passphrase
        )

        # Validate that the cert and key are loadable
        cert = x509.load_pem_x509_certificate(
            data=cert_object.certificate,
            backend=backends.default_backend())
        self.assertIsNotNone(cert)

        key = serialization.load_pem_private_key(
            data=cert_object.private_key,
            password=cert_object.private_key_passphrase,
            backend=backends.default_backend())
        self.assertIsNotNone(key)
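# Standalone sketch of the x509.CertificateBuilder pattern the CA fixture
# above uses, assuming only that the 'cryptography' package is installed;
# names here are illustrative, not part of the Octavia tree:
import datetime

from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509

_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                backend=backends.default_backend())
_name = x509.Name([x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, u'demo')])
_now = datetime.datetime.utcnow()
_cert = (x509.CertificateBuilder()
         .subject_name(_name)
         .issuer_name(_name)          # issuer == subject -> self-signed
         .public_key(_key.public_key())
         .serial_number(x509.random_serial_number())
         .not_valid_before(_now)
         .not_valid_after(_now + datetime.timedelta(days=730))
         .sign(private_key=_key, algorithm=hashes.SHA256(),
               backend=backends.default_backend()))
assert _cert.subject == _cert.issuer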
octavia-6.2.2/octavia/tests/unit/certificates/manager/__init__.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

octavia-6.2.2/octavia/tests/unit/certificates/manager/test_barbican.py

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock
import uuid

from barbicanclient.v1 import secrets
from OpenSSL import crypto

import octavia.certificates.common.barbican as barbican_common
import octavia.certificates.common.cert as cert
import octavia.certificates.manager.barbican as barbican_cert_mgr
from octavia.common import exceptions
import octavia.tests.common.sample_certs as sample
import octavia.tests.unit.base as base

PROJECT_ID = "12345"


class TestBarbicanManager(base.TestCase):

    def setUp(self):
        # Make a fake Secret and contents
        self.barbican_endpoint = 'http://localhost:9311/v1'
        self.secret_uuid = uuid.uuid4()

        self.secret_ref = '{0}/secrets/{1}'.format(
            self.barbican_endpoint, self.secret_uuid
        )

        self.name = 'My Fancy Cert'
        self.secret_pkcs12 = secrets.Secret(
            api=mock.MagicMock(),
            payload=sample.PKCS12_BUNDLE
        )

        self.fake_secret = 'Fake secret'
        self.secret = secrets.Secret(api=mock.MagicMock(),
                                     payload=self.fake_secret)
        self.empty_secret = mock.Mock(spec=secrets.Secret)

        # Mock out the client
        self.bc = mock.Mock()
        barbican_auth = mock.Mock(spec=barbican_common.BarbicanAuth)
        barbican_auth.get_barbican_client.return_value = self.bc

        self.cert_manager = barbican_cert_mgr.BarbicanCertManager()
        self.cert_manager.auth = barbican_auth

        self.context = mock.Mock()
        self.context.project_id = PROJECT_ID

        super(TestBarbicanManager, self).setUp()

    def test_store_cert(self):
        # Mock out the client
        self.bc.secrets.create.return_value = (
            self.empty_secret)

        # Attempt to store a cert
        secret_ref = self.cert_manager.store_cert(
            context=self.context,
            certificate=sample.X509_CERT,
            private_key=sample.X509_CERT_KEY,
            intermediates=sample.X509_IMDS,
            name=self.name
        )

        self.assertEqual(secret_ref, self.empty_secret.secret_ref)

        # create_secret should be called once with our data
        calls = [
            mock.call(payload=mock.ANY, expiration=None,
                      name=self.name)
        ]
        self.bc.secrets.create.assert_has_calls(calls)

        # Container should be stored once
        self.empty_secret.store.assert_called_once_with()

    def test_store_cert_failure(self):
        # Mock out the client
        self.bc.secrets.create.return_value = (
            self.empty_secret)
        self.empty_secret.store.side_effect = ValueError()

        # Attempt to store a cert
        self.assertRaises(
            ValueError,
            self.cert_manager.store_cert,
            context=self.context,
            certificate=sample.X509_CERT,
            private_key=sample.X509_CERT_KEY,
            intermediates=sample.X509_IMDS,
            name=self.name
        )

        # create_certificate should be called once
        self.assertEqual(1, self.bc.secrets.create.call_count)

        # Container should be stored once
        self.empty_secret.store.assert_called_once_with()

    def test_get_cert(self):
        # Mock out the client
        self.bc.secrets.get.return_value = self.secret_pkcs12

        # Get the secret data
        data = self.cert_manager.get_cert(
            context=self.context,
            cert_ref=self.secret_ref,
            resource_ref=self.secret_ref,
            service_name='Octavia'
        )

        # 'get_secret' should be called once with the secret_ref
        self.bc.secrets.get.assert_called_once_with(
            secret_ref=self.secret_ref
        )

        # The returned data should be a Cert object with the correct values
        self.assertIsInstance(data, cert.Cert)
        self.assertEqual(sample.X509_CERT_KEY, data.get_private_key())
        self.assertEqual(sample.X509_CERT, data.get_certificate())
        self.assertEqual(sorted(sample.X509_IMDS_LIST),
                         sorted(data.get_intermediates()))
        self.assertIsNone(data.get_private_key_passphrase())

    @mock.patch('OpenSSL.crypto.load_pkcs12')
    def test_get_cert_bad_pkcs12(self, mock_load_pkcs12):
        mock_load_pkcs12.side_effect = [crypto.Error]

        # Mock out the client
        self.bc.secrets.get.return_value = self.secret_pkcs12

        # Test bad pkcs12 bundle re-raises UnreadablePKCS12
        self.assertRaises(exceptions.UnreadablePKCS12,
                          self.cert_manager.get_cert,
                          context=self.context,
                          cert_ref=self.secret_ref,
                          resource_ref=self.secret_ref,
                          service_name='Octavia')

    def test_delete_cert_legacy(self):
        # Attempt to deregister as a consumer
        self.cert_manager.delete_cert(
            context=self.context,
            cert_ref=self.secret_ref,
            resource_ref=self.secret_ref,
            service_name='Octavia'
        )

        # remove_consumer should be called once with the container_ref
        # (legacy)
        self.bc.containers.remove_consumer.assert_called_once_with(
            container_ref=self.secret_ref,
            url=self.secret_ref,
            name='Octavia'
        )

    def test_set_acls(self):
        # if used pkcs12 certificate containers.get raises exception
        self.bc.containers.get.side_effect = Exception("container not found")

        self.cert_manager.set_acls(
            context=self.context,
            cert_ref=self.secret_ref
        )

        # our mock_bc should have one call to ensure_secret_access
        self.cert_manager.auth.ensure_secret_access.assert_called_once_with(
            self.context, self.secret_ref
        )

    def test_unset_acls(self):
        # if used pkcs12 certificate containers.get raises exception
        self.bc.containers.get.side_effect = Exception("container not found")

        self.cert_manager.unset_acls(
            context=self.context,
            cert_ref=self.secret_ref
        )

        # our mock_bc should have one call to revoke_secret_access
        self.cert_manager.auth.revoke_secret_access.assert_called_once_with(
            self.context, self.secret_ref
        )

    def test_get_secret(self):
        # Mock out the client
        self.bc.secrets.get.side_effect = [self.secret, Exception]

        # Get the secret data
        data = self.cert_manager.get_secret(
            context=self.context,
            secret_ref=self.secret_ref,
        )

        # 'get_secret' should be called once with the secret_ref
        self.bc.secrets.get.assert_called_once_with(
            secret_ref=self.secret_ref
        )

        self.assertEqual(self.fake_secret, data)

        # Test with a failure
        self.assertRaises(exceptions.CertificateRetrievalException,
                          self.cert_manager.get_secret,
                          context=self.context,
                          secret_ref=self.secret_ref)
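# The client-mocking pattern used throughout the manager tests above, shown
# in isolation with only the standard library; 'client' and 'payload' are
# illustrative names, not Octavia or barbicanclient API:
from unittest import mock

_client = mock.Mock()
_client.secrets.get.return_value = 'payload'

assert _client.secrets.get(secret_ref='ref') == 'payload'
_client.secrets.get.assert_called_once_with(secret_ref='ref')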
octavia-6.2.2/octavia/tests/unit/certificates/manager/test_barbican_legacy.py

# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from barbicanclient.v1 import containers
from barbicanclient.v1 import secrets
from oslo_utils import uuidutils

import octavia.certificates.common.barbican as barbican_common
import octavia.certificates.common.cert as cert
import octavia.certificates.manager.barbican_legacy as barbican_cert_mgr
from octavia.common import utils as octavia_utils
import octavia.tests.common.sample_certs as sample
import octavia.tests.unit.base as base

PROJECT_ID = "12345"


class TestBarbicanManager(base.TestCase):

    def setUp(self):
        # Make a fake Container and contents
        self.barbican_endpoint = 'http://localhost:9311/v1'
        self.container_uuid = uuidutils.generate_uuid()
        self.certificate_uuid = uuidutils.generate_uuid()
        self.intermediates_uuid = uuidutils.generate_uuid()
        self.private_key_uuid = uuidutils.generate_uuid()
        self.private_key_passphrase_uuid = uuidutils.generate_uuid()

        self.container_ref = '{0}/containers/{1}'.format(
            self.barbican_endpoint, self.container_uuid
        )

        self.barbican_api = mock.MagicMock()

        self.name = 'My Fancy Cert'
        self.certificate = secrets.Secret(
            api=self.barbican_api,
            payload=sample.X509_CERT,
            secret_ref=self.certificate_uuid
        )
        self.intermediates = secrets.Secret(
            api=self.barbican_api,
            payload=sample.X509_IMDS,
            secret_ref=self.intermediates_uuid
        )
        self.private_key = secrets.Secret(
            api=self.barbican_api,
            payload=sample.X509_CERT_KEY_ENCRYPTED,
            secret_ref=self.private_key_uuid
        )
        self.private_key_passphrase = secrets.Secret(
            api=self.barbican_api,
            payload=sample.X509_CERT_KEY_PASSPHRASE,
            secret_ref=self.private_key_passphrase_uuid
        )

        container = mock.Mock(spec=containers.CertificateContainer)
        container.container_ref = self.container_ref
        container.name = self.name
        container.private_key = self.private_key
        container.certificate = self.certificate
        container.intermediates = self.intermediates
        container.private_key_passphrase = self.private_key_passphrase
        self.container = container

        self.empty_container = mock.Mock(spec=containers.CertificateContainer)

        self.secret1 = mock.Mock(spec=secrets.Secret)
        self.secret2 = mock.Mock(spec=secrets.Secret)
        self.secret3 = mock.Mock(spec=secrets.Secret)
        self.secret4 = mock.Mock(spec=secrets.Secret)

        # Mock out the client
        self.bc = mock.Mock()
        self.bc.containers.get.return_value = self.container
        barbican_auth = mock.Mock(spec=barbican_common.BarbicanAuth)
        barbican_auth.get_barbican_client.return_value = self.bc

        self.cert_manager = barbican_cert_mgr.BarbicanCertManager()
        self.cert_manager.auth = barbican_auth

        self.context = mock.Mock()
        self.context.project_id = PROJECT_ID

        super(TestBarbicanManager, self).setUp()

    def test_store_cert(self):
        # Mock out the client
        self.bc.containers.create_certificate.return_value = (
            self.empty_container)

        # Attempt to store a cert
        container_ref = self.cert_manager.store_cert(
            context=self.context,
            certificate=self.certificate,
            private_key=self.private_key,
            intermediates=self.intermediates,
            private_key_passphrase=self.private_key_passphrase,
            name=self.name
        )

        self.assertEqual(self.empty_container.container_ref, container_ref)

        # create_secret should be called four times with our data
        calls = [
            mock.call(payload=self.certificate, expiration=None,
                      name=mock.ANY),
            mock.call(payload=self.private_key, expiration=None,
                      name=mock.ANY),
            mock.call(payload=self.intermediates, expiration=None,
                      name=mock.ANY),
            mock.call(payload=self.private_key_passphrase, expiration=None,
                      name=mock.ANY)
        ]
        self.bc.secrets.create.assert_has_calls(calls, any_order=True)

        # create_certificate should be called once
        self.assertEqual(1, self.bc.containers.create_certificate.call_count)

        # Container should be stored once
        self.empty_container.store.assert_called_once_with()

    def test_store_cert_failure(self):
        # Mock out the client
        self.bc.containers.create_certificate.return_value = (
            self.empty_container)
        test_secrets = [
            self.secret1, self.secret2, self.secret3, self.secret4
        ]
        self.bc.secrets.create.side_effect = test_secrets
        self.empty_container.store.side_effect = ValueError()

        # Attempt to store a cert
        self.assertRaises(
            ValueError,
            self.cert_manager.store_cert,
            context=self.context,
            certificate=self.certificate,
            private_key=self.private_key,
            intermediates=self.intermediates,
            private_key_passphrase=self.private_key_passphrase,
            name=self.name
        )

        # create_secret should be called four times with our data
        calls = [
            mock.call(payload=self.certificate, expiration=None,
                      name=mock.ANY),
            mock.call(payload=self.private_key, expiration=None,
                      name=mock.ANY),
            mock.call(payload=self.intermediates, expiration=None,
                      name=mock.ANY),
            mock.call(payload=self.private_key_passphrase, expiration=None,
                      name=mock.ANY)
        ]
        self.bc.secrets.create.assert_has_calls(calls, any_order=True)

        # create_certificate should be called once
        self.assertEqual(1, self.bc.containers.create_certificate.call_count)

        # Container should be stored once
        self.empty_container.store.assert_called_once_with()

        # All secrets should be deleted (or at least an attempt made)
        for s in test_secrets:
            s.delete.assert_called_once_with()

    def test_get_cert(self):
        # Mock out the client
        self.bc.containers.register_consumer.return_value = self.container

        # Get the container data
        data = self.cert_manager.get_cert(
            context=self.context,
            cert_ref=self.container_ref,
            resource_ref=self.container_ref,
            service_name='Octavia'
        )

        # 'register_consumer' should be called once with the container_ref
        self.bc.containers.register_consumer.assert_called_once_with(
            container_ref=self.container_ref,
            url=self.container_ref,
            name='Octavia'
        )

        # The returned data should be a Cert object with the correct values
        self.assertIsInstance(data, cert.Cert)
        self.assertEqual(data.get_private_key(),
                         self.private_key.payload)
        self.assertEqual(data.get_certificate(),
                         self.certificate.payload)
        self.assertEqual(data.get_intermediates(),
                         sample.X509_IMDS_LIST)
        self.assertEqual(data.get_private_key_passphrase(),
                         octavia_utils.b(self.private_key_passphrase.payload))

    def test_get_cert_no_registration(self):
        self.bc.containers.get.return_value = self.container

        # Get the container data
        data = self.cert_manager.get_cert(
            context=self.context,
            cert_ref=self.container_ref, check_only=True
        )

        # 'get' should be called once with the container_ref
        self.bc.containers.get.assert_called_once_with(
            container_ref=self.container_ref
        )

        # The returned data should be a Cert object with the correct values
        self.assertIsInstance(data, cert.Cert)
        self.assertEqual(data.get_private_key(),
                         self.private_key.payload)
        self.assertEqual(data.get_certificate(),
                         self.certificate.payload)
        self.assertEqual(data.get_intermediates(),
                         sample.X509_IMDS_LIST)
        self.assertEqual(data.get_private_key_passphrase(),
                         octavia_utils.b(self.private_key_passphrase.payload))

    def test_get_cert_no_registration_raise_on_secret_access_failure(self):
        self.bc.containers.get.return_value = self.container
        type(self.certificate).payload = mock.PropertyMock(
            side_effect=ValueError)

        # Get the container data
        self.assertRaises(
            ValueError, self.cert_manager.get_cert,
            context=self.context,
            cert_ref=self.container_ref, check_only=True
        )

        # 'get' should be called once with the container_ref
        self.bc.containers.get.assert_called_once_with(
            container_ref=self.container_ref
        )

    def test_delete_cert(self):
        # Attempt to deregister as a consumer
        self.cert_manager.delete_cert(
            context=self.context,
            cert_ref=self.container_ref,
            resource_ref=self.container_ref,
            service_name='Octavia'
        )

        # remove_consumer should be called once with the container_ref
        self.bc.containers.remove_consumer.assert_called_once_with(
            container_ref=self.container_ref,
            url=self.container_ref,
            name='Octavia'
        )

    def test_set_acls(self):
        self.cert_manager.set_acls(
            context=self.context,
            cert_ref=self.container_ref
        )

        # our mock_bc should have one call to ensure_secret_access for each
        # of our secrets, and the container
        self.cert_manager.auth.ensure_secret_access.assert_has_calls([
            mock.call(self.context, self.certificate_uuid),
            mock.call(self.context, self.intermediates_uuid),
            mock.call(self.context, self.private_key_uuid),
            mock.call(self.context, self.private_key_passphrase_uuid)
        ], any_order=True)

    def test_unset_acls(self):
        self.cert_manager.unset_acls(
            context=self.context,
            cert_ref=self.container_ref
        )

        # our mock_bc should have one call to revoke_secret_access for each
        # of our secrets, and the container
        self.cert_manager.auth.revoke_secret_access.assert_has_calls([
            mock.call(self.context, self.certificate_uuid),
            mock.call(self.context, self.intermediates_uuid),
            mock.call(self.context, self.private_key_uuid),
            mock.call(self.context, self.private_key_passphrase_uuid)
        ], any_order=True)

    def test_get_secret(self):
        self.assertIsNone(self.cert_manager.get_secret('fake context',
                                                       'fake secret ref'))
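# The PropertyMock failure-injection technique from
# test_get_cert_no_registration_raise_on_secret_access_failure above, in a
# minimal standard-library-only form ('_Secret' is an illustrative class):
from unittest import mock

class _Secret(object):
    payload = 'data'

_s = _Secret()
# Installing a PropertyMock on the class makes attribute access raise.
type(_s).payload = mock.PropertyMock(side_effect=ValueError)
try:
    _s.payload
    raise AssertionError('expected ValueError')
except ValueError:
    pass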
octavia-6.2.2/octavia/tests/unit/certificates/manager/test_castellan_mgr.py

# Copyright 2019 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from octavia.certificates.manager import castellan_mgr
from octavia.common import exceptions
import octavia.tests.unit.base as base


class TestCastellanCertManager(base.TestCase):

    def setUp(self):
        self.fake_secret = 'Fake secret'
        self.manager = mock.MagicMock()
        self.certbag = mock.MagicMock()
        self.manager.get.return_value = self.certbag

        super(TestCastellanCertManager, self).setUp()

    @mock.patch('castellan.key_manager.API')
    def test_get_secret(self, mock_api):
        mock_api.return_value = self.manager
        castellan_mgr_obj = castellan_mgr.CastellanCertManager()
        self.certbag.get_encoded.side_effect = [
            self.fake_secret, Exception('boom')]

        result = castellan_mgr_obj.get_secret('context', 'secret_ref')

        self.assertEqual(self.fake_secret, result)
        self.manager.get.assert_called_once_with('context', 'secret_ref')
        self.certbag.get_encoded.assert_called_once()

        self.assertRaises(exceptions.CertificateRetrievalException,
                          castellan_mgr_obj.get_secret, 'context',
                          'secret_ref')
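# The side_effect sequencing exercised above (first call succeeds, second
# raises), isolated with only the standard library:
from unittest import mock

_m = mock.MagicMock(side_effect=['first', Exception('boom')])
assert _m() == 'first'
try:
    _m()
    raise AssertionError('expected Exception')
except Exception:
    pass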
octavia-6.2.2/octavia/tests/unit/certificates/manager/test_local.py

# Copyright 2014 Rackspace US, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import stat
from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

import octavia.certificates.common.cert as cert
import octavia.certificates.manager.local as local_cert_mgr
from octavia.common import exceptions
from octavia.tests.common import sample_certs
import octavia.tests.unit.base as base


class TestLocalManager(base.TestCase):

    def setUp(self):
        self.certificate = sample_certs.X509_CERT.decode('utf-8')
        self.intermediates = sample_certs.X509_IMDS.decode('utf-8')
        self.private_key = sample_certs.X509_CERT_KEY.decode('utf-8')
        self.private_key_passphrase = "My Private Key Passphrase"

        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="certificates", storage_path="/tmp/")

        super(TestLocalManager, self).setUp()

    def _store_cert(self):
        fd_mock = mock.mock_open()
        open_mock = mock.Mock()
        # Attempt to store the cert
        with mock.patch('os.open', open_mock), mock.patch.object(
                os, 'fdopen', fd_mock):
            cert_id = local_cert_mgr.LocalCertManager.store_cert(
                context=None,
                certificate=self.certificate,
                intermediates=self.intermediates,
                private_key=self.private_key,
                private_key_passphrase=self.private_key_passphrase
            )

        # Check that something came back
        self.assertIsNotNone(cert_id)

        # Verify the correct files were opened
        flags = os.O_WRONLY | os.O_CREAT
        mode = stat.S_IRUSR | stat.S_IWUSR  # mode 0600
        open_mock.assert_has_calls([
            mock.call(
                os.path.join('/tmp/{0}.crt'.format(cert_id)), flags, mode),
            mock.call(
                os.path.join('/tmp/{0}.key'.format(cert_id)), flags, mode),
            mock.call(
                os.path.join('/tmp/{0}.int'.format(cert_id)), flags, mode),
            mock.call(
                os.path.join('/tmp/{0}.pass'.format(cert_id)), flags, mode)
        ], any_order=True)

        # Verify the writes were made
        fd_mock().write.assert_has_calls([
            mock.call(self.certificate),
            mock.call(self.intermediates),
            mock.call(self.private_key),
            mock.call(self.private_key_passphrase)
        ], any_order=True)

        return cert_id

    def _get_cert(self, cert_id):
        fd_mock = mock.mock_open()
        fd_mock.side_effect = [
            mock.mock_open(read_data=self.certificate).return_value,
            mock.mock_open(read_data=self.private_key).return_value,
            mock.mock_open(read_data=self.intermediates).return_value,
            mock.mock_open(read_data=self.private_key_passphrase).return_value
        ]
        open_mock = mock.Mock()
        # Attempt to retrieve the cert
        with mock.patch('os.open', open_mock), mock.patch.object(
                os, 'fdopen', fd_mock):
            data = local_cert_mgr.LocalCertManager.get_cert(None, cert_id)

        # Verify the correct files were opened
        flags = os.O_RDONLY
        open_mock.assert_has_calls([
            mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id)), flags),
            mock.call(os.path.join('/tmp/{0}.key'.format(cert_id)), flags),
            mock.call(os.path.join('/tmp/{0}.int'.format(cert_id)), flags),
            mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)), flags)
        ], any_order=True)

        # The returned data should be a Cert object
        self.assertIsInstance(data, cert.Cert)

        return data

    def _delete_cert(self, cert_id):
        remove_mock = mock.Mock()
        # Delete the cert
        with mock.patch('os.remove', remove_mock):
            local_cert_mgr.LocalCertManager.delete_cert(None, cert_id)

        # Verify the correct files were removed
        remove_mock.assert_has_calls([
            mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id))),
            mock.call(os.path.join('/tmp/{0}.key'.format(cert_id))),
            mock.call(os.path.join('/tmp/{0}.int'.format(cert_id))),
            mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)))
        ], any_order=True)

    def test_store_cert(self):
        self._store_cert()

    def test_get_cert(self):
        # Get the cert
        self._get_cert("cert1")

    def test_delete_cert(self):
        # Store a cert
        cert_id = self._store_cert()

        # Verify the cert exists
        self._get_cert(cert_id)

        # Delete the cert
        self._delete_cert(cert_id)

    def test_get_secret(self):
        fd_mock = mock.mock_open()
        open_mock = mock.Mock()
        secret_id = uuidutils.generate_uuid()
        # Attempt to retrieve the secret
        with mock.patch('os.open', open_mock), mock.patch.object(
                os, 'fdopen', fd_mock):
            local_cert_mgr.LocalCertManager.get_secret(None, secret_id)

        # Verify the correct files were opened
        flags = os.O_RDONLY
        open_mock.assert_called_once_with('/tmp/{0}.crt'.format(secret_id),
                                          flags)

        # Test failure path
        with mock.patch('os.open', open_mock), mock.patch.object(
                os, 'fdopen', fd_mock) as mock_open:
            mock_open.side_effect = IOError
            self.assertRaises(exceptions.CertificateRetrievalException,
                              local_cert_mgr.LocalCertManager.get_secret,
                              None, secret_id)
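# TestLocalManager above patches os.open/os.fdopen; the same mock_open
# technique in its simplest form, patching the open() builtin instead
# (standard library only; 'save' and the path are illustrative):
from unittest import mock

def _save(path, data):
    with open(path, 'w') as f:
        f.write(data)

_m = mock.mock_open()
with mock.patch('builtins.open', _m):
    _save('/tmp/example.crt', 'PEM DATA')

_m.assert_called_once_with('/tmp/example.crt', 'w')
_m().write.assert_called_once_with('PEM DATA')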
octavia-6.2.2/octavia/tests/unit/cmd/__init__.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

octavia-6.2.2/octavia/tests/unit/cmd/test_agent.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ssl
from unittest import mock

from octavia.cmd import agent
from octavia.tests.unit import base


class TestAmphoraAgentCMD(base.TestCase):

    def setUp(self):
        super(TestAmphoraAgentCMD, self).setUp()

    @mock.patch('octavia.cmd.agent.AmphoraAgent')
    @mock.patch('octavia.amphorae.backends.agent.api_server.server.Server')
    @mock.patch('multiprocessing.Process')
    @mock.patch('octavia.common.service.prepare_service')
    def test_main(self, mock_service, mock_process, mock_server, mock_amp):
        mock_health_proc = mock.MagicMock()
        mock_server_instance = mock.MagicMock()
        mock_amp_instance = mock.MagicMock()

        mock_process.return_value = mock_health_proc
        mock_server.return_value = mock_server_instance
        mock_amp.return_value = mock_amp_instance

        agent.main()

        # Ensure gunicorn is initialized with the correct cert_reqs option.
        # This option is what enforces use of a valid client certificate.
        self.assertEqual(
            ssl.CERT_REQUIRED, mock_amp.call_args[0][1]['cert_reqs'])

        mock_health_proc.start.assert_called_once_with()
        mock_amp_instance.run.assert_called_once()

octavia-6.2.2/octavia/tests/unit/cmd/test_driver_agent.py

# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import signal
from unittest import mock

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture

import octavia.api.drivers.driver_agent.driver_listener
from octavia.cmd import driver_agent
from octavia.tests.unit import base

CONF = cfg.CONF


class TestDriverAgentCMD(base.TestCase):

    def setUp(self):
        super(TestDriverAgentCMD, self).setUp()
        self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF))

    @mock.patch('os.kill')
    @mock.patch('octavia.cmd.driver_agent.CONF')
    def test_handle_mutate_config(self, mock_conf, mock_os_kill):
        driver_agent._handle_mutate_config(1, 2)
        mock_conf.mutate_config_files.assert_called_once()
        os_calls = [mock.call(1, signal.SIGHUP), mock.call(2, signal.SIGHUP)]
        mock_os_kill.assert_has_calls(os_calls, any_order=True)

    def test_check_if_provider_agent_enabled(self):
        mock_extension = mock.MagicMock()
        self.CONF.config(group="driver_agent",
                         enabled_provider_agents=[
                             'spiffy_agent', 'super_agent'])
        mock_extension.name = 'super_agent'
        self.assertTrue(
            driver_agent._check_if_provider_agent_enabled(mock_extension))
        mock_extension.name = 'bogus_agent'
        self.assertFalse(
            driver_agent._check_if_provider_agent_enabled(mock_extension))

    @mock.patch('setproctitle.setproctitle')
    @mock.patch('signal.signal')
    def test_process_wrapper(self, mock_signal, mock_setproctitle):
        mock_exit_event = mock.MagicMock()
        mock_function = mock.MagicMock()
        mock_function.side_effect = [
            mock.DEFAULT, Exception('boom'), mock.DEFAULT, Exception('boom'),
            mock.DEFAULT]
        mock_exit_event.is_set.side_effect = [False, False, True,
                                              False, False, True]
        signal_calls = [mock.call(signal.SIGINT, signal.SIG_IGN),
                        mock.call(signal.SIGHUP, driver_agent._mutate_config)]

        # With agent_name
        driver_agent._process_wrapper(
            mock_exit_event, 'test_proc_name', mock_function,
            agent_name='test_agent_name')
        mock_signal.assert_has_calls(signal_calls)
        mock_setproctitle.assert_called_once_with(
            'octavia-driver-agent - test_proc_name -- test_agent_name')
        mock_function.assert_called_once_with(mock_exit_event)

        # With agent_name - With function exception
        mock_signal.reset_mock()
        mock_setproctitle.reset_mock()
        mock_function.reset_mock()
        driver_agent._process_wrapper(
            mock_exit_event, 'test_proc_name', mock_function,
            agent_name='test_agent_name')
        mock_signal.assert_has_calls(signal_calls)
        mock_setproctitle.assert_called_once_with(
            'octavia-driver-agent - test_proc_name -- test_agent_name')
        mock_function.assert_called_once_with(mock_exit_event)

        # Without agent_name
        mock_signal.reset_mock()
        mock_setproctitle.reset_mock()
        mock_function.reset_mock()
        driver_agent._process_wrapper(
            mock_exit_event, 'test_proc_name', mock_function)
        mock_signal.assert_has_calls(signal_calls)
        mock_setproctitle.assert_called_once_with(
            'octavia-driver-agent - test_proc_name')
        mock_function.assert_called_once_with(mock_exit_event)

        # Without agent_name - With function exception
        mock_signal.reset_mock()
        mock_setproctitle.reset_mock()
        mock_function.reset_mock()
        driver_agent._process_wrapper(
            mock_exit_event, 'test_proc_name', mock_function)
        mock_signal.assert_has_calls(signal_calls)
        mock_setproctitle.assert_called_once_with(
            'octavia-driver-agent - test_proc_name')
        mock_function.assert_called_once_with(mock_exit_event)

    @mock.patch('octavia.cmd.driver_agent.multiprocessing')
    @mock.patch('stevedore.enabled.EnabledExtensionManager')
    def test_start_provider_agents(self, mock_stevedore,
                                   mock_multiprocessing):
        mock_extension = mock.MagicMock()
        mock_extension.name = 'test_extension'
        mock_exit_event = mock.MagicMock()
        mock_stevedore.return_value = [mock_extension]
        mock_ext_proc = mock.MagicMock()
        mock_multiprocessing.Process.return_value = mock_ext_proc

        driver_agent._start_provider_agents(mock_exit_event)

        mock_stevedore.assert_called_once_with(
            namespace='octavia.driver_agent.provider_agents',
            check_func=driver_agent._check_if_provider_agent_enabled)
        mock_multiprocessing.Process.assert_called_once_with(
            name='test_extension', target=driver_agent._process_wrapper,
            args=(mock_exit_event, 'provider_agent', mock_extension.plugin),
            kwargs={'agent_name': 'test_extension'})
        mock_ext_proc.start.assert_called_once_with()

    @mock.patch('os.kill')
    @mock.patch('octavia.cmd.driver_agent.multiprocessing')
    @mock.patch('oslo_reports.guru_meditation_report.TextGuruMeditation.'
                'setup_autorun')
    @mock.patch('octavia.common.service.prepare_service')
    def test_main(self, mock_prep_srvc, mock_gmr, mock_multiprocessing,
                  mock_kill):
        mock_exit_event = mock.MagicMock()
        mock_multiprocessing.Event.return_value = mock_exit_event
        mock_status_listener_proc = mock.MagicMock()
        mock_stats_listener_proc = mock.MagicMock()
        mock_get_listener_proc = mock.MagicMock()
        mock_multiprocessing.Process.side_effect = [
            mock_status_listener_proc, mock_stats_listener_proc,
            mock_get_listener_proc,
            mock_status_listener_proc, mock_stats_listener_proc,
            mock_get_listener_proc,
            mock_status_listener_proc, mock_stats_listener_proc,
            mock_get_listener_proc,
            mock_status_listener_proc, mock_stats_listener_proc,
            mock_get_listener_proc,
            mock_status_listener_proc, mock_stats_listener_proc,
            mock_get_listener_proc]

        driver_agent.main()

        mock_prep_srvc.assert_called_once()
        mock_gmr.assert_called_once()
        mock_status_listener_proc.start.assert_called_once()
        mock_stats_listener_proc.start.assert_called_once()
        mock_get_listener_proc.start.assert_called_once()
        process_calls = [
            mock.call(
                args=mock_exit_event, name='status_listener',
                target=(octavia.api.drivers.driver_agent.driver_listener.
                        status_listener)),
            mock.call(
                args=mock_exit_event, name='stats_listener',
                target=(octavia.api.drivers.driver_agent.driver_listener.
                        stats_listener)),
            mock.call(
                args=mock_exit_event, name='get_listener',
                target=(octavia.api.drivers.driver_agent.driver_listener.
                        get_listener))]
        mock_multiprocessing.Process.has_calls(process_calls, any_order=True)

        # Test keyboard interrupt path
        mock_stats_listener_proc.join.side_effect = [KeyboardInterrupt, None]

        driver_agent.main()

        mock_exit_event.set.assert_called_once()

        # Test keyboard interrupt with provider agents
        mock_exit_event.reset_mock()
        mock_stats_listener_proc.join.side_effect = [KeyboardInterrupt, None]
        mock_provider_proc = mock.MagicMock()
        mock_provider_proc.pid = 'not-valid-pid'
        mock_provider_proc.exitcode = 1
        driver_agent.PROVIDER_AGENT_PROCESSES = [mock_provider_proc]

        driver_agent.main()

        mock_exit_event.set.assert_called_once()
        mock_provider_proc.join.assert_called_once_with(
            CONF.driver_agent.provider_agent_shutdown_timeout)

        # Test keyboard interrupt with provider agents fails to stop
        mock_exit_event.reset_mock()
        mock_stats_listener_proc.join.side_effect = [KeyboardInterrupt, None]
        mock_provider_proc = mock.MagicMock()
        mock_provider_proc.pid = 'not-valid-pid'
        mock_provider_proc.exitcode = None
        driver_agent.PROVIDER_AGENT_PROCESSES = [mock_provider_proc]

        driver_agent.main()

        mock_exit_event.set.assert_called_once()
        mock_provider_proc.join.assert_called_once_with(
            CONF.driver_agent.provider_agent_shutdown_timeout)
        mock_kill.assert_called_once_with('not-valid-pid', signal.SIGKILL)

        # Test keyboard interrupt with provider agents join exception
        mock_exit_event.reset_mock()
        mock_stats_listener_proc.join.side_effect = [KeyboardInterrupt, None]
        mock_provider_proc = mock.MagicMock()
        mock_provider_proc.pid = 'not-valid-pid'
        mock_provider_proc.join.side_effect = Exception('boom')
        driver_agent.PROVIDER_AGENT_PROCESSES = [mock_provider_proc]

        driver_agent.main()

        mock_exit_event.set.assert_called_once()
        mock_provider_proc.join.assert_called_once_with(
            CONF.driver_agent.provider_agent_shutdown_timeout)
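# assert_has_calls, used throughout the tests above, in a minimal form.
# Note the distinction this also illustrates: assert_has_calls() really
# verifies recorded calls, while an arbitrary attribute like has_calls()
# on a Mock (as in test_main above) is itself just a mock call and can
# never fail.
from unittest import mock

_m = mock.Mock()
_m(1)
_m(2)
_m.assert_has_calls([mock.call(1), mock.call(2)])
_m.assert_has_calls([mock.call(2), mock.call(1)], any_order=True)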
octavia-6.2.2/octavia/tests/unit/cmd/test_haproxy_vrrp_check.py

# Copyright 2015 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from octavia.cmd import haproxy_vrrp_check
from octavia.tests.unit import base


class TestHAproxyVRRPCheckCMD(base.TestCase):

    def setUp(self):
        super(TestHAproxyVRRPCheckCMD, self).setUp()

    @mock.patch('socket.socket')
    def test_health_check(self, mock_socket):
        socket_mock = mock.MagicMock()
        mock_socket.return_value = socket_mock
        recv_mock = mock.MagicMock()
        recv_mock.side_effect = [b'1', Exception('BREAK')]
        socket_mock.recv = recv_mock

        self.assertRaisesRegex(Exception, 'BREAK',
                               haproxy_vrrp_check.health_check, '10.0.0.1')

    @mock.patch('octavia.cmd.haproxy_vrrp_check.health_check')
    @mock.patch('sys.argv')
    @mock.patch('sys.exit')
    def test_main(self, mock_exit, mock_argv, mock_health_check):
        mock_health_check.side_effect = [1, Exception('FAIL')]

        haproxy_vrrp_check.main()

        mock_exit.assert_called_once_with(1)

octavia-6.2.2/octavia/tests/unit/cmd/test_health_manager.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import signal
from unittest import mock

from octavia.cmd import health_manager
from octavia.tests.unit import base


class TestHealthManagerCMD(base.TestCase):

    def setUp(self):
        super(TestHealthManagerCMD, self).setUp()

    @mock.patch('multiprocessing.Event')
    @mock.patch('octavia.amphorae.drivers.health.'
                'heartbeat_udp.UDPStatusGetter')
    def test_hm_listener(self, mock_getter, mock_event):
        mock_event.is_set.side_effect = [False, False, True]
        getter_mock = mock.MagicMock()
        check_mock = mock.MagicMock()
        getter_mock.check = check_mock
        getter_mock.check.side_effect = [None, Exception('break')]
        mock_getter.return_value = getter_mock

        health_manager.hm_listener(mock_event)

        mock_getter.assert_called_once()
        self.assertEqual(2, getter_mock.check.call_count)

    @mock.patch('multiprocessing.Event')
    @mock.patch('futurist.periodics.PeriodicWorker.start')
    @mock.patch('futurist.periodics.PeriodicWorker.__init__')
    @mock.patch('signal.signal')
    @mock.patch('octavia.controller.healthmanager.'
                'health_manager.HealthManager')
    def test_hm_health_check(self, mock_health, mock_signal, mock_worker,
                             mock_start, mock_event):
        mock_event.is_set.side_effect = [False, True]
        hm_mock = mock.MagicMock()
        mock_worker.return_value = None
        health_check_mock = mock.MagicMock()
        hm_mock.health_check = health_check_mock
        mock_health.return_value = hm_mock

        health_manager.hm_health_check(mock_event)

        mock_health.assert_called_once_with(mock_event)

    @mock.patch('multiprocessing.Process')
    @mock.patch('octavia.common.service.prepare_service')
    def test_main(self, mock_service, mock_process):
        mock_listener_proc = mock.MagicMock()
        mock_health_proc = mock.MagicMock()

        mock_process.side_effect = [mock_listener_proc, mock_health_proc]

        health_manager.main()

        mock_listener_proc.start.assert_called_once_with()
        mock_health_proc.start.assert_called_once_with()
        mock_listener_proc.join.assert_called_once_with()
        mock_health_proc.join.assert_called_once_with()

    @mock.patch('os.kill')
    @mock.patch('multiprocessing.Process')
    @mock.patch('octavia.common.service.prepare_service')
    def test_main_keyboard_interrupt(self, mock_service, mock_process,
                                     mock_kill):
        mock_listener_proc = mock.MagicMock()
        mock_health_proc = mock.MagicMock()
        mock_join = mock.MagicMock()
        mock_join.side_effect = [KeyboardInterrupt, None]
        mock_listener_proc.join = mock_join

        mock_process.side_effect = [mock_listener_proc, mock_health_proc]

        health_manager.main()

        mock_listener_proc.start.assert_called_once_with()
        mock_health_proc.start.assert_called_once_with()
        self.assertEqual(2, mock_listener_proc.join.call_count)
        mock_health_proc.join.assert_called_once_with()
        mock_kill.assert_called_once_with(mock_health_proc.pid,
                                          signal.SIGINT)

    @mock.patch('os.kill')
    @mock.patch('oslo_config.cfg.CONF.mutate_config_files')
    def test_handle_mutate_config(self, mock_mutate, mock_kill):
        health_manager._handle_mutate_config(1, 2)

        mock_mutate.assert_called_once()

        calls = [mock.call(1, signal.SIGHUP), mock.call(2, signal.SIGHUP)]
        mock_kill.assert_has_calls(calls)

octavia-6.2.2/octavia/tests/unit/cmd/test_house_keeping.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from octavia.cmd import house_keeping
from octavia.tests.unit import base


class TestHouseKeepingCMD(base.TestCase):

    def setUp(self):
        super(TestHouseKeepingCMD, self).setUp()

    @mock.patch('octavia.cmd.house_keeping.spare_amp_thread_event')
    @mock.patch('octavia.controller.housekeeping.'
                'house_keeping.SpareAmphora')
    def test_spare_amphora_check(self, mock_SpareAmphora,
                                 spare_amp_thread_event_mock):
        spare_amp_mock = mock.MagicMock()
        spare_check_mock = mock.MagicMock()
        spare_amp_mock.spare_check = spare_check_mock
        mock_SpareAmphora.return_value = spare_amp_mock

        # mock spare_amp_thread_event.is_set() in the while loop
        spare_amp_thread_event_mock.is_set = mock.MagicMock()
        spare_amp_thread_event_mock.is_set.side_effect = [False,
                                                          Exception('break')]

        self.assertRaisesRegex(Exception, 'break',
                               house_keeping.spare_amphora_check)

        mock_SpareAmphora.assert_called_once_with()
        self.assertEqual(1, spare_amp_mock.spare_check.call_count)

    @mock.patch('octavia.cmd.house_keeping.db_cleanup_thread_event')
    @mock.patch('octavia.controller.housekeeping.'
                'house_keeping.DatabaseCleanup')
    def test_db_cleanup(self, mock_DatabaseCleanup, db_cleanup_event_mock):
        db_cleanup = mock.MagicMock()
        delete_old_amphorae = mock.MagicMock()
        db_cleanup.delete_old_amphorae = delete_old_amphorae
        mock_DatabaseCleanup.return_value = db_cleanup

        # mock db_cleanup_thread_event.is_set() in the while loop
        db_cleanup_event_mock.is_set = mock.MagicMock()
        db_cleanup_event_mock.is_set.side_effect = [False, Exception('break')]

        self.assertRaisesRegex(Exception, 'break', house_keeping.db_cleanup)

        mock_DatabaseCleanup.assert_called_once_with()
        self.assertEqual(1, db_cleanup.delete_old_amphorae.call_count)

    @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event')
    @mock.patch('octavia.controller.housekeeping.'
                'house_keeping.CertRotation')
    def test_hk_cert_rotation_with_exception(self, mock_CertRotation,
                                             cert_rotate_event_mock):
        # mock cert_rotate object
        cert_rotate_mock = mock.MagicMock()
        # mock rotate()
        rotate_mock = mock.MagicMock()
        cert_rotate_mock.rotate = rotate_mock
        mock_CertRotation.return_value = cert_rotate_mock

        # mock cert_rotate_thread_event.is_set() in the while loop
        cert_rotate_event_mock.is_set = mock.MagicMock()
        cert_rotate_event_mock.is_set.side_effect = [False,
                                                     Exception('break')]

        self.assertRaisesRegex(Exception, 'break',
                               house_keeping.cert_rotation)
        mock_CertRotation.assert_called_once_with()
        self.assertEqual(1, cert_rotate_mock.rotate.call_count)

    @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event')
    @mock.patch('octavia.controller.housekeeping.'
                'house_keeping.CertRotation')
    def test_hk_cert_rotation_without_exception(self, mock_CertRotation,
                                                cert_rotate_event_mock):
        # mock cert_rotate object
        cert_rotate_mock = mock.MagicMock()
        # mock rotate()
        rotate_mock = mock.MagicMock()
        cert_rotate_mock.rotate = rotate_mock
        mock_CertRotation.return_value = cert_rotate_mock

        # mock cert_rotate_thread_event.is_set() in the while loop
        cert_rotate_event_mock.is_set = mock.MagicMock()
        cert_rotate_event_mock.is_set.side_effect = [False, True]

        self.assertIsNone(house_keeping.cert_rotation())
        mock_CertRotation.assert_called_once_with()
        self.assertEqual(1, cert_rotate_mock.rotate.call_count)

    @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event')
    @mock.patch('octavia.cmd.house_keeping.db_cleanup_thread_event')
    @mock.patch('octavia.cmd.house_keeping.spare_amp_thread_event')
    @mock.patch('threading.Thread')
    @mock.patch('octavia.common.service.prepare_service')
    def test_main(self, mock_service, mock_thread,
                  spare_amp_thread_event_mock, db_cleanup_thread_event_mock,
                  cert_rotate_thread_event_mock):
        spare_amp_thread_mock = mock.MagicMock()
        db_cleanup_thread_mock = mock.MagicMock()
        cert_rotate_thread_mock = mock.MagicMock()

        mock_thread.side_effect = [spare_amp_thread_mock,
                                   db_cleanup_thread_mock,
                                   cert_rotate_thread_mock]

        spare_amp_thread_mock.daemon.return_value = True
        db_cleanup_thread_mock.daemon.return_value = True
        cert_rotate_thread_mock.daemon.return_value = True

        house_keeping.main()

        spare_amp_thread_mock.start.assert_called_once_with()
        db_cleanup_thread_mock.start.assert_called_once_with()
        cert_rotate_thread_mock.start.assert_called_once_with()

        self.assertTrue(spare_amp_thread_mock.daemon)
        self.assertTrue(db_cleanup_thread_mock.daemon)
        self.assertTrue(cert_rotate_thread_mock.daemon)

    @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event')
    @mock.patch('octavia.cmd.house_keeping.db_cleanup_thread_event')
    @mock.patch('octavia.cmd.house_keeping.spare_amp_thread_event')
    @mock.patch('threading.Thread')
    @mock.patch('octavia.common.service.prepare_service')
    def test_main_keyboard_interrupt(self, mock_service, mock_thread,
                                     spare_amp_thread_event_mock,
                                     db_cleanup_thread_event_mock,
                                     cert_rotate_thread_event_mock):
        spare_amp_thread_mock = mock.MagicMock()
        db_cleanup_thread_mock = mock.MagicMock()
        cert_rotate_thread_mock = mock.MagicMock()

        mock_thread.side_effect = [spare_amp_thread_mock,
                                   db_cleanup_thread_mock,
                                   cert_rotate_thread_mock]

        spare_amp_thread_mock.daemon.return_value = True
        db_cleanup_thread_mock.daemon.return_value = True
        cert_rotate_thread_mock.daemon.return_value = True
        mock_join = mock.MagicMock()
        mock_join.side_effect = [KeyboardInterrupt, None]
        spare_amp_thread_mock.join = mock_join

        house_keeping.main()

        spare_amp_thread_event_mock.set.assert_called_once_with()
        db_cleanup_thread_event_mock.set.assert_called_once_with()
        cert_rotate_thread_event_mock.set.assert_called_once_with()

        spare_amp_thread_mock.start.assert_called_once_with()
        db_cleanup_thread_mock.start.assert_called_once_with()
        cert_rotate_thread_mock.start.assert_called_once_with()

        self.assertTrue(spare_amp_thread_mock.daemon)
        self.assertTrue(db_cleanup_thread_mock.daemon)
        self.assertTrue(cert_rotate_thread_mock.daemon)
        self.assertEqual(2, spare_amp_thread_mock.join.call_count)
        db_cleanup_thread_mock.join.assert_called_once_with()
        cert_rotate_thread_mock.join.assert_called_once_with()

    @mock.patch('oslo_config.cfg.CONF.mutate_config_files')
    def test_mutate_config(self, mock_mutate):
        house_keeping._mutate_config()

        mock_mutate.assert_called_once()
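# The loop-breaking trick used throughout the housekeeping and health
# manager tests above: feed is_set() a side_effect sequence so a while-loop
# under test runs a bounded number of iterations (standard library only;
# 'worker' is an illustrative function, not Octavia code):
from unittest import mock

def _worker(event):
    count = 0
    while not event.is_set():
        count += 1
    return count

_event = mock.MagicMock()
_event.is_set.side_effect = [False, False, True]
assert _worker(_event) == 2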
octavia-6.2.2/octavia/tests/unit/cmd/test_status.py

# Copyright (c) 2018 NEC, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_upgradecheck.upgradecheck import Code

from octavia.cmd import status
from octavia.tests.unit import base


class TestUpgradeChecks(base.TestCase):

    def setUp(self):
        super(TestUpgradeChecks, self).setUp()
        self.cmd = status.Checks()

    def test__sample_check(self):
        check_result = self.cmd._sample_check()
        self.assertEqual(
            Code.SUCCESS, check_result.code)

octavia-6.2.2/octavia/tests/unit/common/__init__.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

octavia-6.2.2/octavia/tests/unit/common/jinja/__init__.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
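# Each of the unit modules above follows standard unittest.TestCase
# conventions, so any one of them can be driven by the standard library
# runner as well; a minimal self-contained example (assuming nothing beyond
# unittest itself):
import unittest

class _SmokeTest(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(4, 2 + 2)

if __name__ == '__main__':
    unittest.main()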
octavia-6.2.2/octavia/tests/unit/common/jinja/haproxy/__init__.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

octavia-6.2.2/octavia/tests/unit/common/jinja/haproxy/combined_listeners/__init__.py

octavia-6.2.2/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py

# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
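
# The tests below exercise the combined-listeners JinjaTemplater by
# rendering full haproxy configurations for sample amphora and listener
# objects and comparing them against expected frontend/backend sections.
# A rough usage sketch, based only on the setUp() that follows (not an
# authoritative API reference):
#
#     templater = jinja_cfg.JinjaTemplater(
#         base_amp_path='/var/lib/octavia',
#         base_crt_dir='/var/lib/octavia/certs')
#     config_text = templater.render_loadbalancer_obj(amphora, [listener])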
import copy
import os

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture

from octavia.common import constants
from octavia.common.jinja.haproxy.combined_listeners import jinja_cfg
from octavia.tests.unit import base
from octavia.tests.unit.common.sample_configs import sample_configs_combined

CONF = cfg.CONF


class TestHaproxyCfg(base.TestCase):
    def setUp(self):
        super(TestHaproxyCfg, self).setUp()
        self.jinja_cfg = jinja_cfg.JinjaTemplater(
            base_amp_path='/var/lib/octavia',
            base_crt_dir='/var/lib/octavia/certs')

    def test_get_template(self):
        template = self.jinja_cfg._get_template()
        self.assertEqual('haproxy.cfg.j2', template.name)

    def test_render_template_tls(self):
        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir')
        FAKE_CRT_LIST_FILENAME = os.path.join(
            CONF.haproxy_amphora.base_cert_dir,
            'sample_loadbalancer_id_1/sample_listener_id_1.pem')
        fe = ("frontend sample_listener_id_1\n"
              "    maxconn {maxconn}\n"
              "    redirect scheme https if !{{ ssl_fc }}\n"
              "    bind 10.0.0.2:443 "
              "ssl crt-list {crt_list} "
              "ca-file /var/lib/octavia/certs/sample_loadbalancer_id_1/"
              "client_ca.pem verify required crl-file /var/lib/octavia/"
              "certs/sample_loadbalancer_id_1/SHA_ID.pem ciphers {ciphers}\n"
              "    mode http\n"
              "    default_backend sample_pool_id_1:sample_listener_id_1\n"
              "    timeout client 50000\n").format(
                  maxconn=constants.HAPROXY_DEFAULT_MAXCONN,
                  crt_list=FAKE_CRT_LIST_FILENAME,
                  ciphers=constants.CIPHERS_OWASP_SUITE_B)
        be = ("backend sample_pool_id_1:sample_listener_id_1\n"
              "    mode http\n"
              "    balance roundrobin\n"
              "    cookie SRV insert indirect nocache\n"
              "    timeout check 31s\n"
              "    option httpchk GET /index.html HTTP/1.0\\r\\n\n"
              "    http-check expect rstatus 418\n"
              "    fullconn {maxconn}\n"
              "    option allbackups\n"
              "    timeout connect 5000\n"
              "    timeout server 50000\n"
              "    server sample_member_id_1 10.0.0.99:82 "
              "weight 13 check inter 30s fall 3 rise 2 "
              "cookie sample_member_id_1\n"
              "    server sample_member_id_2 10.0.0.98:82 "
              "weight 13 check inter 30s fall 3 rise 2 cookie "
              "sample_member_id_2\n\n").format(
                  maxconn=constants.HAPROXY_DEFAULT_MAXCONN)
        tls_tuple = {'cont_id_1':
                     sample_configs_combined.sample_tls_container_tuple(
                         id='tls_container_id', certificate='imaCert1',
                         private_key='imaPrivateKey1', primary_cn='FakeCN'),
                     'cont_id_ca': 'client_ca.pem',
                     'cont_id_crl': 'SHA_ID.pem'}
        rendered_obj = self.jinja_cfg.render_loadbalancer_obj(
            sample_configs_combined.sample_amphora_tuple(),
            [sample_configs_combined.sample_listener_tuple(
                proto='TERMINATED_HTTPS', tls=True, sni=True,
                client_ca_cert=True, client_crl_cert=True)],
            tls_tuple)
        self.assertEqual(
            sample_configs_combined.sample_base_expected_config(
                frontend=fe, backend=be),
            rendered_obj)

    def test_render_template_tls_no_sni(self):
        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir')
        FAKE_CRT_LIST_FILENAME = os.path.join(
            CONF.haproxy_amphora.base_cert_dir,
            'sample_loadbalancer_id_1/sample_listener_id_1.pem')
        fe = ("frontend sample_listener_id_1\n"
              "    maxconn {maxconn}\n"
              "    redirect scheme https if !{{ ssl_fc }}\n"
              "    bind 10.0.0.2:443 ssl crt-list {crt_list}"
              " ciphers {ciphers}\n"
              "    mode http\n"
              "    default_backend sample_pool_id_1:sample_listener_id_1\n"
              "    timeout client 50000\n").format(
                  maxconn=constants.HAPROXY_DEFAULT_MAXCONN,
                  crt_list=FAKE_CRT_LIST_FILENAME,
                  ciphers=constants.CIPHERS_OWASP_SUITE_B)
        be = ("backend sample_pool_id_1:sample_listener_id_1\n"
              "    mode http\n"
              "    balance roundrobin\n"
              "    cookie SRV insert indirect nocache\n"
              "    timeout check 
31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='TERMINATED_HTTPS', tls=True)], tls_certs={'cont_id_1': sample_configs_combined.sample_tls_container_tuple( id='tls_container_id', certificate='ImAalsdkfjCert', private_key='ImAsdlfksdjPrivateKey', primary_cn="FakeCN")}) self.assertEqual( sample_configs_combined.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_tls_no_ciphers(self): conf = oslo_fixture.Config(cfg.CONF) conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') FAKE_CRT_LIST_FILENAME = os.path.join( CONF.haproxy_amphora.base_cert_dir, 'sample_loadbalancer_id_1/sample_listener_id_1.pem') fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " redirect scheme https if !{{ ssl_fc }}\n" " bind 10.0.0.2:443 ssl crt-list {crt_list}\n" " mode http\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, crt_list=FAKE_CRT_LIST_FILENAME) be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='TERMINATED_HTTPS', tls=True, tls_ciphers=None)], tls_certs={'cont_id_1': sample_configs_combined.sample_tls_container_tuple( id='tls_container_id', certificate='ImAalsdkfjCert', private_key='ImAsdlfksdjPrivateKey', primary_cn="FakeCN")}) self.assertEqual( sample_configs_combined.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_http(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple()]) 
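        # The default sample listener is plain HTTP, so only the backend
        # section should differ from the shared base configuration.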
self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_member_backup(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "addr 192.168.1.1 port 9000 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "addr 192.168.1.1 port 9000 " "cookie sample_member_id_2 backup\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( monitor_ip_port=True, backup_member=True)]) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_custom_timeouts(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 2\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 1\n" " timeout server 3\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie " "sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( timeout_member_connect=1, timeout_client_data=2, timeout_member_data=3)]) self.assertEqual( sample_configs_combined.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_null_timeouts(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie " "sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( timeout_member_connect=None, timeout_client_data=None, timeout_member_data=None)]) self.assertEqual( 
sample_configs_combined.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_member_monitor_addr_port(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "addr 192.168.1.1 port 9000 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "addr 192.168.1.1 port 9000 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( monitor_ip_port=True)]) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_https_real_monitor(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check check-ssl verify none inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check check-ssl verify none inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple(proto='HTTPS')]) self.assertEqual(sample_configs_combined.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_https_hello_monitor(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option ssl-hello-chk\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) 
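        # monitor_proto='TLS-HELLO' should swap the HTTP health check for
        # haproxy's "option ssl-hello-chk" in the backend section above.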
rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='HTTPS', monitor_proto='TLS-HELLO')]) self.assertEqual(sample_configs_combined.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_no_monitor_http(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple(proto='HTTP', monitor=False)]) self.assertEqual(sample_configs_combined.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_disabled_member(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2 disabled\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='HTTP', monitor=False, disabled_member=True)]) self.assertEqual(sample_configs_combined.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_ping_monitor_http(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option external-check\n" " external-check command /var/lib/octavia/ping-wrapper.sh\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) go = " maxconn {maxconn}\n external-check\n\n".format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='HTTP', monitor_proto='PING')]) self.assertEqual(sample_configs_combined.sample_base_expected_config( backend=be, global_opts=go), rendered_obj) def test_render_template_no_monitor_https(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV 
insert indirect nocache\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple(proto='HTTPS', monitor=False)]) self.assertEqual(sample_configs_combined.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_health_monitor_http_check(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.1\\r\\nHost:\\ " "testlab.com\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='HTTP', monitor_proto='HTTP', hm_host_http_check=True)]) self.assertEqual(sample_configs_combined.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_no_persistence_https(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode tcp\n" " balance roundrobin\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='HTTPS', monitor=False, persistence=False)]) self.assertEqual(sample_configs_combined.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_no_persistence_http(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='HTTP', monitor=False, persistence=False)]) self.assertEqual(sample_configs_combined.sample_base_expected_config( backend=be), rendered_obj) def 
test_render_template_sourceip_persistence(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " stick-table type ip size 10k\n" " stick on src\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( persistence_type='SOURCE_IP')]) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_appcookie_persistence(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " stick-table type string len 64 size 10k\n" " stick store-response res.cook(JSESSIONID)\n" " stick match req.cook(JSESSIONID)\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( persistence_type='APP_COOKIE', persistence_cookie='JSESSIONID')]) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_unlimited_connections(self): sample_amphora = sample_configs_combined.sample_amphora_tuple() sample_listener = sample_configs_combined.sample_listener_tuple( proto='HTTPS', monitor=False) fe = ("frontend {listener_id}\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend {pool_id}:{listener_id}\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, pool_id=sample_listener.default_pool.id, listener_id=sample_listener.id) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend {pool_id}:{listener_id}\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, pool_id=sample_listener.default_pool.id, listener_id=sample_listener.id) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_amphora, [sample_listener]) self.assertEqual(sample_configs_combined.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_limited_connections(self): fe = ("frontend sample_listener_id_1\n" " maxconn 2014\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout 
client 50000\n") lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn 2014\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n") g_opts = " maxconn 2014\n\n" rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='HTTPS', monitor=False, connection_limit=2014)]) self.assertEqual(sample_configs_combined.sample_base_expected_config( frontend=fe, logging=lg, backend=be, global_opts=g_opts), rendered_obj) def test_render_template_l7policies(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " acl sample_l7rule_id_1 path -m beg /api\n" " use_backend sample_pool_id_2:sample_listener_id_1" " if sample_l7rule_id_1\n" " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " "This\\ string\\\\\\ with\\ stuff\n" " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " "this.*|that\n" " redirect code 302 location http://www.example.com if " "!sample_l7rule_id_2 sample_l7rule_id_3\n" " acl sample_l7rule_id_4 path_end -m str jpg\n" " acl sample_l7rule_id_5 req.hdr(host) -i -m end " ".example.com\n" " http-request deny if sample_l7rule_id_4 " "sample_l7rule_id_5\n" " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " "This\\ string\\\\\\ with\\ stuff\n" " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " "this.*|that\n" " redirect code 302 prefix https://example.com if " "!sample_l7rule_id_2 sample_l7rule_id_3\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 check " "inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 check " "inter 30s fall 3 rise 2 cookie sample_member_id_2\n" "\n" "backend sample_pool_id_2:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /healthmon.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_3 10.0.0.97:82 weight 13 check " "inter 30s fall 3 rise 2 cookie sample_member_id_3\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple(l7=True)]) self.assertEqual(sample_configs_combined.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_http_xff(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance 
roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " option forwardfor\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( insert_headers={'X-Forwarded-For': 'true'})]) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_http_xff_xfport(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " option forwardfor\n" " http-request set-header X-Forwarded-Port %[dst_port]\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( insert_headers={'X-Forwarded-For': 'true', 'X-Forwarded-Port': 'true'})]) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_pool_proxy_protocol(self): be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1 send-proxy\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2 send-proxy\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple(be_proto='PROXY')]) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_pool_cert(self): cert_file_path = os.path.join(self.jinja_cfg.base_crt_dir, 'sample_listener_id_1', 'fake path') be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " "{opts}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " 
"{opts}\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, opts="ssl crt %s verify none sni ssl_fc_sni" % cert_file_path + " ciphers " + constants.CIPHERS_OWASP_SUITE_B) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( pool_cert=True, tls_enabled=True, backend_tls_ciphers=constants.CIPHERS_OWASP_SUITE_B)], tls_certs={ 'sample_pool_id_1': {'client_cert': cert_file_path, 'ca_cert': None, 'crl': None}}) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_pool_cert_no_ciphers(self): cert_file_path = os.path.join(self.jinja_cfg.base_crt_dir, 'sample_listener_id_1', 'fake path') be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " "{opts}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " "{opts}\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, opts="ssl crt %s verify none sni ssl_fc_sni" % cert_file_path) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( pool_cert=True, tls_enabled=True)], tls_certs={ 'sample_pool_id_1': {'client_cert': cert_file_path, 'ca_cert': None, 'crl': None}}) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_with_full_pool_cert(self): pool_client_cert = '/foo/cert.pem' pool_ca_cert = '/foo/ca.pem' pool_crl = '/foo/crl.pem' be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " "{opts}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " "{opts}\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, opts="%s %s %s %s %s %s" % ( "ssl", "crt", pool_client_cert, "ca-file %s" % pool_ca_cert, "crl-file %s" % pool_crl, "verify required sni ssl_fc_sni")) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( pool_cert=True, pool_ca_cert=True, pool_crl=True, tls_enabled=True)], tls_certs={ 'sample_pool_id_1': {'client_cert': pool_client_cert, 'ca_cert': pool_ca_cert, 'crl': pool_crl}}) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_transform_session_persistence(self): in_persistence = ( sample_configs_combined.sample_session_persistence_tuple()) ret = self.jinja_cfg._transform_session_persistence( in_persistence, {}) self.assertEqual(sample_configs_combined.RET_PERSISTENCE, ret) def test_transform_health_monitor(self): in_persistence = 
sample_configs_combined.sample_health_monitor_tuple() ret = self.jinja_cfg._transform_health_monitor(in_persistence, {}) self.assertEqual(sample_configs_combined.RET_MONITOR_1, ret) def test_transform_member(self): in_member = sample_configs_combined.sample_member_tuple( 'sample_member_id_1', '10.0.0.99') ret = self.jinja_cfg._transform_member(in_member, {}) self.assertEqual(sample_configs_combined.RET_MEMBER_1, ret) def test_transform_pool(self): in_pool = sample_configs_combined.sample_pool_tuple() ret = self.jinja_cfg._transform_pool(in_pool, {}) self.assertEqual(sample_configs_combined.RET_POOL_1, ret) def test_transform_pool_2(self): in_pool = sample_configs_combined.sample_pool_tuple(sample_pool=2) ret = self.jinja_cfg._transform_pool(in_pool, {}) self.assertEqual(sample_configs_combined.RET_POOL_2, ret) def test_transform_pool_http_reuse(self): in_pool = sample_configs_combined.sample_pool_tuple(sample_pool=2) ret = self.jinja_cfg._transform_pool( in_pool, {constants.HTTP_REUSE: True}) expected_config = copy.copy(sample_configs_combined.RET_POOL_2) expected_config[constants.HTTP_REUSE] = True self.assertEqual(expected_config, ret) def test_transform_pool_cert(self): in_pool = sample_configs_combined.sample_pool_tuple(pool_cert=True) cert_path = os.path.join(self.jinja_cfg.base_crt_dir, 'test_listener_id', 'pool_cert.pem') ret = self.jinja_cfg._transform_pool( in_pool, {}, pool_tls_certs={'client_cert': cert_path}) expected_config = copy.copy(sample_configs_combined.RET_POOL_1) expected_config['client_cert'] = cert_path self.assertEqual(expected_config, ret) def test_transform_listener(self): in_listener = sample_configs_combined.sample_listener_tuple() ret = self.jinja_cfg._transform_listener(in_listener, None, {}, in_listener.load_balancer) self.assertEqual(sample_configs_combined.RET_LISTENER, ret) def test_transform_listener_with_l7(self): in_listener = sample_configs_combined.sample_listener_tuple(l7=True) ret = self.jinja_cfg._transform_listener(in_listener, None, {}, in_listener.load_balancer) self.assertEqual(sample_configs_combined.RET_LISTENER_L7, ret) def test_transform_loadbalancer(self): in_amphora = sample_configs_combined.sample_amphora_tuple() in_listener = sample_configs_combined.sample_listener_tuple() ret = self.jinja_cfg._transform_loadbalancer( in_amphora, in_listener.load_balancer, [in_listener], None, {}) self.assertEqual(sample_configs_combined.RET_LB, ret) def test_transform_two_loadbalancers(self): in_amphora = sample_configs_combined.sample_amphora_tuple() in_listener1 = sample_configs_combined.sample_listener_tuple() in_listener2 = sample_configs_combined.sample_listener_tuple() ret = self.jinja_cfg._transform_loadbalancer( in_amphora, in_listener1.load_balancer, [in_listener1, in_listener2], None, {}) self.assertEqual(ret['global_connection_limit'], constants.HAPROXY_DEFAULT_MAXCONN + constants.HAPROXY_DEFAULT_MAXCONN) def test_transform_many_loadbalancers(self): in_amphora = sample_configs_combined.sample_amphora_tuple() in_listeners = [] # Create many listeners, until the sum of connection_limits # is greater than MAX_MAXCONN connection_limit_sum = 0 while connection_limit_sum <= constants.HAPROXY_MAX_MAXCONN: in_listener = ( sample_configs_combined.sample_listener_tuple()) connection_limit_sum += constants.HAPROXY_DEFAULT_MAXCONN in_listeners.append(in_listener) ret = self.jinja_cfg._transform_loadbalancer( in_amphora, in_listeners[0].load_balancer, in_listeners, None, {}) self.assertEqual(ret['global_connection_limit'], constants.HAPROXY_MAX_MAXCONN) 
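        # The summed per-listener limits exceed HAPROXY_MAX_MAXCONN, so the
        # global connection limit is capped at that ceiling rather than the
        # raw sum, as the assertLess below verifies.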
self.assertLess(ret['global_connection_limit'], connection_limit_sum) def test_transform_amphora(self): in_amphora = sample_configs_combined.sample_amphora_tuple() ret = self.jinja_cfg._transform_amphora(in_amphora, {}) self.assertEqual(sample_configs_combined.RET_AMPHORA, ret) def test_transform_loadbalancer_with_l7(self): in_amphora = sample_configs_combined.sample_amphora_tuple() in_listener = sample_configs_combined.sample_listener_tuple(l7=True) ret = self.jinja_cfg._transform_loadbalancer( in_amphora, in_listener.load_balancer, [in_listener], None, {}) self.assertEqual(sample_configs_combined.RET_LB_L7, ret) def test_transform_l7policy(self): in_l7policy = sample_configs_combined.sample_l7policy_tuple( 'sample_l7policy_id_1') ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}) self.assertEqual(sample_configs_combined.RET_L7POLICY_1, ret) def test_transform_l7policy_2_8(self): in_l7policy = sample_configs_combined.sample_l7policy_tuple( 'sample_l7policy_id_2', sample_policy=2) ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}) self.assertEqual(sample_configs_combined.RET_L7POLICY_2, ret) # test invalid action without redirect_http_code in_l7policy = sample_configs_combined.sample_l7policy_tuple( 'sample_l7policy_id_8', sample_policy=2, redirect_http_code=None) ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}) self.assertEqual(sample_configs_combined.RET_L7POLICY_8, ret) def test_transform_l7policy_disabled_rule(self): in_l7policy = sample_configs_combined.sample_l7policy_tuple( 'sample_l7policy_id_6', sample_policy=6) ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}) self.assertEqual(sample_configs_combined.RET_L7POLICY_6, ret) def test_escape_haproxy_config_string(self): self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( 'string_with_none'), 'string_with_none') self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( 'string with spaces'), 'string\\ with\\ spaces') self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( 'string\\with\\backslashes'), 'string\\\\with\\\\backslashes') self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( 'string\\ with\\ all'), 'string\\\\\\ with\\\\\\ all') def test_render_template_no_log(self): j_cfg = jinja_cfg.JinjaTemplater( base_amp_path='/var/lib/octavia', base_crt_dir='/var/lib/octavia/certs', connection_logging=False) defaults = ("defaults\n" " no log\n" " retries 3\n" " option redispatch\n" " option splice-request\n" " option splice-response\n" " option http-keep-alive\n\n") rendered_obj = j_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple()] ) self.assertEqual( sample_configs_combined.sample_base_expected_config( defaults=defaults, logging="\n"), rendered_obj) def test_http_reuse(self): j_cfg = jinja_cfg.JinjaTemplater( base_amp_path='/var/lib/octavia', base_crt_dir='/var/lib/octavia/certs') sample_amphora = sample_configs_combined.sample_amphora_tuple() sample_proxy_listener = sample_configs_combined.sample_listener_tuple( be_proto='PROXY') # With http-reuse be = ("backend {pool_id}:{listener_id}\n" " mode http\n" " http-reuse safe\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1 send-proxy\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 
30s fall 3 rise 2 " "cookie sample_member_id_2 send-proxy\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, pool_id=sample_proxy_listener.default_pool.id, listener_id=sample_proxy_listener.id) rendered_obj = j_cfg.build_config( sample_amphora, [sample_proxy_listener], tls_certs=None, haproxy_versions=("1", "8", "1")) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) # Without http-reuse be = ("backend {pool_id}:{listener_id}\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1 send-proxy\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2 send-proxy\n\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, pool_id=sample_proxy_listener.default_pool.id, listener_id=sample_proxy_listener.id) rendered_obj = j_cfg.build_config( sample_amphora, [sample_proxy_listener], tls_certs=None, haproxy_versions=("1", "5", "18")) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), rendered_obj) def test_ssl_types_l7rules(self): j_cfg = jinja_cfg.JinjaTemplater( base_amp_path='/var/lib/octavia', base_crt_dir='/var/lib/octavia/certs') fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " redirect scheme https if !{{ ssl_fc }}\n" " bind 10.0.0.2:443 ciphers {ciphers}\n" " mode http\n" " acl sample_l7rule_id_1 path -m beg /api\n" " use_backend sample_pool_id_2:sample_listener_id_1" " if sample_l7rule_id_1\n" " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " "This\\ string\\\\\\ with\\ stuff\n" " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " "this.*|that\n" " redirect code 302 location http://www.example.com " "if !sample_l7rule_id_2 sample_l7rule_id_3\n" " acl sample_l7rule_id_4 path_end -m str jpg\n" " acl sample_l7rule_id_5 req.hdr(host) -i -m end " ".example.com\n" " http-request deny " "if sample_l7rule_id_4 sample_l7rule_id_5\n" " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " "This\\ string\\\\\\ with\\ stuff\n" " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " "this.*|that\n" " redirect code 302 prefix https://example.com " "if !sample_l7rule_id_2 sample_l7rule_id_3\n" " acl sample_l7rule_id_7 ssl_c_used\n" " acl sample_l7rule_id_8 ssl_c_verify eq 1\n" " acl sample_l7rule_id_9 ssl_c_s_dn(STREET) -m reg " "^STREET.*NO\\\\.$\n" " acl sample_l7rule_id_10 ssl_c_s_dn(OU-3) -m beg " "Orgnization\\ Bala\n" " acl sample_l7rule_id_11 path -m beg /api\n" " redirect code 302 location " "http://www.ssl-type-l7rule-test.com " "if sample_l7rule_id_7 !sample_l7rule_id_8 !sample_l7rule_id_9 " "!sample_l7rule_id_10 sample_l7rule_id_11\n" " default_backend sample_pool_id_1:sample_listener_id_1\n" " timeout client 50000\n".format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN, ciphers=constants.CIPHERS_OWASP_SUITE_B)) be = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 check " "inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server 
sample_member_id_2 10.0.0.98:82 weight 13 check "
              "inter 30s fall 3 rise 2 cookie sample_member_id_2\n\n"
              "backend sample_pool_id_2:sample_listener_id_1\n"
              "    mode http\n"
              "    balance roundrobin\n"
              "    cookie SRV insert indirect nocache\n"
              "    timeout check 31s\n"
              "    option httpchk GET /healthmon.html HTTP/1.0\\r\\n\n"
              "    http-check expect rstatus 418\n"
              "    fullconn {maxconn}\n"
              "    option allbackups\n"
              "    timeout connect 5000\n"
              "    timeout server 50000\n"
              "    server sample_member_id_3 10.0.0.97:82 weight 13 check "
              "inter 30s fall 3 rise 2 cookie sample_member_id_3\n\n".format(
                  maxconn=constants.HAPROXY_DEFAULT_MAXCONN))
        sample_listener = sample_configs_combined.sample_listener_tuple(
            proto=constants.PROTOCOL_TERMINATED_HTTPS, l7=True,
            ssl_type_l7=True)
        rendered_obj = j_cfg.build_config(
            sample_configs_combined.sample_amphora_tuple(),
            [sample_listener],
            tls_certs=None,
            haproxy_versions=("1", "5", "18"))
        self.assertEqual(
            sample_configs_combined.sample_base_expected_config(
                frontend=fe, backend=be),
            rendered_obj)

octavia-6.2.2/octavia/tests/unit/common/jinja/haproxy/split_listeners/__init__.py

octavia-6.2.2/octavia/tests/unit/common/jinja/haproxy/split_listeners/test_jinja_cfg.py

# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
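
# The split_listeners tests below mirror the combined_listeners suite above,
# but render one haproxy process per listener: render_loadbalancer_obj()
# takes a single listener rather than a list, and the expected sections use
# HAPROXY_MAX_MAXCONN instead of HAPROXY_DEFAULT_MAXCONN.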
import copy import os from oslo_config import cfg from oslo_config import fixture as oslo_fixture from octavia.common import constants from octavia.common.jinja.haproxy.split_listeners import jinja_cfg from octavia.tests.unit import base from octavia.tests.unit.common.sample_configs import sample_configs_split CONF = cfg.CONF class TestHaproxyCfg(base.TestCase): def setUp(self): super(TestHaproxyCfg, self).setUp() self.jinja_cfg = jinja_cfg.JinjaTemplater( base_amp_path='/var/lib/octavia', base_crt_dir='/var/lib/octavia/certs') def test_get_template(self): template = self.jinja_cfg._get_template() self.assertEqual('haproxy.cfg.j2', template.name) def test_render_template_tls(self): conf = oslo_fixture.Config(cfg.CONF) conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') FAKE_CRT_LIST_FILENAME = os.path.join( CONF.haproxy_amphora.base_cert_dir, 'sample_listener_id_1/sample_listener_id_1.pem') fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " redirect scheme https if !{{ ssl_fc }}\n" " bind 10.0.0.2:443 " "ssl crt-list {crt_list} " "ca-file /var/lib/octavia/certs/sample_listener_id_1/" "client_ca.pem verify required crl-file /var/lib/octavia/" "certs/sample_listener_id_1/SHA_ID.pem\n" " mode http\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN, crt_list=FAKE_CRT_LIST_FILENAME) be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 cookie " "sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='TERMINATED_HTTPS', tls=True, sni=True, client_ca_cert=True, client_crl_cert=True), client_ca_filename='client_ca.pem', client_crl='SHA_ID.pem') self.assertEqual( sample_configs_split.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_tls_no_sni(self): conf = oslo_fixture.Config(cfg.CONF) conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') FAKE_CRT_LIST_FILENAME = os.path.join( CONF.haproxy_amphora.base_cert_dir, 'sample_listener_id_1/sample_listener_id_1.pem') fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " redirect scheme https if !{{ ssl_fc }}\n" " bind 10.0.0.2:443 " "ssl crt-list {crt_list}\n" " mode http\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN, crt_list=FAKE_CRT_LIST_FILENAME) be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( 
maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='TERMINATED_HTTPS', tls=True)) self.assertEqual( sample_configs_split.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_http(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple()) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_member_backup(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "addr 192.168.1.1 port 9000 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "addr 192.168.1.1 port 9000 " "cookie sample_member_id_2 backup\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( monitor_ip_port=True, backup_member=True)) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_custom_timeouts(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " default_backend sample_pool_id_1\n" " timeout client 2\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 1\n" " timeout server 3\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie " "sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( timeout_member_connect=1, timeout_client_data=2, timeout_member_data=3)) self.assertEqual( sample_configs_split.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_null_timeouts(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " default_backend 
sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie " "sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( timeout_member_connect=None, timeout_client_data=None, timeout_member_data=None)) self.assertEqual( sample_configs_split.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_member_monitor_addr_port(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "addr 192.168.1.1 port 9000 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "addr 192.168.1.1 port 9000 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple(monitor_ip_port=True)) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_https_real_monitor(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check check-ssl verify none inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check check-ssl verify none inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple(proto='HTTPS')) self.assertEqual(sample_configs_split.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_https_hello_monitor(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend 
sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option ssl-hello-chk\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTPS', monitor_proto='TLS-HELLO')) self.assertEqual(sample_configs_split.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_no_monitor_http(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTP', monitor=False)) self.assertEqual(sample_configs_split.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_disabled_member(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2 disabled\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTP', monitor=False, disabled_member=True)) self.assertEqual(sample_configs_split.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_ping_monitor_http(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option external-check\n" " external-check command /var/lib/octavia/ping-wrapper.sh\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) go = " maxconn {maxconn}\n external-check\n\n".format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTP', 
monitor_proto='PING')) self.assertEqual(sample_configs_split.sample_base_expected_config( backend=be, global_opts=go), rendered_obj) def test_render_template_no_monitor_https(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTPS', monitor=False)) self.assertEqual(sample_configs_split.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_health_monitor_http_check(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.1\\r\\nHost:\\ " "testlab.com\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTP', monitor_proto='HTTP', hm_host_http_check=True)) self.assertEqual(sample_configs_split.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_no_persistence_https(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13\n\n").format(maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTPS', monitor=False, persistence=False)) self.assertEqual(sample_configs_split.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_no_persistence_http(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout 
server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13\n\n").format(maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTP', monitor=False, persistence=False)) self.assertEqual(sample_configs_split.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_sourceip_persistence(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " stick-table type ip size 10k\n" " stick on src\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( persistence_type='SOURCE_IP')) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_appcookie_persistence(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " stick-table type string len 64 size 10k\n" " stick store-response res.cook(JSESSIONID)\n" " stick match req.cook(JSESSIONID)\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( persistence_type='APP_COOKIE', persistence_cookie='JSESSIONID')) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_unlimited_connections(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTPS', monitor=False)) self.assertEqual(sample_configs_split.sample_base_expected_config( frontend=fe, logging=lg, backend=be), rendered_obj) def test_render_template_limited_connections(self): fe = 
("frontend sample_listener_id_1\n" " maxconn 2014\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n") lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " fullconn 2014\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n") g_opts = " maxconn 2014\n\n" rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( proto='HTTPS', monitor=False, connection_limit=2014)) self.assertEqual(sample_configs_split.sample_base_expected_config( frontend=fe, logging=lg, backend=be, global_opts=g_opts), rendered_obj) def test_render_template_l7policies(self): fe = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " acl sample_l7rule_id_1 path -m beg /api\n" " use_backend sample_pool_id_2 if sample_l7rule_id_1\n" " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " "This\\ string\\\\\\ with\\ stuff\n" " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " "this.*|that\n" " redirect code 302 location http://www.example.com if " "!sample_l7rule_id_2 sample_l7rule_id_3\n" " acl sample_l7rule_id_4 path_end -m str jpg\n" " acl sample_l7rule_id_5 req.hdr(host) -i -m end " ".example.com\n" " http-request deny if sample_l7rule_id_4 " "sample_l7rule_id_5\n" " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " "This\\ string\\\\\\ with\\ stuff\n" " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " "this.*|that\n" " redirect code 302 prefix https://example.com if " "!sample_l7rule_id_2 sample_l7rule_id_3\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 check " "inter 30s fall 3 rise 2 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 check " "inter 30s fall 3 rise 2 cookie sample_member_id_2\n" "\n" "backend sample_pool_id_2\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /healthmon.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_3 10.0.0.97:82 weight 13 check " "inter 30s fall 3 rise 2 cookie sample_member_id_3\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple(l7=True)) self.assertEqual(sample_configs_split.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_http_xff(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV 
insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " option forwardfor\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( insert_headers={'X-Forwarded-For': 'true'})) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_http_xff_xfport(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " option forwardfor\n" " http-request set-header X-Forwarded-Port %[dst_port]\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( insert_headers={'X-Forwarded-For': 'true', 'X-Forwarded-Port': 'true'})) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_pool_proxy_protocol(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1 send-proxy\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2 send-proxy\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( be_proto='PROXY')) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_pool_cert(self): cert_file_path = os.path.join(self.jinja_cfg.base_crt_dir, 'sample_listener_id_1', 'fake path') be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " "{opts}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " "{opts}\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN, opts="ssl crt %s verify none sni ssl_fc_sni" % cert_file_path) rendered_obj = 
self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( pool_cert=True, tls_enabled=True), pool_tls_certs={ 'sample_pool_id_1': {'client_cert': cert_file_path, 'ca_cert': None, 'crl': None}}) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_with_full_pool_cert(self): pool_client_cert = '/foo/cert.pem' pool_ca_cert = '/foo/ca.pem' pool_crl = '/foo/crl.pem' be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html HTTP/1.0\\r\\n\n" " http-check expect rstatus 418\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " "{opts}\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " "{opts}\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN, opts="%s %s %s %s %s %s" % ( "ssl", "crt", pool_client_cert, "ca-file %s" % pool_ca_cert, "crl-file %s" % pool_crl, "verify required sni ssl_fc_sni")) rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple( pool_cert=True, pool_ca_cert=True, pool_crl=True, tls_enabled=True), pool_tls_certs={ 'sample_pool_id_1': {'client_cert': pool_client_cert, 'ca_cert': pool_ca_cert, 'crl': pool_crl}}) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_transform_session_persistence(self): in_persistence = ( sample_configs_split.sample_session_persistence_tuple()) ret = self.jinja_cfg._transform_session_persistence( in_persistence, {}) self.assertEqual(sample_configs_split.RET_PERSISTENCE, ret) def test_transform_health_monitor(self): in_persistence = sample_configs_split.sample_health_monitor_tuple() ret = self.jinja_cfg._transform_health_monitor(in_persistence, {}) self.assertEqual(sample_configs_split.RET_MONITOR_1, ret) def test_transform_member(self): in_member = sample_configs_split.sample_member_tuple( 'sample_member_id_1', '10.0.0.99') ret = self.jinja_cfg._transform_member(in_member, {}) self.assertEqual(sample_configs_split.RET_MEMBER_1, ret) def test_transform_pool(self): in_pool = sample_configs_split.sample_pool_tuple() ret = self.jinja_cfg._transform_pool(in_pool, {}) self.assertEqual(sample_configs_split.RET_POOL_1, ret) def test_transform_pool_2(self): in_pool = sample_configs_split.sample_pool_tuple(sample_pool=2) ret = self.jinja_cfg._transform_pool(in_pool, {}) self.assertEqual(sample_configs_split.RET_POOL_2, ret) def test_transform_pool_http_reuse(self): in_pool = sample_configs_split.sample_pool_tuple(sample_pool=2) ret = self.jinja_cfg._transform_pool( in_pool, {constants.HTTP_REUSE: True}) expected_config = copy.copy(sample_configs_split.RET_POOL_2) expected_config[constants.HTTP_REUSE] = True self.assertEqual(expected_config, ret) def test_transform_pool_cert(self): in_pool = sample_configs_split.sample_pool_tuple(pool_cert=True) cert_path = os.path.join(self.jinja_cfg.base_crt_dir, 'test_listener_id', 'pool_cert.pem') ret = self.jinja_cfg._transform_pool( in_pool, {}, pool_tls_certs={'client_cert': cert_path}) expected_config = copy.copy(sample_configs_split.RET_POOL_1) expected_config['client_cert'] = cert_path self.assertEqual(expected_config, 
ret) def test_transform_listener(self): in_listener = sample_configs_split.sample_listener_tuple() ret = self.jinja_cfg._transform_listener(in_listener, {}, in_listener.load_balancer) self.assertEqual(sample_configs_split.RET_LISTENER, ret) def test_transform_listener_with_l7(self): in_listener = sample_configs_split.sample_listener_tuple(l7=True) ret = self.jinja_cfg._transform_listener(in_listener, {}, in_listener.load_balancer) self.assertEqual(sample_configs_split.RET_LISTENER_L7, ret) def test_transform_loadbalancer(self): in_amphora = sample_configs_split.sample_amphora_tuple() in_listener = sample_configs_split.sample_listener_tuple() ret = self.jinja_cfg._transform_loadbalancer( in_amphora, in_listener.load_balancer, in_listener, {}) self.assertEqual(sample_configs_split.RET_LB, ret) def test_transform_amphora(self): in_amphora = sample_configs_split.sample_amphora_tuple() ret = self.jinja_cfg._transform_amphora(in_amphora, {}) self.assertEqual(sample_configs_split.RET_AMPHORA, ret) def test_transform_loadbalancer_with_l7(self): in_amphora = sample_configs_split.sample_amphora_tuple() in_listener = sample_configs_split.sample_listener_tuple(l7=True) ret = self.jinja_cfg._transform_loadbalancer( in_amphora, in_listener.load_balancer, in_listener, {}) self.assertEqual(sample_configs_split.RET_LB_L7, ret) def test_transform_l7policy(self): in_l7policy = sample_configs_split.sample_l7policy_tuple( 'sample_l7policy_id_1') ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}) self.assertEqual(sample_configs_split.RET_L7POLICY_1, ret) def test_transform_l7policy_2_8(self): in_l7policy = sample_configs_split.sample_l7policy_tuple( 'sample_l7policy_id_2', sample_policy=2) ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}) self.assertEqual(sample_configs_split.RET_L7POLICY_2, ret) # test invalid action without redirect_http_code in_l7policy = sample_configs_split.sample_l7policy_tuple( 'sample_l7policy_id_8', sample_policy=2, redirect_http_code=None) ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}) self.assertEqual(sample_configs_split.RET_L7POLICY_8, ret) def test_transform_l7policy_disabled_rule(self): in_l7policy = sample_configs_split.sample_l7policy_tuple( 'sample_l7policy_id_6', sample_policy=6) ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}) self.assertEqual(sample_configs_split.RET_L7POLICY_6, ret) def test_escape_haproxy_config_string(self): self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( 'string_with_none'), 'string_with_none') self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( 'string with spaces'), 'string\\ with\\ spaces') self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( 'string\\with\\backslashes'), 'string\\\\with\\\\backslashes') self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( 'string\\ with\\ all'), 'string\\\\\\ with\\\\\\ all') def test_render_template_no_log(self): j_cfg = jinja_cfg.JinjaTemplater( base_amp_path='/var/lib/octavia', base_crt_dir='/var/lib/octavia/certs', connection_logging=False) defaults = ("defaults\n" " no log\n" " retries 3\n" " option redispatch\n" " option splice-request\n" " option splice-response\n" " option http-keep-alive\n\n") rendered_obj = j_cfg.render_loadbalancer_obj( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple() ) self.assertEqual( sample_configs_split.sample_base_expected_config( defaults=defaults, logging="\n"), rendered_obj) def test_http_reuse(self): j_cfg = jinja_cfg.JinjaTemplater( 
base_amp_path='/var/lib/octavia', base_crt_dir='/var/lib/octavia/certs') # With http-reuse be = ("backend sample_pool_id_1\n" " mode http\n" " http-reuse safe\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1 send-proxy\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2 send-proxy\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = j_cfg.build_config( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple(be_proto='PROXY'), haproxy_versions=("1", "8", "1")) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) # Without http-reuse be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " fullconn {maxconn}\n" " option allbackups\n" " timeout connect 5000\n" " timeout server 50000\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_1 send-proxy\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 rise 2 " "cookie sample_member_id_2 send-proxy\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) rendered_obj = j_cfg.build_config( sample_configs_split.sample_amphora_tuple(), sample_configs_split.sample_listener_tuple(be_proto='PROXY'), haproxy_versions=("1", "5", "18")) self.assertEqual( sample_configs_split.sample_base_expected_config(backend=be), rendered_obj) def test_ssl_types_l7rules(self): j_cfg = jinja_cfg.JinjaTemplater( base_amp_path='/var/lib/octavia', base_crt_dir='/var/lib/octavia/certs') fe = ("frontend sample_listener_id_1\n" " maxconn 1000000\n" " redirect scheme https if !{ ssl_fc }\n" " bind 10.0.0.2:443\n" " mode http\n" " acl sample_l7rule_id_1 path -m beg /api\n" " use_backend sample_pool_id_2 if sample_l7rule_id_1\n" " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " "This\\ string\\\\\\ with\\ stuff\n" " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " "this.*|that\n" " redirect code 302 location http://www.example.com " "if !sample_l7rule_id_2 sample_l7rule_id_3\n" " acl sample_l7rule_id_4 path_end -m str jpg\n" " acl sample_l7rule_id_5 req.hdr(host) -i -m end " ".example.com\n" " http-request deny " "if sample_l7rule_id_4 sample_l7rule_id_5\n" " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " "This\\ string\\\\\\ with\\ stuff\n" " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " "this.*|that\n" " redirect code 302 prefix https://example.com " "if !sample_l7rule_id_2 sample_l7rule_id_3\n" " acl sample_l7rule_id_7 ssl_c_used\n" " acl sample_l7rule_id_8 ssl_c_verify eq 1\n" " acl sample_l7rule_id_9 ssl_c_s_dn(STREET) -m reg " "^STREET.*NO\\\\.$\n" " acl sample_l7rule_id_10 ssl_c_s_dn(OU-3) -m beg " "Orgnization\\ Bala\n" " acl sample_l7rule_id_11 path -m beg /api\n" " redirect code 302 location " "http://www.ssl-type-l7rule-test.com " "if sample_l7rule_id_7 !sample_l7rule_id_8 !sample_l7rule_id_9 " "!sample_l7rule_id_10 sample_l7rule_id_11\n" " default_backend sample_pool_id_1\n" " timeout client 50000\n") be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31s\n" " option httpchk GET /index.html 
HTTP/1.0\\r\\n\n"
              "    http-check expect rstatus 418\n"
              "    fullconn 1000000\n"
              "    option allbackups\n"
              "    timeout connect 5000\n"
              "    timeout server 50000\n"
              "    server sample_member_id_1 10.0.0.99:82 weight 13 check "
              "inter 30s fall 3 rise 2 cookie sample_member_id_1\n"
              "    server sample_member_id_2 10.0.0.98:82 weight 13 check "
              "inter 30s fall 3 rise 2 cookie sample_member_id_2\n\n"
              "backend sample_pool_id_2\n"
              "    mode http\n"
              "    balance roundrobin\n"
              "    cookie SRV insert indirect nocache\n"
              "    timeout check 31s\n"
              "    option httpchk GET /healthmon.html HTTP/1.0\\r\\n\n"
              "    http-check expect rstatus 418\n"
              "    fullconn 1000000\n"
              "    option allbackups\n"
              "    timeout connect 5000\n"
              "    timeout server 50000\n"
              "    server sample_member_id_3 10.0.0.97:82 weight 13 check "
              "inter 30s fall 3 rise 2 cookie sample_member_id_3\n\n")
        sample_listener = sample_configs_split.sample_listener_tuple(
            proto=constants.PROTOCOL_TERMINATED_HTTPS, l7=True,
            ssl_type_l7=True)
        rendered_obj = j_cfg.build_config(
            sample_configs_split.sample_amphora_tuple(),
            sample_listener,
            haproxy_versions=("1", "5", "18"))
        self.assertEqual(
            sample_configs_split.sample_base_expected_config(
                frontend=fe, backend=be),
            rendered_obj)

# File: octavia-6.2.2/octavia/tests/unit/common/jinja/logging/__init__.py (empty)

# File: octavia-6.2.2/octavia/tests/unit/common/jinja/logging/test_logging_jinja_cfg.py

# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
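# NOTE(editor): a minimal illustrative sketch, not part of the Octavia
# source. It shows the shape of the rsyslog "omfwd" forwarding clauses
# asserted in the tests below; the helper name and its parameters are
# assumptions made for illustration, while the real string is rendered by
# LoggingJinjaTemplater from a Jinja2 template.
def _sketch_omfwd_action(target, port, protocol="UDP", retry=5, interval=2,
                         queue_size=10000, failover=False):
    """Build one rsyslog action(...) clause for amphora log forwarding."""
    # A secondary target is chained to the primary with "&" and only fires
    # when the primary is suspended (execOnlyWhenPreviousIsSuspended).
    suffix = ('action.execOnlyWhenPreviousIsSuspended="on"' if failover
              else '')
    return ('action(type="omfwd" target="%s" port="%s" protocol="%s" '
            'action.resumeRetryCount="%s" action.resumeInterval="%s" '
            'queue.type="linkedList" queue.size="%s" %s)' %
            (target, port, protocol, retry, interval, queue_size, suffix))

# For example, _sketch_omfwd_action("192.0.2.7", 20514) reproduces the first
# tenant-log clause expected by test_build_agent_config below.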
from octavia_lib.common import constants as lib_consts
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture

from octavia.common.jinja.logging import logging_jinja_cfg
import octavia.tests.unit.base as base


class LoggingJinjaTestCase(base.TestCase):

    def test_build_agent_config(self):
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(debug=False)
        conf.config(
            group="amphora_agent",
            admin_log_targets='192.0.2.17:10514,192.51.100.4:10514')
        conf.config(
            group="amphora_agent",
            tenant_log_targets='192.0.2.7:20514,192.51.100.9:20514')
        conf.config(group="amphora_agent",
                    log_protocol=lib_consts.PROTOCOL_UDP)
        conf.config(group="amphora_agent", log_retry_count=5)
        conf.config(group="amphora_agent", log_retry_interval=2)
        conf.config(group="amphora_agent", log_queue_size=10000)

        lj = logging_jinja_cfg.LoggingJinjaTemplater()

        expected_config = (
            u'local0.=info action(type="omfwd" target="192.0.2.7" '
            'port="20514" protocol="UDP" action.resumeRetryCount="5" '
            'action.resumeInterval="2" queue.type="linkedList" '
            'queue.size="10000" )&action(type="omfwd" target="192.51.100.9" '
            'port="20514" protocol="UDP" action.resumeRetryCount="5" '
            'action.resumeInterval="2" queue.type="linkedList" '
            'queue.size="10000" action.execOnlyWhenPreviousIsSuspended="on")\n'
            'local1.* action(type="omfwd" target="192.0.2.17" port="10514" '
            'protocol="UDP" action.resumeRetryCount="5" '
            'action.resumeInterval="2" queue.type="linkedList" '
            'queue.size="10000" )&action(type="omfwd" target="192.51.100.4" '
            'port="10514" protocol="UDP" action.resumeRetryCount="5" '
            'action.resumeInterval="2" queue.type="linkedList" '
            'queue.size="10000" action.execOnlyWhenPreviousIsSuspended="on")')

        logging_cfg = lj.build_logging_config()

        self.assertEqual(expected_config, logging_cfg)

    def test_build_agent_config_disable_logs(self):
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(debug=False)
        conf.config(
            group="amphora_agent",
            disable_local_log_storage=True)

        lj = logging_jinja_cfg.LoggingJinjaTemplater()

        expected_config = (u'*.* stop')

        logging_cfg = lj.build_logging_config()

        self.assertEqual(expected_config, logging_cfg)

# File: octavia-6.2.2/octavia/tests/unit/common/jinja/lvs/__init__.py (empty)

# File: octavia-6.2.2/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py

# Copyright 2018 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
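# NOTE(editor): a minimal illustrative sketch, not part of the Octavia
# source. It mirrors the keepalived/LVS "real_server" stanza that the
# expected strings in the tests below encode; the helper and its parameters
# are assumptions for illustration only, and it omits details such as the
# "uthreshold" line the real template adds when a connection limit is set.
def _sketch_real_server(address, port, weight, check_script, timeout):
    """Render one pool member as a keepalived real_server block."""
    # MISC_CHECK shells out to Octavia's udp_check.sh with the member's
    # address and port, marking the member down if the probe times out.
    return ("    real_server %s %s {\n"
            "        weight %s\n"
            "        MISC_CHECK {\n"
            "            misc_path \"%s %s %s\"\n"
            "            misc_timeout %s\n"
            "        }\n"
            "    }\n" % (address, port, weight, check_script, address, port,
                         timeout))

# For example, _sketch_real_server('10.0.0.99', 82, 13,
# '/var/lib/octavia/lvs/check/udp_check.sh', 31) matches the first member
# block (minus uthreshold) in test_render_template_udp_source_ip below.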
from octavia.common import constants
from octavia.common.jinja.lvs import jinja_cfg
from octavia.tests.unit import base
from octavia.tests.unit.common.sample_configs import sample_configs_combined
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture

BASE_PATH = '/var/lib/octavia'


class TestLvsCfg(base.TestCase):
    def setUp(self):
        super(TestLvsCfg, self).setUp()
        self.udp_jinja_cfg = jinja_cfg.LvsJinjaTemplater()
        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="haproxy_amphora", base_path=BASE_PATH)

    def test_udp_get_template(self):
        template = self.udp_jinja_cfg._get_template()
        self.assertEqual('keepalivedlvs.cfg.j2', template.name)

    def test_render_template_udp_source_ip(self):
        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
               "# Configuration for Listener sample_listener_id_1\n\n"
               "net_namespace amphora-haproxy\n\n"
               "virtual_server 10.0.0.2 80 {\n"
               "    lb_algo wrr\n"
               "    lb_kind NAT\n"
               "    protocol UDP\n"
               "    persistence_timeout 33\n"
               "    persistence_granularity 255.255.0.0\n"
               "    delay_loop 30\n"
               "    delay_before_retry 30\n"
               "    retry 3\n\n\n"
               "    # Configuration for Pool sample_pool_id_1\n"
               "    # Configuration for HealthMonitor sample_monitor_id_1\n"
               "    # Configuration for Member sample_member_id_1\n"
               "    real_server 10.0.0.99 82 {\n"
               "        weight 13\n"
               "        uthreshold 98\n"
               "        MISC_CHECK {\n"
               "            misc_path \"/var/lib/octavia/lvs/check/"
               "udp_check.sh 10.0.0.99 82\"\n"
               "            misc_timeout 31\n"
               "        }\n"
               "    }\n\n"
               "    # Configuration for Member sample_member_id_2\n"
               "    real_server 10.0.0.98 82 {\n"
               "        weight 13\n"
               "        uthreshold 98\n"
               "        MISC_CHECK {\n"
               "            misc_path \"/var/lib/octavia/lvs/check/"
               "udp_check.sh 10.0.0.98 82\"\n"
               "            misc_timeout 31\n"
               "        }\n"
               "    }\n\n"
               "}\n\n")
        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
            sample_configs_combined.sample_listener_tuple(
                proto=constants.PROTOCOL_UDP,
                persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
                persistence_timeout=33,
                persistence_granularity='255.255.0.0',
                monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT,
                connection_limit=98))
        self.assertEqual(exp, rendered_obj)

    def test_render_template_udp_one_packet(self):
        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
               "# Configuration for Listener sample_listener_id_1\n\n"
               "net_namespace amphora-haproxy\n\n"
               "virtual_server 10.0.0.2 80 {\n"
               "    lb_algo wrr\n"
               "    lb_kind NAT\n"
               "    protocol UDP\n"
               "    delay_loop 30\n"
               "    delay_before_retry 30\n"
               "    retry 3\n\n\n"
               "    # Configuration for Pool sample_pool_id_1\n"
               "    # Configuration for HealthMonitor sample_monitor_id_1\n"
               "    # Configuration for Member sample_member_id_1\n"
               "    real_server 10.0.0.99 82 {\n"
               "        weight 13\n"
               "        uthreshold 98\n"
               "        MISC_CHECK {\n"
               "            misc_path \"/var/lib/octavia/lvs/check/"
               "udp_check.sh 10.0.0.99 82\"\n"
               "            misc_timeout 31\n"
               "        }\n"
               "    }\n\n"
               "    # Configuration for Member sample_member_id_2\n"
               "    real_server 10.0.0.98 82 {\n"
               "        weight 13\n"
               "        uthreshold 98\n"
               "        MISC_CHECK {\n"
               "            misc_path \"/var/lib/octavia/lvs/check/"
               "udp_check.sh 10.0.0.98 82\"\n"
               "            misc_timeout 31\n"
               "        }\n"
               "    }\n\n"
               "}\n\n")
        listener = sample_configs_combined.sample_listener_tuple(
            proto=constants.PROTOCOL_UDP,
            monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT,
            connection_limit=98,
            persistence=False)
        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(listener)
        self.assertEqual(exp, rendered_obj)

    def test_render_template_udp_with_health_monitor(self):
        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
               "# Configuration for Listener sample_listener_id_1\n\n"
               "net_namespace amphora-haproxy\n\n"
               "virtual_server 
10.0.0.2 80 {\n" " lb_algo wrr\n" " lb_kind NAT\n" " protocol UDP\n" " delay_loop 30\n" " delay_before_retry 30\n" " retry 3\n\n\n" " # Configuration for Pool sample_pool_id_1\n" " # Configuration for HealthMonitor sample_monitor_id_1\n" " # Configuration for Member sample_member_id_1\n" " real_server 10.0.0.99 82 {\n" " weight 13\n" " uthreshold 98\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.99 82\"\n" " misc_timeout 31\n" " }\n" " }\n\n" " # Configuration for Member sample_member_id_2\n" " real_server 10.0.0.98 82 {\n" " weight 13\n" " uthreshold 98\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.98 82\"\n" " misc_timeout 31\n" " }\n" " }\n\n" "}\n\n") rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_listener_tuple( proto=constants.PROTOCOL_UDP, monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, persistence=False, connection_limit=98)) self.assertEqual(exp, rendered_obj) def test_render_template_udp_with_health_monitor_ip_port(self): exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" "# Configuration for Listener sample_listener_id_1\n\n" "net_namespace amphora-haproxy\n\n" "virtual_server 10.0.0.2 80 {\n" " lb_algo wrr\n" " lb_kind NAT\n" " protocol UDP\n" " delay_loop 30\n" " delay_before_retry 30\n" " retry 3\n\n\n" " # Configuration for Pool sample_pool_id_1\n" " # Configuration for HealthMonitor sample_monitor_id_1\n" " # Configuration for Member sample_member_id_1\n" " real_server 10.0.0.99 82 {\n" " weight 13\n" " uthreshold 98\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 192.168.1.1 9000\"\n" " misc_timeout 31\n" " }\n" " }\n\n" " # Configuration for Member sample_member_id_2\n" " real_server 10.0.0.98 82 {\n" " weight 13\n" " uthreshold 98\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 192.168.1.1 9000\"\n" " misc_timeout 31\n" " }\n" " }\n\n" "}\n\n") rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_listener_tuple( proto=constants.PROTOCOL_UDP, monitor_ip_port=True, monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, persistence=False, connection_limit=98)) self.assertEqual(exp, rendered_obj) def test_render_template_udp_no_other_resources(self): exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" "# Configuration for Listener sample_listener_id_1\n\n" "net_namespace amphora-haproxy\n\n\n") rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_listener_tuple( proto=constants.PROTOCOL_UDP, monitor=False, persistence=False, alloc_default_pool=False)) self.assertEqual(exp, rendered_obj) def test_render_template_udp_with_pool_no_member(self): exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" "# Configuration for Listener sample_listener_id_1\n\n" "net_namespace amphora-haproxy\n\n" "virtual_server 10.0.0.2 80 {\n" " lb_algo wrr\n" " lb_kind NAT\n" " protocol UDP\n\n\n" " # Configuration for Pool sample_pool_id_0\n" "}\n\n") rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_listener_tuple( proto=constants.PROTOCOL_UDP, monitor=False, persistence=False, alloc_default_pool=True, sample_default_pool=0)) self.assertEqual(exp, rendered_obj) def test_render_template_udp_with_disabled_pool(self): exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" "# Configuration for Listener sample_listener_id_1\n\n" "net_namespace 
amphora-haproxy\n\n" "virtual_server 10.0.0.2 80 {\n" " lb_algo wrr\n" " lb_kind NAT\n" " protocol UDP\n\n\n" " # Pool sample_pool_id_1 is disabled\n" " # Configuration for Member sample_member_id_1\n" " real_server 10.0.0.99 82 {\n" " weight 13\n\n" " }\n\n" " # Configuration for Member sample_member_id_2\n" " real_server 10.0.0.98 82 {\n" " weight 13\n\n" " }\n\n" "}\n\n") rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_listener_tuple( proto=constants.PROTOCOL_UDP, monitor=False, persistence=False, alloc_default_pool=True, pool_enabled=False)) self.assertEqual(exp, rendered_obj) def test_udp_transform_session_persistence(self): persistence_src_ip = ( sample_configs_combined.sample_session_persistence_tuple( persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_cookie=None, persistence_timeout=33, persistence_granularity='255.0.0.0' )) exp = sample_configs_combined.UDP_SOURCE_IP_BODY ret = self.udp_jinja_cfg._transform_session_persistence( persistence_src_ip) self.assertEqual(exp, ret) def test_udp_transform_health_monitor(self): in_hm = sample_configs_combined.sample_health_monitor_tuple( proto=constants.HEALTH_MONITOR_UDP_CONNECT ) ret = self.udp_jinja_cfg._transform_health_monitor(in_hm) self.assertEqual(sample_configs_combined.RET_UDP_HEALTH_MONITOR, ret) def test_udp_transform_member(self): in_member = sample_configs_combined.sample_member_tuple( 'member_id_1', '192.0.2.10') ret = self.udp_jinja_cfg._transform_member(in_member) self.assertEqual(sample_configs_combined.RET_UDP_MEMBER, ret) in_member = sample_configs_combined.sample_member_tuple( 'member_id_1', '192.0.2.10', monitor_ip_port=True) ret = self.udp_jinja_cfg._transform_member(in_member) self.assertEqual( sample_configs_combined.RET_UDP_MEMBER_MONITOR_IP_PORT, ret) def test_udp_transform_pool(self): in_pool = sample_configs_combined.sample_pool_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, persistence_granularity='255.0.0.0', ) ret = self.udp_jinja_cfg._transform_pool(in_pool) self.assertEqual(sample_configs_combined.RET_UDP_POOL, ret) in_pool = sample_configs_combined.sample_pool_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, persistence_granularity='255.0.0.0', monitor=False) sample_configs_combined.RET_UDP_POOL['health_monitor'] = '' ret = self.udp_jinja_cfg._transform_pool(in_pool) self.assertEqual(sample_configs_combined.RET_UDP_POOL, ret) def test_udp_transform_listener(self): in_listener = sample_configs_combined.sample_listener_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, persistence_granularity='255.0.0.0', monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, connection_limit=98 ) ret = self.udp_jinja_cfg._transform_listener(in_listener) self.assertEqual(sample_configs_combined.RET_UDP_LISTENER, ret) in_listener = sample_configs_combined.sample_listener_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, persistence_granularity='255.0.0.0', monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, connection_limit=-1) ret = self.udp_jinja_cfg._transform_listener(in_listener) sample_configs_combined.RET_UDP_LISTENER.pop('connection_limit') self.assertEqual(sample_configs_combined.RET_UDP_LISTENER, ret) def test_render_template_udp_listener_with_http_health_monitor(self): exp = ("# 
Configuration for Loadbalancer sample_loadbalancer_id_1\n"
               "# Configuration for Listener sample_listener_id_1\n\n"
               "net_namespace amphora-haproxy\n\n"
               "virtual_server 10.0.0.2 80 {\n"
               "    lb_algo wrr\n"
               "    lb_kind NAT\n"
               "    protocol UDP\n"
               "    delay_loop 30\n"
               "    delay_before_retry 30\n"
               "    retry 3\n\n\n"
               "    # Configuration for Pool sample_pool_id_1\n"
               "    # Configuration for HealthMonitor sample_monitor_id_1\n"
               "    # Configuration for Member sample_member_id_1\n"
               "    real_server 10.0.0.99 82 {\n"
               "        weight 13\n"
               "        uthreshold 98\n"
               "        HTTP_GET {\n"
               "            url {\n"
               "                path /index.html\n"
               "                status_code 200\n"
               "            }\n"
               "            url {\n"
               "                path /index.html\n"
               "                status_code 201\n"
               "            }\n"
               "            connect_ip 10.0.0.99\n"
               "            connect_port 82\n"
               "            connect_timeout 31\n"
               "        }\n"
               "    }\n\n"
               "    # Configuration for Member sample_member_id_2\n"
               "    real_server 10.0.0.98 82 {\n"
               "        weight 13\n"
               "        uthreshold 98\n"
               "        HTTP_GET {\n"
               "            url {\n"
               "                path /index.html\n"
               "                status_code 200\n"
               "            }\n"
               "            url {\n"
               "                path /index.html\n"
               "                status_code 201\n"
               "            }\n"
               "            connect_ip 10.0.0.98\n"
               "            connect_port 82\n"
               "            connect_timeout 31\n"
               "        }\n"
               "    }\n\n"
               "}\n\n")
        listener = sample_configs_combined.sample_listener_tuple(
            proto=constants.PROTOCOL_UDP,
            monitor_proto=constants.HEALTH_MONITOR_HTTP,
            connection_limit=98,
            persistence=False,
            monitor_expected_codes='200-201')
        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(listener)
        self.assertEqual(exp, rendered_obj)

    def test_render_template_udp_listener_with_tcp_health_monitor(self):
        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
               "# Configuration for Listener sample_listener_id_1\n\n"
               "net_namespace amphora-haproxy\n\n"
               "virtual_server 10.0.0.2 80 {\n"
               "    lb_algo wrr\n"
               "    lb_kind NAT\n"
               "    protocol UDP\n"
               "    delay_loop 30\n"
               "    delay_before_retry 30\n"
               "    retry 3\n\n\n"
               "    # Configuration for Pool sample_pool_id_1\n"
               "    # Configuration for HealthMonitor sample_monitor_id_1\n"
               "    # Configuration for Member sample_member_id_1\n"
               "    real_server 10.0.0.99 82 {\n"
               "        weight 13\n"
               "        uthreshold 98\n"
               "        TCP_CHECK {\n"
               "            connect_ip 10.0.0.99\n"
               "            connect_port 82\n"
               "            connect_timeout 31\n"
               "        }\n"
               "    }\n\n"
               "    # Configuration for Member sample_member_id_2\n"
               "    real_server 10.0.0.98 82 {\n"
               "        weight 13\n"
               "        uthreshold 98\n"
               "        TCP_CHECK {\n"
               "            connect_ip 10.0.0.98\n"
               "            connect_port 82\n"
               "            connect_timeout 31\n"
               "        }\n"
               "    }\n\n"
               "}\n\n")
        listener = sample_configs_combined.sample_listener_tuple(
            proto=constants.PROTOCOL_UDP,
            monitor_proto=constants.HEALTH_MONITOR_TCP,
            connection_limit=98,
            persistence=False)
        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(listener)
        self.assertEqual(exp, rendered_obj)

    def test_render_template_disabled_udp_listener(self):
        exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n"
               "# Listener sample_listener_id_1 is disabled\n\n"
               "net_namespace amphora-haproxy\n\n")
        rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj(
            sample_configs_combined.sample_listener_tuple(
                enabled=False,
                proto=constants.PROTOCOL_UDP,
                persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
                persistence_timeout=33,
                persistence_granularity='255.255.0.0',
                monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT,
                connection_limit=98))
        self.assertEqual(exp, rendered_obj)

# File: octavia-6.2.2/octavia/tests/unit/common/jinja/test_user_data_jinja_cfg.py

# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from octavia.common.jinja import user_data_jinja_cfg
import octavia.tests.unit.base as base

TEST_CONFIG = ('[DEFAULT]\n'
               'debug = False\n'
               '[haproxy_amphora]\n'
               'base_cert_dir = /var/lib/octavia/certs\n')
EXPECTED_TEST_CONFIG = ('        [DEFAULT]\n'
                        '        debug = False\n'
                        '        [haproxy_amphora]\n'
                        '        base_cert_dir = /var/lib/octavia/certs\n\n')
BASE_CFG = ('#cloud-config\n'
            '# vim: syntax=yaml\n'
            '#\n'
            '# This configuration with take user-data dict and '
            'build a cloud-init\n'
            '# script utilizing the write_files module. '
            'The user-data dict should be a\n'
            '# Key Value pair where the Key is the path to store the '
            'file and the Value\n'
            '# is the data to store at that location\n'
            '#\n'
            '# Example:\n'
            '# {\'/root/path/to/file.cfg\': \'I\'m a file, '
            'write things in me\'}\n'
            'write_files:\n')
RUN_CMD = ('runcmd:\n'
           '- service amphora-agent restart')


class TestUserDataJinjaCfg(base.TestCase):

    def setUp(self):
        super(TestUserDataJinjaCfg, self).setUp()

    def test_build_user_data_config(self):
        udc = user_data_jinja_cfg.UserDataJinjaCfg()
        expected_config = (BASE_CFG + '- path: /test/config/path\n'
                           '  content: |\n' + EXPECTED_TEST_CONFIG + RUN_CMD)
        ud_cfg = udc.build_user_data_config({'/test/config/path':
                                             TEST_CONFIG})
        self.assertEqual(expected_config, ud_cfg)

# File: octavia-6.2.2/octavia/tests/unit/common/sample_configs/__init__.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# File: octavia-6.2.2/octavia/tests/unit/common/sample_configs/sample_configs_combined.py

# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. # import collections from oslo_config import cfg from octavia.common import constants from octavia.tests.common import sample_certs CONF = cfg.CONF def sample_amphora_tuple(id='sample_amphora_id_1', lb_network_ip='10.0.1.1', vrrp_ip='10.1.1.1', ha_ip='192.168.10.1', vrrp_port_id='1234', ha_port_id='1234', role=None, status='ACTIVE', vrrp_interface=None, vrrp_priority=None, api_version='1.0'): in_amphora = collections.namedtuple( 'amphora', 'id, lb_network_ip, vrrp_ip, ha_ip, vrrp_port_id, ' 'ha_port_id, role, status, vrrp_interface,' 'vrrp_priority, api_version') return in_amphora( id=id, lb_network_ip=lb_network_ip, vrrp_ip=vrrp_ip, ha_ip=ha_ip, vrrp_port_id=vrrp_port_id, ha_port_id=ha_port_id, role=role, status=status, vrrp_interface=vrrp_interface, vrrp_priority=vrrp_priority, api_version=api_version) RET_PERSISTENCE = { 'type': 'HTTP_COOKIE', 'cookie_name': None} RET_MONITOR_1 = { 'id': 'sample_monitor_id_1', 'type': 'HTTP', 'delay': 30, 'timeout': 31, 'fall_threshold': 3, 'rise_threshold': 2, 'http_method': 'GET', 'url_path': '/index.html', 'expected_codes': '418', 'enabled': True, 'http_version': 1.0, 'domain_name': None} RET_MONITOR_2 = { 'id': 'sample_monitor_id_2', 'type': 'HTTP', 'delay': 30, 'timeout': 31, 'fall_threshold': 3, 'rise_threshold': 2, 'http_method': 'GET', 'url_path': '/healthmon.html', 'expected_codes': '418', 'enabled': True, 'http_version': 1.0, 'domain_name': None} RET_MEMBER_1 = { 'id': 'sample_member_id_1', 'address': '10.0.0.99', 'protocol_port': 82, 'weight': 13, 'subnet_id': '10.0.0.1/24', 'enabled': True, 'operating_status': 'ACTIVE', 'monitor_address': None, 'monitor_port': None, 'backup': False} RET_MEMBER_2 = { 'id': 'sample_member_id_2', 'address': '10.0.0.98', 'protocol_port': 82, 'weight': 13, 'subnet_id': '10.0.0.1/24', 'enabled': True, 'operating_status': 'ACTIVE', 'monitor_address': None, 'monitor_port': None, 'backup': False} RET_MEMBER_3 = { 'id': 'sample_member_id_3', 'address': '10.0.0.97', 'protocol_port': 82, 'weight': 13, 'subnet_id': '10.0.0.1/24', 'enabled': True, 'operating_status': 'ACTIVE', 'monitor_address': None, 'monitor_port': None, 'backup': False} RET_POOL_1 = { 'id': 'sample_pool_id_1', 'protocol': 'http', 'proxy_protocol': False, 'lb_algorithm': 'roundrobin', 'members': [RET_MEMBER_1, RET_MEMBER_2], 'health_monitor': RET_MONITOR_1, 'session_persistence': RET_PERSISTENCE, 'enabled': True, 'operating_status': 'ACTIVE', 'stick_size': '10k', constants.HTTP_REUSE: False, 'ca_tls_path': '', 'crl_path': '', 'tls_enabled': False, } RET_POOL_2 = { 'id': 'sample_pool_id_2', 'protocol': 'http', 'proxy_protocol': False, 'lb_algorithm': 'roundrobin', 'members': [RET_MEMBER_3], 'health_monitor': RET_MONITOR_2, 'session_persistence': RET_PERSISTENCE, 'enabled': True, 'operating_status': 'ACTIVE', 'stick_size': '10k', constants.HTTP_REUSE: False, 'ca_tls_path': '', 'crl_path': '', 'tls_enabled': False, } RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem', 'primary_cn': 'FakeCn'} RET_SNI_CONT_1 = {'id': 'cont_id_2', 'allencompassingpem': 'imapem2', 'primary_cn': 'FakeCn'} RET_SNI_CONT_2 = {'id': 'cont_id_3', 'allencompassingpem': 'imapem3', 'primary_cn': 'FakeCn2'} RET_L7RULE_1 = { 'id': 'sample_l7rule_id_1', 'type': constants.L7RULE_TYPE_PATH, 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, 'key': None, 'value': '/api', 'invert': False, 'enabled': True} RET_L7RULE_2 = { 'id': 'sample_l7rule_id_2', 'type': 
constants.L7RULE_TYPE_HEADER, 'compare_type': constants.L7RULE_COMPARE_TYPE_CONTAINS, 'key': 'Some-header', 'value': 'This\\ string\\\\\\ with\\ stuff', 'invert': True, 'enabled': True} RET_L7RULE_3 = { 'id': 'sample_l7rule_id_3', 'type': constants.L7RULE_TYPE_COOKIE, 'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, 'key': 'some-cookie', 'value': 'this.*|that', 'invert': False, 'enabled': True} RET_L7RULE_4 = { 'id': 'sample_l7rule_id_4', 'type': constants.L7RULE_TYPE_FILE_TYPE, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'key': None, 'value': 'jpg', 'invert': False, 'enabled': True} RET_L7RULE_5 = { 'id': 'sample_l7rule_id_5', 'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 'key': None, 'value': '.example.com', 'invert': False, 'enabled': True} RET_L7RULE_6 = { 'id': 'sample_l7rule_id_6', 'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 'key': None, 'value': '.example.com', 'invert': False, 'enabled': False} RET_L7POLICY_1 = { 'id': 'sample_l7policy_id_1', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool': RET_POOL_2, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_1], 'redirect_http_code': None} RET_L7POLICY_2 = { 'id': 'sample_l7policy_id_2', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_pool': None, 'redirect_url': 'http://www.example.com', 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': 302} RET_L7POLICY_3 = { 'id': 'sample_l7policy_id_3', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_4, RET_L7RULE_5], 'redirect_http_code': None} RET_L7POLICY_4 = { 'id': 'sample_l7policy_id_4', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [], 'redirect_http_code': None} RET_L7POLICY_5 = { 'id': 'sample_l7policy_id_5', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': False, 'l7rules': [RET_L7RULE_5], 'redirect_http_code': None} RET_L7POLICY_6 = { 'id': 'sample_l7policy_id_6', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [], 'redirect_http_code': None} RET_L7POLICY_7 = { 'id': 'sample_l7policy_id_7', 'action': constants.L7POLICY_ACTION_REDIRECT_PREFIX, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': 'https://example.com', 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': 302} RET_L7POLICY_8 = { 'id': 'sample_l7policy_id_8', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_pool': None, 'redirect_url': 'http://www.example.com', 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': None} RET_LISTENER = { 'id': 'sample_listener_id_1', 'protocol_port': '80', 'protocol': 'HTTP', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ ' '%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ ' '%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc', 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}, 'timeout_client_data': 50000, 'timeout_member_connect': 
5000, 'timeout_member_data': 50000, 'timeout_tcp_inspect': 0, } RET_LISTENER_L7 = { 'id': 'sample_listener_id_1', 'protocol_port': '80', 'protocol': 'HTTP', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ ' '%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ ' '%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc', 'l7policies': [RET_L7POLICY_1, RET_L7POLICY_2, RET_L7POLICY_3, RET_L7POLICY_4, RET_L7POLICY_5, RET_L7POLICY_6, RET_L7POLICY_7], 'pools': [RET_POOL_1, RET_POOL_2], 'enabled': True, 'insert_headers': {}, 'timeout_client_data': 50000, 'timeout_member_connect': 5000, 'timeout_member_data': 50000, 'timeout_tcp_inspect': 0, } RET_LISTENER_TLS = { 'id': 'sample_listener_id_1', 'protocol_port': '443', 'protocol': 'TERMINATED_HTTPS', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'tls_certificate_id': 'cont_id_1', 'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem', 'default_tls_container': RET_DEF_TLS_CONT, 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}} RET_LISTENER_TLS_SNI = { 'id': 'sample_listener_id_1', 'protocol_port': '443', 'protocol': 'TERMINATED_HTTPS', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'tls_certificate_id': 'cont_id_1', 'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem', 'default_tls_container': RET_DEF_TLS_CONT, 'crt_dir': '/v2/sample_loadbalancer_id_1', 'sni_container_ids': ['cont_id_2', 'cont_id_3'], 'sni_containers': [RET_SNI_CONT_1, RET_SNI_CONT_2], 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}} RET_AMPHORA = { 'id': 'sample_amphora_id_1', 'lb_network_ip': '10.0.1.1', 'vrrp_ip': '10.1.1.1', 'ha_ip': '192.168.10.1', 'vrrp_port_id': '1234', 'ha_port_id': '1234', 'role': None, 'status': 'ACTIVE', 'vrrp_interface': None, 'vrrp_priority': None} RET_LB = { 'host_amphora': RET_AMPHORA, 'id': 'sample_loadbalancer_id_1', 'vip_address': '10.0.0.2', 'listeners': [RET_LISTENER], 'peer_port': 1024, 'topology': 'SINGLE', 'enabled': True, 'global_connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'amphorae': [sample_amphora_tuple()]} RET_LB_L7 = { 'host_amphora': RET_AMPHORA, 'id': 'sample_loadbalancer_id_1', 'vip_address': '10.0.0.2', 'listeners': [RET_LISTENER_L7], 'peer_port': 1024, 'topology': 'SINGLE', 'enabled': True, 'global_connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, 'amphorae': [sample_amphora_tuple()]} UDP_SOURCE_IP_BODY = { 'type': constants.SESSION_PERSISTENCE_SOURCE_IP, 'persistence_timeout': 33, 'persistence_granularity': '255.0.0.0' } RET_UDP_HEALTH_MONITOR = { 'id': 'sample_monitor_id_1', 'type': constants.HEALTH_MONITOR_UDP_CONNECT, 'delay': 30, 'timeout': 31, 'enabled': True, 'fall_threshold': 3, 'check_script_path': (CONF.haproxy_amphora.base_path + '/lvs/check/udp_check.sh') } UDP_HEALTH_MONITOR_NO_SCRIPT = { 'id': 'sample_monitor_id_1', 'check_script_path': None, 'delay': 30, 'enabled': True, 'fall_threshold': 3, 'timeout': 31, 'type': 'UDP' } RET_UDP_MEMBER = { 'id': 'member_id_1', 'address': '192.0.2.10', 'protocol_port': 82, 'weight': 13, 'enabled': True, 'monitor_address': None, 'monitor_port': None } RET_UDP_MEMBER_MONITOR_IP_PORT = { 'id': 'member_id_1', 'address': '192.0.2.10', 'protocol_port': 82, 'weight': 13, 'enabled': True, 'monitor_address': '192.168.1.1', 'monitor_port': 9000 } UDP_MEMBER_1 = { 'id': 'sample_member_id_1', 
'address': '10.0.0.99', 'enabled': True, 'protocol_port': 82, 'weight': 13, 'monitor_address': None, 'monitor_port': None, } UDP_MEMBER_2 = { 'id': 'sample_member_id_2', 'address': '10.0.0.98', 'enabled': True, 'protocol_port': 82, 'weight': 13, 'monitor_address': None, 'monitor_port': None } RET_UDP_POOL = { 'id': 'sample_pool_id_1', 'enabled': True, 'health_monitor': UDP_HEALTH_MONITOR_NO_SCRIPT, 'lb_algorithm': 'wrr', 'members': [UDP_MEMBER_1, UDP_MEMBER_2], 'protocol': 'udp', 'session_persistence': UDP_SOURCE_IP_BODY } RET_UDP_LISTENER = { 'connection_limit': 98, 'default_pool': { 'id': 'sample_pool_id_1', 'enabled': True, 'health_monitor': RET_UDP_HEALTH_MONITOR, 'lb_algorithm': 'wrr', 'members': [UDP_MEMBER_1, UDP_MEMBER_2], 'protocol': 'udp', 'session_persistence': UDP_SOURCE_IP_BODY }, 'enabled': True, 'id': 'sample_listener_id_1', 'protocol_mode': 'udp', 'protocol_port': '80' } def sample_listener_loadbalancer_tuple( proto=None, topology=None, enabled=True, pools=None): proto = 'HTTP' if proto is None else proto if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: more_amp = False topology = constants.TOPOLOGY_SINGLE in_lb = collections.namedtuple( 'load_balancer', 'id, name, protocol, vip, amphorae, topology, ' 'pools, listeners, enabled, project_id') return in_lb( id='sample_loadbalancer_id_1', name='test-lb', protocol=proto, vip=sample_vip_tuple(), amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), sample_amphora_tuple( id='sample_amphora_id_2', lb_network_ip='10.0.1.2', vrrp_ip='10.1.1.2', role=constants.ROLE_BACKUP)] if more_amp else [sample_amphora_tuple()], topology=topology, pools=pools or [], listeners=[], enabled=enabled, project_id='12345', ) def sample_lb_with_udp_listener_tuple( proto=None, topology=None, enabled=True, pools=None): proto = 'HTTP' if proto is None else proto if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: more_amp = False topology = constants.TOPOLOGY_SINGLE listeners = [sample_listener_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, persistence_granularity='255.255.0.0', monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)] in_lb = collections.namedtuple( 'load_balancer', 'id, name, protocol, vip, amphorae, topology, ' 'pools, enabled, project_id, listeners') return in_lb( id='sample_loadbalancer_id_1', name='test-lb', protocol=proto, vip=sample_vip_tuple(), amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), sample_amphora_tuple( id='sample_amphora_id_2', lb_network_ip='10.0.1.2', vrrp_ip='10.1.1.2', role=constants.ROLE_BACKUP)] if more_amp else [sample_amphora_tuple()], topology=topology, listeners=listeners, pools=pools or [], enabled=enabled, project_id='12345' ) def sample_vrrp_group_tuple(): in_vrrp_group = collections.namedtuple( 'vrrp_group', 'load_balancer_id, vrrp_auth_type, vrrp_auth_pass, ' 'advert_int, smtp_server, smtp_connect_timeout, ' 'vrrp_group_name') return in_vrrp_group( vrrp_group_name='sample_loadbalancer_id_1', load_balancer_id='sample_loadbalancer_id_1', vrrp_auth_type='PASS', vrrp_auth_pass='123', advert_int='1', smtp_server='', smtp_connect_timeout='') def sample_vip_tuple(): vip = collections.namedtuple('vip', 'ip_address') return vip(ip_address='10.0.0.2') def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True, persistence=True, persistence_type=None, persistence_cookie=None, persistence_timeout=None, persistence_granularity=None, tls=False, 
sni=False, peer_port=None, topology=None, l7=False, enabled=True, insert_headers=None, be_proto=None, monitor_ip_port=False, monitor_proto=None, monitor_expected_codes=None, backup_member=False, disabled_member=False, connection_limit=constants.DEFAULT_CONNECTION_LIMIT, timeout_client_data=50000, timeout_member_connect=5000, timeout_member_data=50000, timeout_tcp_inspect=0, client_ca_cert=False, client_crl_cert=False, ssl_type_l7=False, pool_cert=False, pool_ca_cert=False, pool_crl=False, tls_enabled=False, hm_host_http_check=False, id='sample_listener_id_1', recursive_nest=False, provisioning_status=constants.ACTIVE, tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, backend_tls_ciphers=None, sample_default_pool=1, pool_enabled=True): proto = 'HTTP' if proto is None else proto if be_proto is None: be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto if proto != constants.PROTOCOL_TERMINATED_HTTPS: tls_ciphers = None topology = 'SINGLE' if topology is None else topology port = '443' if proto in ['HTTPS', 'TERMINATED_HTTPS'] else '80' peer_port = 1024 if peer_port is None else peer_port insert_headers = insert_headers or {} in_listener = collections.namedtuple( 'listener', 'id, project_id, protocol_port, protocol, default_pool, ' 'connection_limit, tls_certificate_id, ' 'sni_container_ids, default_tls_container, ' 'sni_containers, load_balancer, peer_port, pools, ' 'l7policies, enabled, insert_headers, timeout_client_data,' 'timeout_member_connect, timeout_member_data, ' 'timeout_tcp_inspect, client_ca_tls_certificate_id, ' 'client_ca_tls_certificate, client_authentication, ' 'client_crl_container_id, provisioning_status, ' 'tls_ciphers') if l7: pools = [ sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, listener_id='sample_listener_id_1', tls_ciphers=backend_tls_ciphers, enabled=pool_enabled), sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, sample_pool=2, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, listener_id='sample_listener_id_1', tls_ciphers=backend_tls_ciphers, enabled=pool_enabled)] l7policies = [ sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1), sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2), sample_l7policy_tuple('sample_l7policy_id_3', sample_policy=3), sample_l7policy_tuple('sample_l7policy_id_4', sample_policy=4), sample_l7policy_tuple('sample_l7policy_id_5', sample_policy=5), sample_l7policy_tuple('sample_l7policy_id_6', sample_policy=6), sample_l7policy_tuple('sample_l7policy_id_7', sample_policy=7)] if ssl_type_l7: l7policies.append(sample_l7policy_tuple( 'sample_l7policy_id_8', sample_policy=8)) else: pools = [ sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, backup_member=backup_member, disabled_member=disabled_member, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, 
listener_id='sample_listener_id_1', tls_ciphers=backend_tls_ciphers, enabled=pool_enabled)] l7policies = [] listener = in_listener( id=id, project_id='12345', protocol_port=port, protocol=proto, load_balancer=sample_listener_loadbalancer_tuple( proto=proto, topology=topology, pools=pools), peer_port=peer_port, default_pool=sample_pool_tuple( listener_id='sample_listener_id_1', proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, persistence_timeout=persistence_timeout, persistence_granularity=persistence_granularity, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, monitor_expected_codes=monitor_expected_codes, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check, sample_pool=sample_default_pool, enabled=pool_enabled ) if alloc_default_pool else '', connection_limit=connection_limit, tls_certificate_id='cont_id_1' if tls else '', sni_container_ids=['cont_id_2', 'cont_id_3'] if sni else [], default_tls_container=sample_tls_container_tuple( id='cont_id_1', certificate=sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN ) if tls else '', sni_containers=[ sample_tls_sni_container_tuple( tls_container_id='cont_id_2', tls_container=sample_tls_container_tuple( id='cont_id_2', certificate=sample_certs.X509_CERT_2, private_key=sample_certs.X509_CERT_KEY_2, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN_2)), sample_tls_sni_container_tuple( tls_container_id='cont_id_3', tls_container=sample_tls_container_tuple( id='cont_id_3', certificate=sample_certs.X509_CERT_3, private_key=sample_certs.X509_CERT_KEY_3, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN_3))] if sni else [], pools=pools, l7policies=l7policies, enabled=enabled, insert_headers=insert_headers, timeout_client_data=timeout_client_data, timeout_member_connect=timeout_member_connect, timeout_member_data=timeout_member_data, timeout_tcp_inspect=timeout_tcp_inspect, client_ca_tls_certificate_id='cont_id_ca' if client_ca_cert else '', client_ca_tls_certificate=sample_tls_container_tuple( id='cont_id_ca', certificate=sample_certs.X509_CA_CERT, primary_cn=sample_certs.X509_CA_CERT_CN ) if client_ca_cert else '', client_authentication=( constants.CLIENT_AUTH_MANDATORY if client_ca_cert else constants.CLIENT_AUTH_NONE), client_crl_container_id='cont_id_crl' if client_crl_cert else '', provisioning_status=provisioning_status, tls_ciphers=tls_ciphers ) if recursive_nest: listener.load_balancer.listeners.append(listener) return listener def sample_tls_sni_container_tuple(tls_container_id=None, tls_container=None): sc = collections.namedtuple('sni_container', 'tls_container_id, ' 'tls_container') return sc(tls_container_id=tls_container_id, tls_container=tls_container) def sample_tls_sni_containers_tuple(tls_container_id=None, tls_container=None): sc = collections.namedtuple('sni_containers', 'tls_container_id, ' 'tls_container') return [sc(tls_container_id=tls_container_id, tls_container=tls_container)] def sample_tls_container_tuple(id='cont_id_1', certificate=None, private_key=None, intermediates=None, primary_cn=None): sc = collections.namedtuple( 'tls_container', 'id, certificate, private_key, intermediates, primary_cn') return sc(id=id, certificate=certificate, private_key=private_key, intermediates=intermediates or [], 
primary_cn=primary_cn) def sample_pool_tuple(listener_id=None, proto=None, monitor=True, persistence=True, persistence_type=None, persistence_cookie=None, persistence_timeout=None, persistence_granularity=None, sample_pool=1, monitor_ip_port=False, monitor_proto=None, monitor_expected_codes=None, backup_member=False, disabled_member=False, has_http_reuse=True, pool_cert=False, pool_ca_cert=False, pool_crl=False, tls_enabled=False, hm_host_http_check=False, provisioning_status=constants.ACTIVE, tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, enabled=True): proto = 'HTTP' if proto is None else proto if not tls_enabled: tls_ciphers = None monitor_proto = proto if monitor_proto is None else monitor_proto in_pool = collections.namedtuple( 'pool', 'id, protocol, lb_algorithm, members, health_monitor, ' 'session_persistence, enabled, operating_status, ' 'tls_certificate_id, ca_tls_certificate_id, ' 'crl_container_id, tls_enabled, tls_ciphers, ' 'provisioning_status, ' + constants.HTTP_REUSE) if (proto == constants.PROTOCOL_UDP and persistence_type == constants.SESSION_PERSISTENCE_SOURCE_IP): kwargs = {'persistence_type': persistence_type, 'persistence_timeout': persistence_timeout, 'persistence_granularity': persistence_granularity} else: kwargs = {'persistence_type': persistence_type, 'persistence_cookie': persistence_cookie} persis = sample_session_persistence_tuple(**kwargs) mon = None if sample_pool == 0: id = 'sample_pool_id_0' members = [] if monitor is True: mon = sample_health_monitor_tuple( proto=monitor_proto, host_http_check=hm_host_http_check, expected_codes=monitor_expected_codes) elif sample_pool == 1: id = 'sample_pool_id_1' members = [sample_member_tuple('sample_member_id_1', '10.0.0.99', monitor_ip_port=monitor_ip_port), sample_member_tuple('sample_member_id_2', '10.0.0.98', monitor_ip_port=monitor_ip_port, backup=backup_member, enabled=not disabled_member)] if monitor is True: mon = sample_health_monitor_tuple( proto=monitor_proto, host_http_check=hm_host_http_check, expected_codes=monitor_expected_codes) elif sample_pool == 2: id = 'sample_pool_id_2' members = [sample_member_tuple('sample_member_id_3', '10.0.0.97', monitor_ip_port=monitor_ip_port)] if monitor is True: mon = sample_health_monitor_tuple( proto=monitor_proto, sample_hm=2, host_http_check=hm_host_http_check, expected_codes=monitor_expected_codes) return in_pool( id=id, protocol=proto, lb_algorithm='ROUND_ROBIN', members=members, health_monitor=mon, session_persistence=persis if persistence is True else None, enabled=enabled, operating_status='ACTIVE', has_http_reuse=has_http_reuse, tls_certificate_id='pool_cont_1' if pool_cert else None, ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None, crl_container_id='pool_crl' if pool_crl else None, tls_enabled=tls_enabled, tls_ciphers=tls_ciphers, provisioning_status=provisioning_status) def sample_member_tuple(id, ip, enabled=True, operating_status='ACTIVE', provisioning_status=constants.ACTIVE, monitor_ip_port=False, backup=False): in_member = collections.namedtuple('member', 'id, ip_address, protocol_port, ' 'weight, subnet_id, ' 'enabled, operating_status, ' 'monitor_address, monitor_port, ' 'backup, provisioning_status') monitor_address = '192.168.1.1' if monitor_ip_port else None monitor_port = 9000 if monitor_ip_port else None return in_member( id=id, ip_address=ip, protocol_port=82, weight=13, subnet_id='10.0.0.1/24', enabled=enabled, operating_status=operating_status, monitor_address=monitor_address, monitor_port=monitor_port, backup=backup, 
provisioning_status=provisioning_status) def sample_session_persistence_tuple(persistence_type=None, persistence_cookie=None, persistence_timeout=None, persistence_granularity=None): spersistence = collections.namedtuple('SessionPersistence', 'type, cookie_name, ' 'persistence_timeout, ' 'persistence_granularity') pt = 'HTTP_COOKIE' if persistence_type is None else persistence_type return spersistence(type=pt, cookie_name=persistence_cookie, persistence_timeout=persistence_timeout, persistence_granularity=persistence_granularity) def sample_health_monitor_tuple(proto='HTTP', sample_hm=1, host_http_check=False, expected_codes=None, provisioning_status=constants.ACTIVE): proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto monitor = collections.namedtuple( 'monitor', 'id, type, delay, timeout, fall_threshold, rise_threshold,' 'http_method, url_path, expected_codes, enabled, ' 'check_script_path, http_version, domain_name, ' 'provisioning_status') if sample_hm == 1: id = 'sample_monitor_id_1' url_path = '/index.html' elif sample_hm == 2: id = 'sample_monitor_id_2' url_path = '/healthmon.html' kwargs = { 'id': id, 'type': proto, 'delay': 30, 'timeout': 31, 'fall_threshold': 3, 'rise_threshold': 2, 'http_method': 'GET', 'url_path': url_path, 'expected_codes': '418', 'enabled': True, 'provisioning_status': provisioning_status, } if host_http_check: kwargs.update({'http_version': 1.1, 'domain_name': 'testlab.com'}) else: kwargs.update({'http_version': 1.0, 'domain_name': None}) if expected_codes: kwargs.update({'expected_codes': expected_codes}) if proto == constants.HEALTH_MONITOR_UDP_CONNECT: kwargs['check_script_path'] = (CONF.haproxy_amphora.base_path + 'lvs/check/' + 'udp_check.sh') else: kwargs['check_script_path'] = None return monitor(**kwargs) def sample_l7policy_tuple(id, action=constants.L7POLICY_ACTION_REJECT, redirect_pool=None, redirect_url=None, redirect_prefix=None, enabled=True, redirect_http_code=302, sample_policy=1, provisioning_status=constants.ACTIVE): in_l7policy = collections.namedtuple('l7policy', 'id, action, redirect_pool, ' 'redirect_url, redirect_prefix, ' 'l7rules, enabled,' 'redirect_http_code,' 'provisioning_status') l7rules = [] if sample_policy == 1: action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL redirect_pool = sample_pool_tuple(sample_pool=2) l7rules = [sample_l7rule_tuple('sample_l7rule_id_1')] elif sample_policy == 2: action = constants.L7POLICY_ACTION_REDIRECT_TO_URL redirect_url = 'http://www.example.com' l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2), sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)] elif sample_policy == 3: action = constants.L7POLICY_ACTION_REJECT l7rules = [sample_l7rule_tuple('sample_l7rule_id_4', sample_rule=4), sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)] elif sample_policy == 4: action = constants.L7POLICY_ACTION_REJECT elif sample_policy == 5: action = constants.L7POLICY_ACTION_REJECT enabled = False l7rules = [sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)] elif sample_policy == 6: action = constants.L7POLICY_ACTION_REJECT l7rules = [sample_l7rule_tuple('sample_l7rule_id_6', sample_rule=6)] elif sample_policy == 7: action = constants.L7POLICY_ACTION_REDIRECT_PREFIX redirect_prefix = 'https://example.com' l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2), sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)] elif sample_policy == 8: action = constants.L7POLICY_ACTION_REDIRECT_TO_URL redirect_url = 'http://www.ssl-type-l7rule-test.com' l7rules = 
[sample_l7rule_tuple('sample_l7rule_id_7', sample_rule=7), sample_l7rule_tuple('sample_l7rule_id_8', sample_rule=8), sample_l7rule_tuple('sample_l7rule_id_9', sample_rule=9), sample_l7rule_tuple('sample_l7rule_id_10', sample_rule=10), sample_l7rule_tuple('sample_l7rule_id_11', sample_rule=11)] return in_l7policy( id=id, action=action, redirect_pool=redirect_pool, redirect_url=redirect_url, redirect_prefix=redirect_prefix, l7rules=l7rules, enabled=enabled, redirect_http_code=redirect_http_code if (action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL, constants.L7POLICY_ACTION_REDIRECT_PREFIX] and redirect_http_code) else None, provisioning_status=provisioning_status) def sample_l7rule_tuple(id, type=constants.L7RULE_TYPE_PATH, compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, key=None, value='/api', invert=False, enabled=True, sample_rule=1, provisioning_status=constants.ACTIVE): in_l7rule = collections.namedtuple('l7rule', 'id, type, compare_type, ' 'key, value, invert, enabled,' 'provisioning_status') if sample_rule == 2: type = constants.L7RULE_TYPE_HEADER compare_type = constants.L7RULE_COMPARE_TYPE_CONTAINS key = 'Some-header' value = 'This string\\ with stuff' invert = True enabled = True if sample_rule == 3: type = constants.L7RULE_TYPE_COOKIE compare_type = constants.L7RULE_COMPARE_TYPE_REGEX key = 'some-cookie' value = 'this.*|that' invert = False enabled = True if sample_rule == 4: type = constants.L7RULE_TYPE_FILE_TYPE compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO key = None value = 'jpg' invert = False enabled = True if sample_rule == 5: type = constants.L7RULE_TYPE_HOST_NAME compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH key = None value = '.example.com' invert = False enabled = True if sample_rule == 6: type = constants.L7RULE_TYPE_HOST_NAME compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH key = None value = '.example.com' invert = False enabled = False if sample_rule == 7: type = constants.L7RULE_TYPE_SSL_CONN_HAS_CERT compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO key = None value = 'tRuE' invert = False enabled = True if sample_rule == 8: type = constants.L7RULE_TYPE_SSL_VERIFY_RESULT compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO key = None value = '1' invert = True enabled = True if sample_rule == 9: type = constants.L7RULE_TYPE_SSL_DN_FIELD compare_type = constants.L7RULE_COMPARE_TYPE_REGEX key = 'STREET' value = r'^STREET.*NO\.$' invert = True enabled = True if sample_rule == 10: type = constants.L7RULE_TYPE_SSL_DN_FIELD compare_type = constants.L7RULE_COMPARE_TYPE_STARTS_WITH key = 'OU-3' value = 'Orgnization Bala' invert = True enabled = True return in_l7rule( id=id, type=type, compare_type=compare_type, key=key, value=value, invert=invert, enabled=enabled, provisioning_status=provisioning_status) def sample_base_expected_config(frontend=None, logging=None, backend=None, peers=None, global_opts=None, defaults=None): if frontend is None: frontend = ("frontend sample_listener_id_1\n" " maxconn {maxconn}\n" " bind 10.0.0.2:80\n" " mode http\n" " default_backend sample_pool_id_1:sample_listener_id_1" "\n" " timeout client 50000\n").format( maxconn=constants.HAPROXY_DEFAULT_MAXCONN) if logging is None: logging = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " "%ci\\ %cp\\ %t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ " "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " "%tsc\n\n") if backend is None: backend = ("backend sample_pool_id_1:sample_listener_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect 
nocache\n"
                   " timeout check 31s\n"
                   " option httpchk GET /index.html HTTP/1.0\\r\\n\n"
                   " http-check expect rstatus 418\n"
                   " fullconn {maxconn}\n"
                   " option allbackups\n"
                   " timeout connect 5000\n"
                   " timeout server 50000\n"
                   " server sample_member_id_1 10.0.0.99:82 weight 13 "
                   "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n"
                   " server sample_member_id_2 10.0.0.98:82 weight 13 "
                   "check inter 30s fall 3 rise 2 cookie sample_member_id_2\n"
                   "\n").format(maxconn=constants.HAPROXY_DEFAULT_MAXCONN)
    if peers is None:
        peers = "\n\n"
    if global_opts is None:
        global_opts = " maxconn {maxconn}\n\n".format(
            maxconn=constants.HAPROXY_DEFAULT_MAXCONN)
    if defaults is None:
        defaults = ("defaults\n"
                    " log global\n"
                    " retries 3\n"
                    " option redispatch\n"
                    " option splice-request\n"
                    " option splice-response\n"
                    " option http-keep-alive\n\n")
    return ("# Configuration for loadbalancer sample_loadbalancer_id_1\n"
            "global\n"
            " daemon\n"
            " user nobody\n"
            " log /run/rsyslog/octavia/log local0\n"
            " log /run/rsyslog/octavia/log local1 notice\n"
            " stats socket /var/lib/octavia/sample_loadbalancer_id_1.sock"
            " mode 0666 level user\n" +
            global_opts + defaults + peers + frontend + logging + backend)
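

# NOTE(editor): illustrative sketch only, not part of the original module.
# It shows how the two halves of this helper file are meant to be paired in
# the haproxy jinja template tests: sample_listener_tuple() builds the input
# object graph and sample_base_expected_config() builds the haproxy text the
# templater should render for it. The function name below is made up; the
# real tests that consume these helpers live elsewhere in the tree.
def _example_expected_config_pair():
    # Default sample: a plain HTTP listener on a SINGLE-topology load
    # balancer with one round-robin pool of two members and an HTTP monitor.
    listener = sample_listener_tuple()
    # The matching haproxy config text (global/defaults/frontend/backend).
    expected = sample_base_expected_config()
    return listener, expected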
octavia-6.2.2/octavia/tests/unit/common/sample_configs/sample_configs_split.py

# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import collections

from oslo_config import cfg

from octavia.common import constants
from octavia.tests.common import sample_certs

CONF = cfg.CONF


def sample_amphora_tuple(id='sample_amphora_id_1', lb_network_ip='10.0.1.1',
                         vrrp_ip='10.1.1.1', ha_ip='192.168.10.1',
                         vrrp_port_id='1234', ha_port_id='1234', role=None,
                         status='ACTIVE', vrrp_interface=None,
                         vrrp_priority=None, api_version='0.5'):
    in_amphora = collections.namedtuple(
        'amphora', 'id, lb_network_ip, vrrp_ip, ha_ip, vrrp_port_id, '
                   'ha_port_id, role, status, vrrp_interface,'
                   'vrrp_priority, api_version')
    return in_amphora(
        id=id,
        lb_network_ip=lb_network_ip,
        vrrp_ip=vrrp_ip,
        ha_ip=ha_ip,
        vrrp_port_id=vrrp_port_id,
        ha_port_id=ha_port_id,
        role=role,
        status=status,
        vrrp_interface=vrrp_interface,
        vrrp_priority=vrrp_priority,
        api_version=api_version)


RET_PERSISTENCE = {
    'type': 'HTTP_COOKIE',
    'cookie_name': None}

RET_MONITOR_1 = {
    'id': 'sample_monitor_id_1',
    'type': 'HTTP',
    'delay': 30,
    'timeout': 31,
    'fall_threshold': 3,
    'rise_threshold': 2,
    'http_method': 'GET',
    'url_path': '/index.html',
    'expected_codes': '418',
    'enabled': True,
    'http_version': 1.0,
    'domain_name': None}

RET_MONITOR_2 = {
    'id': 'sample_monitor_id_2',
    'type': 'HTTP',
    'delay': 30,
    'timeout': 31,
    'fall_threshold': 3,
    'rise_threshold': 2,
    'http_method': 'GET',
    'url_path': '/healthmon.html',
    'expected_codes': '418',
    'enabled': True,
    'http_version': 1.0,
    'domain_name': None}

RET_MEMBER_1 = {
    'id': 'sample_member_id_1',
    'address': '10.0.0.99',
    'protocol_port': 82,
    'weight': 13,
    'subnet_id': '10.0.0.1/24',
    'enabled': True,
    'operating_status': 'ACTIVE',
    'monitor_address': None,
    'monitor_port': None,
    'backup': False}

RET_MEMBER_2 = {
    'id': 'sample_member_id_2',
    'address': '10.0.0.98',
    'protocol_port': 82,
    'weight': 13,
    'subnet_id': '10.0.0.1/24',
    'enabled': True,
    'operating_status': 'ACTIVE',
    'monitor_address': None,
    'monitor_port': None,
    'backup': False}

RET_MEMBER_3 = {
    'id': 'sample_member_id_3',
    'address': '10.0.0.97',
    'protocol_port': 82,
    'weight': 13,
    'subnet_id': '10.0.0.1/24',
    'enabled': True,
    'operating_status': 'ACTIVE',
    'monitor_address': None,
    'monitor_port': None,
    'backup': False}

RET_POOL_1 = {
    'id': 'sample_pool_id_1',
    'protocol': 'http',
    'lb_algorithm': 'roundrobin',
    'members': [RET_MEMBER_1, RET_MEMBER_2],
    'health_monitor': RET_MONITOR_1,
    'session_persistence': RET_PERSISTENCE,
    'enabled': True,
    'operating_status': 'ACTIVE',
    'stick_size': '10k',
    constants.HTTP_REUSE: False,
    'ca_tls_path': '',
    'crl_path': '',
    'tls_enabled': False}

RET_POOL_2 = {
    'id': 'sample_pool_id_2',
    'protocol': 'http',
    'lb_algorithm': 'roundrobin',
    'members': [RET_MEMBER_3],
    'health_monitor': RET_MONITOR_2,
    'session_persistence': RET_PERSISTENCE,
    'enabled': True,
    'operating_status': 'ACTIVE',
    'stick_size': '10k',
    constants.HTTP_REUSE: False,
    'ca_tls_path': '',
    'crl_path': '',
    'tls_enabled': False}

RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem',
                    'primary_cn': 'FakeCn'}
RET_SNI_CONT_1 = {'id': 'cont_id_2', 'allencompassingpem': 'imapem2',
                  'primary_cn': 'FakeCn'}
RET_SNI_CONT_2 = {'id': 'cont_id_3', 'allencompassingpem': 'imapem3',
                  'primary_cn': 'FakeCn2'}

RET_L7RULE_1 = {
    'id': 'sample_l7rule_id_1',
    'type': constants.L7RULE_TYPE_PATH,
    'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
    'key': None,
    'value': '/api',
    'invert': False,
    'enabled': True}

RET_L7RULE_2 = {
    'id': 'sample_l7rule_id_2',
    'type': constants.L7RULE_TYPE_HEADER,
    'compare_type': constants.L7RULE_COMPARE_TYPE_CONTAINS,
    'key': 'Some-header',
    'value': 'This\\ string\\\\\\ with\\ stuff',
    'invert':
True, 'enabled': True} RET_L7RULE_3 = { 'id': 'sample_l7rule_id_3', 'type': constants.L7RULE_TYPE_COOKIE, 'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, 'key': 'some-cookie', 'value': 'this.*|that', 'invert': False, 'enabled': True} RET_L7RULE_4 = { 'id': 'sample_l7rule_id_4', 'type': constants.L7RULE_TYPE_FILE_TYPE, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'key': None, 'value': 'jpg', 'invert': False, 'enabled': True} RET_L7RULE_5 = { 'id': 'sample_l7rule_id_5', 'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 'key': None, 'value': '.example.com', 'invert': False, 'enabled': True} RET_L7RULE_6 = { 'id': 'sample_l7rule_id_6', 'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, 'key': None, 'value': '.example.com', 'invert': False, 'enabled': False} RET_L7POLICY_1 = { 'id': 'sample_l7policy_id_1', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool': RET_POOL_2, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_1], 'redirect_http_code': None} RET_L7POLICY_2 = { 'id': 'sample_l7policy_id_2', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_pool': None, 'redirect_url': 'http://www.example.com', 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': 302} RET_L7POLICY_3 = { 'id': 'sample_l7policy_id_3', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_4, RET_L7RULE_5], 'redirect_http_code': None} RET_L7POLICY_4 = { 'id': 'sample_l7policy_id_4', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [], 'redirect_http_code': None} RET_L7POLICY_5 = { 'id': 'sample_l7policy_id_5', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': False, 'l7rules': [RET_L7RULE_5], 'redirect_http_code': None} RET_L7POLICY_6 = { 'id': 'sample_l7policy_id_6', 'action': constants.L7POLICY_ACTION_REJECT, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': None, 'enabled': True, 'l7rules': [], 'redirect_http_code': None} RET_L7POLICY_7 = { 'id': 'sample_l7policy_id_7', 'action': constants.L7POLICY_ACTION_REDIRECT_PREFIX, 'redirect_pool': None, 'redirect_url': None, 'redirect_prefix': 'https://example.com', 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': 302} RET_L7POLICY_8 = { 'id': 'sample_l7policy_id_8', 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_pool': None, 'redirect_url': 'http://www.example.com', 'redirect_prefix': None, 'enabled': True, 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], 'redirect_http_code': None} RET_LISTENER = { 'id': 'sample_listener_id_1', 'protocol_port': '80', 'protocol': 'HTTP', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_MAX_MAXCONN, 'amphorae': [sample_amphora_tuple()], 'peer_port': 1024, 'topology': 'SINGLE', 'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ ' '%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ ' '%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc', 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}, 'timeout_client_data': 50000, 'timeout_member_connect': 5000, 'timeout_member_data': 50000, 'timeout_tcp_inspect': 0, } RET_LISTENER_L7 = { 'id': 
'sample_listener_id_1', 'protocol_port': '80', 'protocol': 'HTTP', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_MAX_MAXCONN, 'amphorae': [sample_amphora_tuple()], 'peer_port': 1024, 'topology': 'SINGLE', 'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ ' '%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ ' '%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc', 'pools': [RET_POOL_1, RET_POOL_2], 'l7policies': [RET_L7POLICY_1, RET_L7POLICY_2, RET_L7POLICY_3, RET_L7POLICY_4, RET_L7POLICY_5, RET_L7POLICY_6, RET_L7POLICY_7], 'enabled': True, 'insert_headers': {}, 'timeout_client_data': 50000, 'timeout_member_connect': 5000, 'timeout_member_data': 50000, 'timeout_tcp_inspect': 0, } RET_LISTENER_TLS = { 'id': 'sample_listener_id_1', 'protocol_port': '443', 'protocol': 'TERMINATED_HTTPS', 'protocol_mode': 'http', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_MAX_MAXCONN, 'tls_certificate_id': 'cont_id_1', 'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem', 'default_tls_container': RET_DEF_TLS_CONT, 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}} RET_LISTENER_TLS_SNI = { 'id': 'sample_listener_id_1', 'protocol_port': '443', 'protocol': 'TERMINATED_HTTPS', 'default_pool': RET_POOL_1, 'connection_limit': constants.HAPROXY_MAX_MAXCONN, 'tls_certificate_id': 'cont_id_1', 'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem', 'default_tls_container': RET_DEF_TLS_CONT, 'crt_dir': '/v2/sample_loadbalancer_id_1', 'sni_container_ids': ['cont_id_2', 'cont_id_3'], 'sni_containers': [RET_SNI_CONT_1, RET_SNI_CONT_2], 'pools': [RET_POOL_1], 'l7policies': [], 'enabled': True, 'insert_headers': {}} RET_AMPHORA = { 'id': 'sample_amphora_id_1', 'lb_network_ip': '10.0.1.1', 'vrrp_ip': '10.1.1.1', 'ha_ip': '192.168.10.1', 'vrrp_port_id': '1234', 'ha_port_id': '1234', 'role': None, 'status': 'ACTIVE', 'vrrp_interface': None, 'vrrp_priority': None} RET_LB = { 'host_amphora': RET_AMPHORA, 'id': 'sample_loadbalancer_id_1', 'vip_address': '10.0.0.2', 'listener': RET_LISTENER, 'topology': 'SINGLE', 'enabled': True, 'global_connection_limit': constants.HAPROXY_MAX_MAXCONN} RET_LB_L7 = { 'host_amphora': RET_AMPHORA, 'id': 'sample_loadbalancer_id_1', 'vip_address': '10.0.0.2', 'listener': RET_LISTENER_L7, 'topology': 'SINGLE', 'enabled': True, 'global_connection_limit': constants.HAPROXY_MAX_MAXCONN} UDP_SOURCE_IP_BODY = { 'type': constants.SESSION_PERSISTENCE_SOURCE_IP, 'persistence_timeout': 33, 'persistence_granularity': '255.0.0.0' } RET_UDP_HEALTH_MONITOR = { 'id': 'sample_monitor_id_1', 'type': constants.HEALTH_MONITOR_UDP_CONNECT, 'delay': 30, 'timeout': 31, 'enabled': True, 'fall_threshold': 3, 'check_script_path': (CONF.haproxy_amphora.base_path + '/lvs/check/udp_check.sh') } UDP_HEALTH_MONITOR_NO_SCRIPT = { 'id': 'sample_monitor_id_1', 'check_script_path': None, 'delay': 30, 'enabled': True, 'fall_threshold': 3, 'timeout': 31, 'type': 'UDP' } RET_UDP_MEMBER = { 'id': 'member_id_1', 'address': '192.0.2.10', 'protocol_port': 82, 'weight': 13, 'enabled': True, 'monitor_address': None, 'monitor_port': None } RET_UDP_MEMBER_MONITOR_IP_PORT = { 'id': 'member_id_1', 'address': '192.0.2.10', 'protocol_port': 82, 'weight': 13, 'enabled': True, 'monitor_address': '192.168.1.1', 'monitor_port': 9000 } UDP_MEMBER_1 = { 'id': 'sample_member_id_1', 'address': '10.0.0.99', 'enabled': True, 'protocol_port': 82, 'weight': 13, 'monitor_address': None, 'monitor_port': None } UDP_MEMBER_2 = { 'id': 
'sample_member_id_2', 'address': '10.0.0.98', 'enabled': True, 'protocol_port': 82, 'weight': 13, 'monitor_address': None, 'monitor_port': None } RET_UDP_POOL = { 'id': 'sample_pool_id_1', 'enabled': True, 'health_monitor': UDP_HEALTH_MONITOR_NO_SCRIPT, 'lb_algorithm': 'wrr', 'members': [UDP_MEMBER_1, UDP_MEMBER_2], 'protocol': 'udp', 'session_persistence': UDP_SOURCE_IP_BODY } RET_UDP_LISTENER = { 'connection_limit': 98, 'default_pool': { 'id': 'sample_pool_id_1', 'enabled': True, 'health_monitor': RET_UDP_HEALTH_MONITOR, 'lb_algorithm': 'rr', 'members': [UDP_MEMBER_1, UDP_MEMBER_2], 'protocol': 'udp', 'session_persistence': UDP_SOURCE_IP_BODY }, 'enabled': True, 'id': 'sample_listener_id_1', 'protocol_mode': 'udp', 'protocol_port': '80' } def sample_loadbalancer_tuple(proto=None, monitor=True, persistence=True, persistence_type=None, tls=False, sni=False, topology=None, l7=False, enabled=True): proto = 'HTTP' if proto is None else proto topology = 'SINGLE' if topology is None else topology in_lb = collections.namedtuple( 'load_balancer', 'id, name, protocol, vip, listeners, amphorae,' ' enabled') return in_lb( id='sample_loadbalancer_id_1', name='test-lb', protocol=proto, vip=sample_vip_tuple(), topology=topology, listeners=[sample_listener_tuple(proto=proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, tls=tls, sni=sni, l7=l7, enabled=enabled)], enabled=enabled ) def sample_listener_loadbalancer_tuple(proto=None, topology=None, enabled=True): proto = 'HTTP' if proto is None else proto if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: more_amp = False topology = constants.TOPOLOGY_SINGLE in_lb = collections.namedtuple( 'load_balancer', 'id, name, protocol, vip, amphorae, topology, ' 'listeners, enabled, project_id') return in_lb( id='sample_loadbalancer_id_1', name='test-lb', protocol=proto, vip=sample_vip_tuple(), amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), sample_amphora_tuple( id='sample_amphora_id_2', lb_network_ip='10.0.1.2', vrrp_ip='10.1.1.2', role=constants.ROLE_BACKUP)] if more_amp else [sample_amphora_tuple()], topology=topology, listeners=[], enabled=enabled, project_id='12345' ) def sample_lb_with_udp_listener_tuple( proto=None, topology=None, enabled=True, pools=None): proto = 'HTTP' if proto is None else proto if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: more_amp = True else: more_amp = False topology = constants.TOPOLOGY_SINGLE listeners = [sample_listener_tuple( proto=constants.PROTOCOL_UDP, persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, persistence_timeout=33, persistence_granularity='255.255.0.0', monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)] in_lb = collections.namedtuple( 'load_balancer', 'id, name, protocol, vip, amphorae, topology, ' 'pools, enabled, project_id, listeners') return in_lb( id='sample_loadbalancer_id_1', name='test-lb', protocol=proto, vip=sample_vip_tuple(), amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), sample_amphora_tuple( id='sample_amphora_id_2', lb_network_ip='10.0.1.2', vrrp_ip='10.1.1.2', role=constants.ROLE_BACKUP)] if more_amp else [sample_amphora_tuple()], topology=topology, listeners=listeners, pools=pools or [], enabled=enabled, project_id='12345' ) def sample_vrrp_group_tuple(): in_vrrp_group = collections.namedtuple( 'vrrp_group', 'load_balancer_id, vrrp_auth_type, vrrp_auth_pass, ' 'advert_int, smtp_server, smtp_connect_timeout, ' 'vrrp_group_name') return in_vrrp_group( 
vrrp_group_name='sample_loadbalancer_id_1', load_balancer_id='sample_loadbalancer_id_1', vrrp_auth_type='PASS', vrrp_auth_pass='123', advert_int='1', smtp_server='', smtp_connect_timeout='') def sample_vip_tuple(): vip = collections.namedtuple('vip', 'ip_address') return vip(ip_address='10.0.0.2') def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True, persistence=True, persistence_type=None, persistence_cookie=None, persistence_timeout=None, persistence_granularity=None, tls=False, sni=False, peer_port=None, topology=None, l7=False, enabled=True, insert_headers=None, be_proto=None, monitor_ip_port=False, monitor_proto=None, monitor_expected_codes=None, backup_member=False, disabled_member=False, connection_limit=-1, timeout_client_data=50000, timeout_member_connect=5000, timeout_member_data=50000, timeout_tcp_inspect=0, client_ca_cert=False, client_crl_cert=False, ssl_type_l7=False, pool_cert=False, pool_ca_cert=False, pool_crl=False, tls_enabled=False, hm_host_http_check=False, id='sample_listener_id_1', recursive_nest=False, provisioning_status=constants.ACTIVE): proto = 'HTTP' if proto is None else proto if be_proto is None: be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto topology = 'SINGLE' if topology is None else topology port = '443' if proto in ['HTTPS', 'TERMINATED_HTTPS'] else '80' peer_port = 1024 if peer_port is None else peer_port insert_headers = insert_headers or {} in_listener = collections.namedtuple( 'listener', 'id, project_id, protocol_port, protocol, default_pool, ' 'connection_limit, tls_certificate_id, ' 'sni_container_ids, default_tls_container, ' 'sni_containers, load_balancer, peer_port, pools, ' 'l7policies, enabled, insert_headers, timeout_client_data,' 'timeout_member_connect, timeout_member_data, ' 'timeout_tcp_inspect, client_ca_tls_certificate_id, ' 'client_ca_tls_certificate, client_authentication, ' 'client_crl_container_id, provisioning_status') if l7: pools = [ sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check), sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, sample_pool=2, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check)] l7policies = [ sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1), sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2), sample_l7policy_tuple('sample_l7policy_id_3', sample_policy=3), sample_l7policy_tuple('sample_l7policy_id_4', sample_policy=4), sample_l7policy_tuple('sample_l7policy_id_5', sample_policy=5), sample_l7policy_tuple('sample_l7policy_id_6', sample_policy=6), sample_l7policy_tuple('sample_l7policy_id_7', sample_policy=7)] if ssl_type_l7: l7policies.append(sample_l7policy_tuple( 'sample_l7policy_id_8', sample_policy=8)) else: pools = [ sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, backup_member=backup_member, disabled_member=disabled_member, pool_cert=pool_cert, 
pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check)] l7policies = [] listener = in_listener( id=id, project_id='12345', protocol_port=port, protocol=proto, load_balancer=sample_listener_loadbalancer_tuple(proto=proto, topology=topology), peer_port=peer_port, default_pool=sample_pool_tuple( proto=be_proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, persistence_cookie=persistence_cookie, persistence_timeout=persistence_timeout, persistence_granularity=persistence_granularity, monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, monitor_expected_codes=monitor_expected_codes, pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, pool_crl=pool_crl, tls_enabled=tls_enabled, hm_host_http_check=hm_host_http_check ) if alloc_default_pool else '', connection_limit=connection_limit, tls_certificate_id='cont_id_1' if tls else '', sni_container_ids=['cont_id_2', 'cont_id_3'] if sni else [], default_tls_container=sample_tls_container_tuple( id='cont_id_1', certificate=sample_certs.X509_CERT, private_key=sample_certs.X509_CERT_KEY, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN ) if tls else '', sni_containers=[ sample_tls_sni_container_tuple( tls_container_id='cont_id_2', tls_container=sample_tls_container_tuple( id='cont_id_2', certificate=sample_certs.X509_CERT_2, private_key=sample_certs.X509_CERT_KEY_2, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN_2)), sample_tls_sni_container_tuple( tls_container_id='cont_id_3', tls_container=sample_tls_container_tuple( id='cont_id_3', certificate=sample_certs.X509_CERT_3, private_key=sample_certs.X509_CERT_KEY_3, intermediates=sample_certs.X509_IMDS_LIST, primary_cn=sample_certs.X509_CERT_CN_3))] if sni else [], pools=pools, l7policies=l7policies, enabled=enabled, insert_headers=insert_headers, timeout_client_data=timeout_client_data, timeout_member_connect=timeout_member_connect, timeout_member_data=timeout_member_data, timeout_tcp_inspect=timeout_tcp_inspect, client_ca_tls_certificate_id='cont_id_ca' if client_ca_cert else '', client_ca_tls_certificate=sample_tls_container_tuple( id='cont_id_ca', certificate=sample_certs.X509_CA_CERT, primary_cn=sample_certs.X509_CA_CERT_CN ) if client_ca_cert else '', client_authentication=( constants.CLIENT_AUTH_MANDATORY if client_ca_cert else constants.CLIENT_AUTH_NONE), client_crl_container_id='cont_id_crl' if client_crl_cert else '', provisioning_status=provisioning_status, ) if recursive_nest: listener.load_balancer.listeners.append(listener) return listener def sample_tls_sni_container_tuple(tls_container_id=None, tls_container=None): sc = collections.namedtuple('sni_container', 'tls_container_id, ' 'tls_container') return sc(tls_container_id=tls_container_id, tls_container=tls_container) def sample_tls_sni_containers_tuple(tls_container_id=None, tls_container=None): sc = collections.namedtuple('sni_containers', 'tls_container_id, ' 'tls_container') return [sc(tls_container_id=tls_container_id, tls_container=tls_container)] def sample_tls_container_tuple(id='cont_id_1', certificate=None, private_key=None, intermediates=None, primary_cn=None): sc = collections.namedtuple( 'tls_container', 'id, certificate, private_key, intermediates, primary_cn') return sc(id=id, certificate=certificate, private_key=private_key, intermediates=intermediates or [], primary_cn=primary_cn) def sample_pool_tuple(proto=None, monitor=True, persistence=True, persistence_type=None, 
persistence_cookie=None, persistence_timeout=None, persistence_granularity=None, sample_pool=1, monitor_ip_port=False, monitor_proto=None, monitor_expected_codes=None, backup_member=False, disabled_member=False, has_http_reuse=True, pool_cert=False, pool_ca_cert=False, pool_crl=False, tls_enabled=False, hm_host_http_check=False, provisioning_status=constants.ACTIVE): proto = 'HTTP' if proto is None else proto monitor_proto = proto if monitor_proto is None else monitor_proto in_pool = collections.namedtuple( 'pool', 'id, protocol, lb_algorithm, members, health_monitor, ' 'session_persistence, enabled, operating_status, ' 'tls_certificate_id, ca_tls_certificate_id, ' 'crl_container_id, tls_enabled, provisioning_status, ' + constants.HTTP_REUSE) if (proto == constants.PROTOCOL_UDP and persistence_type == constants.SESSION_PERSISTENCE_SOURCE_IP): kwargs = {'persistence_type': persistence_type, 'persistence_timeout': persistence_timeout, 'persistence_granularity': persistence_granularity} else: kwargs = {'persistence_type': persistence_type, 'persistence_cookie': persistence_cookie} persis = sample_session_persistence_tuple(**kwargs) mon = None if sample_pool == 1: id = 'sample_pool_id_1' members = [sample_member_tuple('sample_member_id_1', '10.0.0.99', monitor_ip_port=monitor_ip_port), sample_member_tuple('sample_member_id_2', '10.0.0.98', monitor_ip_port=monitor_ip_port, backup=backup_member, enabled=not disabled_member)] if monitor is True: mon = sample_health_monitor_tuple( proto=monitor_proto, host_http_check=hm_host_http_check, expected_codes=monitor_expected_codes) elif sample_pool == 2: id = 'sample_pool_id_2' members = [sample_member_tuple('sample_member_id_3', '10.0.0.97', monitor_ip_port=monitor_ip_port)] if monitor is True: mon = sample_health_monitor_tuple( proto=monitor_proto, sample_hm=2, host_http_check=hm_host_http_check, expected_codes=monitor_expected_codes) return in_pool( id=id, protocol=proto, lb_algorithm='ROUND_ROBIN', members=members, health_monitor=mon, session_persistence=persis if persistence is True else None, enabled=True, operating_status='ACTIVE', has_http_reuse=has_http_reuse, tls_certificate_id='pool_cont_1' if pool_cert else None, ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None, crl_container_id='pool_crl' if pool_crl else None, tls_enabled=tls_enabled, provisioning_status=provisioning_status) def sample_member_tuple(id, ip, enabled=True, operating_status=constants.ACTIVE, provisioning_status=constants.ACTIVE, monitor_ip_port=False, backup=False): in_member = collections.namedtuple('member', 'id, ip_address, protocol_port, ' 'weight, subnet_id, ' 'enabled, operating_status, ' 'monitor_address, monitor_port, ' 'backup, provisioning_status') monitor_address = '192.168.1.1' if monitor_ip_port else None monitor_port = 9000 if monitor_ip_port else None return in_member( id=id, ip_address=ip, protocol_port=82, weight=13, subnet_id='10.0.0.1/24', enabled=enabled, operating_status=operating_status, monitor_address=monitor_address, monitor_port=monitor_port, backup=backup, provisioning_status=provisioning_status) def sample_session_persistence_tuple(persistence_type=None, persistence_cookie=None, persistence_timeout=None, persistence_granularity=None): spersistence = collections.namedtuple('SessionPersistence', 'type, cookie_name, ' 'persistence_timeout, ' 'persistence_granularity') pt = 'HTTP_COOKIE' if persistence_type is None else persistence_type return spersistence(type=pt, cookie_name=persistence_cookie, persistence_timeout=persistence_timeout, 
persistence_granularity=persistence_granularity) def sample_health_monitor_tuple(proto='HTTP', sample_hm=1, host_http_check=False, expected_codes=None, provisioning_status=constants.ACTIVE): proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto monitor = collections.namedtuple( 'monitor', 'id, type, delay, timeout, fall_threshold, rise_threshold,' 'http_method, url_path, expected_codes, enabled, ' 'check_script_path, http_version, domain_name, ' 'provisioning_status') if sample_hm == 1: id = 'sample_monitor_id_1' url_path = '/index.html' elif sample_hm == 2: id = 'sample_monitor_id_2' url_path = '/healthmon.html' kwargs = { 'id': id, 'type': proto, 'delay': 30, 'timeout': 31, 'fall_threshold': 3, 'rise_threshold': 2, 'http_method': 'GET', 'url_path': url_path, 'expected_codes': '418', 'enabled': True, 'provisioning_status': provisioning_status, } if host_http_check: kwargs.update({'http_version': 1.1, 'domain_name': 'testlab.com'}) else: kwargs.update({'http_version': 1.0, 'domain_name': None}) if expected_codes: kwargs.update({'expected_codes': expected_codes}) if proto == constants.HEALTH_MONITOR_UDP_CONNECT: kwargs['check_script_path'] = (CONF.haproxy_amphora.base_path + 'lvs/check/' + 'udp_check.sh') else: kwargs['check_script_path'] = None return monitor(**kwargs) def sample_l7policy_tuple(id, action=constants.L7POLICY_ACTION_REJECT, redirect_pool=None, redirect_url=None, redirect_prefix=None, enabled=True, redirect_http_code=302, sample_policy=1, provisioning_status=constants.ACTIVE): in_l7policy = collections.namedtuple('l7policy', 'id, action, redirect_pool, ' 'redirect_url, redirect_prefix, ' 'l7rules, enabled, ' 'redirect_http_code, ' 'provisioning_status') l7rules = [] if sample_policy == 1: action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL redirect_pool = sample_pool_tuple(sample_pool=2) l7rules = [sample_l7rule_tuple('sample_l7rule_id_1')] elif sample_policy == 2: action = constants.L7POLICY_ACTION_REDIRECT_TO_URL redirect_url = 'http://www.example.com' l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2), sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)] elif sample_policy == 3: action = constants.L7POLICY_ACTION_REJECT l7rules = [sample_l7rule_tuple('sample_l7rule_id_4', sample_rule=4), sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)] elif sample_policy == 4: action = constants.L7POLICY_ACTION_REJECT elif sample_policy == 5: action = constants.L7POLICY_ACTION_REJECT enabled = False l7rules = [sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)] elif sample_policy == 6: action = constants.L7POLICY_ACTION_REJECT l7rules = [sample_l7rule_tuple('sample_l7rule_id_6', sample_rule=6)] elif sample_policy == 7: action = constants.L7POLICY_ACTION_REDIRECT_PREFIX redirect_prefix = 'https://example.com' l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2), sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)] elif sample_policy == 8: action = constants.L7POLICY_ACTION_REDIRECT_TO_URL redirect_url = 'http://www.ssl-type-l7rule-test.com' l7rules = [sample_l7rule_tuple('sample_l7rule_id_7', sample_rule=7), sample_l7rule_tuple('sample_l7rule_id_8', sample_rule=8), sample_l7rule_tuple('sample_l7rule_id_9', sample_rule=9), sample_l7rule_tuple('sample_l7rule_id_10', sample_rule=10), sample_l7rule_tuple('sample_l7rule_id_11', sample_rule=11)] return in_l7policy( id=id, action=action, redirect_pool=redirect_pool, redirect_url=redirect_url, redirect_prefix=redirect_prefix, l7rules=l7rules, enabled=enabled, 
        redirect_http_code=redirect_http_code
        if (action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL,
                       constants.L7POLICY_ACTION_REDIRECT_PREFIX] and
            redirect_http_code) else None,
        provisioning_status=provisioning_status)


def sample_l7rule_tuple(id,
                        type=constants.L7RULE_TYPE_PATH,
                        compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
                        key=None,
                        value='/api',
                        invert=False,
                        enabled=True,
                        sample_rule=1,
                        provisioning_status=constants.ACTIVE):
    in_l7rule = collections.namedtuple('l7rule',
                                       'id, type, compare_type, '
                                       'key, value, invert, enabled, '
                                       'provisioning_status')
    if sample_rule == 2:
        type = constants.L7RULE_TYPE_HEADER
        compare_type = constants.L7RULE_COMPARE_TYPE_CONTAINS
        key = 'Some-header'
        value = 'This string\\ with stuff'
        invert = True
        enabled = True
    if sample_rule == 3:
        type = constants.L7RULE_TYPE_COOKIE
        compare_type = constants.L7RULE_COMPARE_TYPE_REGEX
        key = 'some-cookie'
        value = 'this.*|that'
        invert = False
        enabled = True
    if sample_rule == 4:
        type = constants.L7RULE_TYPE_FILE_TYPE
        compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO
        key = None
        value = 'jpg'
        invert = False
        enabled = True
    if sample_rule == 5:
        type = constants.L7RULE_TYPE_HOST_NAME
        compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH
        key = None
        value = '.example.com'
        invert = False
        enabled = True
    if sample_rule == 6:
        type = constants.L7RULE_TYPE_HOST_NAME
        compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH
        key = None
        value = '.example.com'
        invert = False
        enabled = False
    if sample_rule == 7:
        type = constants.L7RULE_TYPE_SSL_CONN_HAS_CERT
        compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO
        key = None
        value = 'tRuE'
        invert = False
        enabled = True
    if sample_rule == 8:
        type = constants.L7RULE_TYPE_SSL_VERIFY_RESULT
        compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO
        key = None
        value = '1'
        invert = True
        enabled = True
    if sample_rule == 9:
        type = constants.L7RULE_TYPE_SSL_DN_FIELD
        compare_type = constants.L7RULE_COMPARE_TYPE_REGEX
        key = 'STREET'
        value = r'^STREET.*NO\.$'
        invert = True
        enabled = True
    if sample_rule == 10:
        type = constants.L7RULE_TYPE_SSL_DN_FIELD
        compare_type = constants.L7RULE_COMPARE_TYPE_STARTS_WITH
        key = 'OU-3'
        value = 'Orgnization Bala'
        invert = True
        enabled = True
    return in_l7rule(
        id=id, type=type, compare_type=compare_type, key=key,
        value=value, invert=invert, enabled=enabled,
        provisioning_status=provisioning_status)


def sample_base_expected_config(frontend=None, logging=None, backend=None,
                                peers=None, global_opts=None, defaults=None):
    if frontend is None:
        frontend = ("frontend sample_listener_id_1\n"
                    "    maxconn {maxconn}\n"
                    "    bind 10.0.0.2:80\n"
                    "    mode http\n"
                    "    default_backend sample_pool_id_1\n"
                    "    timeout client 50000\n").format(
            maxconn=constants.HAPROXY_MAX_MAXCONN)
    if logging is None:
        logging = ("    log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ "
                   "%ci\\ %cp\\ %t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ "
                   "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ "
                   "%tsc\n\n")
    if backend is None:
        backend = ("backend sample_pool_id_1\n"
                   "    mode http\n"
                   "    balance roundrobin\n"
                   "    cookie SRV insert indirect nocache\n"
                   "    timeout check 31s\n"
                   "    option httpchk GET /index.html HTTP/1.0\\r\\n\n"
                   "    http-check expect rstatus 418\n"
                   "    fullconn {maxconn}\n"
                   "    option allbackups\n"
                   "    timeout connect 5000\n"
                   "    timeout server 50000\n"
                   "    server sample_member_id_1 10.0.0.99:82 weight 13 "
                   "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n"
                   "    server sample_member_id_2 10.0.0.98:82 weight 13 "
                   "check inter 30s fall 3 rise 2 cookie sample_member_id_2\n"
                   "\n").format(maxconn=constants.HAPROXY_MAX_MAXCONN)
    if peers is None:
        peers = "\n\n"
    if global_opts is None:
        global_opts = "    maxconn {maxconn}\n\n".format(
            maxconn=constants.HAPROXY_MAX_MAXCONN)
    if defaults is None:
        defaults = ("defaults\n"
                    "    log global\n"
                    "    retries 3\n"
                    "    option redispatch\n"
                    "    option splice-request\n"
                    "    option splice-response\n"
                    "    option http-keep-alive\n\n")
    return ("# Configuration for loadbalancer sample_loadbalancer_id_1\n"
            "global\n"
            "    daemon\n"
            "    user nobody\n"
            "    log /run/rsyslog/octavia/log local0\n"
            "    log /run/rsyslog/octavia/log local1 notice\n"
            "    stats socket /var/lib/octavia/sample_listener_id_1.sock"
            " mode 0666 level user\n" +
            global_opts + defaults + peers + frontend + logging + backend)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/octavia/tests/unit/common/sample_configs/sample_pkcs12.p120000664000175000017500000001331100000000000026211 0ustar00zuulzuul00000000000000
[binary PKCS#12 bundle; non-text contents omitted]
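Before the archive moves on: the fixture helpers in sample_configs_combined.py above are designed to compose, with a pool tuple nesting member, health-monitor, and session-persistence tuples, and sample_base_expected_config() producing the haproxy text that renderer tests compare against. A minimal, self-contained sketch of that composition follows; it is not part of the tarball, assumes an importable octavia tree, and the proto/monitor/persistence keyword names come from the part of sample_pool_tuple's signature that precedes this excerpt.

# Illustrative sketch only; exercises the fixture helpers defined above.
from octavia.tests.unit.common.sample_configs import sample_configs_combined

pool = sample_configs_combined.sample_pool_tuple(
    proto='HTTP', monitor=True, persistence=True, sample_pool=1)
assert pool.id == 'sample_pool_id_1'
assert [m.id for m in pool.members] == ['sample_member_id_1',
                                        'sample_member_id_2']
assert pool.health_monitor.url_path == '/index.html'
assert pool.session_persistence.type == 'HTTP_COOKIE'

expected = sample_configs_combined.sample_base_expected_config()
assert expected.startswith(
    '# Configuration for loadbalancer sample_loadbalancer_id_1')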
[binary residue removed; the tar entry boundary here, apparently the start of octavia-6.2.2/octavia/volume/drivers/cinder_driver.py with its license header and the beginning of create_volume_from_image(), was corrupted in the dump. The recovered text resumes mid-function:]

                >= CONF.cinder.volume_create_timeout:
                    LOG.error('Timed out waiting to create cinder volume %s',
                              instance_volume.id)
                    instance_volume.delete()
                    raise cinder_exceptions.TimeoutException(
                        obj=volume,
                        action=constants.CINDER_ACTION_CREATE_VOLUME)
        return volume.id

    def delete_volume(self, volume_id):
        """Delete a cinder volume

        :param volume_id: ID of amphora boot volume
        """
        LOG.debug('Deleting cinder volume %s', volume_id)
        try:
            instance_volume = self.manager.get(volume_id)
            try:
                instance_volume.delete()
                LOG.debug("Deleted volume %s", volume_id)
            except Exception:
                LOG.exception("Error deleting cinder volume %s", volume_id)
                raise exceptions.VolumeDeleteException()
        except cinder_exceptions.NotFound:
            LOG.warning("Volume %s not found: assuming already deleted",
                        volume_id)

    def get_image_from_volume(self, volume_id):
        """Get glance image from volume

        :param volume_id: ID of amphora boot volume
        :return image id
        """
        image_id = None
        LOG.debug('Get glance image for volume %s', volume_id)
        try:
            instance_volume = self.manager.get(volume_id)
        except cinder_exceptions.NotFound:
            LOG.exception("Volume %s not found", volume_id)
            raise exceptions.VolumeGetException()
        if hasattr(instance_volume, 'volume_image_metadata'):
            image_id = instance_volume.volume_image_metadata.get("image_id")
        else:
            LOG.error("Volume %s has no image metadata", volume_id)
            image_id = None
        return image_id
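The truncated opening above is the tail of a poll-until-available loop: the driver periodically refreshes the volume status and, once the elapsed time reaches CONF.cinder.volume_create_timeout, logs the failure, deletes the half-built volume, and raises. A generic, self-contained sketch of that pattern follows; wait_for_available, get_status, CREATE_TIMEOUT, and POLL_INTERVAL are illustrative stand-ins, not octavia or cinderclient APIs.

# Generic poll-until-ready sketch; all names here are illustrative stand-ins.
import time

CREATE_TIMEOUT = 300   # seconds; stand-in for volume_create_timeout
POLL_INTERVAL = 5      # seconds; stand-in for the driver's retry interval


def wait_for_available(get_status):
    """Poll get_status() until it returns 'available' or time runs out."""
    start_time = time.time()
    while True:
        if get_status() == 'available':
            return
        if time.time() - start_time >= CREATE_TIMEOUT:
            # Mirrors the driver fragment above: give up and surface an error.
            raise TimeoutError('volume did not become available in time')
        time.sleep(POLL_INTERVAL)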
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4342167
octavia-6.2.2/octavia/volume/drivers/noop_driver/0000775000175000017500000000000000000000000022133 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/octavia/volume/drivers/noop_driver/__init__.py0000664000175000017500000000000000000000000024232 0ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/octavia/volume/drivers/noop_driver/driver.py0000664000175000017500000000416100000000000024002 0ustar00zuulzuul00000000000000
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_log import log as logging
from oslo_utils import uuidutils

from octavia.volume import volume_base as driver_base

LOG = logging.getLogger(__name__)


class NoopManager(object):
    def __init__(self):
        super(NoopManager, self).__init__()
        self.volumeconfig = {}

    def create_volume_from_image(self, image_id):
        LOG.debug("Volume %s no-op, image id %s",
                  self.__class__.__name__, image_id)
        self.volumeconfig[image_id] = (image_id, 'create_volume_from_image')
        volume_id = uuidutils.generate_uuid()
        return volume_id

    def delete_volume(self, volume_id):
        LOG.debug("Volume %s no-op, volume id %s",
                  self.__class__.__name__, volume_id)
        self.volumeconfig[volume_id] = (volume_id, 'delete')

    def get_image_from_volume(self, volume_id):
        LOG.debug("Volume %s no-op, volume id %s",
                  self.__class__.__name__, volume_id)
        self.volumeconfig[volume_id] = (volume_id, 'get_image_from_volume')
        image_id = uuidutils.generate_uuid()
        return image_id


class NoopVolumeDriver(driver_base.VolumeBase):
    def __init__(self):
        super(NoopVolumeDriver, self).__init__()
        self.driver = NoopManager()

    def create_volume_from_image(self, image_id):
        volume_id = self.driver.create_volume_from_image(image_id)
        return volume_id

    def delete_volume(self, volume_id):
        self.driver.delete_volume(volume_id)

    def get_image_from_volume(self, volume_id):
        image_id = self.driver.get_image_from_volume(volume_id)
        return image_id
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/octavia/volume/volume_base.py0000664000175000017500000000230300000000000021000 0ustar00zuulzuul00000000000000
# Copyright 2011-2019 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import abc


class VolumeBase(object, metaclass=abc.ABCMeta):

    @abc.abstractmethod
    def create_volume_from_image(self, image_id):
        """Create volume for instance

        :param image_id: ID of amphora image
        :return volume id
        """

    @abc.abstractmethod
    def delete_volume(self, volume_id):
        """Delete volume

        :param volume_id: ID of amphora volume
        """

    @abc.abstractmethod
    def get_image_from_volume(self, volume_id):
        """Get glance image from volume

        :param volume_id: ID of amphora volume
        :return image id
        """
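VolumeBase above fixes the three-call contract every volume driver implements, and the no-op driver is the stock in-tree example. As a further illustration, here is a minimal sketch, not shipped in the tarball, of a hypothetical custom driver satisfying the same contract; InMemoryVolumeDriver and its dict-backed behaviour are invented for this example.

# Hypothetical driver, for illustration only; mirrors the VolumeBase contract.
from oslo_utils import uuidutils

from octavia.volume import volume_base


class InMemoryVolumeDriver(volume_base.VolumeBase):
    """Toy driver that tracks volume-to-image mappings in a dict."""

    def __init__(self):
        super(InMemoryVolumeDriver, self).__init__()
        self._volumes = {}

    def create_volume_from_image(self, image_id):
        volume_id = uuidutils.generate_uuid()
        self._volumes[volume_id] = image_id
        return volume_id

    def delete_volume(self, volume_id):
        self._volumes.pop(volume_id, None)

    def get_image_from_volume(self, volume_id):
        return self._volumes.get(volume_id)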
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3702166
octavia-6.2.2/octavia.egg-info/0000775000175000017500000000000000000000000016312 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052691.0
octavia-6.2.2/octavia.egg-info/PKG-INFO0000664000175000017500000000640300000000000017412 0ustar00zuulzuul00000000000000
Metadata-Version: 1.2
Name: octavia
Version: 6.2.2
Summary: OpenStack Octavia Scalable Load Balancer as a Service
Home-page: https://docs.openstack.org/octavia/latest/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
License: UNKNOWN
Description: ========================
        Team and repository tags
        ========================

        .. image:: https://governance.openstack.org/tc/badges/octavia.svg
            :target: https://governance.openstack.org/tc/reference/tags/index.html

        .. Change things from this point on

        =======
        Octavia
        =======

        .. image:: https://img.shields.io/pypi/v/octavia.svg
            :target: https://pypi.org/project/octavia/
            :alt: Latest Version

        Octavia is an operator-grade open source scalable load balancer for
        use in large OpenStack deployments. It delivers load balancing
        services on `amphorae `_ and provides centralized command and
        control. Octavia is currently the reference backend for Neutron
        LBaaS. In the near future, Octavia is likely to become the standard
        OpenStack LBaaS API endpoint.

        Octavia is distributed under the terms of the Apache License,
        Version 2.0. The full terms and conditions of this license are
        detailed in the LICENSE file.

        Project resources
        ~~~~~~~~~~~~~~~~~

        Developer documentation for the Octavia project is available at
        https://docs.openstack.org/octavia/latest/

        Release notes for the Octavia project are available at
        https://docs.openstack.org/releasenotes/octavia/

        The project source code repository is located at
        https://opendev.org/openstack/octavia

        Project status, bugs, and requests for feature enhancements are
        tracked on
        https://storyboard.openstack.org/#!/project/openstack/octavia

        For more information on project direction and guiding principles
        for contributors, please see the CONSTITUTION.rst file in this
        directory, or specifications in the specs/ sub-directory.

        The project roadmap is available at
        https://wiki.openstack.org/wiki/Octavia/Roadmap

        External Resources
        ~~~~~~~~~~~~~~~~~~

        * Octavia Wiki: https://wiki.openstack.org/wiki/Octavia

        * For help on usage and hacking of Octavia, please send an email to
          the OpenStack-dev Mailing List with the **[Octavia]** tag.

Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Requires-Python: >=3.6
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052692.0
octavia-6.2.2/octavia.egg-info/SOURCES.txt0000664000175000017500000021023200000000000020176 0ustar00zuulzuul00000000000000
.coveragerc .pylintrc .stestr.conf AUTHORS CONSTITUTION.rst CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst TESTING.rst babel.cfg bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/http-status.yaml api-ref/source/index.rst api-ref/source/parameters.yaml api-ref/source/examples/versions-get-resp.json api-ref/source/v2/amphora.inc api-ref/source/v2/availabilityzone.inc api-ref/source/v2/availabilityzoneprofile.inc api-ref/source/v2/flavor.inc api-ref/source/v2/flavorprofile.inc api-ref/source/v2/general.inc api-ref/source/v2/healthmonitor.inc api-ref/source/v2/index.rst api-ref/source/v2/l7policy.inc api-ref/source/v2/l7rule.inc api-ref/source/v2/listener.inc api-ref/source/v2/loadbalancer.inc api-ref/source/v2/member.inc api-ref/source/v2/pool.inc api-ref/source/v2/provider.inc api-ref/source/v2/quota.inc api-ref/source/v2/examples/amphora-config-curl
api-ref/source/v2/examples/amphora-failover-curl api-ref/source/v2/examples/amphora-list-curl api-ref/source/v2/examples/amphora-list-response.json api-ref/source/v2/examples/amphora-show-curl api-ref/source/v2/examples/amphora-show-response.json api-ref/source/v2/examples/amphora-show-stats-curl api-ref/source/v2/examples/amphora-show-stats-response.json api-ref/source/v2/examples/availabilityzone-create-curl api-ref/source/v2/examples/availabilityzone-create-request.json api-ref/source/v2/examples/availabilityzone-create-response.json api-ref/source/v2/examples/availabilityzone-delete-curl api-ref/source/v2/examples/availabilityzone-list-curl api-ref/source/v2/examples/availabilityzone-list-response.json api-ref/source/v2/examples/availabilityzone-show-curl api-ref/source/v2/examples/availabilityzone-show-response.json api-ref/source/v2/examples/availabilityzone-update-curl api-ref/source/v2/examples/availabilityzone-update-request.json api-ref/source/v2/examples/availabilityzone-update-response.json api-ref/source/v2/examples/availabilityzoneprofile-create-curl api-ref/source/v2/examples/availabilityzoneprofile-create-request.json api-ref/source/v2/examples/availabilityzoneprofile-create-response.json api-ref/source/v2/examples/availabilityzoneprofile-delete-curl api-ref/source/v2/examples/availabilityzoneprofile-list-curl api-ref/source/v2/examples/availabilityzoneprofile-list-response.json api-ref/source/v2/examples/availabilityzoneprofile-show-curl api-ref/source/v2/examples/availabilityzoneprofile-show-response.json api-ref/source/v2/examples/availabilityzoneprofile-update-curl api-ref/source/v2/examples/availabilityzoneprofile-update-request.json api-ref/source/v2/examples/availabilityzoneprofile-update-response.json api-ref/source/v2/examples/flavor-create-curl api-ref/source/v2/examples/flavor-create-request.json api-ref/source/v2/examples/flavor-create-response.json api-ref/source/v2/examples/flavor-delete-curl api-ref/source/v2/examples/flavor-list-curl api-ref/source/v2/examples/flavor-profile-list-curl api-ref/source/v2/examples/flavor-show-curl api-ref/source/v2/examples/flavor-show-response.json api-ref/source/v2/examples/flavor-update-curl api-ref/source/v2/examples/flavor-update-request.json api-ref/source/v2/examples/flavor-update-response.json api-ref/source/v2/examples/flavorprofile-create-curl api-ref/source/v2/examples/flavorprofile-create-request.json api-ref/source/v2/examples/flavorprofile-create-response.json api-ref/source/v2/examples/flavorprofile-delete-curl api-ref/source/v2/examples/flavorprofile-show-curl api-ref/source/v2/examples/flavorprofile-show-response.json api-ref/source/v2/examples/flavorprofile-update-curl api-ref/source/v2/examples/flavorprofile-update-request.json api-ref/source/v2/examples/flavorprofile-update-response.json api-ref/source/v2/examples/flavorprofiles-list-response.json api-ref/source/v2/examples/flavors-list-response.json api-ref/source/v2/examples/healthmonitor-create-curl api-ref/source/v2/examples/healthmonitor-create-request.json api-ref/source/v2/examples/healthmonitor-create-response.json api-ref/source/v2/examples/healthmonitor-delete-curl api-ref/source/v2/examples/healthmonitor-list-curl api-ref/source/v2/examples/healthmonitor-show-curl api-ref/source/v2/examples/healthmonitor-show-response.json api-ref/source/v2/examples/healthmonitor-update-curl api-ref/source/v2/examples/healthmonitor-update-request.json api-ref/source/v2/examples/healthmonitor-update-response.json 
api-ref/source/v2/examples/healthmonitors-list-response.json api-ref/source/v2/examples/http-header-insertion-obj.json api-ref/source/v2/examples/l7policies-list-curl api-ref/source/v2/examples/l7policies-list-response.json api-ref/source/v2/examples/l7policy-create-curl api-ref/source/v2/examples/l7policy-create-request.json api-ref/source/v2/examples/l7policy-create-response.json api-ref/source/v2/examples/l7policy-delete-curl api-ref/source/v2/examples/l7policy-show-curl api-ref/source/v2/examples/l7policy-show-response.json api-ref/source/v2/examples/l7policy-update-curl api-ref/source/v2/examples/l7policy-update-request.json api-ref/source/v2/examples/l7policy-update-response.json api-ref/source/v2/examples/l7rule-create-curl api-ref/source/v2/examples/l7rule-create-request.json api-ref/source/v2/examples/l7rule-create-response.json api-ref/source/v2/examples/l7rule-delete-curl api-ref/source/v2/examples/l7rule-show-curl api-ref/source/v2/examples/l7rule-show-response.json api-ref/source/v2/examples/l7rule-update-curl api-ref/source/v2/examples/l7rule-update-request.json api-ref/source/v2/examples/l7rule-update-response.json api-ref/source/v2/examples/l7rules-list-curl api-ref/source/v2/examples/l7rules-list-response.json api-ref/source/v2/examples/listener-create-curl api-ref/source/v2/examples/listener-create-request.json api-ref/source/v2/examples/listener-create-response.json api-ref/source/v2/examples/listener-delete-curl api-ref/source/v2/examples/listener-show-curl api-ref/source/v2/examples/listener-show-response.json api-ref/source/v2/examples/listener-stats-curl api-ref/source/v2/examples/listener-stats-response.json api-ref/source/v2/examples/listener-update-curl api-ref/source/v2/examples/listener-update-request.json api-ref/source/v2/examples/listener-update-response.json api-ref/source/v2/examples/listeners-list-curl api-ref/source/v2/examples/listeners-list-response.json api-ref/source/v2/examples/loadbalancer-create-curl api-ref/source/v2/examples/loadbalancer-create-request.json api-ref/source/v2/examples/loadbalancer-create-response.json api-ref/source/v2/examples/loadbalancer-delete-curl api-ref/source/v2/examples/loadbalancer-failover-curl api-ref/source/v2/examples/loadbalancer-full-create-request.json api-ref/source/v2/examples/loadbalancer-full-create-response.json api-ref/source/v2/examples/loadbalancer-show-curl api-ref/source/v2/examples/loadbalancer-show-response.json api-ref/source/v2/examples/loadbalancer-stats-curl api-ref/source/v2/examples/loadbalancer-stats-response.json api-ref/source/v2/examples/loadbalancer-status-curl api-ref/source/v2/examples/loadbalancer-status-response.json api-ref/source/v2/examples/loadbalancer-update-curl api-ref/source/v2/examples/loadbalancer-update-request.json api-ref/source/v2/examples/loadbalancer-update-response.json api-ref/source/v2/examples/loadbalancers-list-curl api-ref/source/v2/examples/loadbalancers-list-response.json api-ref/source/v2/examples/member-batch-update-curl api-ref/source/v2/examples/member-batch-update-request.json api-ref/source/v2/examples/member-create-curl api-ref/source/v2/examples/member-create-request.json api-ref/source/v2/examples/member-create-response.json api-ref/source/v2/examples/member-delete-curl api-ref/source/v2/examples/member-show-curl api-ref/source/v2/examples/member-show-response.json api-ref/source/v2/examples/member-update-curl api-ref/source/v2/examples/member-update-request.json api-ref/source/v2/examples/member-update-response.json 
api-ref/source/v2/examples/members-list-curl api-ref/source/v2/examples/members-list-response.json api-ref/source/v2/examples/pool-create-curl api-ref/source/v2/examples/pool-create-request.json api-ref/source/v2/examples/pool-create-response.json api-ref/source/v2/examples/pool-delete-curl api-ref/source/v2/examples/pool-session-persistence-obj.json api-ref/source/v2/examples/pool-show-curl api-ref/source/v2/examples/pool-show-response.json api-ref/source/v2/examples/pool-update-curl api-ref/source/v2/examples/pool-update-request.json api-ref/source/v2/examples/pool-update-response.json api-ref/source/v2/examples/pools-list-curl api-ref/source/v2/examples/pools-list-response.json api-ref/source/v2/examples/provider-availability-zone-capability-show-curl api-ref/source/v2/examples/provider-availability-zone-capability-show-response.json api-ref/source/v2/examples/provider-flavor-capability-show-curl api-ref/source/v2/examples/provider-flavor-capability-show-response.json api-ref/source/v2/examples/provider-list-curl api-ref/source/v2/examples/provider-list-response.json api-ref/source/v2/examples/quota-reset-curl api-ref/source/v2/examples/quota-show-curl api-ref/source/v2/examples/quota-update-curl api-ref/source/v2/examples/quota-update-request.json api-ref/source/v2/examples/quota-update-response.json api-ref/source/v2/examples/quotas-defaults-curl api-ref/source/v2/examples/quotas-defaults-response.json api-ref/source/v2/examples/quotas-list-curl api-ref/source/v2/examples/quotas-list-response.json api-ref/source/v2/examples/quotas-show-response.json bin/create_dual_intermediate_CA.sh bin/create_single_CA_intermediate_CA.sh bin/openssl.cnf devstack/README.md devstack/plugin.sh devstack/settings devstack/contrib/new-octavia-devstack.sh devstack/etc/octavia/haproxy.cfg devstack/etc/rsyslog/10-octavia-log-offloading.conf devstack/files/debs/octavia devstack/files/rpms/octavia devstack/files/rpms-suse/octavia devstack/files/wsgi/octavia-api.template devstack/pregenerated/regenerate-certs.sh devstack/pregenerated/certs/client_ca.cert.pem devstack/pregenerated/certs/server_ca-chain.cert.pem devstack/pregenerated/certs/server_ca.cert.pem devstack/pregenerated/certs/private/client.cert-and-key.pem devstack/pregenerated/certs/private/server_ca.key.pem devstack/pregenerated/ssh-keys/octavia_ssh_key devstack/pregenerated/ssh-keys/octavia_ssh_key.pub devstack/samples/README-Vagrant.md devstack/samples/providers.rb devstack/samples/multinode/Vagrantfile devstack/samples/multinode/local-2.conf devstack/samples/multinode/local.conf devstack/samples/multinode/local.sh devstack/samples/multinode/webserver.sh devstack/samples/singlenode/Vagrantfile devstack/samples/singlenode/local.conf devstack/samples/singlenode/local.sh devstack/samples/singlenode/webserver.sh devstack/upgrade/resources.sh devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh devstack/upgrade/vm_user_data.sh diskimage-create/README.rst diskimage-create/diskimage-create.sh diskimage-create/image-tests.sh diskimage-create/requirements.txt diskimage-create/test-requirements.txt diskimage-create/tox.ini diskimage-create/version.txt doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/_static/.placeholder doc/source/admin/amphora-image-build.rst doc/source/admin/apache-httpd.rst doc/source/admin/api-audit.rst doc/source/admin/flavors.rst doc/source/admin/healthcheck.rst doc/source/admin/index.rst doc/source/admin/log-offloading.rst doc/source/admin/octavia-status.rst 
doc/source/admin/guides/certificates.rst doc/source/admin/guides/operator-maintenance.rst doc/source/admin/guides/upgrade.rst doc/source/admin/guides/sample-configs/openssl.cnf doc/source/admin/providers/a10.rst doc/source/admin/providers/amphora.rst doc/source/admin/providers/amphorav2.rst doc/source/admin/providers/f5.rst doc/source/admin/providers/index.rst doc/source/admin/providers/ovn.rst doc/source/admin/providers/radware.rst doc/source/admin/providers/vmware-nsx.rst doc/source/cli/index.rst doc/source/configuration/configref.rst doc/source/configuration/index.rst doc/source/configuration/policy.rst doc/source/contributor/CONSTITUTION.rst doc/source/contributor/HACKING.rst doc/source/contributor/contributing.rst doc/source/contributor/index.rst doc/source/contributor/specs doc/source/contributor/api/haproxy-amphora-api.rst doc/source/contributor/design/version0.5/component-design.rst doc/source/contributor/design/version0.5/v0.5-component-design.dot doc/source/contributor/devref/erd.rst doc/source/contributor/devref/flows.rst doc/source/contributor/devref/gmr.rst doc/source/contributor/guides/dev-quick-start.rst doc/source/contributor/guides/providers.rst doc/source/contributor/guides/remote-debugger.rst doc/source/install/index.rst doc/source/install/install-amphorav2.rst doc/source/install/install-ubuntu.rst doc/source/install/install.rst doc/source/reference/glossary.rst doc/source/reference/index.rst doc/source/reference/introduction.rst doc/source/reference/octavia-component-overview.svg doc/source/user/index.rst doc/source/user/sdks.rst doc/source/user/feature-classification/feature-matrix-healthmonitor.ini doc/source/user/feature-classification/feature-matrix-l7policy.ini doc/source/user/feature-classification/feature-matrix-l7rule.ini doc/source/user/feature-classification/feature-matrix-lb.ini doc/source/user/feature-classification/feature-matrix-listener.ini doc/source/user/feature-classification/feature-matrix-member.ini doc/source/user/feature-classification/feature-matrix-pool.ini doc/source/user/feature-classification/index.rst doc/source/user/guides/basic-cookbook.rst doc/source/user/guides/l7-cookbook.rst doc/source/user/guides/l7.rst elements/amphora-agent/README.rst elements/amphora-agent/element-deps elements/amphora-agent/package-installs.yaml elements/amphora-agent/pkg-map elements/amphora-agent/source-repository-amphora-agent elements/amphora-agent/svc-map elements/amphora-agent/install.d/amphora-agent-source-install/75-amphora-agent-install elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.conf elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.init elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.logrotate elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.service elements/amphora-agent/post-install.d/10-enable-network-scripts elements/amphora-agent/post-install.d/11-enable-amphora-agent-systemd elements/amphora-agent/post-install.d/90-remove-build-deps elements/amphora-agent/static/etc/NetworkManager/conf.d/no-auto-default.conf elements/amphora-agent/static/etc/rsyslog.d/55-octavia-socket.conf elements/certs-ramfs/README.rst elements/certs-ramfs/element-deps elements/certs-ramfs/package-installs.yaml elements/certs-ramfs/svc-map elements/certs-ramfs/init-scripts/systemd/certs-ramfs.service elements/certs-ramfs/init-scripts/sysv/certs-ramfs elements/certs-ramfs/init-scripts/upstart/certs-ramfs.conf 
elements/certs-ramfs/post-install.d/30-enable-certs-ramfs-service elements/certs-ramfs/static/usr/local/bin/certfs-ramfs elements/disable-makecache/README.rst elements/disable-makecache/svc-map elements/disable-makecache/post-install.d/80-disable-makecache elements/haproxy-octavia/README.rst elements/haproxy-octavia/element-deps elements/haproxy-octavia/package-installs.json elements/haproxy-octavia/pkg-map elements/haproxy-octavia/svc-map elements/haproxy-octavia/install.d/76-haproxy elements/haproxy-octavia/post-install.d/20-disable-default-haproxy elements/haproxy-octavia/post-install.d/20-haproxy-tune-kernel elements/haproxy-octavia/post-install.d/20-haproxy-user-group-config elements/haproxy-octavia/post-install.d/20-setup-haproxy-log elements/haproxy-octavia/post-install.d/21-create-ping-wrapper elements/ipvsadmin/README.rst elements/ipvsadmin/element-deps elements/ipvsadmin/package-installs.json elements/ipvsadmin/svc-map elements/keepalived-octavia/README.rst elements/keepalived-octavia/element-deps elements/keepalived-octavia/package-installs.json elements/keepalived-octavia/pkg-map elements/keepalived-octavia/svc-map elements/no-resolvconf/README.rst elements/no-resolvconf/finalise.d/99-disable-resolv-conf elements/rebind-sshd/README.rst elements/rebind-sshd/finalise.d/98-rebind-sshd-after-dhcp elements/remove-default-ints/README.rst elements/remove-default-ints/post-install.d/91-remove-default-ints elements/remove-sshd/README.rst elements/remove-sshd/package-installs.yaml elements/root-passwd/README.rst elements/root-passwd/post-install.d/99-setup elements/sos/README.rst elements/sos/element-deps elements/sos/package-installs.yaml elements/sos/pkg-map etc/octavia.conf etc/audit/octavia_api_audit_map.conf.sample etc/certificates/openssl.cnf etc/dhcp/dhclient.conf etc/policy/README.rst etc/policy/admin_or_owner-policy.json etc/policy/octavia-policy-generator.conf httpd/octavia-api.conf octavia/__init__.py octavia/i18n.py octavia/opts.py octavia/version.py octavia.egg-info/PKG-INFO octavia.egg-info/SOURCES.txt octavia.egg-info/dependency_links.txt octavia.egg-info/entry_points.txt octavia.egg-info/not-zip-safe octavia.egg-info/pbr.json octavia.egg-info/requires.txt octavia.egg-info/top_level.txt octavia/amphorae/__init__.py octavia/amphorae/backends/__init__.py octavia/amphorae/backends/agent/__init__.py octavia/amphorae/backends/agent/agent_jinja_cfg.py octavia/amphorae/backends/agent/api_server/__init__.py octavia/amphorae/backends/agent/api_server/amphora_info.py octavia/amphorae/backends/agent/api_server/certificate_update.py octavia/amphorae/backends/agent/api_server/haproxy_compatibility.py octavia/amphorae/backends/agent/api_server/keepalived.py octavia/amphorae/backends/agent/api_server/keepalivedlvs.py octavia/amphorae/backends/agent/api_server/loadbalancer.py octavia/amphorae/backends/agent/api_server/osutils.py octavia/amphorae/backends/agent/api_server/plug.py octavia/amphorae/backends/agent/api_server/server.py octavia/amphorae/backends/agent/api_server/udp_listener_base.py octavia/amphorae/backends/agent/api_server/util.py octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 octavia/amphorae/backends/agent/api_server/templates/keepalived.systemd.j2 octavia/amphorae/backends/agent/api_server/templates/keepalived.sysvinit.j2 octavia/amphorae/backends/agent/api_server/templates/keepalived.upstart.j2 octavia/amphorae/backends/agent/api_server/templates/keepalived_check_script.conf.j2 
octavia/amphorae/backends/agent/api_server/templates/keepalived_lvs_check_script.sh.j2 octavia/amphorae/backends/agent/api_server/templates/plug_port_ethX.conf.j2 octavia/amphorae/backends/agent/api_server/templates/plug_vip_ethX.conf.j2 octavia/amphorae/backends/agent/api_server/templates/rh_plug_port_ethX.conf.j2 octavia/amphorae/backends/agent/api_server/templates/rh_plug_port_eth_ifdown_local.conf.j2 octavia/amphorae/backends/agent/api_server/templates/rh_plug_port_eth_ifup_local.conf.j2 octavia/amphorae/backends/agent/api_server/templates/rh_plug_vip_ethX.conf.j2 octavia/amphorae/backends/agent/api_server/templates/rh_plug_vip_ethX_alias.conf.j2 octavia/amphorae/backends/agent/api_server/templates/rh_route_ethX.conf.j2 octavia/amphorae/backends/agent/api_server/templates/rh_rule_ethX.conf.j2 octavia/amphorae/backends/agent/api_server/templates/systemd.conf.j2 octavia/amphorae/backends/agent/api_server/templates/sysvinit.conf.j2 octavia/amphorae/backends/agent/api_server/templates/upstart.conf.j2 octavia/amphorae/backends/agent/templates/amphora_agent_conf.template octavia/amphorae/backends/health_daemon/__init__.py octavia/amphorae/backends/health_daemon/health_daemon.py octavia/amphorae/backends/health_daemon/health_sender.py octavia/amphorae/backends/health_daemon/status_message.py octavia/amphorae/backends/utils/__init__.py octavia/amphorae/backends/utils/haproxy_query.py octavia/amphorae/backends/utils/ip_advertisement.py octavia/amphorae/backends/utils/keepalivedlvs_query.py octavia/amphorae/backends/utils/network_namespace.py octavia/amphorae/backends/utils/network_utils.py octavia/amphorae/backends/utils/udp_check.sh octavia/amphorae/driver_exceptions/__init__.py octavia/amphorae/driver_exceptions/exceptions.py octavia/amphorae/drivers/__init__.py octavia/amphorae/drivers/driver_base.py octavia/amphorae/drivers/haproxy/__init__.py octavia/amphorae/drivers/haproxy/data_models.py octavia/amphorae/drivers/haproxy/exceptions.py octavia/amphorae/drivers/haproxy/rest_api_driver.py octavia/amphorae/drivers/health/__init__.py octavia/amphorae/drivers/health/heartbeat_udp.py octavia/amphorae/drivers/keepalived/__init__.py octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py octavia/amphorae/drivers/keepalived/jinja/__init__.py octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template octavia/amphorae/drivers/noop_driver/__init__.py octavia/amphorae/drivers/noop_driver/driver.py octavia/api/__init__.py octavia/api/app.py octavia/api/config.py octavia/api/root_controller.py octavia/api/common/__init__.py octavia/api/common/hooks.py octavia/api/common/pagination.py octavia/api/common/types.py octavia/api/drivers/__init__.py octavia/api/drivers/data_models.py octavia/api/drivers/driver_factory.py octavia/api/drivers/driver_lib.py octavia/api/drivers/provider_base.py octavia/api/drivers/utils.py octavia/api/drivers/amphora_driver/__init__.py octavia/api/drivers/amphora_driver/availability_zone_schema.py octavia/api/drivers/amphora_driver/flavor_schema.py octavia/api/drivers/amphora_driver/v1/__init__.py octavia/api/drivers/amphora_driver/v1/driver.py octavia/api/drivers/amphora_driver/v2/__init__.py octavia/api/drivers/amphora_driver/v2/driver.py octavia/api/drivers/driver_agent/__init__.py octavia/api/drivers/driver_agent/driver_get.py octavia/api/drivers/driver_agent/driver_listener.py octavia/api/drivers/driver_agent/driver_updater.py octavia/api/drivers/noop_driver/__init__.py 
octavia/api/drivers/noop_driver/agent.py octavia/api/drivers/noop_driver/driver.py octavia/api/healthcheck/healthcheck_plugins.py octavia/api/v2/__init__.py octavia/api/v2/controllers/__init__.py octavia/api/v2/controllers/amphora.py octavia/api/v2/controllers/availability_zone_profiles.py octavia/api/v2/controllers/availability_zones.py octavia/api/v2/controllers/base.py octavia/api/v2/controllers/flavor_profiles.py octavia/api/v2/controllers/flavors.py octavia/api/v2/controllers/health_monitor.py octavia/api/v2/controllers/l7policy.py octavia/api/v2/controllers/l7rule.py octavia/api/v2/controllers/listener.py octavia/api/v2/controllers/load_balancer.py octavia/api/v2/controllers/member.py octavia/api/v2/controllers/pool.py octavia/api/v2/controllers/provider.py octavia/api/v2/controllers/quotas.py octavia/api/v2/types/__init__.py octavia/api/v2/types/amphora.py octavia/api/v2/types/availability_zone_profile.py octavia/api/v2/types/availability_zones.py octavia/api/v2/types/flavor_profile.py octavia/api/v2/types/flavors.py octavia/api/v2/types/health_monitor.py octavia/api/v2/types/l7policy.py octavia/api/v2/types/l7rule.py octavia/api/v2/types/listener.py octavia/api/v2/types/load_balancer.py octavia/api/v2/types/member.py octavia/api/v2/types/pool.py octavia/api/v2/types/provider.py octavia/api/v2/types/quotas.py octavia/certificates/__init__.py octavia/certificates/common/__init__.py octavia/certificates/common/barbican.py octavia/certificates/common/cert.py octavia/certificates/common/local.py octavia/certificates/common/pkcs12.py octavia/certificates/common/auth/__init__.py octavia/certificates/common/auth/barbican_acl.py octavia/certificates/generator/__init__.py octavia/certificates/generator/cert_gen.py octavia/certificates/generator/local.py octavia/certificates/manager/__init__.py octavia/certificates/manager/barbican.py octavia/certificates/manager/barbican_legacy.py octavia/certificates/manager/castellan_mgr.py octavia/certificates/manager/cert_mgr.py octavia/certificates/manager/local.py octavia/cmd/__init__.py octavia/cmd/agent.py octavia/cmd/api.py octavia/cmd/driver_agent.py octavia/cmd/haproxy_vrrp_check.py octavia/cmd/health_manager.py octavia/cmd/house_keeping.py octavia/cmd/octavia_worker.py octavia/cmd/status.py octavia/common/__init__.py octavia/common/base_taskflow.py octavia/common/clients.py octavia/common/config.py octavia/common/constants.py octavia/common/context.py octavia/common/data_models.py octavia/common/decorators.py octavia/common/exceptions.py octavia/common/keystone.py octavia/common/policy.py octavia/common/rpc.py octavia/common/service.py octavia/common/stats.py octavia/common/utils.py octavia/common/validate.py octavia/common/jinja/__init__.py octavia/common/jinja/user_data_jinja_cfg.py octavia/common/jinja/haproxy/__init__.py octavia/common/jinja/haproxy/combined_listeners/__init__.py octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py octavia/common/jinja/haproxy/combined_listeners/templates/base.j2 octavia/common/jinja/haproxy/combined_listeners/templates/haproxy.cfg.j2 octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 octavia/common/jinja/haproxy/split_listeners/__init__.py octavia/common/jinja/haproxy/split_listeners/jinja_cfg.py octavia/common/jinja/haproxy/split_listeners/templates/base.j2 octavia/common/jinja/haproxy/split_listeners/templates/haproxy.cfg.j2 octavia/common/jinja/haproxy/split_listeners/templates/macros.j2 octavia/common/jinja/logging/__init__.py octavia/common/jinja/logging/logging_jinja_cfg.py 
octavia/common/jinja/logging/templates/10-rsyslog.conf.template octavia/common/jinja/lvs/__init__.py octavia/common/jinja/lvs/jinja_cfg.py octavia/common/jinja/lvs/templates/base.j2 octavia/common/jinja/lvs/templates/keepalivedlvs.cfg.j2 octavia/common/jinja/lvs/templates/macros.j2 octavia/common/jinja/templates/user_data_config_drive.template octavia/common/tls_utils/__init__.py octavia/common/tls_utils/cert_parser.py octavia/compute/__init__.py octavia/compute/compute_base.py octavia/compute/drivers/__init__.py octavia/compute/drivers/nova_driver.py octavia/compute/drivers/noop_driver/__init__.py octavia/compute/drivers/noop_driver/driver.py octavia/controller/__init__.py octavia/controller/healthmanager/__init__.py octavia/controller/healthmanager/health_manager.py octavia/controller/healthmanager/health_drivers/__init__.py octavia/controller/healthmanager/health_drivers/update_base.py octavia/controller/healthmanager/health_drivers/update_db.py octavia/controller/healthmanager/health_drivers/update_logging.py octavia/controller/housekeeping/__init__.py octavia/controller/housekeeping/house_keeping.py octavia/controller/queue/__init__.py octavia/controller/queue/v1/__init__.py octavia/controller/queue/v1/consumer.py octavia/controller/queue/v1/endpoints.py octavia/controller/queue/v2/__init__.py octavia/controller/queue/v2/consumer.py octavia/controller/queue/v2/endpoints.py octavia/controller/worker/__init__.py octavia/controller/worker/amphora_rate_limit.py octavia/controller/worker/task_utils.py octavia/controller/worker/v1/__init__.py octavia/controller/worker/v1/controller_worker.py octavia/controller/worker/v1/flows/__init__.py octavia/controller/worker/v1/flows/amphora_flows.py octavia/controller/worker/v1/flows/health_monitor_flows.py octavia/controller/worker/v1/flows/l7policy_flows.py octavia/controller/worker/v1/flows/l7rule_flows.py octavia/controller/worker/v1/flows/listener_flows.py octavia/controller/worker/v1/flows/load_balancer_flows.py octavia/controller/worker/v1/flows/member_flows.py octavia/controller/worker/v1/flows/pool_flows.py octavia/controller/worker/v1/tasks/__init__.py octavia/controller/worker/v1/tasks/amphora_driver_tasks.py octavia/controller/worker/v1/tasks/cert_task.py octavia/controller/worker/v1/tasks/compute_tasks.py octavia/controller/worker/v1/tasks/database_tasks.py octavia/controller/worker/v1/tasks/lifecycle_tasks.py octavia/controller/worker/v1/tasks/model_tasks.py octavia/controller/worker/v1/tasks/network_tasks.py octavia/controller/worker/v1/tasks/retry_tasks.py octavia/controller/worker/v2/__init__.py octavia/controller/worker/v2/controller_worker.py octavia/controller/worker/v2/taskflow_jobboard_driver.py octavia/controller/worker/v2/flows/__init__.py octavia/controller/worker/v2/flows/amphora_flows.py octavia/controller/worker/v2/flows/flow_utils.py octavia/controller/worker/v2/flows/health_monitor_flows.py octavia/controller/worker/v2/flows/l7policy_flows.py octavia/controller/worker/v2/flows/l7rule_flows.py octavia/controller/worker/v2/flows/listener_flows.py octavia/controller/worker/v2/flows/load_balancer_flows.py octavia/controller/worker/v2/flows/member_flows.py octavia/controller/worker/v2/flows/pool_flows.py octavia/controller/worker/v2/tasks/__init__.py octavia/controller/worker/v2/tasks/amphora_driver_tasks.py octavia/controller/worker/v2/tasks/cert_task.py octavia/controller/worker/v2/tasks/compute_tasks.py octavia/controller/worker/v2/tasks/database_tasks.py octavia/controller/worker/v2/tasks/lifecycle_tasks.py 
octavia/controller/worker/v2/tasks/network_tasks.py octavia/controller/worker/v2/tasks/retry_tasks.py octavia/db/__init__.py octavia/db/api.py octavia/db/base_models.py octavia/db/healthcheck.py octavia/db/models.py octavia/db/prepare.py octavia/db/repositories.py octavia/db/migration/__init__.py octavia/db/migration/alembic.ini octavia/db/migration/cli.py octavia/db/migration/alembic_migrations/README.rst octavia/db/migration/alembic_migrations/env.py octavia/db/migration/alembic_migrations/script.py.mako octavia/db/migration/alembic_migrations/versions/034756a182a2_amphora_add_image_id.py octavia/db/migration/alembic_migrations/versions/034b2dc2f3e0_modernize_l7policy_fields.py octavia/db/migration/alembic_migrations/versions/0aee2b450512_extend_api_to_accept_qos_policy_id.py octavia/db/migration/alembic_migrations/versions/0f242cf02c74_add_provider_column.py octavia/db/migration/alembic_migrations/versions/0fd2c131923f_add_timeout_fields_to_listener.py octavia/db/migration/alembic_migrations/versions/10d38216ad34_add_timestamps_to_amphora.py octavia/db/migration/alembic_migrations/versions/11e4bb2bb8ef_fix_ipv6_vip.py octavia/db/migration/alembic_migrations/versions/13500e2e978d_update_url_and_name_size.py octavia/db/migration/alembic_migrations/versions/14892634e228_update_vip.py octavia/db/migration/alembic_migrations/versions/186509101b9b_add_server_group_id_to_loadbalancer.py octavia/db/migration/alembic_migrations/versions/1afc932f1ca2_l7rule_support_client_cert.py octavia/db/migration/alembic_migrations/versions/1e4c1d83044c_keepalived_configuration_datamodel.py octavia/db/migration/alembic_migrations/versions/211982b05afc_add_flavor_id_to_lb.py octavia/db/migration/alembic_migrations/versions/2351ea316465_adding_terminate_https_tls_ref_support.py octavia/db/migration/alembic_migrations/versions/256852d5ff7c_add_lb_network_ip_to_amphora.py octavia/db/migration/alembic_migrations/versions/27e54d00c3cd_add_monitor_address_and_port_to_member.py octavia/db/migration/alembic_migrations/versions/298eac0640a7_add_amphora_vrrp_port_id_and_ha_port_id.py octavia/db/migration/alembic_migrations/versions/29ff921a6eb_shared_pools.py octavia/db/migration/alembic_migrations/versions/2ad093f6353f_add_listener_client_ca_tls_certificate_.py octavia/db/migration/alembic_migrations/versions/357d17a6d5ac_update_lb_and_amphora_data_model_for_.py octavia/db/migration/alembic_migrations/versions/35dee79d5865_initial_create.py octavia/db/migration/alembic_migrations/versions/36b94648fef8_add_timestamp.py octavia/db/migration/alembic_migrations/versions/392fb85b4419_add_primary_key_to_spares_pool.py octavia/db/migration/alembic_migrations/versions/3a1e1cdb7b27_rename_amphora_host_id.py octavia/db/migration/alembic_migrations/versions/3b199c848b96_create_no_monitor_operational_status.py octavia/db/migration/alembic_migrations/versions/3e5b37a0bdb9_add_vrrp_ip_and_ha_ip_to_amphora.py octavia/db/migration/alembic_migrations/versions/3f8ff3be828e_create_quotas_table.py octavia/db/migration/alembic_migrations/versions/43287cd10fef_make_pool_lb_algorithm_larger.py octavia/db/migration/alembic_migrations/versions/443fe6676637_add_network_id_to_vip.py octavia/db/migration/alembic_migrations/versions/44a2414dd683_adding_name_column_to_member_and_health_.py octavia/db/migration/alembic_migrations/versions/458c9ee2a011_l7_policies_and_rules.py octavia/db/migration/alembic_migrations/versions/46d914b2a5e5_seed_the_spares_pool_table.py 
octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py octavia/db/migration/alembic_migrations/versions/4a6ec0ab7284_remove_fk_constraints_on_listener_.py octavia/db/migration/alembic_migrations/versions/4aeb9e23ad43_add_draining_operating_status.py octavia/db/migration/alembic_migrations/versions/4c094013699a_update_load_balancer_amphora.py octavia/db/migration/alembic_migrations/versions/4d9cf7d32f2_insert_headers.py octavia/db/migration/alembic_migrations/versions/4f65b4f91c39_amphora_add_flavor_id.py octavia/db/migration/alembic_migrations/versions/4faaa983e7a9_update_member_address_column.py octavia/db/migration/alembic_migrations/versions/4fe8240425b4_update_vip_add_subnet_id.py octavia/db/migration/alembic_migrations/versions/52377704420e_add_timestamps_to_healthmonitor.py octavia/db/migration/alembic_migrations/versions/5309960964f8_add_proxy_protocol_for_pool.py octavia/db/migration/alembic_migrations/versions/543f5d8e4e56_add_a_column_busy_in_table_amphora_health.py octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py octavia/db/migration/alembic_migrations/versions/62816c232310_fix_migration_for_mysql_5_7.py octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py octavia/db/migration/alembic_migrations/versions/6abb04f24c5_tenant_id_to_project_id.py octavia/db/migration/alembic_migrations/versions/6ffc710674ef_spares_pool_table.py octavia/db/migration/alembic_migrations/versions/7432f1d4ea83_add_http_host_head_inject_for_http_health_check.py octavia/db/migration/alembic_migrations/versions/74aae261694c_extend_pool_for_backend_ca_and_crl.py octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_protocol.py octavia/db/migration/alembic_migrations/versions/7c36b277bfb0_add_listener_ciphers_column.py octavia/db/migration/alembic_migrations/versions/80dba23a159f_tags_support.py octavia/db/migration/alembic_migrations/versions/82b9402e71fd_update_vip_address_size.py octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to_lb.py octavia/db/migration/alembic_migrations/versions/8c0851bdf6c3_change_tls_container_id_length_in_sni_.py octavia/db/migration/alembic_migrations/versions/92fe9857279_create_healthmanager_table.py octavia/db/migration/alembic_migrations/versions/9b5473976d6d_add_provisioning_status_to_objects.py octavia/db/migration/alembic_migrations/versions/9bf4d21caaea_adding_amphora_id_to_listener_.py octavia/db/migration/alembic_migrations/versions/a1f689aecc1d_extend_pool_for_support_backend_reencryption.py octavia/db/migration/alembic_migrations/versions/a5762a99609a_add_protocol_in_listener_keys.py octavia/db/migration/alembic_migrations/versions/a7f187cd221f_add_tls_boolean_type_for_reencryption.py octavia/db/migration/alembic_migrations/versions/b9c703669314_add_flavor_and_flavor_profile_table.py octavia/db/migration/alembic_migrations/versions/ba35e0fb88e1_add_backup_field_to_member.py octavia/db/migration/alembic_migrations/versions/bf171d0d91c3_amphora_add_cached_zone.py octavia/db/migration/alembic_migrations/versions/c11292016060_add_request_errors_for_stats.py octavia/db/migration/alembic_migrations/versions/c761c8a71579_add_availability_zone_table.py octavia/db/migration/alembic_migrations/versions/d85ca7258d21_modernize_l7rule.py 
octavia/db/migration/alembic_migrations/versions/da371b422669_allowed_cidr_for_listeners.py octavia/db/migration/alembic_migrations/versions/dcf88e59aae4_add_lb_algorithm_source_ip_port.py octavia/db/migration/alembic_migrations/versions/e37941b010db_add_lb_flavor_constraint.py octavia/db/migration/alembic_migrations/versions/e6672bda93bf_add_ping_and_tlshello_monitor_types.py octavia/db/migration/alembic_migrations/versions/ebbcc72b4e5e_add_octavia_owned_vip_column_to_vip_.py octavia/db/migration/alembic_migrations/versions/f21ae3f21adc_add_client_auth_option.py octavia/db/migration/alembic_migrations/versions/fac584114642_.py octavia/db/migration/alembic_migrations/versions/fbd705961c3a_add_pool_ciphers_column.py octavia/db/migration/alembic_migrations/versions/fc5582da7d8a_create_amphora_build_rate_limit_tables.py octavia/db/migration/alembic_migrations/versions/ffad172e98c1_add_certificate_revoke_list_option.py octavia/distributor/__init__.py octavia/distributor/drivers/__init__.py octavia/distributor/drivers/driver_base.py octavia/distributor/drivers/noop_driver/__init__.py octavia/distributor/drivers/noop_driver/driver.py octavia/hacking/__init__.py octavia/hacking/checks.py octavia/network/__init__.py octavia/network/base.py octavia/network/data_models.py octavia/network/drivers/__init__.py octavia/network/drivers/neutron/__init__.py octavia/network/drivers/neutron/allowed_address_pairs.py octavia/network/drivers/neutron/base.py octavia/network/drivers/neutron/utils.py octavia/network/drivers/noop_driver/__init__.py octavia/network/drivers/noop_driver/driver.py octavia/policies/__init__.py octavia/policies/amphora.py octavia/policies/availability_zone.py octavia/policies/availability_zone_profile.py octavia/policies/base.py octavia/policies/flavor.py octavia/policies/flavor_profile.py octavia/policies/healthmonitor.py octavia/policies/l7policy.py octavia/policies/l7rule.py octavia/policies/listener.py octavia/policies/loadbalancer.py octavia/policies/member.py octavia/policies/pool.py octavia/policies/provider.py octavia/policies/provider_availability_zone.py octavia/policies/provider_flavor.py octavia/policies/quota.py octavia/tests/__init__.py octavia/tests/common/__init__.py octavia/tests/common/constants.py octavia/tests/common/data_model_helpers.py octavia/tests/common/sample_certs.py octavia/tests/common/sample_data_models.py octavia/tests/common/sample_network_data.py octavia/tests/common/utils.py octavia/tests/functional/__init__.py octavia/tests/functional/amphorae/__init__.py octavia/tests/functional/amphorae/backend/__init__.py octavia/tests/functional/amphorae/backend/agent/__init__.py octavia/tests/functional/amphorae/backend/agent/api_server/__init__.py octavia/tests/functional/amphorae/backend/agent/api_server/test_keepalivedlvs.py octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py octavia/tests/functional/api/__init__.py octavia/tests/functional/api/test_healthcheck.py octavia/tests/functional/api/test_root_controller.py octavia/tests/functional/api/drivers/__init__.py octavia/tests/functional/api/drivers/driver_agent/__init__.py octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py octavia/tests/functional/api/v2/__init__.py octavia/tests/functional/api/v2/base.py octavia/tests/functional/api/v2/test_amphora.py octavia/tests/functional/api/v2/test_availability_zone_profiles.py octavia/tests/functional/api/v2/test_availability_zones.py octavia/tests/functional/api/v2/test_flavor_profiles.py 
octavia/tests/functional/api/v2/test_flavors.py octavia/tests/functional/api/v2/test_health_monitor.py octavia/tests/functional/api/v2/test_l7policy.py octavia/tests/functional/api/v2/test_l7rule.py octavia/tests/functional/api/v2/test_listener.py octavia/tests/functional/api/v2/test_load_balancer.py octavia/tests/functional/api/v2/test_member.py octavia/tests/functional/api/v2/test_pool.py octavia/tests/functional/api/v2/test_provider.py octavia/tests/functional/api/v2/test_quotas.py octavia/tests/functional/db/__init__.py octavia/tests/functional/db/base.py octavia/tests/functional/db/test_models.py octavia/tests/functional/db/test_repositories.py octavia/tests/unit/__init__.py octavia/tests/unit/base.py octavia/tests/unit/test_hacking.py octavia/tests/unit/test_opts.py octavia/tests/unit/test_version.py octavia/tests/unit/amphorae/__init__.py octavia/tests/unit/amphorae/backends/__init__.py octavia/tests/unit/amphorae/backends/agent/__init__.py octavia/tests/unit/amphorae/backends/agent/test_agent_jinja_cfg.py octavia/tests/unit/amphorae/backends/agent/api_server/__init__.py octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py octavia/tests/unit/amphorae/backends/agent/api_server/test_haproxy_compatibility.py octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalived.py octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalivedlvs.py octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py octavia/tests/unit/amphorae/backends/health_daemon/__init__.py octavia/tests/unit/amphorae/backends/health_daemon/test_envelope.py octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py octavia/tests/unit/amphorae/backends/utils/__init__.py octavia/tests/unit/amphorae/backends/utils/test_haproxy_query.py octavia/tests/unit/amphorae/backends/utils/test_ip_advertisement.py octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py octavia/tests/unit/amphorae/backends/utils/test_network_namespace.py octavia/tests/unit/amphorae/backends/utils/test_network_utils.py octavia/tests/unit/amphorae/drivers/__init__.py octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py octavia/tests/unit/amphorae/drivers/haproxy/__init__.py octavia/tests/unit/amphorae/drivers/haproxy/test_exceptions.py octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_common.py octavia/tests/unit/amphorae/drivers/health/__init__.py octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py octavia/tests/unit/amphorae/drivers/keepalived/__init__.py octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py octavia/tests/unit/amphorae/drivers/keepalived/jinja/__init__.py octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py octavia/tests/unit/api/__init__.py octavia/tests/unit/api/common/__init__.py octavia/tests/unit/api/common/base.py octavia/tests/unit/api/common/test_types.py octavia/tests/unit/api/drivers/__init__.py octavia/tests/unit/api/drivers/test_driver_factory.py octavia/tests/unit/api/drivers/test_driver_lib.py 
octavia/tests/unit/api/drivers/test_provider_base.py octavia/tests/unit/api/drivers/test_provider_noop_agent.py octavia/tests/unit/api/drivers/test_provider_noop_driver.py octavia/tests/unit/api/drivers/test_utils.py octavia/tests/unit/api/drivers/amphora_driver/__init__.py octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py octavia/tests/unit/api/drivers/driver_agent/__init__.py octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py octavia/tests/unit/api/drivers/driver_agent/test_driver_listener.py octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py octavia/tests/unit/api/hooks/__init__.py octavia/tests/unit/api/hooks/test_query_parameters.py octavia/tests/unit/api/v2/__init__.py octavia/tests/unit/api/v2/types/__init__.py octavia/tests/unit/api/v2/types/base.py octavia/tests/unit/api/v2/types/test_availability_zone_profiles.py octavia/tests/unit/api/v2/types/test_availability_zones.py octavia/tests/unit/api/v2/types/test_flavor_profiles.py octavia/tests/unit/api/v2/types/test_flavors.py octavia/tests/unit/api/v2/types/test_health_monitors.py octavia/tests/unit/api/v2/types/test_l7policies.py octavia/tests/unit/api/v2/types/test_l7rules.py octavia/tests/unit/api/v2/types/test_listeners.py octavia/tests/unit/api/v2/types/test_load_balancers.py octavia/tests/unit/api/v2/types/test_members.py octavia/tests/unit/api/v2/types/test_pools.py octavia/tests/unit/certificates/__init__.py octavia/tests/unit/certificates/common/__init__.py octavia/tests/unit/certificates/common/test_barbican.py octavia/tests/unit/certificates/common/test_local.py octavia/tests/unit/certificates/common/auth/__init__.py octavia/tests/unit/certificates/common/auth/test_barbican_acl.py octavia/tests/unit/certificates/generator/__init__.py octavia/tests/unit/certificates/generator/local_csr.py octavia/tests/unit/certificates/generator/test_local.py octavia/tests/unit/certificates/manager/__init__.py octavia/tests/unit/certificates/manager/test_barbican.py octavia/tests/unit/certificates/manager/test_barbican_legacy.py octavia/tests/unit/certificates/manager/test_castellan_mgr.py octavia/tests/unit/certificates/manager/test_local.py octavia/tests/unit/cmd/__init__.py octavia/tests/unit/cmd/test_agent.py octavia/tests/unit/cmd/test_driver_agent.py octavia/tests/unit/cmd/test_haproxy_vrrp_check.py octavia/tests/unit/cmd/test_health_manager.py octavia/tests/unit/cmd/test_house_keeping.py octavia/tests/unit/cmd/test_status.py octavia/tests/unit/common/__init__.py octavia/tests/unit/common/test_base_taskflow.py octavia/tests/unit/common/test_clients.py octavia/tests/unit/common/test_config.py octavia/tests/unit/common/test_constants.py octavia/tests/unit/common/test_data_models.py octavia/tests/unit/common/test_decorator.py octavia/tests/unit/common/test_exceptions.py octavia/tests/unit/common/test_policy.py octavia/tests/unit/common/test_stats.py octavia/tests/unit/common/test_utils.py octavia/tests/unit/common/test_validations.py octavia/tests/unit/common/jinja/__init__.py octavia/tests/unit/common/jinja/test_user_data_jinja_cfg.py octavia/tests/unit/common/jinja/haproxy/__init__.py octavia/tests/unit/common/jinja/haproxy/combined_listeners/__init__.py octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py 
octavia/tests/unit/common/jinja/haproxy/split_listeners/__init__.py octavia/tests/unit/common/jinja/haproxy/split_listeners/test_jinja_cfg.py octavia/tests/unit/common/jinja/logging/__init__.py octavia/tests/unit/common/jinja/logging/test_logging_jinja_cfg.py octavia/tests/unit/common/jinja/lvs/__init__.py octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py octavia/tests/unit/common/sample_configs/__init__.py octavia/tests/unit/common/sample_configs/sample_configs_combined.py octavia/tests/unit/common/sample_configs/sample_configs_split.py octavia/tests/unit/common/sample_configs/sample_pkcs12.p12 octavia/tests/unit/common/tls_utils/__init__.py octavia/tests/unit/common/tls_utils/test_cert_parser.py octavia/tests/unit/compute/__init__.py octavia/tests/unit/compute/drivers/__init__.py octavia/tests/unit/compute/drivers/test_compute_noop_driver.py octavia/tests/unit/compute/drivers/test_nova_driver.py octavia/tests/unit/controller/__init__.py octavia/tests/unit/controller/healthmanager/__init__.py octavia/tests/unit/controller/healthmanager/test_health_manager.py octavia/tests/unit/controller/healthmanager/health_drivers/__init__.py octavia/tests/unit/controller/healthmanager/health_drivers/test_update_base.py octavia/tests/unit/controller/healthmanager/health_drivers/test_update_db.py octavia/tests/unit/controller/healthmanager/health_drivers/test_update_logging.py octavia/tests/unit/controller/housekeeping/__init__.py octavia/tests/unit/controller/housekeeping/test_house_keeping.py octavia/tests/unit/controller/queue/__init__.py octavia/tests/unit/controller/queue/v1/__init__.py octavia/tests/unit/controller/queue/v1/test_consumer.py octavia/tests/unit/controller/queue/v1/test_endpoints.py octavia/tests/unit/controller/queue/v2/__init__.py octavia/tests/unit/controller/queue/v2/test_consumer.py octavia/tests/unit/controller/queue/v2/test_endpoints.py octavia/tests/unit/controller/worker/__init__.py octavia/tests/unit/controller/worker/test_amphora_rate_limit.py octavia/tests/unit/controller/worker/test_task_utils.py octavia/tests/unit/controller/worker/v1/__init__.py octavia/tests/unit/controller/worker/v1/test_controller_worker.py octavia/tests/unit/controller/worker/v1/flows/__init__.py octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py octavia/tests/unit/controller/worker/v1/tasks/__init__.py octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py octavia/tests/unit/controller/worker/v2/__init__.py 
octavia/tests/unit/controller/worker/v2/test_controller_worker.py octavia/tests/unit/controller/worker/v2/flows/__init__.py octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py octavia/tests/unit/controller/worker/v2/flows/test_health_monitor_flows.py octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py octavia/tests/unit/controller/worker/v2/flows/test_l7rule_flows.py octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py octavia/tests/unit/controller/worker/v2/flows/test_member_flows.py octavia/tests/unit/controller/worker/v2/flows/test_pool_flows.py octavia/tests/unit/controller/worker/v2/tasks/__init__.py octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py octavia/tests/unit/controller/worker/v2/tasks/test_cert_task.py octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py octavia/tests/unit/controller/worker/v2/tasks/test_lifecycle_tasks.py octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py octavia/tests/unit/controller/worker/v2/tasks/test_retry_tasks.py octavia/tests/unit/db/__init__.py octavia/tests/unit/network/__init__.py octavia/tests/unit/network/drivers/__init__.py octavia/tests/unit/network/drivers/test_network_noop_driver.py octavia/tests/unit/network/drivers/neutron/__init__.py octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py octavia/tests/unit/network/drivers/neutron/test_base.py octavia/tests/unit/network/drivers/neutron/test_utils.py octavia/tests/unit/volume/__init__.py octavia/tests/unit/volume/drivers/__init__.py octavia/tests/unit/volume/drivers/test_cinder_driver.py octavia/tests/unit/volume/drivers/test_volume_noop_driver.py octavia/volume/__init__.py octavia/volume/volume_base.py octavia/volume/drivers/__init__.py octavia/volume/drivers/cinder_driver.py octavia/volume/drivers/noop_driver/__init__.py octavia/volume/drivers/noop_driver/driver.py playbooks/image-build/post.yaml playbooks/image-build/run.yaml releasenotes/notes/.placeholder releasenotes/notes/3rd-party-neutron-backends-deployment-82691be2bc72a786.yaml releasenotes/notes/Add-Stein-Prelude-7d8290b803db8c56.yaml releasenotes/notes/Add-TLS-client-auth-CA-certificate-6863f64a2fe70a6f.yaml releasenotes/notes/Add-TLS-client-auth-CRL-d0722fd175bc2f51.yaml releasenotes/notes/Add-TLS-client-auth-header-insertion-039debc7e6f06474.yaml releasenotes/notes/Add-TLS-client-auth-option-15d868d1009fc130.yaml releasenotes/notes/Add-UDP-protocol-support-9c011a23525092a1.yaml releasenotes/notes/Add-amphora-agent-config-update-API-298b31e6c0cd715c.yaml releasenotes/notes/Add-amphora-info-endpoint-e2e3b53ae5ab5a85.yaml releasenotes/notes/Add-cached_zone-to-the-amphora-record-7c3231c2b5b96574.yaml releasenotes/notes/Add-driver-agent-get-methods-b624a1342c3e6d0f.yaml releasenotes/notes/Add-log-offloading-233cd8612c0dd2b5.yaml releasenotes/notes/Add-pool-CA-and-CRL-bb467b17188ed022.yaml releasenotes/notes/Add-pool-tls-client-auth-01d3b8acfb78ab14.yaml releasenotes/notes/Add-pool-tls_enabled-f189677c0e13c447.yaml releasenotes/notes/Add-provider-agent-support-a735806c4da4c470.yaml releasenotes/notes/Adds-L7rule-support-for-TLS-client-authentication-22e3ae29aaf7fc26.yaml releasenotes/notes/Allow-configuration-of-listener-timeout-values-9a7600c4e21364e3.yaml 
releasenotes/notes/Allow-members-to-be-set-as-backup-e68e46bc52f2fc1f.yaml releasenotes/notes/Amphora-Failover-API-612090f761936254.yaml releasenotes/notes/Change-HTTPS-HealthMonitor-functionality-79240ef13e65cd88.yaml releasenotes/notes/Correct-naming-for-quota-resources-8e4309a839208cd1.yaml releasenotes/notes/Fix-API-update-null-None-1b400962017a3d56.yaml releasenotes/notes/Fix-HM-DB-Rollback-no-connection-2664c4f7823ecaec.yaml releasenotes/notes/Fix-allocate_and_associate-deadlock-3ff1464421c1d464.yaml releasenotes/notes/Fix-failover-ip-addresses-exhausted-69110b2fa4683e1a.yaml releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml releasenotes/notes/Fix-ifup-on-member-create-5b405d98eb036718.yaml releasenotes/notes/Fix-noop-batch-member-update-issue-09b76787553e7752.yaml releasenotes/notes/Fix-plug-vip-revert-abandoned-vrrp-port-efff14edce62ad75.yaml releasenotes/notes/IPv6-support-953ef81ed8555fce.yaml releasenotes/notes/Octavia-flavors-2a96424c3d65c224.yaml releasenotes/notes/Octavia-lib-transition-driver-agent-aeefef114898b8f5.yaml releasenotes/notes/Report-more-accurate-haproxy-statuses-7e995bb4c7cc0dd6.yaml releasenotes/notes/Set-retry-defaults-to-prod-values-f3cc10d16baa716a.yaml releasenotes/notes/Support-PKCS12-certificate-objects-1c6e896be9d35977.yaml releasenotes/notes/UDP-listener-health-d8fdf64a32e022d4.yaml releasenotes/notes/Use-Ubuntu-virtual-kernel-for-Amphora-a1e8af8bc7893011.yaml releasenotes/notes/add-ability-setting-barbican-acls-85f36747d4284035.yaml releasenotes/notes/add-ability-to-disable-tls-terminated-listeners-965ec7c1a8a9f732.yaml releasenotes/notes/add-amphora-flavor-field-54d42da0381ced7f.yaml releasenotes/notes/add-anti-affinity-policy-config-39df309fd12d443c.yaml releasenotes/notes/add-api-tag-filtering-8bfb3c3b7cfd6afe.yaml releasenotes/notes/add-batch-member-update-capability-4923bd266a9b2b80.yaml releasenotes/notes/add-compute-flavor-capability-ab202697a7fbdc3d.yaml releasenotes/notes/add-default-ciphers-2eb70b34290711be.yaml releasenotes/notes/add-healthcheck-middleware-6c09150bddd3113f.yaml releasenotes/notes/add-id-column-to-healthmonitor-a331934ad2cede87.yaml releasenotes/notes/add-jobboard-based-controller-599279c7cc172e955.yaml releasenotes/notes/add-lb-algorithm-source-ip-port-ff86433143e43136.yaml releasenotes/notes/add-monitor-address-and-port-to-member-99fa2ee65e2b04b4.yaml releasenotes/notes/add-policy-json-support-38929bb1fb581a7a.yaml releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml releasenotes/notes/add-ptvsd-debugger-33bb632bccf494bb.yaml releasenotes/notes/add-quota-support-fe63a52b6b903789.yaml releasenotes/notes/add-rh-flavors-support-for-amphora-agent-cd3e9f9f519b9ff2.yaml releasenotes/notes/add-rhel-amphora-ab4a7bada2fa3eb7.yaml releasenotes/notes/add-shared-pools-and-l7-ef9edf01bb9058e0.yaml releasenotes/notes/add-sos-element-5d6677471341e7f2.yaml releasenotes/notes/add-support-for-centos-8-e0730de5d20a48be.yaml releasenotes/notes/add-systemd-support-5794252f02bce666.yaml releasenotes/notes/add-upgrade-check-framework-cc440f3f440ba6d2.yaml releasenotes/notes/add-vip-acl-4a7e20d167fe4a49.yaml releasenotes/notes/add-x-forwarded-proto-19a1d971cf43b795.yaml releasenotes/notes/add_API_reference-81d84d0c8598b764.yaml releasenotes/notes/add_ability_to_disable_api_versions-253a8dc4253f0f56.yaml releasenotes/notes/add_api_audit-58dc16bff517eae7.yaml releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml releasenotes/notes/add_tag_support-4735534f4066b9af.yaml 
releasenotes/notes/additional-udp-healthcheck-types-2414a5edee9f5110.yaml releasenotes/notes/admin-state-up-fix-4aa278eac67646ae.yaml releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml releasenotes/notes/allow-operators-to-disable-ping-healthchecks-42fd8c3b88edaf35.yaml releasenotes/notes/allow-vip-on-mgmt-net-d6c65d4ccb2a8f2c.yaml releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml releasenotes/notes/amp-az-1a0b4255c77fd1dc.yaml releasenotes/notes/api-create-project-id-4bb984b24d56de2e.yaml releasenotes/notes/auth-strategy-keystone-80b3780a18420b6c.yaml releasenotes/notes/auto_detect_haproxy_user_group-c220b6a2c8f1d589.yaml releasenotes/notes/availability-zone-api-a28ff5e00bdcc69a.yaml releasenotes/notes/availability-zones-can-override-valid-vip-networks-5566aa4769c158dc.yaml releasenotes/notes/bug-1797130-8c9bfa50d9b6c955.yaml releasenotes/notes/build-rate-limiting-a2d2d4c9333a8f46.yaml releasenotes/notes/cert-encrypted-ramfs-381ffe3d4a7392d7.yaml releasenotes/notes/change-keystone-backend-config-d246b1e34015c86c.yaml releasenotes/notes/correct-amp-client-auth-vulnerability-6803f4bac2508e4c.yaml releasenotes/notes/custom_eventstreamer_queue_url-7a98bd6a7e92e9de.yaml releasenotes/notes/deleted-404-2cdd751e7afbe036.yaml releasenotes/notes/deprecate-amp_ssh_key_name-e1041a64ed970a9e.yaml releasenotes/notes/disable_logging-3b50f388ee2b8127.yaml releasenotes/notes/diskimage-create-git-branch-9c44e7e3fa70a985.yaml releasenotes/notes/documentation-migration-f72c6a1703a105b7.yaml releasenotes/notes/drop-python-2-7-a6b3f456bf6a3da7.yaml releasenotes/notes/enable-keystone-on-api-b3ebb132ad5ab308.yaml releasenotes/notes/enable-mutable-configuration-1d7f62a133148767.yaml releasenotes/notes/encrypt-certs-and-keys-5175d7704d8df3ce.yaml releasenotes/notes/extend_api_to_accept_qos_policy_id-128ab592a735f3b8.yaml releasenotes/notes/failover-vip-no-dhcp-50805c5321ebbb05.yaml releasenotes/notes/fix-API-list-performance-3b121deffbc3ce4a.yaml releasenotes/notes/fix-IPv6-vip-079a3285f78686ee.yaml releasenotes/notes/fix-PING-health-monitor-bc38de57fa759ac0.yaml releasenotes/notes/fix-SNI-single-process-879ffce5eaa6c1c3.yaml releasenotes/notes/fix-active-standby-in-centos-4e47140e0e139de8.yaml releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml releasenotes/notes/fix-amp-failover-missing-vrrp-port-9b5f13b9951b7edb.yaml releasenotes/notes/fix-amphorav2-failover-secgroup-c793de5e00b32653.yaml releasenotes/notes/fix-api-listener-update-sni-containers-6595c52e2de1f621.yaml releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml releasenotes/notes/fix-barbican-client-verfiy-689be1b9389bd1d8.yaml releasenotes/notes/fix-certs-ramfs-race-561f355d13fc6d14.yaml releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml releasenotes/notes/fix-creating-fully-populated-load-balancer-ae57ffae5c017ac3.yaml releasenotes/notes/fix-default-timeout-values-for-listeners-108c8048ba8beb9a.yaml releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml releasenotes/notes/fix-driver-agent-decrement-quota-27486d9fa0bdeb89.yaml releasenotes/notes/fix-driver-errors-81d33948288bf8cf.yaml releasenotes/notes/fix-dual-error-amp-failover-69e323892bad8254.yaml releasenotes/notes/fix-duplicate-sg-creation-0c502a5d2d8c276d.yaml releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml releasenotes/notes/fix-error-in-rsyslog-config-a316a7856e1a847a.yaml releasenotes/notes/fix-error-messages-ec817a66249e6666.yaml 
releasenotes/notes/fix-eth1-ip-flush-7fadda4bdca387b5.yaml releasenotes/notes/fix-health-check-db-outage-279b0bc1d0039312.yaml releasenotes/notes/fix-host-routes-with-amphorav2-and-persistence-54b99d651a4ee9c4.yaml releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml releasenotes/notes/fix-housekeeping-db-performance-b0d0fcfcce696314.yaml releasenotes/notes/fix-invalid-attribute-for-filtering-d2ddb95a1acbded2.yaml releasenotes/notes/fix-ipv6-udp-health-message-ed94b35bbea396ec.yaml releasenotes/notes/fix-lb-error-failover-2c17afaa20c0c97f.yaml releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml releasenotes/notes/fix-listener-MAX_TIMEOUT-4c4fdf804a96c34b.yaml releasenotes/notes/fix-lo-interface-amphora-netns-90fb9934026e1485.yaml releasenotes/notes/fix-loadbalancer-db-cleanup-61ee81a4fd597067.yaml releasenotes/notes/fix-missing-amphora-create-dependency-a954ded0d260d462.yaml releasenotes/notes/fix-missing-cloud-guest-utils-rh-eb39a53502dc1e91.yaml releasenotes/notes/fix-missing-cronie-rh-bd31001338ddbb1e.yaml releasenotes/notes/fix-multi-amp-down-failover-952618fb8d3d8ae6.yaml releasenotes/notes/fix-nf_conntrack_buckets-sysctl-75ae6dbb9d052863.yaml releasenotes/notes/fix-no-resolvconf-rhel-dhclient-hook-36a1c3b1a3b03a3d.yaml releasenotes/notes/fix-nr_open-limit-value-7f475c3e301a608d.yaml releasenotes/notes/fix-oslo-messaging-connection-leakage-aeb79474105ac116.yaml releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml releasenotes/notes/fix-pool-crl-2cc6f2705f5b2009.yaml releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml releasenotes/notes/fix-provider-capabilities-filtering-8bd12b2cf7b37a84.yaml releasenotes/notes/fix-provider-driver-utils-b75485785dcd6462.yaml releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml releasenotes/notes/fix-route-table-b2ec0aa7b92d2abc.yaml releasenotes/notes/fix-spare-amphora-check-and-creation-3adf939b45610155.yaml releasenotes/notes/fix-subnet-host_routes-amphorav2-3c079c5a3bfa1b3d.yaml releasenotes/notes/fix-support-for-monitoring-address-and-port-in-udp-members-ff83395544f228cf.yaml releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml releasenotes/notes/fix-udp-listener-wrr-50de9dc0774a8ea1.yaml releasenotes/notes/fix-udp-members-status-ef3202849bfda29b.yaml releasenotes/notes/fix-udp-only-lbs-c4ca42106fc1e2bb.yaml releasenotes/notes/fix-udp-server-status-bug-db4d3e38bcdf0554.yaml releasenotes/notes/fix-unlimited-connection-limit-48079688de033c1a.yaml releasenotes/notes/fix-vip-net-no-gw-b46c3ade1a748e69.yaml releasenotes/notes/fix-vip-network-ip-availability-2e924f32abf01052.yaml releasenotes/notes/fix-vip-plug-centos-74c2fe7099964b08.yaml releasenotes/notes/fix-vip-qos-policy-extension-enabled-3e16e1c23a7d7ae5.yaml releasenotes/notes/fix-worker-graceful-shutdown-c44b6797637aa1b3.yaml releasenotes/notes/fix_active_standby_ipv6-0317d5cd9e5d50e5.yaml releasenotes/notes/fixed-spare-amphora-rotation-007ba406411a313d.yaml releasenotes/notes/fixed-unstable-UDP-health-status-ba32690b83a9641b.yaml releasenotes/notes/force-controlplane-amphora-communication-to-use-tls1.2-1c4adf72d2ce5a82.yaml releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml releasenotes/notes/glance-tags-for-amphora-images-28bd9df1ed4b9ca3.yaml releasenotes/notes/glance_image_owner-42c92a12f91a62a6.yaml releasenotes/notes/haproxy-single-process-b17a3af3a97accea.yaml releasenotes/notes/healthcheck-cache-641f0a64e6f5856c.yaml 
releasenotes/notes/input-validation-server_certs_key_passphrase-6a9dfc190c9deba8.yaml releasenotes/notes/keepalived-race-with-haproxy-e402ef7f466871f6.yaml releasenotes/notes/lb-delete-flow-refactor-cfb1bc621bbe92b4.yaml releasenotes/notes/lb_flow_amp_vip-a83db5d84e17a26a.yaml releasenotes/notes/link-amphora-to-loadbalancer-earlier-ab3dddec48b8da96.yaml releasenotes/notes/load-balancer-expiry-age-a473f9147552f1b1.yaml releasenotes/notes/make-amphora-cert-validity-configurable-7defc508b1174f89.yaml releasenotes/notes/make-batch-member-call-additive-4785163e625fed1a.yaml releasenotes/notes/moving-api-config-variables-into-new-section-e1c20b77aaf5ea15.yaml releasenotes/notes/new-amphora-fields-fa3ffc5801b5e551.yaml releasenotes/notes/new-default_connection_limit-config-option-3ed9f0ed6ec2b514.yaml releasenotes/notes/octavia-active-standby-cec5d2ad4fd214d8.yaml releasenotes/notes/octavia-v2-api-c32a62b37c2c8f6f.yaml releasenotes/notes/octavia_v2_RBAC-0eb2b51aa6278435.yaml releasenotes/notes/per-amphora-statistics-api-5479605c7f3adb12.yaml releasenotes/notes/pike-release-35a1d632ce854d4a.yaml releasenotes/notes/pool-ciphers-73a347942e31788b.yaml releasenotes/notes/provisioning_neutron_db_sync-c019d96a7b64fe20.yaml releasenotes/notes/py3-hmac-digest-81696f6b176e7ae4.yaml releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml releasenotes/notes/remove-bbq-unset-acl-e680020de6a9ad3d.yaml releasenotes/notes/remove-deprecated-api-configs-3f5652f71610b05e.yaml releasenotes/notes/remove-deprecated-v1-resources-6360da3de27b74d3.yaml releasenotes/notes/remove-duplicated-cert_generator-option-83d18647dc1d2954.yaml releasenotes/notes/remove-tenant_id-c0352efbfb3a54f9.yaml releasenotes/notes/remove_user_group_option-56ba749d0064a394.yaml releasenotes/notes/render-disabled-members-to-make-statuses-consistent-69189f71da2e02e8.yaml releasenotes/notes/reserved-ips-7ef3a63ab0b6b28a.yaml releasenotes/notes/same-port-listeners-41198368d470e821.yaml releasenotes/notes/separate-thread-pool-for-health-stats-update-c263c844075a7721.yaml releasenotes/notes/service-type-73efc939e48d5858.yaml releasenotes/notes/statuses_alias-27559e3d74b9eaf0.yaml releasenotes/notes/stop-logging-amphora-cert-2e188675699d60d5.yaml releasenotes/notes/support-az-on-lb-create-562dcf639bb272ea.yaml releasenotes/notes/support-http-health-check-with-host-header-e2cf1f2a98d4114f.yaml releasenotes/notes/support-networks-without-dhcp-3458a063333ab7a8.yaml releasenotes/notes/support-oslo_middleware-http_proxy_to_wsgi-928c6fc5ec3d421c.yaml releasenotes/notes/support-pkcs7-intermediate-ca-bundles-279c12bad974bff7.yaml releasenotes/notes/support-proxy-protocol-cc5991175a110619.yaml releasenotes/notes/support-redirect-http-code-1c2e87ef7fda12e97.yaml releasenotes/notes/support-redirect-prefix-7f8b289aee04fe99.yaml releasenotes/notes/support-remote-debugging-fcb52df4a59c1467.yaml releasenotes/notes/support-wsgi-deployment-56013fef7172e982.yaml releasenotes/notes/switch-taskflow-engine-parallel-8bf743eca15a0253.yaml releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml releasenotes/notes/use-dib-distribution-mirror-400d96c1a7df9862.yaml releasenotes/notes/validate-access-to-vip_subnet_id-48fc92b45529cafd.yaml releasenotes/notes/validate-same-ip-protocol-in-udp-lb-2813b545131097ec.yaml releasenotes/notes/validate-vip-network-params-57662cc3a99f80e5.yaml releasenotes/notes/vip-port-project-id-bbb26b657b08365e.yaml releasenotes/notes/volume-based-amphora-9a1899634f5244b0.yaml 
releasenotes/notes/zombie_amp-1b435eb66643dab8.yaml releasenotes/notes/fix-listener-API-update-with-null/None-fde2ffab82e783a4.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po specs/example.dot specs/skeleton.rst specs/template.rst specs-tests/__init__.py specs-tests/test_titles.py specs/version0.5/amphora-driver-interface.rst specs/version0.5/amphora-manager-interface.rst specs/version0.5/base-image.rst specs/version0.5/component-design.rst specs/version0.5/controller-worker.rst specs/version0.5/controller.dot specs/version0.5/controller.rst specs/version0.5/haproxy-amphora-api.rst specs/version0.5/housekeeping-manager-interface.rst specs/version0.5/network-driver-interface.rst specs/version0.5/nova-compute-driver.rst specs/version0.5/operator-api.rst specs/version0.5/queue-consumer.rst specs/version0.5/tls-data-security-1.diag specs/version0.5/tls-data-security-2.diag specs/version0.5/tls-data-security.rst specs/version0.8/active_passive_loadbalancer.rst specs/version0.8/use_glance_tag_to_refer_to_image.rst specs/version0.9/active-active-distributor.rst specs/version0.9/active-active-topology.rst specs/version0.9/stats_api.rst specs/version1.0/flavors.rst specs/version1.0/health_ip_port.rst specs/version1.0/n-lbaas-api-parity.rst specs/version1.0/vip-qos-policy-application.rst specs/version1.1/active-active-l3-distributor.rst specs/version1.1/enable-provider-driver.rst specs/version1.1/udp_support.rst tools/__init__.py tools/coding-checks.sh tools/create_flow_docs.py tools/flow-list-v2.txt tools/flow-list.txt tools/misc-sanity-checks.sh tools/pkcs7_to_pem.py zuul.d/jobs.yaml zuul.d/projects.yaml././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052691.0 octavia-6.2.2/octavia.egg-info/dependency_links.txt0000664000175000017500000000000100000000000022360 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052691.0 octavia-6.2.2/octavia.egg-info/entry_points.txt0000664000175000017500000000742100000000000021614 0ustar00zuulzuul00000000000000[console_scripts] amphora-agent = octavia.cmd.agent:main haproxy-vrrp-check = octavia.cmd.haproxy_vrrp_check:main octavia-api = octavia.cmd.api:main octavia-db-manage = octavia.db.migration.cli:main octavia-driver-agent = octavia.cmd.driver_agent:main octavia-health-manager = octavia.cmd.health_manager:main octavia-housekeeping = octavia.cmd.house_keeping:main octavia-status = octavia.cmd.status:main octavia-worker = octavia.cmd.octavia_worker:main [octavia.amphora.drivers] amphora_haproxy_rest_driver = octavia.amphorae.drivers.haproxy.rest_api_driver:HaproxyAmphoraLoadBalancerDriver amphora_noop_driver = octavia.amphorae.drivers.noop_driver.driver:NoopAmphoraLoadBalancerDriver [octavia.amphora.health_update_drivers] health_db = octavia.controller.healthmanager.health_drivers.update_db:UpdateHealthDb health_logger = octavia.controller.healthmanager.health_drivers.update_logging:HealthUpdateLogger [octavia.amphora.stats_update_drivers] stats_db = 
octavia.controller.healthmanager.health_drivers.update_db:UpdateStatsDb stats_logger = octavia.controller.healthmanager.health_drivers.update_logging:StatsUpdateLogger [octavia.amphora.udp_api_server] keepalived_lvs = octavia.amphorae.backends.agent.api_server.keepalivedlvs:KeepalivedLvs [octavia.api.drivers] amphora = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver amphorav2 = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver noop_driver = octavia.api.drivers.noop_driver.driver:NoopProviderDriver noop_driver-alt = octavia.api.drivers.noop_driver.driver:NoopProviderDriver octavia = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver [octavia.barbican_auth] barbican_acl_auth = octavia.certificates.common.auth.barbican_acl:BarbicanACLAuth [octavia.cert_generator] local_cert_generator = octavia.certificates.generator.local:LocalCertGenerator [octavia.cert_manager] barbican_cert_manager = octavia.certificates.manager.barbican:BarbicanCertManager castellan_cert_manager = octavia.certificates.manager.castellan_mgr:CastellanCertManager local_cert_manager = octavia.certificates.manager.local:LocalCertManager [octavia.compute.drivers] compute_noop_driver = octavia.compute.drivers.noop_driver.driver:NoopComputeDriver compute_nova_driver = octavia.compute.drivers.nova_driver:VirtualMachineManager [octavia.distributor.drivers] distributor_noop_driver = octavia.distributor.drivers.noop_driver.driver:NoopDistributorDriver single_VIP_amphora = octavia.distributor.drivers.single_VIP_amphora.driver:SingleVIPAmpDistributorDriver [octavia.driver_agent.provider_agents] noop_agent = octavia.api.drivers.noop_driver.agent:noop_provider_agent [octavia.network.drivers] allowed_address_pairs_driver = octavia.network.drivers.neutron.allowed_address_pairs:AllowedAddressPairsDriver containers_driver = octavia.network.drivers.neutron.containers:ContainersDriver network_noop_driver = octavia.network.drivers.noop_driver.driver:NoopNetworkDriver [octavia.plugins] hot_plug_plugin = octavia.controller.worker.v1.controller_worker:ControllerWorker [octavia.volume.drivers] volume_cinder_driver = octavia.volume.drivers.cinder_driver:VolumeManager volume_noop_driver = octavia.volume.drivers.noop_driver.driver:NoopVolumeDriver [octavia.worker.jobboard_driver] redis_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:RedisTaskFlowDriver zookeeper_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:ZookeeperTaskFlowDriver [oslo.config.opts] octavia = octavia.opts:list_opts [oslo.middleware.healthcheck] octavia_db_check = octavia.api.healthcheck.healthcheck_plugins:OctaviaDBHealthcheck [oslo.policy.enforcer] octavia = octavia.common.policy:get_no_context_enforcer [oslo.policy.policies] octavia = octavia.policies:list_rules [wsgi_scripts] octavia-wsgi = octavia.api.app:setup_app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052691.0 octavia-6.2.2/octavia.egg-info/not-zip-safe0000664000175000017500000000000100000000000020540 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052691.0 octavia-6.2.2/octavia.egg-info/pbr.json0000664000175000017500000000005700000000000017772 0ustar00zuulzuul00000000000000{"git_version": "0b4e05ae", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052691.0 
octavia-6.2.2/octavia.egg-info/requires.txt0000664000175000017500000000213300000000000020711 0ustar00zuulzuul00000000000000Babel!=2.4.0,>=2.3.4 Flask!=0.11,>=0.10 Jinja2>=2.10 PyMySQL>=0.7.6 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 SQLAlchemy-Utils>=0.30.11 WSME>=0.8.0 WebOb>=1.8.2 Werkzeug>=0.14.1 alembic>=0.8.10 castellan>=0.16.0 cotyledon>=1.3.0 cryptography>=2.1 debtcollector>=1.19.0 diskimage-builder>=2.27.0 distro>=1.2.0 futurist>=1.2.0 gunicorn>=19.9.0 jsonschema>=2.6.0 keystoneauth1>=3.4.0 keystonemiddleware>=4.17.0 netaddr>=0.7.19 netifaces>=0.10.4 octavia-lib>=2.0.0 oslo.config>=5.2.0 oslo.context>=2.19.2 oslo.db>=4.27.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=6.3.0 oslo.middleware>=4.0.1 oslo.policy>=1.30.0 oslo.reports>=1.18.0 oslo.serialization>=2.28.1 oslo.upgradecheck>=0.1.0 oslo.utils>=3.33.0 pbr!=2.1.0,>=2.0.0 pecan>=1.3.2 pyOpenSSL>=17.1.0 pyasn1!=0.2.3,>=0.1.8 pyasn1-modules>=0.0.6 python-barbicanclient>=4.5.2 python-cinderclient>=3.3.0 python-glanceclient>=2.8.0 python-neutronclient>=6.7.0 python-novaclient>=9.1.0 requests>=2.14.2 rfc3986>=0.3.1 setproctitle>=1.1.10 simplejson>=3.13.2 stevedore>=1.20.0 taskflow>=4.1.0 tenacity>=5.0.4 [:(sys_platform!='win32')] pyroute2>=0.4.21 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052691.0 octavia-6.2.2/octavia.egg-info/top_level.txt0000664000175000017500000000001000000000000021033 0ustar00zuulzuul00000000000000octavia ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3342166 octavia-6.2.2/playbooks/0000775000175000017500000000000000000000000015175 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4342167 octavia-6.2.2/playbooks/image-build/0000775000175000017500000000000000000000000017354 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/playbooks/image-build/post.yaml0000664000175000017500000000040500000000000021224 0ustar00zuulzuul00000000000000- hosts: all name: Copy image tasks: - synchronize: dest: "{{ zuul.executor.work_root }}/artifacts/" mode: pull src: "{{ ansible_user_dir }}/test-images" verify_host: true rsync_opts: - "--exclude=/*/*/"././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/playbooks/image-build/run.yaml0000664000175000017500000000376000000000000021052 0ustar00zuulzuul00000000000000- hosts: all tasks: - name: Install apt packages apt: pkg: "{{ item }}" state: "latest" update_cache: yes register: install_packages become: yes until: install_packages is success retries: 5 delay: 2 with_items: - qemu-utils - uuid-runtime - curl - kpartx - python3-yaml - debootstrap - qemu - bc - python3-venv - python3-setuptools when: - ansible_os_family == 'Debian' - name: Install rpm packages dnf: pkg: "{{ item }}" state: "latest" update_cache: yes register: install_packages become: yes until: install_packages is success retries: 5 delay: 2 with_items: - qemu-img - uuid - curl - kpartx - python3-pyyaml - qemu-kvm - python3-setuptools - yum when: - ansible_os_family == 'RedHat' - name: Install required pip packages pip: name: "{{ item }}" state: "latest" virtualenv: /var/tmp/venv virtualenv_command: python3 -m venv register: install_packages until: install_packages is success retries: 5 delay: 2 become: yes with_items: - diskimage-builder - 
name: Ensure artifacts/images directory exists file: path: '{{ ansible_user_dir }}/test-images' state: directory - name: Build an amphora image for publishing shell: >- . /var/tmp/venv/bin/activate && \ ./diskimage-create.sh -o {{ ansible_user_dir }}/test-images/test-only-amphora-x64-haproxy-{{ amphora_os }}-{{ amphora_os_release }}.qcow2 \ -i {{ amphora_os }} \ -d {{ amphora_os_release }} \ -s {{ amphora_image_size | default(2) }} args: chdir: "{{ ansible_user_dir }}/src/opendev.org/openstack/octavia/diskimage-create" tags: - skip_ansible_lint ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3342166 octavia-6.2.2/releasenotes/0000775000175000017500000000000000000000000015663 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4502168 octavia-6.2.2/releasenotes/notes/0000775000175000017500000000000000000000000017013 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/.placeholder0000664000175000017500000000000000000000000021264 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/3rd-party-neutron-backends-deployment-82691be2bc72a786.yaml0000664000175000017500000000047600000000000031315 0ustar00zuulzuul00000000000000--- features: - | Added hook to plugin.sh: `octavia_create_network_interface_device` and `octavia_delete_network_interface_device`. For each of these functions, if they are defined during stack (respectively unstack), they are called to create (respectively delete) the management network interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-Stein-Prelude-7d8290b803db8c56.yaml0000664000175000017500000000254500000000000025217 0ustar00zuulzuul00000000000000--- prelude: | For the OpenStack Stein release, the Octavia team is excited to announce support for: Octavia flavors, TLS client authentication, backend re-encryption, and object tags. * Octavia flavors allow an operator to define "flavors" of load balancers, such as "active-standby" or "single" using the amphora driver, that configure the load balancer topology. The Amphora driver also supports specifying the nova compute flavor to use for the load balancer amphora. * TLS client authentication allows the listener to request a client certificate from users connecting to the load balancer. This certificate can then be checked against a CA certificate and optionally a certificate revocation list. New HTTP header insertions allow passing client certificate information to the backend members, while new L7 rules allow you to take custom actions based on the content of the client certificate. * Backend re-encryption allows users to configure pools to initiate TLS connections to the backend member servers. This enables load balancers to authenticate and encrypt connections from the load balancer to the backend member server. * Object tags allow users to assign a list of strings to the load balancer objects that can then be used for advanced API list filtering. 
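As a hedged sketch of the flavors workflow described in the Stein prelude above (the profile, flavor, and subnet names are placeholders, the flags assume the python-octaviaclient OSC plugin, and "loadbalancer_topology" is the amphora flavor capability the prelude refers to):

    # Define a flavor profile that pins the amphora driver to an ACTIVE/STANDBY topology.
    openstack loadbalancer flavorprofile create \
        --name active-standby-profile \
        --provider amphora \
        --flavor-data '{"loadbalancer_topology": "ACTIVE_STANDBY"}'
    # Publish it to users as a named flavor.
    openstack loadbalancer flavor create \
        --name active-standby \
        --flavorprofile active-standby-profile \
        --enable
    # Users then select the flavor when creating a load balancer.
    openstack loadbalancer create --name lb1 --vip-subnet-id my-subnet --flavor active-standby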
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-TLS-client-auth-CA-certificate-6863f64a2fe70a6f.yaml0000664000175000017500000000022500000000000030230 0ustar00zuulzuul00000000000000--- features: - | You can now specify a certificate authority certificate reference, on listeners, for use with TLS client authentication. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-TLS-client-auth-CRL-d0722fd175bc2f51.yaml0000664000175000017500000000052700000000000026072 0ustar00zuulzuul00000000000000--- features: - | You can now provide a certificate revocation list reference for listeners using TLS client authentication. security: - | Note that the amphora provider currently only supports the crl-file provided to check for revocation. Remote revocation lists and/or OCSP will not be used by the amphora provider. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-TLS-client-auth-header-insertion-039debc7e6f06474.yaml0000664000175000017500000000052700000000000030730 0ustar00zuulzuul00000000000000--- features: - | When using TLS client authentication on TERMINATED_HTTPS listeners, you can now insert the following headers for backend members\: 'X-SSL-Client-Verify', 'X-SSL-Client-Has-Cert', 'X-SSL-Client-DN', 'X-SSL-Client-CN', 'X-SSL-Issuer', 'X-SSL-Client-SHA1', 'X-SSL-Client-Not-Before', 'X-SSL-Client-Not-After'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-TLS-client-auth-option-15d868d1009fc130.yaml0000664000175000017500000000012300000000000026614 0ustar00zuulzuul00000000000000--- features: - | You can now enable TLS client authentication on listeners. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-UDP-protocol-support-9c011a23525092a1.yaml0000664000175000017500000000136200000000000026342 0ustar00zuulzuul00000000000000--- features: - Added UDP protocol support to listeners and pools. - Adds a health monitor type of UDP-CONNECT that does a basic UDP port connect. issues: - You cannot mix IPv4 UDP listeners with IPv6 members at this time. This is being tracked with this story https://storyboard.openstack.org/#!/story/2003329 upgrade: - | UDP protocol support requires an update to the amphora image to support UDP protocol statistics reporting and UDP-CONNECT health monitoring. other: - | Health monitors of type UDP-CONNECT may not work correctly if ICMP unreachable is not enabled on the member server or is blocked by a security rule. A member server may be marked as operating status ONLINE when it is actually down. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-amphora-agent-config-update-API-298b31e6c0cd715c.yaml0000664000175000017500000000102100000000000030447 0ustar00zuulzuul00000000000000--- features: - | Octavia now has an administrative API that updates the amphora agent configuration on running amphora. 
upgrade: - | When the amphora agent configuration update API is called on an amphora running a version of the amphora agent that does not support configuration updates, an ERROR log message will be posted to the controller log file indicating that the amphora does not support agent configuration updates. In this case, the amphora image should be updated to a newer version. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-amphora-info-endpoint-e2e3b53ae5ab5a85.yaml0000664000175000017500000000022000000000000027124 0ustar00zuulzuul00000000000000--- features: - | Added a new endpoint /v2.0/octavia/amphorae to expose internal details about amphorae. This endpoint is admin only. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-cached_zone-to-the-amphora-record-7c3231c2b5b96574.yaml0000664000175000017500000000100500000000000030777 0ustar00zuulzuul00000000000000--- features: - | The compute zone (if applicable) is now cached in the database and returned in the Amphora API as `cached_zone`. Please note that this is only set at the original time of provisioning, and could be stale for various reasons (for example, if live-migrations have taken place due to maintenance). We recommend it be used for reference only, unless you are absolutely certain it is current in your environment. The source of truth is still the system you use for compute. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-driver-agent-get-methods-b624a1342c3e6d0f.yaml0000664000175000017500000000013600000000000027373 0ustar00zuulzuul00000000000000--- features: - | Adds support for the driver agent to query for load balancer objects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-log-offloading-233cd8612c0dd2b5.yaml0000664000175000017500000000055200000000000025457 0ustar00zuulzuul00000000000000--- features: - | Octavia now supports Amphora log offloading. Operators can define syslog targets for the Amphora administrative logs and for the tenant load balancer flow logs. issues: - | Amphorae are unable to provide tenant flow logs for UDP listeners. upgrade: - | To enable log offloading, the amphora image needs to be updated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-pool-CA-and-CRL-bb467b17188ed022.yaml0000664000175000017500000000022500000000000025126 0ustar00zuulzuul00000000000000--- features: - | You can now specify a ca_tls_container_ref and crl_container_ref on pools for validating backend pool members using TLS. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-pool-tls-client-auth-01d3b8acfb78ab14.yaml0000664000175000017500000000017400000000000026715 0ustar00zuulzuul00000000000000--- features: - | You can now specify a tls_container_ref on pools for TLS client authentication to pool members.
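A minimal command-line sketch of the pool TLS options in the two notes above, assuming the certificate material is already stored in Barbican and that the python-octaviaclient flags map onto the API fields named in the notes (the pool and listener names are placeholders):

    # tls_container_ref: the client certificate and key the pool presents to members.
    # ca_tls_container_ref / crl_container_ref: validate the member server certificates.
    openstack loadbalancer pool create \
        --name reencrypt-pool --listener listener1 \
        --protocol HTTPS --lb-algorithm ROUND_ROBIN \
        --tls-container-ref "$CLIENT_CERT_REF" \
        --ca-tls-container-ref "$CA_CERT_REF" \
        --crl-container-ref "$CRL_REF"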
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-pool-tls_enabled-f189677c0e13c447.yaml0000664000175000017500000000022400000000000025675 0ustar00zuulzuul00000000000000--- features: - | You can now enable TLS backend re-encryption for connections to member servers by enabling tls_enabled option on pools. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Add-provider-agent-support-a735806c4da4c470.yaml0000664000175000017500000000027400000000000027161 0ustar00zuulzuul00000000000000--- features: - | The Octavia driver-agent now supports starting provider driver agents. Provider driver agents are long running agent processes supporting provider drivers. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=octavia-6.2.2/releasenotes/notes/Adds-L7rule-support-for-TLS-client-authentication-22e3ae29aaf7fc26.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Adds-L7rule-support-for-TLS-client-authentication-22e3ae29aaf7fc26.0000664000175000017500000000034500000000000032642 0ustar00zuulzuul00000000000000--- features: - | Adds the ability to define L7 rules based on TLS client authentication information. The new L7 rules are\: "L7RULE_TYPE_SSL_CONN_HAS_CERT", "L7RULE_TYPE_VERIFY_RESULT", and "L7RULE_TYPE_DN_FIELD". ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=octavia-6.2.2/releasenotes/notes/Allow-configuration-of-listener-timeout-values-9a7600c4e21364e3.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Allow-configuration-of-listener-timeout-values-9a7600c4e21364e3.yam0000664000175000017500000000063700000000000032732 0ustar00zuulzuul00000000000000--- features: - | Listeners have four new timeout settings: * `timeout_client_data`: Frontend client inactivity timeout * `timeout_member_connect`: Backend member connection timeout * `timeout_member_data`: Backend member inactivity timeout * `timeout_tcp_inspect`: Time to wait for TCP packets for content inspection The value for all of these fields is expected to be in milliseconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Allow-members-to-be-set-as-backup-e68e46bc52f2fc1f.yaml0000664000175000017500000000042400000000000030431 0ustar00zuulzuul00000000000000--- features: - | Members have a new boolean option `backup`. When set to `true`, the member will not receive traffic until all non-backup members are offline. Once all non-backup members are offline, traffic will begin balancing between the backup members. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Amphora-Failover-API-612090f761936254.yaml0000664000175000017500000000052000000000000025277 0ustar00zuulzuul00000000000000--- features: - | Added the 'failover' sub-resource for the Amphora API. Each amphora can be triggered to failover by sending a PUT (with an empty body) to the resource ``/v2.0/octavia/amphorae//failover``. It will cause the amphora to be recycled and replaced, in the same way as the health-triggered failover. 
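The failover sub-resource described in the Amphora-Failover-API note above takes a plain HTTP PUT with an empty body; a hedged sketch (the endpoint URL and amphora ID are placeholders):

    TOKEN=$(openstack token issue -f value -c id)
    # An empty-body PUT recycles and replaces the amphora, as in a health-triggered failover.
    curl -X PUT -H "X-Auth-Token: $TOKEN" \
        "http://<octavia-endpoint>/v2.0/octavia/amphorae/<amphora-id>/failover"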
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Change-HTTPS-HealthMonitor-functionality-79240ef13e65cd88.yaml0000664000175000017500000000103000000000000031570 0ustar00zuulzuul00000000000000--- features: - | New Health Monitor type "TLS-HELLO" to perform a simple TLS connection. upgrade: - | If users have configured Health Monitors of type "HTTPS" and are expecting a simple "TLS-HELLO" check, they will need to recreate their monitor with the new "TLS-HELLO" type. fixes: - | Health Monitor type "HTTPS" now correctly performs the configured check. This is done with all certificate validation disabled, so it will not work if backend members are performing client certificate validation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Correct-naming-for-quota-resources-8e4309a839208cd1.yaml0000664000175000017500000000035300000000000030554 0ustar00zuulzuul00000000000000--- deprecations: - | The quota objects named `health_monitor` and `load_balancer` have been renamed to `healthmonitor` and `loadbalancer`, respectively. The old names are deprecated, and will be removed in the T cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Fix-API-update-null-None-1b400962017a3d56.yaml0000664000175000017500000000032000000000000026134 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the API handling of None (JSON null) on object update calls. The API will now either clear the value from the field or will reset the value of the field to the API default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Fix-HM-DB-Rollback-no-connection-2664c4f7823ecaec.yaml0000664000175000017500000000024000000000000030005 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue with the health manager reporting an UnboundLocalError if it gets an exception attempting to get a database connection. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Fix-allocate_and_associate-deadlock-3ff1464421c1d464.yaml0000664000175000017500000000013700000000000030701 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a potential DB deadlock in allocate_and_associate found in testing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Fix-failover-ip-addresses-exhausted-69110b2fa4683e1a.yaml0000664000175000017500000000023700000000000030732 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue with load balancer failover, when the VIP subnet is out of IP addresses, that could lead to the VIP being deallocated. 
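For users recreating an "HTTPS" monitor as the new "TLS-HELLO" type per the upgrade note above, a hedged CLI sketch (the pool name and probe intervals are placeholders):

    openstack loadbalancer healthmonitor create \
        --type TLS-HELLO \
        --delay 5 --timeout 3 --max-retries 3 \
        my-pool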
././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=octavia-6.2.2/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb0000664000175000017500000000020000000000000033205 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where, when a load balancer is disabled, the Octavia Health Manager kept failing over the amphorae. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Fix-ifup-on-member-create-5b405d98eb036718.yaml0000664000175000017500000000012400000000000026565 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue creating members on networks with IPv6 subnets. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Fix-noop-batch-member-update-issue-09b76787553e7752.yaml0000664000175000017500000000020600000000000030270 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where batch member updates that don't have any changes were not properly rolling back the update. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Fix-plug-vip-revert-abandoned-vrrp-port-efff14edce62ad75.yaml0000664000175000017500000000025300000000000032100 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue where, if we were unable to attach the base (VRRP) port to an amphora instance, the revert would not clean up the port in neutron. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/IPv6-support-953ef81ed8555fce.yaml0000664000175000017500000000027200000000000024516 0ustar00zuulzuul00000000000000--- features: - Adds support for IPv6 upgrade: - To support IPv6 a database migration and amphora image update are required. fixes: - Resolves an issue with subnets larger than /24 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Octavia-flavors-2a96424c3d65c224.yaml0000664000175000017500000000027400000000000025013 0ustar00zuulzuul00000000000000--- features: - | Octavia now has flavors support which allows the operator to define named, custom configurations that users can select from when creating a load balancer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Octavia-lib-transition-driver-agent-aeefef114898b8f5.yaml0000664000175000017500000000163600000000000031211 0ustar00zuulzuul00000000000000--- features: - | The Stein release of Octavia introduces the octavia-lib python module. This library enables provider drivers to integrate more easily with the Octavia API by providing a shared set of coding objects and interfaces. upgrade: - | The Stein release of Octavia adds the driver-agent controller process. This process is deployed along with the Octavia API process and uses unix domain sockets for communication between the provider drivers using octavia-lib and the driver-agent.
When upgrading to Stein, operators should make sure that the /var/run/octavia directory is available for the driver-agent with the appropriate ownership and permissions for the driver-agent and API processes to access it. The operator may need to make sure the driver-agent process starts after installation. For example, a systemd service may need to be created and enabled for it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Report-more-accurate-haproxy-statuses-7e995bb4c7cc0dd6.yaml0000664000175000017500000000073000000000000031626 0ustar00zuulzuul00000000000000--- fixes: - | Some versions of HAProxy incorrectly reported nodes in DRAIN status as being UP, and Octavia code was written around this incorrect reporting. This has been fixed in some versions of HAProxy and is now handled properly in Octavia as well. Now it is possible for members to be in the status DRAINING. Note that this is masked when statuses are forwarded to neutron-lbaas in the eventstream, so no compatibility change is necessary. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Set-retry-defaults-to-prod-values-f3cc10d16baa716a.yaml0000664000175000017500000000244000000000000030614 0ustar00zuulzuul00000000000000--- upgrade: - | We have changed the [haproxy_amphora] connection_max_retries and build_active_retries default values from 300 to 120. This means load balancer builds will wait for ten minutes instead of twenty-five minutes for nova to boot the virtual machine. We feel these are more reasonable default values for most production deployments and provide a better user experience. If you are running nova in a nested virtualization environment, meaning nova is booting VMs inside another VM, and you do not have nested virtualization enabled in the bottom hypervisor, you may need to set these values back up to 300. other: - | We have changed the [haproxy_amphora] connection_max_retries and build_active_retries default values from 300 to 120. This means load balancer builds will wait for ten minutes instead of twenty-five minutes for nova to boot the virtual machine. We feel these are more reasonable default values for most production deployments and provide a better user experience. If you are running nova in a nested virtualization environment, meaning nova is booting VMs inside another VM, and you do not have nested virtualization enabled in the bottom hypervisor, you may need to set these values back up to 300. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Support-PKCS12-certificate-objects-1c6e896be9d35977.yaml0000664000175000017500000000204500000000000030352 0ustar00zuulzuul00000000000000--- features: - | Users can now use a reference to a single PKCS12 bundle as their `default_tls_container_ref` instead of a Barbican container with individual secret objects. PKCS12 supports bundling a private key, certificate, and intermediates. Private keys can no longer be passphrase protected when using PKCS12 bundles. No configuration change is necessary to enable this feature. Users may simply begin using this (a command-line sketch of the PKCS12 workflow follows the notes below). Any use of the old style containers will be detected and automatically fall back to using the old Barbican driver.
- | Certificate bundles can now be stored in any backend Castellan supports, and can be retrieved via a Castellan driver, even if Barbican is not deployed. security: - | Private keys can no longer be password protected, as PKCS12 does not support storing a passphrase in an explicitly defined way. Note that this is not noticeably less secure than storing a passphrase protected private key in the same place as the passphrase, as was the case with Barbican. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/UDP-listener-health-d8fdf64a32e022d4.yaml0000664000175000017500000000032000000000000025702 0ustar00zuulzuul00000000000000--- upgrade: - | To enable UDP listener monitoring when no pool is attached, the amphora image needs to be updated and load balancers with UDP listeners need to be failed over to the new image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/Use-Ubuntu-virtual-kernel-for-Amphora-a1e8af8bc7893011.yaml0000664000175000017500000000043300000000000031243 0ustar00zuulzuul00000000000000--- features: - | The default kernel for the amphora image has switched from linux-image-generic to linux-image-virtual, resulting in an image size reduction of about 150MB. The linux-image-virtual kernel works with kvm, qemu tcg, and Xen hypervisors among others. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-ability-setting-barbican-acls-85f36747d4284035.yaml0000664000175000017500000000056300000000000030151 0ustar00zuulzuul00000000000000--- features: - | Added ability for Octavia to automatically set Barbican ACLs on behalf of the user. This enables users to create TLS-terminated listeners without having to add the Octavia keystone user ID to the ACL list. Octavia will also automatically revoke access to secrets whenever load balancing resources no longer require access to them. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=octavia-6.2.2/releasenotes/notes/add-ability-to-disable-tls-terminated-listeners-965ec7c1a8a9f732.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-ability-to-disable-tls-terminated-listeners-965ec7c1a8a9f732.ya0000664000175000017500000000013700000000000032744 0ustar00zuulzuul00000000000000--- features: - | Add a config variable to disable creation of TLS Terminated listeners. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-amphora-flavor-field-54d42da0381ced7f.yaml0000664000175000017500000000023300000000000026714 0ustar00zuulzuul00000000000000--- features: - | The Amphora API can now return the field `compute_flavor`, which is the ID of the compute instance flavor used to boot the amphora.
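For illustration, the new field can be inspected with the OpenStack client plugin; a minimal sketch, assuming the ``openstack loadbalancer amphora show`` subcommand from python-octaviaclient and an illustrative output row:

    openstack loadbalancer amphora show <amphora-id>
    # among other columns, the output is expected to include:
    # | compute_flavor | <nova flavor ID used to boot this amphora> |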
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-anti-affinity-policy-config-39df309fd12d443c.yaml0000664000175000017500000000012200000000000030140 0ustar00zuulzuul00000000000000--- features: - Adds a new config parameter to specify the anti-affinity policy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-api-tag-filtering-8bfb3c3b7cfd6afe.yaml0000664000175000017500000000011200000000000026522 0ustar00zuulzuul00000000000000--- features: - | You can now filter API queries by the object tag. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-batch-member-update-capability-4923bd266a9b2b80.yaml0000664000175000017500000000046500000000000030512 0ustar00zuulzuul00000000000000--- features: - | It is now possible to completely update a pool's member list as a batch operation. Using a PUT request on the base member endpoint of a pool, you can specify a list of member objects and the service will perform any necessary creates/deletes/updates as a single operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-compute-flavor-capability-ab202697a7fbdc3d.yaml0000664000175000017500000000053100000000000030056 0ustar00zuulzuul00000000000000--- features: - | Operators can now use the 'compute_flavor' Octavia flavor capability when using the amphora provider driver. This allows custom compute driver flavors to be used per-load balancer. If this is not defined in an Octavia flavor, the amp_flavor_id Octavia configuration file setting will continue to be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-default-ciphers-2eb70b34290711be.yaml0000664000175000017500000000064400000000000025631 0ustar00zuulzuul00000000000000--- features: - | HTTPS-terminated listeners can now be individually configured with an OpenSSL cipher string. The default cipher string for new listeners can be specified with ``default_tls_ciphers`` in ``octavia.conf``. The built-in default is OWASP's "Suite B" recommendation. (https://cheatsheetseries.owasp.org/cheatsheets/TLS_Cipher_String_Cheat_Sheet.html) Existing listeners will be unaffected.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-healthcheck-middleware-6c09150bddd3113f.yaml0000664000175000017500000000034400000000000027205 0ustar00zuulzuul00000000000000--- features: - | Added the oslo-middleware healthcheck app to the Octavia API. Hitting /healthcheck will return a 200. This is enabled via the [api_settings]healthcheck_enabled setting and is disabled by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-id-column-to-healthmonitor-a331934ad2cede87.yaml0000664000175000017500000000035200000000000030103 0ustar00zuulzuul00000000000000--- upgrade: - Added an `ID` column to the health_monitor table in Octavia, whose value is the same as the `pool_id` column. The database needs to be upgraded first, followed by upgrade and restart of the API servers.
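A minimal sketch of the upgrade ordering described in the note above, assuming a systemd-based deployment (the ``octavia-api`` unit name is an assumption and may differ per distribution):

    # 1. Apply the database migration first
    octavia-db-manage upgrade head
    # 2. Then upgrade the packages and restart the API servers
    systemctl restart octavia-api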
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-jobboard-based-controller-599279c7cc172e955.yaml0000664000175000017500000000044700000000000027641 0ustar00zuulzuul00000000000000--- features: - | Operators can now use the amphorav2 provider, which uses a jobboard-based controller. A jobboard controller solves the issue of resources stuck in PENDING_* states by writing information about task states to a persistent backend and monitoring job claims via the jobboard. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-lb-algorithm-source-ip-port-ff86433143e43136.yaml0000664000175000017500000000123300000000000027672 0ustar00zuulzuul00000000000000--- features: - | New Load Balancing algorithm SOURCE_IP_PORT has been added. It is supported only by the OVN provider driver. upgrade: - | All pools configured under the OVN provider driver are automatically migrated to the SOURCE_IP_PORT algorithm. Previously the algorithm was named ROUND_ROBIN, but in fact it did not behave like round robin. After investigation, it was observed that core OVN actually utilizes a 5-tuple hash/RSS hash in DPDK/kernel as the load balancing algorithm. The 5-tuple hash covers the source IP, destination IP, protocol, source port, and destination port. To reflect this, the name was changed to SOURCE_IP_PORT. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-monitor-address-and-port-to-member-99fa2ee65e2b04b4.yaml0000664000175000017500000000007100000000000031444 0ustar00zuulzuul00000000000000--- features: - Add monitor address and port to member ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-policy-json-support-38929bb1fb581a7a.yaml0000664000175000017500000000024700000000000026630 0ustar00zuulzuul00000000000000--- features: - Policy.json enforcement in Octavia. * Enables verification of privileges on specific API commands for a specific user role and project_id. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml0000664000175000017500000000025300000000000026551 0ustar00zuulzuul00000000000000--- features: - | Add listener and pool protocol validation. Pools and listeners cannot be combined arbitrarily; constraints on compatible protocols are now enforced. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-ptvsd-debugger-33bb632bccf494bb.yaml0000664000175000017500000000015200000000000025712 0ustar00zuulzuul00000000000000--- features: - | Added support to debug with the Python Visual Studio Debugger engine (ptvsd). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-quota-support-fe63a52b6b903789.yaml0000664000175000017500000000007100000000000025435 0ustar00zuulzuul00000000000000--- features: - Adds quota support to the Octavia API.
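As an illustration of the new quota support, a hedged sketch using the OpenStack client plugin (the ``quota`` subcommands and the ``--loadbalancer`` option are assumptions based on python-octaviaclient):

    # Show the effective quotas for a project
    openstack loadbalancer quota show <project-id>
    # Limit a project to 10 load balancers
    openstack loadbalancer quota set --loadbalancer 10 <project-id>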
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-rh-flavors-support-for-amphora-agent-cd3e9f9f519b9ff2.yaml0000664000175000017500000000154200000000000032133 0ustar00zuulzuul00000000000000--- prelude: > Amphora image support for RH Linux flavors. features: - The diskimage-create script supports different operating system flavors such as Ubuntu (the default option), CentOS, Fedora and RHEL. Adaptations were made to several elements to ensure all images are operational. - The amphora-agent is now able to distinguish between operating systems and choose the right course of action to manage files and networking on each Linux flavor. issues: - To use CentOS, Fedora, or RHEL in your amphora image you must set the user_group option, located in the [haproxy_amphora] section of the octavia.conf file to "haproxy". This will be made automatic in a future version. upgrade: - agent_server_network_dir is now auto-detected for Ubuntu, CentOS, Fedora and RHEL if one is not specified in the configuration file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-rhel-amphora-ab4a7bada2fa3eb7.yaml0000664000175000017500000000010700000000000025560 0ustar00zuulzuul00000000000000--- features: - | Added support to create RHEL 8 amphora images. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-shared-pools-and-l7-ef9edf01bb9058e0.yaml0000664000175000017500000000166700000000000026477 0ustar00zuulzuul00000000000000--- features: - | Adds support for Layer 7 switching and shared pools features to Octavia. This supports the equivalent feature added to Neutron LBaaS v2. * Layer 7 policies allow a tenant / user to define actions the load balancer may take other than routing requests to the default pool. * Layer 7 rules control the logic behind whether a given Layer 7 policy is followed. * Works for HTTP and TERMINATED_HTTPS listeners. * Shared pools allow listeners or Layer 7 REDIRECT_TO_POOL policies to share back-end pools. upgrade: - | Upgrade requires a database migration. * Shared-pools introduces a new ``load_balancer_id`` column into the ``pools`` table. * ``pools.load_balancer_id`` column is populated from ``listeners`` data using ETL in the migration. * Two new tables are created to handle Layer 7 switching. These are ``l7policy`` and ``l7rule``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-sos-element-5d6677471341e7f2.yaml0000664000175000017500000000011300000000000024663 0ustar00zuulzuul00000000000000--- features: - Add sos element to amphora images (Red Hat family only). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-support-for-centos-8-e0730de5d20a48be.yaml0000664000175000017500000000010300000000000026641 0ustar00zuulzuul00000000000000--- features: - | Added support for CentOS 8 amphora images. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-systemd-support-5794252f02bce666.yaml0000664000175000017500000000045700000000000025720 0ustar00zuulzuul00000000000000--- features: - Adds support for amphora images that use systemd. 
- Add support for Ubuntu Xenial amphora images. deprecations: - The "use_upstart" configuration option is now deprecated because the amphora agent can now automatically discover the init system in use in the amphora image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-upgrade-check-framework-cc440f3f440ba6d2.yaml0000664000175000017500000000060400000000000027403 0ustar00zuulzuul00000000000000--- features: - | Added new tool ``octavia-status upgrade check``. This framework allows adding various checks which can be run before an Octavia upgrade to ensure the upgrade can be performed safely. upgrade: - | Operators can now use the new CLI tool ``octavia-status upgrade check`` to check whether an Octavia deployment can be safely upgraded from the N-1 to the N release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-vip-acl-4a7e20d167fe4a49.yaml0000664000175000017500000000020600000000000024175 0ustar00zuulzuul00000000000000--- features: - | Added support to VIP access control list. Users can now limit incoming traffic to a set of allowed CIDRs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add-x-forwarded-proto-19a1d971cf43b795.yaml0000664000175000017500000000013400000000000026153 0ustar00zuulzuul00000000000000--- features: - | Added support for listener X-Forwarded-Proto header insertion. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add_API_reference-81d84d0c8598b764.yaml0000664000175000017500000000025000000000000025231 0ustar00zuulzuul00000000000000--- other: - | Octavia now has an up-to-date API reference for the Octavia v2 API. It is available at: https://developer.openstack.org/api-ref/load-balancer/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add_ability_to_disable_api_versions-253a8dc4253f0f56.yaml0000664000175000017500000000014100000000000031242 0ustar00zuulzuul00000000000000--- features: - | Add config variables to allow disabling either API version (v1 or v2.0). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add_api_audit-58dc16bff517eae7.yaml0000664000175000017500000000015100000000000025030 0ustar00zuulzuul00000000000000--- features: - | The Octavia API now supports Cloud Auditing Data Federation (CADF) auditing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml0000664000175000017500000000373100000000000027637 0ustar00zuulzuul00000000000000--- features: - | Octavia now supports provider drivers. This allows third party load balancing drivers to be integrated with the Octavia v2 API. Users select the "provider" for a load balancer at creation time. - | There is now an API available to list enabled provider drivers. upgrade: - | Two new options are included with provider driver support. The enabled_provider_drivers option defaults to "amphora, octavia" to support existing Octavia load balancers.
The default_provider_driver option defaults to "amphora" for all new load balancers that do not specify a provider at creation time. These defaults should cover most existing deployments. - | The provider driver support requires a database migration and follows Octavia standard rolling upgrade procedures; database migration followed by rolling control plane upgrades. Existing load balancers with no provider specified will be assigned "amphora" as part of the database migration. deprecations: - | The Octavia API handlers are now deprecated and replaced by the new provider driver support. Octavia API handlers will remain in the code to support the Octavia v1 API (used for neutron-lbaas). - | Provider of "octavia" has been deprecated in favor of "amphora" to clarify the provider driver supporting the load balancer. other: - | A provider driver developer guide has been added to the documentation to aid driver providers. - | An operator documentation page has been added to list known Octavia provider drivers and provide links to those drivers. Non-reference drivers, drivers other than the "amphora" driver, will be outside of the octavia code repository but are dynamically loadable via a well defined interface described in the provider driver developers guide. - | Installed drivers need to be enabled for use in the Octavia configuration file once you are ready to expose the driver to users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/add_tag_support-4735534f4066b9af.yaml0000664000175000017500000000027500000000000025146 0ustar00zuulzuul00000000000000--- features: - | Added tags property for Octavia resources. It includes: * Load balancer * Listener * Member * Pool * L7rule * L7policy * Health Monitor ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/additional-udp-healthcheck-types-2414a5edee9f5110.yaml0000664000175000017500000000022600000000000030407 0ustar00zuulzuul00000000000000--- features: - | Two new types of health monitoring are now valid for UDP listeners. Both ``HTTP`` and ``TCP`` check types can now be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/admin-state-up-fix-4aa278eac67646ae.yaml0000664000175000017500000000025000000000000025610 0ustar00zuulzuul00000000000000--- upgrade: - To fix the admin-state-up bug you must upgrade your amphora image. fixes: - Fixes admin-state-up=False action for loadbalancer and listener. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml0000664000175000017500000000150100000000000026657 0ustar00zuulzuul00000000000000--- upgrade: - | After this upgrade, users will no longer be able to use network resources they cannot see or "show" on load balancers. Operators can revert this behavior by setting the "allow_invisible_resource_usage" configuration file setting to ``True``. security: - | Previously, if a user knew or could guess the UUID for a network resource, they could use that UUID to create load balancer resources. Now the user must have permission to see or "show" the resource before it can be used with a load balancer.
This will be the new default, but operators can disable this behavior by setting the "allow_invisible_resource_usage" configuration file setting to ``True``. This issue falls under the "Class C1" security issue as the user would require a valid UUID. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/allow-operators-to-disable-ping-healthchecks-42fd8c3b88edaf35.yaml0000664000175000017500000000025700000000000033022 0ustar00zuulzuul00000000000000--- features: - | Cloud deployers can set `api_settings.allow_ping_health_monitors = False` in `octavia.conf` to disable the ability to create PING health monitors. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/allow-vip-on-mgmt-net-d6c65d4ccb2a8f2c.yaml0000664000175000017500000000015700000000000026407 0ustar00zuulzuul00000000000000--- fixes: - Allow the loadbalancer's VIP to be created on the same network as the management interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml0000664000175000017500000000067100000000000030203 0ustar00zuulzuul00000000000000--- upgrade: - | Any amphorae running a py3 based image must be recycled or else they will eventually fail on certificate rotation. fixes: - | Resolved broken certificate upload on py3 based amphora images. On a housekeeping certificate rotation event, the amphora would clear out its server certificate and return a 500, putting the amphora in ERROR status and breaking further communication. See upgrade notes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/amp-az-1a0b4255c77fd1dc.yaml0000664000175000017500000000017200000000000023350 0ustar00zuulzuul00000000000000--- features: - | Added a configuration option that specifies the availability zone amphora should be built in. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/api-create-project-id-4bb984b24d56de2e.yaml0000664000175000017500000000061000000000000026245 0ustar00zuulzuul00000000000000--- deprecations: - | The project_id attribute of the POST method on the following objects is now deprecated\: listener, pool, health monitor, and member. These objects will use the parent load balancer's project_id. Values passed into the project_id on those objects will be ignored until the deprecation cycle has expired, at which point they will cause an error. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/auth-strategy-keystone-80b3780a18420b6c.yaml0000664000175000017500000000014200000000000026400 0ustar00zuulzuul00000000000000--- upgrade: - | The configuration setting auth_strategy is now set to keystone by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/auto_detect_haproxy_user_group-c220b6a2c8f1d589.yaml0000664000175000017500000000044200000000000030436 0ustar00zuulzuul00000000000000--- features: - The amphora haproxy user_group setting is now automatically detected for Ubuntu, CentOS, Fedora, or RHEL based amphora.
deprecations: - The haproxy user_group setting is no longer being used. It is now auto-detected for Ubuntu, CentOS, Fedora and RHEL based amphora images. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/availability-zone-api-a28ff5e00bdcc69a.yaml0000664000175000017500000000031400000000000026521 0ustar00zuulzuul00000000000000--- features: - | Add an API for allowing administrators to manage Octavia Availability Zones and Availability Zone Profiles, which behave nearly identically to Flavors and Flavor Profiles. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=octavia-6.2.2/releasenotes/notes/availability-zones-can-override-valid-vip-networks-5566aa4769c158dc.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/availability-zones-can-override-valid-vip-networks-5566aa4769c158dc0000664000175000017500000000017100000000000033031 0ustar00zuulzuul00000000000000--- features: - | Availability zone profiles can now override the ``valid_vip_networks`` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/bug-1797130-8c9bfa50d9b6c955.yaml0000664000175000017500000000070200000000000023616 0ustar00zuulzuul00000000000000--- features: - | Listener default timeouts can be set by config in section haproxy_amphora: * `timeout_client_data`: Frontend client inactivity timeout * `timeout_member_connect`: Backend member connection timeout * `timeout_member_data`: Backend member inactivity timeout * `timeout_tcp_inspect`: Time to wait for TCP packets for content inspection The value for all of these options is expected to be in milliseconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/build-rate-limiting-a2d2d4c9333a8f46.yaml0000664000175000017500000000065500000000000025763 0ustar00zuulzuul00000000000000--- features: - | Octavia now has options to limit the amphora concurrent build rate. This may be useful for deployments where nova can get overloaded. Amphora builds will be prioritized in the following order: failover, normal, spares pool builds. See the configuration guide for more information: https://docs.openstack.org/octavia/latest/configuration/configref.html#haproxy_amphora.build_rate_limit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/cert-encrypted-ramfs-381ffe3d4a7392d7.yaml0000664000175000017500000000064500000000000026170 0ustar00zuulzuul00000000000000--- upgrade: - To enable encrypted ramfs storage for certificates and keys, you must upgrade your amphora image. deprecations: - Amphorae with a terminated HTTPS load balancer can no longer be rebooted. If they reboot, they will trigger a failover of the amphora. security: - Certificate and key storage for terminated HTTPS load balancers is now in an encrypted ramfs path inside the amphora. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/change-keystone-backend-config-d246b1e34015c86c.yaml0000664000175000017500000000064100000000000027742 0ustar00zuulzuul00000000000000--- prelude: > Extended support for Keystone API v3.
features: - Octavia supports different Keystone APIs and chooses the authentication mechanism based on the configuration specified in the "keystone_authtoken" section of the octavia.conf file. upgrade: - The "keystone_authtoken_v3" section was removed from the configuration file and all parameters are now stored in the "keystone_authtoken" section. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/correct-amp-client-auth-vulnerability-6803f4bac2508e4c.yaml0000664000175000017500000000020700000000000031425 0ustar00zuulzuul00000000000000--- security: - | Correctly require two-way certificate authentication to connect to the amphora agent API (CVE-2019-17134). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/custom_eventstreamer_queue_url-7a98bd6a7e92e9de.yaml0000664000175000017500000000122700000000000030646 0ustar00zuulzuul00000000000000--- features: - | In some environments (e.g. OSA) Neutron and Octavia use different queues (at least different vhosts) and so if Octavia posts to the Octavia queue and Neutron listens on the Neutron queue the events will never make it over. This adds a way to configure a custom queue for the event streamer, thus allowing Octavia to post messages to the Neutron queue if needed. security: - | Depending on how the other queue is set up, additional passwords for the other queue will be in the Octavia config file. Operators should take care of setting up appropriate users with appropriate restrictions to the topic(s) needed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/deleted-404-2cdd751e7afbe036.yaml0000664000175000017500000000043300000000000024164 0ustar00zuulzuul00000000000000--- fixes: - | Fixes the v2 API returning "DELETED" records until the amphora_expiry_age timeout expired. The API will now immediately return a 404 HTTP status code when deleted objects are requested. The API version has been raised to v2.1 to reflect this change. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/deprecate-amp_ssh_key_name-e1041a64ed970a9e.yaml0000664000175000017500000000132400000000000027346 0ustar00zuulzuul00000000000000--- features: - | New option in diskimage-create.sh `-n` to completely disable sshd on the amphora. deprecations: - | Config option `amp_ssh_access_allowed` is deprecated, as it overlaps with `amp_ssh_key_name` in functionality and is not needed. Simply leave the variable `amp_ssh_key_name` blank and no ssh key will be installed. This is the same result as using `amp_ssh_access_allowed = False`. security: - | It is now possible to completely remove sshd from the amphora image, to further lock down access and increase security. If this is set, providing an `amp_ssh_key_name` in config will install the key, but ssh access will not be possible as sshd will not be running.
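A minimal ``octavia.conf`` sketch of the deprecation note above (placing ``amp_ssh_key_name`` in the ``[controller_worker]`` section is an assumption):

    [controller_worker]
    # Leave blank: no ssh key is installed in new amphorae. Combined with an
    # image built with diskimage-create.sh -n, sshd is absent entirely.
    amp_ssh_key_name =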
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/disable_logging-3b50f388ee2b8127.yaml0000664000175000017500000000137100000000000025152 0ustar00zuulzuul00000000000000--- features: - | The new option `[haproxy_amphora]/connection_logging` will disable logging of connection data if set to False which can improve performance of the load balancer and might aid compliance. security: - | Disabling connection logging might make it more difficult to audit systems for unauthorized access, from which IPs it originated, and which assets were compromised. other: - | As part of GDPR compliance, connection logs might be considered personal data and might need to follow specific data retention policies. Disabling connection logging might aid in making Octavia compliant by preventing the output of such data. As always, consult with an expert on compliance prior to making changes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/diskimage-create-git-branch-9c44e7e3fa70a985.yaml0000664000175000017500000000021100000000000027337 0ustar00zuulzuul00000000000000--- features: - | Added an option to the diskimage-create.sh script to specify the Octavia Git branch to build the image from. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/documentation-migration-f72c6a1703a105b7.yaml0000664000175000017500000000036200000000000026661 0ustar00zuulzuul00000000000000--- other: - | The Octavia project documentation has been reorganized as part of the OpenStack documentation migration project. The Octavia project documentation is now located at: https://docs.openstack.org/octavia/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/drop-python-2-7-a6b3f456bf6a3da7.yaml0000664000175000017500000000020600000000000025037 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. The minimum version of Python now supported by Octavia is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/enable-keystone-on-api-b3ebb132ad5ab308.yaml0000664000175000017500000000074000000000000026477 0ustar00zuulzuul00000000000000--- prelude: > Support for Keystone token authentication on frontend Octavia API. features: - After setting "auth_strategy = keystone", all incoming requests to the Octavia API will be verified with Keystone to ensure they were sent by an authenticated user. By default that option is disabled because Neutron LBaaS v2 does not support that functionality properly. upgrade: - This feature adds a new configuration value, "auth_strategy", which by default is set to "noauth". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/enable-mutable-configuration-1d7f62a133148767.yaml0000664000175000017500000000032600000000000027422 0ustar00zuulzuul00000000000000--- features: - | You can now update the running configuration of the Octavia control plane processes by sending the parent process a "HUP" signal. Note: The configuration item must support mutation.
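For example, a hedged sketch of reloading a running controller process (the pid file path is an assumption and varies by deployment):

    # Re-read octavia.conf without restarting the process; only options
    # that support mutation are updated.
    kill -HUP $(cat /var/run/octavia/octavia-api.pid)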
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/encrypt-certs-and-keys-5175d7704d8df3ce.yaml0000664000175000017500000000134000000000000026436 0ustar00zuulzuul00000000000000--- security: - | As a followup to the fix that resolved CVE-2018-16856, Octavia will now encrypt certificates and keys used for secure communication with amphorae, in its internal workflows. Octavia used to exclude debug-level log prints for specific tasks and flows that were explicitly specified by name, a method that is susceptible to code changes. other: - | Added a new option named server_certs_key_passphrase under the certificates section. The default value gets copied from an environment variable named TLS_PASS_AMPS_DEFAULT. In a case where TLS_PASS_AMPS_DEFAULT is not set, and the operator did not fill any other value directly, 'insecure-key-do-not-use-this-key' will be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/extend_api_to_accept_qos_policy_id-128ab592a735f3b8.yaml0000664000175000017500000000026500000000000031116 0ustar00zuulzuul00000000000000--- features: - The Octavia API can now accept a QoS policy ID from neutron to support QoS requirements on the load balancer VIP port when creating or updating a load balancer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/failover-vip-no-dhcp-50805c5321ebbb05.yaml0000664000175000017500000000016600000000000025750 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue that caused failover to be unsuccessful if the VIP network was not DHCP enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-API-list-performance-3b121deffbc3ce4a.yaml0000664000175000017500000000015200000000000027042 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a performance regression in the Octavia v2 API when using the "list" APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-IPv6-vip-079a3285f78686ee.yaml0000664000175000017500000000047500000000000024154 0ustar00zuulzuul00000000000000--- upgrade: - | To fix IPv6 VIP addresses, you must run the "octavia-db-manage upgrade head" migration script. fixes: - | Fully expanded IPv6 VIP addresses would fail to store with "Data too long for column 'ip_address' at row 1". This patch includes a database migration to fix this column. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-PING-health-monitor-bc38de57fa759ac0.yaml0000664000175000017500000000127300000000000026535 0ustar00zuulzuul00000000000000--- issues: - | Amphora images with HAProxy older than 1.6 (CentOS 7, etc.) will still use health monitor type TCP when PING is selected by the user. upgrade: - | Amphora will need to be updated to a new image with this version of the agent and ping-wrapper.sh script prior to updating the Octavia controllers. If a load balancer is using a health monitor of type PING with an amphora image that has not been updated, the next configuration change to the load balancer will cause it to go into an ERROR state until it is failed over to an updated image.
fixes: - | Fixed an issue where health monitors of type PING were really doing a TCP health check. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-SNI-single-process-879ffce5eaa6c1c3.yaml0000664000175000017500000000030700000000000026514 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue where load balancers with more than one TLS enabled listener, one or more SNI enabled, may load certificates from other TLS enabled listeners for SNI use. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-active-standby-in-centos-4e47140e0e139de8.yaml0000664000175000017500000000014000000000000027426 0ustar00zuulzuul00000000000000--- fixes: - | Fixed duplicated IPv6 addresses in Active/Standby mode in CentOS amphorae. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml0000664000175000017500000000017000000000000030170 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where members added to TLS-enabled pools would go to ERROR provisioning status. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-amp-failover-missing-vrrp-port-9b5f13b9951b7edb.yaml0000664000175000017500000000022700000000000030773 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue with failing over an amphora if the pair amphora in an active/standby pair had a missing VRRP port in neutron. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-amphorav2-failover-secgroup-c793de5e00b32653.yaml0000664000175000017500000000024300000000000030146 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where an amphorav2 LB could not be reached after load balancer failover. The LB security group was not set on the amphora port. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-api-listener-update-sni-containers-6595c52e2de1f621.yaml0000664000175000017500000000040300000000000031426 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where SNI container settings were not being applied on listener update API calls. - | Fixed an Octavia API validation on listener update where SNI containers could be set on non-TERMINATED_HTTPS listeners. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml0000664000175000017500000000015600000000000025223 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where some columns could not be used for sort keys in API list calls. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-barbican-client-verfiy-689be1b9389bd1d8.yaml0000664000175000017500000000015400000000000027240 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where creating a listener failed when the Barbican service had TLS enabled.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-certs-ramfs-race-561f355d13fc6d14.yaml0000664000175000017500000000064000000000000025755 0ustar00zuulzuul00000000000000--- upgrade: - | A new amphora image is required to fix the potential certs-ramfs race condition. security: - | A race condition between the certs-ramfs and the amphora agent may lead to tenant TLS content being stored on the amphora filesystem instead of in the encrypted RAM filesystem. fixes: - | Fixed a potential race condition with the certs-ramfs and amphora agent services. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml0000664000175000017500000000032600000000000027730 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue where load balancers with more than one TLS enabled listener, using client authentication and/or backend re-encryption, may load incorrect certificates for the listener. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-creating-fully-populated-load-balancer-ae57ffae5c017ac3.yaml0000664000175000017500000000020600000000000032513 0ustar00zuulzuul00000000000000--- fixes: - | Fixes creating a fully populated load balancer with an L7 policy that is not of type REDIRECT_POOL and a default_pool field. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-default-timeout-values-for-listeners-108c8048ba8beb9a.yaml0000664000175000017500000000031700000000000032156 0ustar00zuulzuul00000000000000--- fixes: - | Fix default value override for timeout values for listeners. Changing the default timeouts in the configuration file was not correctly applied to the default listener parameters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml0000664000175000017500000000035700000000000030240 0ustar00zuulzuul00000000000000--- fixes: - | Fix operational status for disabled UDP listeners. The operating status of disabled UDP listeners is now OFFLINE instead of ONLINE, the behavior is now similar to that of HTTP/HTTPS/TCP/... listeners. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-driver-agent-decrement-quota-27486d9fa0bdeb89.yaml0000664000175000017500000000017400000000000030461 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue where provider drivers may not decrement the load balancer objects quota on delete. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-driver-errors-81d33948288bf8cf.yaml0000664000175000017500000000011500000000000025445 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where the driver errors were not caught.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-dual-error-amp-failover-69e323892bad8254.yaml0000664000175000017500000000022700000000000027206 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue that could cause load balancers, with multiple amphora in a failed state, to be unable to complete a failover. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-duplicate-sg-creation-0c502a5d2d8c276d.yaml0000664000175000017500000000064400000000000027057 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a load balancer creation failure that occurred when one of the listener ports matched an Octavia-generated peer port and allowed_cidr was explicitly set to 0.0.0.0/0 on the listener. This was due to the creation of two security group rules, one with remote_ip_prefix as None and one with remote_ip_prefix as 0.0.0.0/0; neutron rejected the second request because the security group rule already existed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml0000664000175000017500000000023600000000000026502 0ustar00zuulzuul00000000000000--- fixes: - | Fix an incorrect ``operating_status`` with empty UDP pools. A UDP pool without any member is now ``ONLINE`` instead of ``OFFLINE``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-error-in-rsyslog-config-a316a7856e1a847a.yaml0000664000175000017500000000025600000000000027324 0ustar00zuulzuul00000000000000--- fixes: - | Fix an issue with the rsyslog configuration file in the Amphora when the log offloading feature and the local log storage feature are both disabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-error-messages-ec817a66249e6666.yaml0000664000175000017500000000021700000000000025517 0ustar00zuulzuul00000000000000--- fixes: - | Improves error messages returned to the user, such as errors for attempting to add a second health monitor to a pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-eth1-ip-flush-7fadda4bdca387b5.yaml0000664000175000017500000000060100000000000025561 0ustar00zuulzuul00000000000000--- upgrade: - | To fix the issue with active/standby load balancers or single topology load balancers with members on the VIP subnet, you need to update the amphora image. critical: - | Fixed a bug where active/standby load balancers and single topology load balancers with members on the VIP subnet may fail. An updated image is required to fix this bug. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-health-check-db-outage-279b0bc1d0039312.yaml0000664000175000017500000000032600000000000026706 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where Octavia being unable to reach the database (all database instances down) would bring down all running load balancers. The Health Manager is more resilient to DB outages now.
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=octavia-6.2.2/releasenotes/notes/fix-host-routes-with-amphorav2-and-persistence-54b99d651a4ee9c4.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-host-routes-with-amphorav2-and-persistence-54b99d651a4ee9c4.yam0000664000175000017500000000021600000000000032763 0ustar00zuulzuul00000000000000--- fixes: - | Fix a serialization error when using host_routes in VIP subnets when persistence in the amphorav2 driver is enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml0000664000175000017500000000031100000000000027146 0ustar00zuulzuul00000000000000--- fixes: - | Fix a bug that could interrupt resource creation when performing a graceful shutdown of the housekeeping service and leave resources such as amphorae in a BOOTING status. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-housekeeping-db-performance-b0d0fcfcce696314.yaml0000664000175000017500000000032200000000000030402 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a performance issue where the Housekeeping service could significantly and incrementally utilize CPU as more amphorae and load balancers are created and/or marked as DELETED. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-invalid-attribute-for-filtering-d2ddb95a1acbded2.yaml0000664000175000017500000000014100000000000031435 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an error triggered when the deletion of the VIP security group fails. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-ipv6-udp-health-message-ed94b35bbea396ec.yaml0000664000175000017500000000025200000000000027462 0ustar00zuulzuul00000000000000--- fixes: - | Some IPv6 UDP members were incorrectly marked in ERROR status, because of a formatting issue while generating the health message in the amphora. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-lb-error-failover-2c17afaa20c0c97f.yaml0000664000175000017500000000015500000000000026356 0ustar00zuulzuul00000000000000--- fixes: - | Fix load balancers that could not be failed over when in ERROR provisioning status. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml0000664000175000017500000000021100000000000027271 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where load balancers would go into ERROR when setting data not visible to providers (e.g. tags).
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4502168 octavia-6.2.2/releasenotes/notes/fix-listener-API-update-with-null/0000775000175000017500000000000000000000000025234 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-listener-API-update-with-null/None-fde2ffab82e783a4.yaml0000664000175000017500000000046100000000000031463 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where the listener API would accept null/None values for fields that must have a valid value, such as connection-limit. Now when a PUT call is made to one of these fields with null as the value the API will reset the field value to the field default value. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-listener-MAX_TIMEOUT-4c4fdf804a96c34b.yaml0000664000175000017500000000040700000000000026442 0ustar00zuulzuul00000000000000--- fixes: - | Fixed MAX_TIMEOUT for timeout_client_data, timeout_member_connect, timeout_member_data, timeout_tcp_inspect API listener. The value was reduced from 365 days to 24 days, so that it no longer exceeds the maximum value of the data type in the DB. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-lo-interface-amphora-netns-90fb9934026e1485.yaml0000664000175000017500000000060700000000000027616 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue with the ``lo`` interface in the ``amphora-haproxy`` network namespace. The ``lo`` interface was down and prevented haproxy from communicating with other haproxy processes (for persistent stick tables) on configuration change. It delayed cleanup of old haproxy workers and increased memory consumption after reloading the configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-loadbalancer-db-cleanup-61ee81a4fd597067.yaml0000664000175000017500000000021600000000000027251 0ustar00zuulzuul00000000000000--- fixes: - | Fix an issue that prevented the cleanup of load balancer entries in the database by the Octavia housekeeper service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-missing-amphora-create-dependency-a954ded0d260d462.yaml0000664000175000017500000000021700000000000031347 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where amphora load balancers fail to create when Nova anti-affinity is enabled and topology is SINGLE. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-missing-cloud-guest-utils-rh-eb39a53502dc1e91.yaml0000664000175000017500000000013300000000000030336 0ustar00zuulzuul00000000000000--- fixes: - | Add missing cloud-utils-growpart RPM to Red Hat based amphora images. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-missing-cronie-rh-bd31001338ddbb1e.yaml0000664000175000017500000000011500000000000026265 0ustar00zuulzuul00000000000000--- fixes: - | Add missing cronie RPM to Red Hat based amphora images.
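Operators picking up these packaging fixes rebuild the image with the diskimage-create tool; a hedged sketch (the ``-i`` base distribution flag value is an assumption about diskimage-create.sh):

    # Rebuild a CentOS-based amphora image that includes the added RPMs
    ./diskimage-create.sh -i centos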
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-multi-amp-down-failover-952618fb8d3d8ae6.yaml0000664000175000017500000000024600000000000027377 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue where if more than one amphora fails at the same time, failover might not fully complete, leaving the load balancer in ERROR. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-nf_conntrack_buckets-sysctl-75ae6dbb9d052863.yaml0000664000175000017500000000015000000000000030405 0ustar00zuulzuul00000000000000--- fixes: - | Fix the nf_conntrack_buckets sysctl in the Amphora; its value was incorrectly set. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-no-resolvconf-rhel-dhclient-hook-36a1c3b1a3b03a3d.yaml0000664000175000017500000000017400000000000031177 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where /etc/resolv.conf on RHEL-based amphorae was being populated with DNS servers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-nr_open-limit-value-7f475c3e301a608d.yaml0000664000175000017500000000033500000000000026506 0ustar00zuulzuul00000000000000--- fixes: - | Increase the limit value for nr_open and file-max in the amphora, the new value is based on what HAProxy 2.x is expecting from the system with the greatest maxconn value that Octavia can set. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-oslo-messaging-connection-leakage-aeb79474105ac116.yaml0000664000175000017500000000015300000000000031263 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a bug that caused an excessive number of RabbitMQ connections to be opened. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml0000664000175000017500000000054700000000000027256 0ustar00zuulzuul00000000000000--- upgrade: - | An amphora image update is recommended to pick up a workaround to an HAProxy issue where it would fail to reload on configuration change should the local peer name start with "-x". fixes: - | Workaround an HAProxy issue where it would fail to reload on configuration change should the local peer name start with "-x". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-pool-crl-2cc6f2705f5b2009.yaml0000664000175000017500000000020300000000000024336 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where updating a CRL or client certificate on a pool would cause the pool to go into ERROR. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml0000664000175000017500000000022300000000000032020 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where the listener "insert_headers" parameter was accepted for protocols that do not support header insertion.
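For reference, header insertion is requested per listener; a hedged CLI sketch (the ``--insert-headers`` option syntax is an assumption based on python-octaviaclient):

    # Valid: HTTP listeners support header insertion
    openstack loadbalancer listener create --name http1 --protocol HTTP \
        --protocol-port 80 --insert-headers X-Forwarded-For=true <lb-id>
    # With this fix, requesting insert_headers on a protocol that does not
    # support it (e.g. TCP) is rejected instead of silently accepted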
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-provider-capabilities-filtering-8bd12b2cf7b37a84.yaml0000664000175000017500000000013200000000000031214 0ustar00zuulzuul00000000000000--- fixes: - | Fixes the ability to filter on the provider flavor capabilities API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-provider-driver-utils-b75485785dcd6462.yaml0000664000175000017500000000031300000000000027036 0ustar00zuulzuul00000000000000--- fixes: - | Fixes the provider driver utils conversion of flavor_id in load balancer conversion, sni_refs and L7 policies in listener conversion, and health monitor in pool conversion. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml0000664000175000017500000000054400000000000032074 0ustar00zuulzuul00000000000000--- fixes: - | Fixed code that configured the CentOS/Red Hat amphora images to use the correct names for the network 'ifcfg' files for static routes and routing rules. It was using the wrong name for the routes file, and did not support IPv6 in either file. For more information, see https://storyboard.openstack.org/#!/story/2007051 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-route-table-b2ec0aa7b92d2abc.yaml0000664000175000017500000000025000000000000025376 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue where VIP return traffic was always routed, if a gateway was defined, through the gateway address even if it was local traffic. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-spare-amphora-check-and-creation-3adf939b45610155.yaml0000664000175000017500000000012000000000000030705 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue that prevented spare amphorae from being created. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-subnet-host_routes-amphorav2-3c079c5a3bfa1b3d.yaml0000664000175000017500000000017300000000000030577 0ustar00zuulzuul00000000000000--- fixes: - | Fix load balancers that use customized host_routes in the VIP or the member subnets in amphorav2. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=octavia-6.2.2/releasenotes/notes/fix-support-for-monitoring-address-and-port-in-udp-members-ff83395544f228cf.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-support-for-monitoring-address-and-port-in-udp-members-ff8339550000664000175000017500000000036600000000000033273 0ustar00zuulzuul00000000000000--- fixes: - | Add support for monitor_address and monitor_port attributes in UDP members. Previously, monitor_address and monitor_port were ignored and the address and protocol_port attributes were used as the monitoring address and port. 
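A hedged CLI sketch of the UDP member monitoring attributes described above; the --monitor-address/--monitor-port flags and the member1/pool1 names are assumptions for the example, and the addresses are illustrative.

# Health checks now target the monitor address/port instead of the
# member's address and protocol_port.
openstack loadbalancer member create --name member1 \
  --address 192.0.2.10 --protocol-port 53 \
  --monitor-address 192.0.2.10 --monitor-port 8053 pool1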
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml0000664000175000017500000000012500000000000030555 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where TLS-enabled pools would fail to provision. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-udp-listener-wrr-50de9dc0774a8ea1.yaml0000664000175000017500000000033200000000000026201 0ustar00zuulzuul00000000000000--- fixes: - | Fix weighted round-robin for UDP listeners with keepalived and lvs. The algorithm must be specified as 'wrr' in order for weighted round-robin to work correctly, but was being set to 'rr'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-udp-members-status-ef3202849bfda29b.yaml0000664000175000017500000000026500000000000026525 0ustar00zuulzuul00000000000000--- fixes: - | Fix operating_status for pools and members that use UDP protocol. operating_status values are now consistent with the values of non-UDP load balancers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-udp-only-lbs-c4ca42106fc1e2bb.yaml0000664000175000017500000000015200000000000025340 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where UDP only load balancers would not bring up the VIP address. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-udp-server-status-bug-db4d3e38bcdf0554.yaml0000664000175000017500000000102200000000000027221 0ustar00zuulzuul00000000000000--- issues: - | When a load balancer with a UDP listener is updated, the listener service is restarted, which causes a brief interruption of the flow of traffic. This issue is caused by a keepalived bug (https://github.com/acassen/keepalived/issues/1163) that was fixed in keepalived 2.0.14, but this package is not yet provided by distributions. fixes: - | Fix a bug that prevented UDP servers from being restored as members of a pool after removing a health monitor resource. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-unlimited-connection-limit-48079688de033c1a.yaml0000664000175000017500000000040000000000000030003 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a bug where unspecified or unlimited listener connection limit settings would lead to a 2000 connection limit when using the amphora/octavia driver. This was the compiled-in connection limit in some HAProxy packages. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-vip-net-no-gw-b46c3ade1a748e69.yaml0000664000175000017500000000023100000000000025370 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where the amphora would fail to bring up the VIP if the VIP network did not have a gateway specified in neutron. 
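Relating to the connection limit fix above, a hedged CLI sketch; the listener1 name is an assumption for the example.

# -1 means unlimited; the amphora driver now writes an explicit maxconn to the
# HAProxy configuration instead of falling back to the 2000 limit compiled
# into some HAProxy packages.
openstack loadbalancer listener set --connection-limit -1 listener1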
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-vip-network-ip-availability-2e924f32abf01052.yaml0000664000175000017500000000040400000000000030143 0ustar00zuulzuul00000000000000--- fixes: - | Fixes an issue in the selection of vip-subnet-id on multi-subnet networks by checking the IP availability of the subnets, ensuring enough IPs are available for the load balancer when creating a load balancer with vip-network-id specified. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-vip-plug-centos-74c2fe7099964b08.yaml0000664000175000017500000000011600000000000025613 0ustar00zuulzuul00000000000000--- fixes: - Fixed an error when plugging the VIP on CentOS-based amphorae. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-vip-qos-policy-extension-enabled-3e16e1c23a7d7ae5.yaml0000664000175000017500000000056700000000000031250 0ustar00zuulzuul00000000000000--- fixes: - Fixed an issue where trying to set a QoS policy on a VIP while the QoS extension is disabled would bring the load balancer to ERROR. Should the QoS extension be disabled, the API will now return HTTP 400 to the user. - Fixed an issue where setting a QoS policy on the VIP would bring the load balancer to ERROR when the QoS extension is enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix-worker-graceful-shutdown-c44b6797637aa1b3.yaml0000664000175000017500000000064500000000000027577 0ustar00zuulzuul00000000000000--- fixes: - | Fix a bug that could interrupt resource creation when performing a graceful shutdown of the controller worker and leave resources in a PENDING_CREATE/PENDING_UPDATE/PENDING_DELETE provisioning status. If the duration of an Octavia flow is greater than the 'graceful_shutdown_timeout' configuration value, stopping the Octavia worker can still interrupt the creation of resources. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fix_active_standby_ipv6-0317d5cd9e5d50e5.yaml0000664000175000017500000000046000000000000026726 0ustar00zuulzuul00000000000000--- upgrade: - | To resolve the IPv6 VIP issues on active/standby load balancers you need to build a new amphora image. fixes: - | Fixes issues using IPv6 VIP addresses with load balancers configured for active/standby topology. This fix requires a new amphora image to be built. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fixed-spare-amphora-rotation-007ba406411a313d.yaml0000664000175000017500000000010700000000000027411 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a bug that prevented spare amphora rotation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/fixed-unstable-UDP-health-status-ba32690b83a9641b.yaml0000664000175000017500000000042600000000000030155 0ustar00zuulzuul00000000000000--- fixes: - | Fix a potential invalid DOWN operating status for members of a UDP pool. 
A race condition could have occurred when building the first heartbeat message after adding a new member to a pool; the recently added member could have been seen as DOWN. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=octavia-6.2.2/releasenotes/notes/force-controlplane-amphora-communication-to-use-tls1.2-1c4adf72d2ce5a82.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/force-controlplane-amphora-communication-to-use-tls1.2-1c4adf72d2ce0000664000175000017500000000032700000000000033226 0ustar00zuulzuul00000000000000--- security: - | Communication between the control-plane and the amphora-agent now uses minimum TLSv1.2 by default, and is configurable. The previous default of SSLv2/3 is widely considered insecure. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml0000664000175000017500000000053300000000000026323 0ustar00zuulzuul00000000000000--- security: - | If you are using the admin_or_owner-policy.yaml policy override file you should upgrade your API processes to include the unscoped token fix. The default policies are not affected by this issue. fixes: - | Fixes an issue when using the admin_or_owner-policy.yaml policy override file and unscoped tokens. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/glance-tags-for-amphora-images-28bd9df1ed4b9ca3.yaml0000664000175000017500000000065500000000000030206 0ustar00zuulzuul00000000000000--- features: - The Glance image containing the latest Amphora image can now be referenced using a Glance tag. To use the feature, set amp_image_tag in [controller_worker]. Note that amp_image_id should be unset for the new feature to take effect. upgrade: - The amp_image_id option is deprecated and will be removed in one of the next releases. Operators are advised to migrate to the new amp_image_tag option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/glance_image_owner-42c92a12f91a62a6.yaml0000664000175000017500000000033100000000000025626 0ustar00zuulzuul00000000000000--- security: - Allows the operator to optionally restrict the amphora glance image selection to a specific owner id. This is a recommended security setting for clouds that allow user uploadable images. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/haproxy-single-process-b17a3af3a97accea.yaml0000664000175000017500000000062200000000000027027 0ustar00zuulzuul00000000000000--- upgrade: - | A new amphora image is required to resolve the amphora memory issues when a load balancer has multiple listeners and the amphora image uses haproxy 1.8 or newer. fixes: - | Fixed an issue with load balancers that have multiple listeners when using an amphora image that contains HAProxy 1.8 or newer. An updated amphora image is required to apply this fix. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/healthcheck-cache-641f0a64e6f5856c.yaml0000664000175000017500000000023600000000000025351 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the healthcheck endpoint always querying the backends by caching results for a configurable time. 
The default is five seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/input-validation-server_certs_key_passphrase-6a9dfc190c9deba8.yaml0000664000175000017500000000036700000000000033445 0ustar00zuulzuul00000000000000--- fixes: - The passphrase for config option 'server_certs_key_passphrase' is used as a Fernet key in Octavia and thus must be a 32-character, base64(url)-compatible value. Octavia will now validate the passphrase length and format. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/keepalived-race-with-haproxy-e402ef7f466871f6.yaml0000664000175000017500000000056400000000000027542 0ustar00zuulzuul00000000000000--- upgrade: - | The keepalived improvements require the amphora image to be upgraded. fixes: - | Improvements to the keepalived system used in active/standby topologies. keepalived is now monitored for health by the amphora agent (previously just by the init system) and a systemd race condition between keepalived and haproxy has been resolved. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/lb-delete-flow-refactor-cfb1bc621bbe92b4.yaml0000664000175000017500000000022200000000000026721 0ustar00zuulzuul00000000000000--- fixes: - | Removes an unnecessary listener delete from the non-cascade load balancer delete flow, thus speeding up load balancer deletion. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/lb_flow_amp_vip-a83db5d84e17a26a.yaml0000664000175000017500000000032200000000000025326 0ustar00zuulzuul00000000000000--- features: - | This speeds up load balancer creation by allocating AAP ports in parallel for load balancers with more than one amphora. As a side effect, the AAP driver is simplified and thus easier to maintain. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/link-amphora-to-loadbalancer-earlier-ab3dddec48b8da96.yaml0000664000175000017500000000074100000000000031452 0ustar00zuulzuul00000000000000--- other: - | Amphorae that are booting for a specific loadbalancer will now be linked to that loadbalancer immediately upon creation. Previously this would not happen until near the end of the process, leaving a gap during booting during which it was difficult to understand which booting amphora belonged to which loadbalancer. This was especially problematic when attempting to troubleshoot loadbalancers that entered ERROR status due to boot issues. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/load-balancer-expiry-age-a473f9147552f1b1.yaml0000664000175000017500000000055300000000000026513 0ustar00zuulzuul00000000000000--- features: - Stale load balancer entries with DELETED provisioning_status are now cleaned up by the housekeeper if they are older than `load_balancer_expiry_age`. upgrade: - New option `load_balancer_expiry_age` is added to the `house_keeping` config section. It defines load balancer expiry age in seconds; the default value is 604800. 
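A hedged octavia.conf sketch of two operator-facing options from the notes above; the section and option names come from the notes themselves, while the tag value shown is illustrative.

[controller_worker]
# Select the amphora image by Glance tag rather than a fixed image ID.
amp_image_tag = amphora

[house_keeping]
# Age in seconds after which DELETED load balancer entries are removed
# (default 604800, i.e. one week).
load_balancer_expiry_age = 604800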
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/make-amphora-cert-validity-configurable-7defc508b1174f89.yaml0000664000175000017500000000043600000000000031711 0ustar00zuulzuul00000000000000--- features: - | The validity period for locally generated certificates used inside Amphora is now configurable. See ``[certificates] cert_validity_time``. security: - | The default validity time for Amphora certificates has been reduced from two years to 30 days. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/make-batch-member-call-additive-4785163e625fed1a.yaml0000664000175000017500000000037500000000000030007 0ustar00zuulzuul00000000000000--- features: - | The batch member update resource can now be used additively by passing the query parameter ``additive_only=True``. Existing members can be updated and new members will be created, but missing members will not be deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/moving-api-config-variables-into-new-section-e1c20b77aaf5ea15.yaml0000664000175000017500000000034200000000000032716 0ustar00zuulzuul00000000000000--- upgrade: - | Several API related variables are moving to their own section `api_settings`. bind_host bind_port api_handler allow_pagination allow_sorting pagination_max_limit api_base_uri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/new-amphora-fields-fa3ffc5801b5e551.yaml0000664000175000017500000000020700000000000025653 0ustar00zuulzuul00000000000000--- features: - | Amphora API now returns the field `image_id` which is the ID of the glance image used to boot the amphora. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/new-default_connection_limit-config-option-3ed9f0ed6ec2b514.yaml0000664000175000017500000000171400000000000032660 0ustar00zuulzuul00000000000000--- features: - | Add a new configuration option to define the default connection_limit for new listeners that use the Amphora provider. The option is [haproxy_amphora].default_connection_limit and its default value is 50,000. This value is used when creating or setting a listener with -1 as connection_limit parameter, or when unsetting connection_limit parameter. fixes: - | With haproxy 1.8.x releases, haproxy consumes much more memory in the amphorae because of pre-allocated data structures. This amount of memory depends on the maxconn parameters in its configuration file (which is related to the connection_limit parameter in the Octavia API). In the Amphora provider, the default connection_limit value -1 is now converted to a maxconn of 50,000. It was previously 1,000,000 but that value triggered some memory allocation issues when quickly performing multiple configuration updates in a load balancer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/octavia-active-standby-cec5d2ad4fd214d8.yaml0000664000175000017500000000112000000000000026663 0ustar00zuulzuul00000000000000--- features: - | Active/Standby support for Octavia. 
* When enabled in the configuration file, Octavia will boot an active and standby amphora for each load balancer. * Session persistence is maintained between the active and standby amphora. * Amphora failover is supported when active/standby is enabled. Should the master or backup amphora fail, the health manager will rebuild it. upgrade: - | Upgrade requires a database migration. * Adds tables for active/standby. * Updates load balancer, listener, and amphora tables. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/octavia-v2-api-c32a62b37c2c8f6f.yaml0000664000175000017500000000054100000000000024715 0ustar00zuulzuul00000000000000--- features: - | Octavia now has a v2 API that can be used as a standalone endpoint. The Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API and is a superset of the neutron-lbaas v2 API. For more information see the Octavia API reference: https://developer.openstack.org/api-ref/load-balancer/v2/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/octavia_v2_RBAC-0eb2b51aa6278435.yaml0000664000175000017500000000150500000000000024647 0ustar00zuulzuul00000000000000--- features: - | The Octavia v2 API now supports Role Based Access Control (RBAC). The default rules require users to have a load-balancer_* role to be able to access the Octavia v2 API. This can be overridden with the admin_or_owner-policy.json sample file provided. See the `Octavia Policies `_ document for more information. security: - | Note that while the Octavia v2 API now supports Role Based Access Control (RBAC), the Octavia v1.0 API does not. The Octavia v1.0 API should not be exposed publicly and should only be used internally such as for the neutron-lbaas octavia driver. Publicly accessible instances of the Octavia API should have the v1.0 API disabled via the Octavia configuration file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/per-amphora-statistics-api-5479605c7f3adb12.yaml0000664000175000017500000000012400000000000027204 0ustar00zuulzuul00000000000000--- features: - | Adds an administrator API to access per-amphora statistics. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/pike-release-35a1d632ce854d4a.yaml0000664000175000017500000000055600000000000024463 0ustar00zuulzuul00000000000000--- prelude: > For the OpenStack Pike release, the Octavia team is excited to announce Octavia version 1.0.0 and introduce the Octavia v2 API. Octavia can now be deployed without neutron-lbaas as a standalone endpoint. The Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API and is a superset of the neutron-lbaas v2 API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/pool-ciphers-73a347942e31788b.yaml0000664000175000017500000000061700000000000024316 0ustar00zuulzuul00000000000000--- features: - | TLS-enabled pools can now be individually configured with an OpenSSL cipher string. The default cipher for new pools can be specified with ``default_pools_ciphers`` in ``octavia.conf``. The built-in default is OWASP's "Suite B" recommendation. 
(https://cheatsheetseries.owasp.org/cheatsheets/TLS_Cipher_String_Cheat_Sheet.html) Existing pools will be unaffected. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/provisioning_neutron_db_sync-c019d96a7b64fe20.yaml0000664000175000017500000000061500000000000030120 0ustar00zuulzuul00000000000000--- upgrade: - Added option 'sync_provisioning_status' to enable synchronizing provisioning status of loadbalancers with the neutron-lbaas database. Enabling this option will queue one additional message per amphora every heartbeat interval. fixes: - Resolved an issue that could cause provisioning status to become out of sync between neutron-lbaas and octavia during high load. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/py3-hmac-digest-81696f6b176e7ae4.yaml0000664000175000017500000000071100000000000024751 0ustar00zuulzuul00000000000000--- upgrade: - | The fix for the hmac.compare_digest on python3 requires you to upgrade your health managers before updating the amphora image. The health manager is compatible with older amphora images, but older controllers will reject the health heartbeats from images with this fix. fixes: - | Fixes an issue with hmac.compare_digest on python3 that could cause health manager "calculated hmac not equal to msg hmac" errors. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml0000664000175000017500000000062700000000000026731 0ustar00zuulzuul00000000000000--- upgrade: - | The failover improvements do not require an updated amphora image, but updating existing amphora will minimize the failover outage time for standalone amphora on subsequent failovers. fixes: - | Significantly improved the reliability and performance of amphora and load balancer failovers. This is especially true when the Nova service is experiencing failures. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/remove-bbq-unset-acl-e680020de6a9ad3d.yaml0000664000175000017500000000030000000000000026107 0ustar00zuulzuul00000000000000--- fixes: - | Octavia will no longer automatically revoke access to secrets whenever load balancing resources no longer require access to them. This may be added in the future. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/remove-deprecated-api-configs-3f5652f71610b05e.yaml0000664000175000017500000000135700000000000027546 0ustar00zuulzuul00000000000000--- upgrade: - | The following configuration settings have reached the end of their deprecation period and are now removed from the [default] section of the configuration. These will only be available in the [api_settings] section going forward. * [DEFAULT] bind_host * [DEFAULT] bind_port * [DEFAULT] auth_strategy * [DEFAULT] api_handler deprecations: - | The following configuration settings have reached the end of their deprecation period and are now removed from the [default] section of the configuration. These will only be available in the [api_settings] section going forward. 
* [DEFAULT] bind_host * [DEFAULT] bind_port * [DEFAULT] auth_strategy * [DEFAULT] api_handler ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/remove-deprecated-v1-resources-6360da3de27b74d3.yaml0000664000175000017500000000046400000000000030044 0ustar00zuulzuul00000000000000--- upgrade: - | Octavia v1 API (used for integration with Neutron-LBaaS) has been removed. If Neutron-LBaaS integration is still required, do not upgrade to this version. deprecations: - | Octavia v1 API deprecation is complete. All relevant code, tests, and docs have been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/remove-duplicated-cert_generator-option-83d18647dc1d2954.yaml0000664000175000017500000000022400000000000031703 0ustar00zuulzuul00000000000000--- upgrade: - Remove duplicated config option 'cert_generator' in [controller_worker]. Operators should now set it under [certificates]. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/remove-tenant_id-c0352efbfb3a54f9.yaml0000664000175000017500000000025200000000000025511 0ustar00zuulzuul00000000000000--- deprecations: - | Finally completely remove tenant_id, as it was deprecated along with the keystone v2 API in Mitaka, which means we're free of it in Pike! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/remove_user_group_option-56ba749d0064a394.yaml0000664000175000017500000000015600000000000027116 0ustar00zuulzuul00000000000000--- deprecations: - | Finally completely remove the user_group option, as it was deprecated in Pike.././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=octavia-6.2.2/releasenotes/notes/render-disabled-members-to-make-statuses-consistent-69189f71da2e02e8.yaml 22 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/render-disabled-members-to-make-statuses-consistent-69189f71da2e02e0000664000175000017500000000107100000000000033063 0ustar00zuulzuul00000000000000--- fixes: - | Creating a member on a pool with no healthmonitor would sometimes briefly update its operating status from `NO_MONITOR` to `OFFLINE` and back to `NO_MONITOR` during the provisioning sequence. This flapping will no longer occur. - | Members that are disabled via `admin_state_up=False` are now rendered in the HAProxy configuration on the amphora as `disabled`. Previously they were not rendered at all. This means that disabled members will now appear in health messages, and will properly change status to OFFLINE. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/reserved-ips-7ef3a63ab0b6b28a.yaml0000664000175000017500000000034000000000000024650 0ustar00zuulzuul00000000000000--- security: - | Adds a configuration option, "reserved_ips", that allows the operator to block addresses from being used in load balancer members. The default setting blocks the nova metadata service address. 
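A hedged octavia.conf sketch combining the relocated API settings and the reserved_ips option from the notes above; the [networking] section placement for reserved_ips and the values shown are assumptions for the example.

[api_settings]
bind_host = 0.0.0.0
bind_port = 9876

[networking]
# Addresses blocked from use as load balancer members; the default blocks
# the nova metadata service address.
reserved_ips = 169.254.169.254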
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/same-port-listeners-41198368d470e821.yaml0000664000175000017500000000023300000000000025536 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a bug which prevented the creation of listeners for different protocols on the same port (e.g. TCP port 53 and UDP port 53). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/separate-thread-pool-for-health-stats-update-c263c844075a7721.yaml0000664000175000017500000000045000000000000032355 0ustar00zuulzuul00000000000000--- fixes: - | Add new parameters to specify the number of threads for updating amphora health and stats. deprecations: - | The `status_update_threads` config option for the health manager is deprecated because it has been replaced by `health_update_threads` and `stats_update_threads`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/service-type-73efc939e48d5858.yaml0000664000175000017500000000037100000000000024510 0ustar00zuulzuul00000000000000--- other: - | Octavia will use the OpenStack service type 'load-balancer'. For more information about service types, see the Octavia API reference: https://developer.openstack.org/api-ref/load-balancer/v2/index.html#service-endpoints ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/statuses_alias-27559e3d74b9eaf0.yaml0000664000175000017500000000021500000000000025150 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a neutron-lbaas LBaaS v2 API compatibility issue when requesting a load balancer status tree via '/statuses'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/stop-logging-amphora-cert-2e188675699d60d5.yaml0000664000175000017500000000043000000000000026707 0ustar00zuulzuul00000000000000--- security: - | Fixed debug-level logging of Amphora certificates for flows such as 'octavia-create-amp-for-lb-subflow-octavia-generate-serverpem' (triggered by load balancer failover) and 'octavia-create-amp-for-lb-subflow-octavia-update-cert-expiration'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-az-on-lb-create-562dcf639bb272ea.yaml0000664000175000017500000000126000000000000026566 0ustar00zuulzuul00000000000000--- features: - | The load balancer create command now accepts an availability_zone argument. With the amphora driver this will create a load balancer in the targeted compute availability_zone in nova. When using spare pools, it will create spares in each AZ. For the amphora driver, if no ``[nova] availability_zone`` is configured and availability zones are used, results may be slightly unpredictable. Note (for the ``amphora`` driver): if it is possible for an amphora to change availability zone after initial creation (not typically possible without outside intervention) this may affect the ability of this feature to function properly. 
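A hedged CLI sketch of the availability zone argument described above; the exact --availability-zone flag spelling and the lb1/public-subnet/az1 names are assumptions for the example.

# Boot the load balancer's amphorae in a specific compute availability zone.
openstack loadbalancer create --name lb1 \
  --vip-subnet-id public-subnet --availability-zone az1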
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-http-health-check-with-host-header-e2cf1f2a98d4114f.yaml0000664000175000017500000000032100000000000032350 0ustar00zuulzuul00000000000000--- features: - Extend the Octavia Health Monitor API with two new fields, ``http_version`` and ``domain_name``, to support HTTP health checks, which will inject the domain name into the HTTP Host header. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-networks-without-dhcp-3458a063333ab7a8.yaml0000664000175000017500000000024700000000000027754 0ustar00zuulzuul00000000000000--- features: - Adds support for networks that do not have DHCP services enabled. upgrade: - To support networks without DHCP you must upgrade your amphora image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-oslo_middleware-http_proxy_to_wsgi-928c6fc5ec3d421c.yaml0000664000175000017500000000061100000000000033031 0ustar00zuulzuul00000000000000--- features: - | Now supports ``oslo_middleware http_proxy_to_wsgi``, which will set up the request URL correctly in the case that there is a proxy (for example, a loadbalancer such as HAProxy) in front of the Octavia API. It is off by default and can be enabled by setting ``enable_proxy_headers_parsing=True`` in the ``[oslo_middleware]`` section of ``octavia.conf``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-pkcs7-intermediate-ca-bundles-279c12bad974bff7.yaml0000664000175000017500000000030500000000000031426 0ustar00zuulzuul00000000000000--- features: - Adds support for PKCS7 PEM- or DER-encoded intermediate certificate bundles for TERMINATED_HTTPS listeners. fixes: - Resolves an issue with using encrypted TLS private keys. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-proxy-protocol-cc5991175a110619.yaml0000664000175000017500000000010700000000000026422 0ustar00zuulzuul00000000000000--- features: - Add support for the PROXY protocol for LBaaS pools in Octavia ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-redirect-http-code-1c2e87ef7fda12e97.yaml0000664000175000017500000000040100000000000027546 0ustar00zuulzuul00000000000000--- features: - The Octavia L7Policy API now accepts a new option `redirect_http_code` for the L7Policy actions `REDIRECT_URL` and `REDIRECT_PREFIX`; each HTTP request to the associated Listener will then return the configured HTTP response code. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-redirect-prefix-7f8b289aee04fe99.yaml0000664000175000017500000000007600000000000027033 0ustar00zuulzuul00000000000000--- features: - Support the REDIRECT_PREFIX action for L7Policy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-remote-debugging-fcb52df4a59c1467.yaml0000664000175000017500000000020700000000000027135 0ustar00zuulzuul00000000000000--- features: - Support remote debugging with PyDev. 
Please refer to the Contributor documentation section to find more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/support-wsgi-deployment-56013fef7172e982.yaml0000664000175000017500000000007500000000000026630 0ustar00zuulzuul00000000000000--- features: - Octavia API now supports WSGI deployment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/switch-taskflow-engine-parallel-8bf743eca15a0253.yaml0000664000175000017500000000077400000000000030302 0ustar00zuulzuul00000000000000--- upgrade: - | The default TaskFlow engine is now set to 'parallel' instead of 'serial'. The parallel engine schedules tasks onto different threads to allow for running non-dependent tasks simultaneously. This has the benefit of accelerating the execution of some Octavia Amphora flows such as provisioning of active-standby amphora loadbalancers. Operators can revert to the previous default 'serial' engine type by setting the configuration option [task_flow]/engine = serial ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml0000664000175000017500000000023200000000000030604 0ustar00zuulzuul00000000000000--- fixes: - | Delay between checks on UDP healthmonitors was using the incorrect config value ``timeout``, when it should have been ``delay``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/use-dib-distribution-mirror-400d96c1a7df9862.yaml0000664000175000017500000000075300000000000027424 0ustar00zuulzuul00000000000000--- features: - | The diskimage-create script now supports generic download mirrors via the DIB_DISTRIBUTION_MIRROR environment variable, replacing the existing distribution-specific elements. upgrade: - | For the diskimage-create script, the BASE_OS_MIRROR environment variable was renamed to DIB_DISTRIBUTION_MIRROR. deprecations: - | These custom distribution mirror elements for the diskimage-create script were removed: apt-mirror, centos-mirror, fedora-mirror. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/validate-access-to-vip_subnet_id-48fc92b45529cafd.yaml0000664000175000017500000000023400000000000030504 0ustar00zuulzuul00000000000000--- fixes: - | Fix a bug that allowed a user to create a load balancer on a ``vip_subnet_id`` that belongs to another user using the subnet UUID. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/validate-same-ip-protocol-in-udp-lb-2813b545131097ec.yaml0000664000175000017500000000042300000000000030434 0ustar00zuulzuul00000000000000--- fixes: - | Adding a member with a different IP protocol version than the VIP IP protocol version in a UDP load balancer caused a crash in the amphora. A validation step in the amphora driver now prevents mixing IP protocol versions in UDP load balancers. 
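A hedged octavia.conf sketch of the TaskFlow engine option described above; the section and option names come directly from the note.

[task_flow]
# 'parallel' is now the default; set 'serial' to revert to the previous
# behavior.
engine = parallel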
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/validate-vip-network-params-57662cc3a99f80e5.yaml0000664000175000017500000000023300000000000027405 0ustar00zuulzuul00000000000000--- fixes: - | Add a validation step in the Octavia Amphora driver to ensure that the port_security_enabled parameter is set on the VIP network. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/vip-port-project-id-bbb26b657b08365e.yaml0000664000175000017500000000070200000000000025730 0ustar00zuulzuul00000000000000--- fixes: - | Neutron LBaaS was assigning the user's project-id to the VIP port it created, thus allowing the user to attach Floating IPs to the VIP port. Octavia, on the other hand, was assigning the Octavia project-id to the port, making it impossible for the user to attach a Floating IP. This patch brings Octavia's behavior in line with Neutron LBaaS and assigns the user's project-id to the VIP port created by Octavia. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/volume-based-amphora-9a1899634f5244b0.yaml0000664000175000017500000000134200000000000025715 0ustar00zuulzuul00000000000000--- features: - | Allow creation of volume based amphora. Many production deployments use volume-based instances for greater flexibility. Octavia will create a volume and attach it to the amphora. New settings: * `volume_driver`: Whether to use the volume driver (cinder) to create volume backed amphorae. * `volume_size`: Size of the root volume for the Amphora instance when using Cinder * `volume_type` : Type of volume for the Amphora root disk volume * `volume_create_retry_interval`: Interval to wait between checks that the volume has reached the available state * `volume_create_timeout`: Timeout for successful volume creation * `volume_create_max_retries`: Maximum number of retries to create the volume ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/notes/zombie_amp-1b435eb66643dab8.yaml0000664000175000017500000000063400000000000024237 0ustar00zuulzuul00000000000000--- fixes: - | Octavia will now automatically delete zombie amphorae in Nova when they are detected. Zombie amphorae are amphorae which report health messages but appear DELETED in Octavia's database. other: - | Processing zombie amphorae is already expensive, and this adds another step that could increase the load on the Octavia Health Manager, especially during Nova API slowness. 
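A hedged octavia.conf sketch of the volume-backed amphora settings listed above; the option names come from the note, while the section placement ([controller_worker] for the driver selection, [cinder] for the rest), the driver name, and the values are assumptions for the example.

[controller_worker]
volume_driver = volume_cinder_driver

[cinder]
volume_size = 16
volume_type = standard
volume_create_retry_interval = 5
volume_create_timeout = 300
volume_create_max_retries = 5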
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4542167 octavia-6.2.2/releasenotes/source/0000775000175000017500000000000000000000000017163 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4542167 octavia-6.2.2/releasenotes/source/_static/0000775000175000017500000000000000000000000020611 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000023062 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4542167 octavia-6.2.2/releasenotes/source/_templates/0000775000175000017500000000000000000000000021320 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000023571 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/conf.py0000664000175000017500000002050100000000000020460 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Octavia Release Notes documentation build configuration file, created # by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/octavia' bug_project = '908' bug_tag = 'doc' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'2015, Octavia Developers' # Release notes are version independent. 
# The short X.Y version. # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'OctaviaReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, # documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'OctaviaReleaseNotes.tex', u'OctaviaRelease Notes Documentation', u'Octavia Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'octaviareleasenotes', u'Octavia Release Notes ' 'Documentation', [u'Octavia Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'OctaviaReleaseNotes', u'Octavia Release Notes ' 'Documentation', u'Octavia Developers', 'OctaviaReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/index.rst0000664000175000017500000000141300000000000021023 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===================== Octavia Release Notes ===================== .. 
toctree:: :maxdepth: 1 unreleased train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/liberty.rst0000664000175000017500000000022200000000000021363 0ustar00zuulzuul00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3342166 octavia-6.2.2/releasenotes/source/locale/0000775000175000017500000000000000000000000020422 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.3342166 octavia-6.2.2/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021374 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4542167 octavia-6.2.2/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000023161 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000017764300000000000026234 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2020. #zanata msgid "" msgstr "" "Project-Id-Version: octavia\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2020-04-10 13:34+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-04-10 03:56+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "0.10.0" msgstr "0.10.0" msgid "0.10.0-37" msgstr "0.10.0-37" msgid "0.5.2-23" msgstr "0.5.2-23" msgid "0.8.0" msgstr "0.8.0" msgid "0.9.0" msgstr "0.9.0" msgid "1.0.0" msgstr "1.0.0" msgid "1.0.2" msgstr "1.0.2" msgid "1.0.4" msgstr "1.0.4" msgid "1.0.5-3" msgstr "1.0.5-3" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.2" msgstr "2.0.2" msgid "2.0.3" msgstr "2.0.3" msgid "2.0.4" msgstr "2.0.4" msgid "2.1.0" msgstr "2.1.0" msgid "2.1.1" msgstr "2.1.1" msgid "2.1.2" msgstr "2.1.2" msgid "2.1.2-11" msgstr "2.1.2-11" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.1" msgstr "3.0.1" msgid "3.0.2" msgstr "3.0.2" msgid "3.1.0" msgstr "3.1.0" msgid "3.1.1" msgstr "3.1.1" msgid "3.2.0" msgstr "3.2.0" msgid "3.2.1" msgstr "3.2.1" msgid "3.2.2" msgstr "3.2.2" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.1" msgstr "4.0.1" msgid "4.1.0" msgstr "4.1.0" msgid "4.1.1" msgstr "4.1.1" msgid "4.1.1-10" msgstr "4.1.1-10" msgid "5.0.0" msgstr "5.0.0" msgid "5.0.0.0rc1-109" msgstr "5.0.0.0rc1-109" msgid "5.0.1" msgstr "5.0.1" msgid "5.0.1-14" msgstr "5.0.1-14" msgid "" "A new amphora image is required to fix the potential certs-ramfs race " "condition." msgstr "" "A new amphora image is required to fix the potential certs-ramfs race " "condition." msgid "" "A new amphora image is required to resolve the amphora memory issues when a " "load balancer has multiple listeners and the amphora image uses haproxy 1.8 " "or newer." 
msgstr "" "A new amphora image is required to resolve the amphora memory issues when a " "load balancer has multiple listeners and the amphora image uses HAProxy 1.8 " "or newer." msgid "" "A provider driver developer guide has been added to the documentation to aid " "driver providers." msgstr "" "A provider driver developer guide has been added to the documentation to aid " "driver providers." msgid "" "A race condition between the certs-ramfs and the amphora agent may lead to " "tenant TLS content being stored on the amphora filesystem instead of in the " "encrypted RAM filesystem." msgstr "" "A race condition between the certs-ramfs and the amphora agent may lead to " "tenant TLS content being stored on the amphora filesystem instead of in the " "encrypted RAM filesystem." msgid "Active/Standby support for Octavia." msgstr "Active/Standby support for Octavia." msgid "Add a config variable to disable creation of TLS Terminated listeners." msgstr "Add a config variable to disable creation of TLS Terminated listeners." msgid "" "Add an API for allowing administrators to manage Octavia Availability Zones " "and Availability Zone Profiles, which behave nearly identically to Flavors " "and Flavor Profiles." msgstr "" "Add an API for allowing administrators to manage Octavia Availability Zones " "and Availability Zone Profiles, which behave nearly identically to Flavours " "and Flavour Profiles." msgid "" "Add config variables to allow disabling either API version (v1 or v2.0)." msgstr "" "Add config variables to allow disabling either API version (v1 or v2.0)." msgid "" "Add listener and pool protocol validation. The pool and listener can't be " "combined arbitrarily. We need some constraints on the protocol side." msgstr "" "Add listener and pool protocol validation. The pool and listener can't be " "combined arbitrarily. We need some constraints on the protocol side." msgid "Add monitor address and port to member" msgstr "Add monitor address and port to member" msgid "" "Add new parameters to specify the number of threads for updating amphora " "health and stats." msgstr "" "Add new parameters to specify the number of threads for updating amphora " "health and stats." msgid "Add sos element to amphora images (Red Hat family only)." msgstr "Add sos element to amphora images (Red Hat family only)." msgid "Add support PROXY protocol for lbaas pool in octavia" msgstr "Add support PROXY protocol for LBaaS pool in Octavia" msgid "Add support for Ubuntu Xenial amphora images." msgstr "Add support for Ubuntu Xenial Amphora images." msgid "" "Add support for monitor_address and monitor_port attributes in UDP members. " "Previously, monitor_address and monitor_port were ignored and address and " "protocol_port attributes were used as monitoring address and port." msgstr "" "Add support for monitor_address and monitor_port attributes in UDP members. " "Previously, monitor_address and monitor_port were ignored and address and " "protocol_port attributes were used as monitoring address and port." msgid "Added UDP protocol support to listeners and pools." msgstr "Added UDP protocol support to listeners and pools." msgid "" "Added a configuration option that specifies the availability zone amphora " "should be built in." msgstr "" "Added a configuration option that specifies the availability zone amphora " "should be built in." msgid "" "Added a new endpoint /v2.0/octavia/amphorae to expose internal details about " "amphorae. This endpoint is admin only." 
msgstr "" "Added a new endpoint /v2.0/octavia/amphorae to expose internal details about " "amphorae. This endpoint is admin only." msgid "" "Added a new option named server_certs_key_passphrase under the certificates " "section. The default value gets copied from an environment variable named " "TLS_PASS_AMPS_DEFAULT. In a case where TLS_PASS_AMPS_DEFAULT is not set, and " "the operator did not fill any other value directly, 'insecure-key-do-not-use-" "this-key' will be used." msgstr "" "Added a new option named server_certs_key_passphrase under the certificates " "section. The default value gets copied from an environment variable named " "TLS_PASS_AMPS_DEFAULT. In a case where TLS_PASS_AMPS_DEFAULT is not set, and " "the operator did not fill any other value directly, 'insecure-key-do-not-use-" "this-key' will be used." msgid "" "Added ability for Octavia to automatically set Barbican ACLs on behalf of " "the user. Such enables users to create TLS-terminated listeners without " "having to add the Octavia keystone user id to the ACL list. Octavia will " "also automatically revoke access to secrets whenever load balancing " "resources no longer require access to them." msgstr "" "Added ability for Octavia to automatically set Barbican ACLs on behalf of " "the user. This enables users to create TLS-terminated listeners without " "having to add the Octavia Keystone user id to the ACL list. Octavia will " "also automatically revoke access to secrets whenever load balancing " "resources no longer require access to them." msgid "" "Added an option to the diskimage-create.sh script to specify the Octavia Git " "branch to build the image from." msgstr "" "Added an option to the diskimage-create.sh script to specify the Octavia Git " "branch to build the image from." msgid "" "Added hook to plugin.sh: `octavia_create_network_interface_device` and " "`octavia_delete_network_interface_device`. For each of these functions, if " "they are defined during stack (respectively unstack), they are called to " "create (respectively delete) the management network interface." msgstr "" "Added hook to plugin.sh: `octavia_create_network_interface_device` and " "`octavia_delete_network_interface_device`. For each of these functions, if " "they are defined during stack (respectively unstack), they are called to " "create (respectively delete) the management network interface." msgid "" "Added new tool ``octavia-status upgrade check``. This framework allows " "adding various checks which can be run before a Octavia upgrade to ensure if " "the upgrade can be performed safely." msgstr "" "Added new tool ``octavia-status upgrade check``. This framework allows " "adding various checks which can be run before an Octavia upgrade to verify " "that the upgrade can be performed safely." msgid "" "Added option 'sync_provisioning_status' to enable synchronizing provisioning " "status of loadbalancers with the neutron-lbaas database. Enabling this " "option will queue one additional message per amphora every heartbeat " "interval." msgstr "" "Added option 'sync_provisioning_status' to enable synchronising provisioning " "status of load balancers with the neutron-lbaas database. Enabling this " "option will queue one additional message per amphora every heartbeat " "interval." msgid "Added support for CentOS 8 amphora images." msgstr "Added support for CentOS 8 amphora images." msgid "" "Added support to VIP access control list. Users can now limit incoming " "traffic to a set of allowed CIDRs."
msgstr "" "Added support for VIP access control lists. Users can now limit incoming " "traffic to a set of allowed CIDRs." msgid "Added support to create RHEL 8 amphora images." msgstr "Added support to create RHEL 8 amphora images." msgid "" "Added support to debug with the Python Visual Studio Debugger engine (ptvsd)." msgstr "" "Added support to debug with the Python Visual Studio Debugger engine (ptvsd)." msgid "Added tags property for Octavia resources. It includes:" msgstr "Added tags property for Octavia resources. It includes:" msgid "" "Added the 'failover' sub-resource for the Amphora API. Each amphora can be " "triggered to failover by sending a PUT (with an empty body) to the resource " "``/v2.0/octavia/amphorae//failover``. It will cause the amphora to be " "recycled and replaced, in the same way as the health-triggered failover." msgstr "" "Added the 'failover' sub-resource for the Amphora API. Each amphora can be " "triggered to failover by sending a PUT (with an empty body) to the resource " "``/v2.0/octavia/amphorae//failover``. It will cause the amphora to be " "recycled and replaced, in the same way as the health-triggered failover." msgid "" "Added the oslo-middleware healthcheck app to the Octavia API. Hitting /" "healthcheck will return a 200. This is enabled via the " "[api_settings]healthcheck_enabled setting and is disabled by default." msgstr "" "Added the oslo-middleware healthcheck app to the Octavia API. Hitting /" "healthcheck will return a 200. This is enabled via the " "[api_settings]healthcheck_enabled setting and is disabled by default." msgid "" "Adding `ID` column to the health_monitor table in Octavia, whose value is " "same as the `pool_id` column. The database needs to be upgraded first, " "followed by upgrade and restart of the API servers." msgstr "" "Adding `ID` column to the health_monitor table in Octavia, whose value is " "same as the `pool_id` column. The database needs to be upgraded first, " "followed by upgrade and restart of the API servers." msgid "" "Adding a member with different IP protocol version than the VIP IP protocol " "version in a UDP load balancer caused a crash in the amphora. A validation " "step in the amphora driver now prevents mixing IP protocol versions in UDP " "load balancers." msgstr "" "Adding a member with different IP protocol version than the VIP IP protocol " "version in a UDP load balancer caused a crash in the amphora. A validation " "step in the amphora driver now prevents mixing IP protocol versions in UDP " "load balancers." msgid "Adding support for the listener X-Forwarded-Proto header insertion." msgstr "Adding support for the listener X-Forwarded-Proto header insertion." msgid "" "Adds a configuration option, \"reserved_ips\" that allows the operator to " "block addresses from being used in load balancer members. The default " "setting blocks the nova metadata service address." msgstr "" "Adds a configuration option, \"reserved_ips\" that allows the operator to " "block addresses from being used in load balancer members. The default " "setting blocks the Nova metadata service address." msgid "" "Adds a health monitor type of UDP-CONNECT that does a basic UDP port connect." msgstr "" "Adds a health monitor type of UDP-CONNECT that does a basic UDP port connect." msgid "Adds a new config parameter to specify the anti-affinity policy" msgstr "Adds a new config parameter to specify the anti-affinity policy" msgid "Adds an administrator API to access per-amphora statistics."
msgstr "Adds an administrator API to access per-amphora statistics." msgid "Adds quota support to the Octavia API." msgstr "Adds quota support to the Octavia API." msgid "Adds support for IPv6" msgstr "Adds support for IPv6" msgid "" "Adds support for Layer 7 switching and shared pools features to Octavia. " "This supports the equivalent feature added to Neutron LBaaS v2." msgstr "" "Adds support for Layer 7 switching and shared pools features to Octavia. " "This supports the equivalent feature added to Neutron LBaaS v2." msgid "" "Adds support for PKCS7 PEM or DER encoded intermediate certificate bundles " "for TERMINATED_HTTPS listeners." msgstr "" "Adds support for PKCS7 PEM or DER encoded intermediate certificate bundles " "for TERMINATED_HTTPS listeners." msgid "Adds support for amphora images that use systemd." msgstr "Adds support for Amphora images that use systemd." msgid "Adds support for networks that do not have DHCP services enabled." msgstr "Adds support for networks that do not have DHCP services enabled." msgid "Adds support for the driver agent to query for load balancer objects." msgstr "Adds support for the driver agent to query for load balancer objects." msgid "Adds tables for active/standby." msgstr "Adds tables for active/standby." msgid "" "Adds the ability to define L7 rules based on TLS client authentication " "information. The new L7 rules are\\: \"L7RULE_TYPE_SSL_CONN_HAS_CERT\", " "\"L7RULE_TYPE_VERIFY_RESULT\", and \"L7RULE_TYPE_DN_FIELD\"." msgstr "" "Adds the ability to define L7 rules based on TLS client authentication " "information. The new L7 rules are\\: \"L7RULE_TYPE_SSL_CONN_HAS_CERT\", " "\"L7RULE_TYPE_VERIFY_RESULT\", and \"L7RULE_TYPE_DN_FIELD\"." msgid "" "After setting \"auth_strategy = keystone\" all incoming requests to Octavia " "API will be verified using Keystone are they send by authenticated person. " "By default that option is disabled because Neutron LBaaS v2 is not " "supporting that functionality properly." msgstr "" "After setting \"auth_strategy = keystone\", all incoming requests to the " "Octavia API will be verified with Keystone to confirm that they are sent by " "an authenticated user. By default that option is disabled because Neutron " "LBaaS v2 does not support that functionality properly." msgid "" "All pools configured under OVN provider driver are automatically migrated to " "SOURCE_IP_PORT algorithm. Previously algorithm was named as ROUND_ROBIN, but " "in fact it was not working like ROUND_ROBIN. After investigating, it was " "observed that core OVN actually utilizes a 5 Tuple Hash/RSS Hash in DPDK/" "Kernel as a Load Balancing algorithm. The 5 Tuple Hash has Source IP, " "Destination IP, Protocol, Source Port, Destination Port. To reflect this the " "name was changed to SOURCE_IP_PORT." msgstr "" "All pools configured under OVN provider driver are automatically migrated to " "SOURCE_IP_PORT algorithm. Previously algorithm was named as ROUND_ROBIN, but " "in fact it was not working like ROUND_ROBIN. After investigating, it was " "observed that core OVN actually utilises a 5 Tuple Hash/RSS Hash in DPDK/" "Kernel as a Load Balancing algorithm. The 5 Tuple Hash has Source IP, " "Destination IP, Protocol, Source Port, Destination Port. To reflect this the " "name was changed to SOURCE_IP_PORT." msgid "" "Allow creation of volume based amphora. Many deploy production use volume " "based instances because of more flexibility. Octavia will create volume and " "attach this to the amphora." msgstr "" "Allow creation of volume-based amphorae.
Many production deployments use " "volume-based instances because of their greater flexibility. Octavia will " "create a volume and attach it to the amphora." msgid "" "Allow the loadbalancer's VIP to be created on the same network as the " "management interface." msgstr "" "Allow the load balancer's VIP to be created on the same network as the " "management interface." msgid "" "Allows the operator to optionally restrict the amphora glance image " "selection to a specific owner id. This is a recommended security setting for " "clouds that allow user uploadable images." msgstr "" "Allows the operator to optionally restrict the amphora glance image " "selection to a specific owner id. This is a recommended security setting for " "clouds that allow user uploadable images." msgid "" "Amphora API now can return the field `compute_flavor` which is the ID of the " "compute instance flavor used to boot the amphora." msgstr "" "Amphora API now can return the field `compute_flavor` which is the ID of the " "compute instance flavour used to boot the amphora." msgid "" "Amphora API now returns the field `image_id` which is the ID of the glance " "image used to boot the amphora." msgstr "" "Amphora API now returns the field `image_id` which is the ID of the glance " "image used to boot the amphora." msgid "" "Amphora failover is supported when active/standby is enabled. Should the " "master or backup amphora fail, the health manager will rebuild it." msgstr "" "Amphora failover is supported when active/standby is enabled. Should the " "master or backup Amphora fail, the health manager will rebuild it." msgid "Amphora image support for RH Linux flavors." msgstr "Amphora image support for RH Linux flavours." msgid "" "Amphora images with HAProxy older than 1.6 (CentOS 7, etc.) will still use " "health monitor type TCP when PING is selected by the user." msgstr "" "Amphora images with HAProxy older than 1.6 (CentOS 7, etc.) will still use " "health monitor type TCP when PING is selected by the user." msgid "" "Amphora will need to be updated to a new image with this version of the " "agent and ping-wrapper.sh script prior to updating the Octavia controllers. " "If a load balancer is using a health monitor of type PING with an amphora " "image that has not been updated, the next configuration change to the load " "balancer will cause it to go into an ERROR state until it is failed over to " "an updated image." msgstr "" "Amphora will need to be updated to a new image with this version of the " "agent and ping-wrapper.sh script prior to updating the Octavia controllers. " "If a load balancer is using a health monitor of type PING with an Amphora " "image that has not been updated, the next configuration change to the load " "balancer will cause it to go into an ERROR state until it is failed over to " "an updated image." msgid "" "Amphora with a terminated HTTPS load balancer can no longer be rebooted. If " "they reboot, they will trigger a failover of the amphora." msgstr "" "Amphora with a terminated HTTPS load balancer can no longer be rebooted. If " "they reboot, they will trigger a failover of the Amphora." msgid "Amphorae are unable to provide tenant flow logs for UDP listeners." msgstr "Amphorae are unable to provide tenant flow logs for UDP listeners." msgid "" "Amphorae that are booting for a specific loadbalancer will now be linked to " "that loadbalancer immediately upon creation.
Previously this would not " "happen until near the end of the process, leaving a gap during booting " "during which is was difficult to understand which booting amphora belonged " "to which loadbalancer. This was especially problematic when attempting to " "troubleshoot loadbalancers that entered ERROR status due to boot issues." msgstr "" "Amphorae that are booting for a specific loadbalancer will now be linked to " "that loadbalancer immediately upon creation. Previously this would not " "happen until near the end of the process, leaving a gap during booting " "during which it was difficult to understand which booting amphora belonged " "to which loadbalancer. This was especially problematic when attempting to " "troubleshoot loadbalancers that entered ERROR status due to boot issues." msgid "" "An operator documentation page has been added to list known Octavia provider " "drivers and provide links to those drivers. Non-reference drivers, drivers " "other than the \"amphora\" driver, will be outside of the octavia code " "repository but are dynamically loadable via a well defined interface " "described in the provider driver developers guide." msgstr "" "An operator documentation page has been added to list known Octavia provider " "drivers and provide links to those drivers. Non-reference drivers, drivers " "other than the \"amphora\" driver, will be outside of the Octavia code " "repository but are dynamically loadable via a well defined interface " "described in the provider driver developers guide." msgid "" "As a followup to the fix that resolved CVE-2018-16856, Octavia will now " "encrypt certificates and keys used for secure communication with amphorae, " "in its internal workflows. Octavia used to exclude debug-level log prints " "for specific tasks and flows that were explicitly specified by name, a " "method that is susceptive to code changes." msgstr "" "As a followup to the fix that resolved CVE-2018-16856, Octavia will now " "encrypt certificates and keys used for secure communication with amphorae, " "in its internal workflows. Octavia used to exclude debug-level log prints " "for specific tasks and flows that were explicitly specified by name, a " "method that is susceptible to code changes." msgid "" "As part of GDPR compliance, connection logs might be considered personal " "data and might need to follow specific data retention policies. Disabling " "connection logging might aid in making Octavia compliant by preventing the " "output of such data. As always, consult with an expert on compliance prior " "to making changes." msgstr "" "As part of GDPR compliance, connection logs might be considered personal " "data and might need to follow specific data retention policies. Disabling " "connection logging might aid in making Octavia compliant by preventing the " "output of such data. As always, consult with an expert on compliance prior " "to making changes." msgid "" "Availability zone profiles can now override the ``valid_vip_networks`` " "configuration option." msgstr "" "Availability zone profiles can now override the ``valid_vip_networks`` " "configuration option." msgid "" "Backend re-encryption allows users to configure pools to initiate TLS " "connections to the backend member servers. This enables load balancers to " "authenticate and encrypt connections from the load balancer to the backend " "member server." msgstr "" "Backend re-encryption allows users to configure pools to initiate TLS " "connections to the backend member servers.
This enables load balancers to " "authenticate and encrypt connections from the load balancer to the backend " "member server." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Certificate and key storage for terminated HTTPS load balancers is now in an " "encrypted ramfs path inside the amphora." msgstr "" "Certificate and key storage for terminated HTTPS load balancers is now in an " "encrypted ramfs path inside the Amphora." msgid "" "Certificate bundles can now be stored in any backend Castellan supports, and " "can be retrieved via a Castellan driver, even if Barbican is not deployed." msgstr "" "Certificate bundles can now be stored in any backend Castellan supports, and " "can be retrieved via a Castellan driver, even if Barbican is not deployed." msgid "" "Cloud deployers can set `api_settings.allow_ping_health_monitors = False` in " "`octavia.conf` to disable the ability to create PING health monitors." msgstr "" "Cloud deployers can set `api_settings.allow_ping_health_monitors = False` in " "`octavia.conf` to disable the ability to create PING health monitors." msgid "" "Communication between the control-plane and the amphora-agent now uses " "minimum TLSv1.2 by default, and is configurable. The previous default of " "SSLv2/3 is widely considered insecure." msgstr "" "Communication between the control-plane and the amphora-agent now uses " "minimum TLSv1.2 by default, and is configurable. The previous default of " "SSLv2/3 is widely considered insecure." msgid "" "Config option `amp_ssh_access_allowed` is deprecated, as it overlaps with " "`amp_ssh_key_name` in functionality and is not needed. Simply leave the " "variable `amp_ssh_key_name` blank and no ssh key will be installed. This is " "the same result as using `amp_ssh_access_allowed = False`." msgstr "" "Config option `amp_ssh_access_allowed` is deprecated, as it overlaps with " "`amp_ssh_key_name` in functionality and is not needed. Simply leave the " "variable `amp_ssh_key_name` blank and no ssh key will be installed. This is " "the same result as using `amp_ssh_access_allowed = False`." msgid "" "Creating a member on a pool with no healthmonitor would sometimes briefly " "update their operating status from `NO_MONITOR` to `OFFLINE` and back to " "`NO_MONITOR` during the provisioning sequence. This flapping will no longer " "occur." msgstr "" "Creating a member on a pool with no healthmonitor would sometimes briefly " "update their operating status from `NO_MONITOR` to `OFFLINE` and back to " "`NO_MONITOR` during the provisioning sequence. This flapping will no longer " "occur." msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Depending on how the other queue is set up additional passwords for the " "other queue will be in the Octavia config file. Operators should take care " "of setting up appropriate users with appropriate restrictions to the " "topic(s) needed." msgstr "" "Depending on how the other queue is set up additional passwords for the " "other queue will be in the Octavia config file. Operators should take care " "of setting up appropriate users with appropriate restrictions to the " "topic(s) needed." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Disabling connection logging might make it more difficult to audit systems " "for unauthorized access, from which IPs it originated, and which assets were " "compromised." 
msgstr "" "Disabling connection logging might make it more difficult to audit systems " "for unauthorised access, from which IPs it originated, and which assets were " "compromised." msgid "Extended support for Keystone API v3." msgstr "Extended support for Keystone API v3." msgid "" "Finally completely remove tenant_id, as it was deprecated along with the " "keystone v2 API in Mitaka, which means we're free of it in Pike!" msgstr "" "Finally completely remove tenant_id, as it was deprecated along with the " "keystone v2 API in Mitaka, which means we're free of it in Pike!" msgid "" "Fixed an issue that caused failover to unsuccessful if the vip network was " "not DHCP enabled." msgstr "" "Fixed an issue that caused failover to be unsuccessful if the VIP network " "was not DHCP enabled." msgid "" "Fixed an issue where health monitors of type PING were really doing a TCP " "health check." msgstr "" "Fixed an issue where health monitors of type PING were really doing a TCP " "health check." msgid "" "Fixed an issue where the amphora would fail to bring up the VIP if the VIP " "network did not have a gateway specified in neutron." msgstr "" "Fixed an issue where the amphora would fail to bring up the VIP if the VIP " "network did not have a gateway specified in Neutron." msgid "" "Fixes a bug where unspecified or unlimited listener connection limit " "settings would lead to a 2000 connection limit when using the amphora/" "octavia driver. This was the compiled in connection limit in some HAproxy " "packages." msgstr "" "Fixes a bug where unspecified or unlimited listener connection limit " "settings would lead to a 2000 connection limit when using the Amphora/" "Octavia driver. This was the compiled-in connection limit in some HAProxy " "packages." msgid "" "Fixes a neutron-lbaas LBaaS v2 API compatibility issue when requesting a " "load balancer status tree via '/statuses'." msgstr "" "Fixes a neutron-lbaas LBaaS v2 API compatibility issue when requesting a " "load balancer status tree via '/statuses'." msgid "Fixes admin-state-up=False action for loadbalancer and listener." msgstr "Fixes admin-state-up=False action for load balancer and listener." msgid "" "Fixes an issue where VIP return traffic was always routed, if a gateway was " "defined, through the gateway address even if it was local traffic." msgstr "" "Fixes an issue where VIP return traffic was always routed, if a gateway was " "defined, through the gateway address even if it was local traffic." msgid "" "Fixes an issue where if more than one amphora fails at the same time, " "failover might not fully complete, leaving the load balancer in ERROR." msgstr "" "Fixes an issue where if more than one Amphora fails at the same time, " "failover might not fully complete, leaving the load balancer in ERROR." msgid "" "Fixes an issue with hmac.compare_digest on python3 that could cause health " "manager \"calculated hmac not equal to msg hmac\" errors." msgstr "" "Fixes an issue with hmac.compare_digest on python3 that could cause health " "manager \"calculated hmac not equal to msg hmac\" errors." msgid "" "Fixes the v2 API returning \"DELETED\" records until the amphora_expiry_age " "timeout expired. The API will now immediately return a 404 HTTP status code " "when deleted objects are requested. The API version has been raised to v2.1 " "to reflect this change." msgstr "" "Fixes the v2 API returning \"DELETED\" records until the amphora_expiry_age " "timeout expired.
The API will now immediately return a 404 HTTP status code " "when deleted objects are requested. The API version has been raised to v2.1 " "to reflect this change." msgid "" "For the OpenStack Pike release, the Octavia team is excited to announce " "Octavia version 1.0.0 and introduce the Octavia v2 API. Octavia can now be " "deployed without neutron-lbaas as a standalone endpoint. The Octavia v2 API " "is fully backward compatible with the neutron-lbaas v2 API and is a superset " "of the neutron-lbaas v2 API." msgstr "" "For the OpenStack Pike release, the Octavia team is excited to announce " "Octavia version 1.0.0 and introduce the Octavia v2 API. Octavia can now be " "deployed without neutron-lbaas as a standalone endpoint. The Octavia v2 API " "is fully backward compatible with the neutron-lbaas v2 API and is a superset " "of the neutron-lbaas v2 API." msgid "" "For the diskimage-create script, the BASE_OS_MIRROR environment variable was " "renamed to DIB_DISTRIBUTION_MIRROR" msgstr "" "For the diskimage-create script, the BASE_OS_MIRROR environment variable was " "renamed to DIB_DISTRIBUTION_MIRROR" msgid "" "From configuration file section \"keystone_authtoken_v3\" was removed and " "all parameters are stored in \"keystone_authtoken\" section of configuration " "file." msgstr "" "The configuration file section \"keystone_authtoken_v3\" was removed and " "all parameters are now stored in the \"keystone_authtoken\" section of the " "configuration file." msgid "" "Glance image containing the latest Amphora image can now be referenced using " "a Glance tag. To use the feature, set amp_image_tag in [controller_worker]. " "Note that amp_image_id should be unset for the new feature to take into " "effect." msgstr "" "Glance image containing the latest Amphora image can now be referenced using " "a Glance tag. To use the feature, set amp_image_tag in [controller_worker]. " "Note that amp_image_id should be unset for the new feature to take effect." msgid "" "Health Monitor type \"HTTPS\" now correctly performs the configured check. " "This is done with all certificate validation disabled, so it will not work " "if backend members are performing client certificate validation." msgstr "" "Health Monitor type \"HTTPS\" now correctly performs the configured check. " "This is done with all certificate validation disabled, so it will not work " "if backend members are performing client certificate validation." msgid "" "If users have configured Health Monitors of type \"HTTPS\" and are expecting " "a simple \"TLS-HELLO\" check, they will need to recreate their monitor with " "the new \"TLS-HELLO\" type." msgstr "" "If users have configured Health Monitors of type \"HTTPS\" and are expecting " "a simple \"TLS-HELLO\" check, they will need to recreate their monitor with " "the new \"TLS-HELLO\" type." msgid "" "Improvements to the keepalived system used in active/standby topologies. " "keepalived is now monitored for health by the amphora agent (previously just " "by the init system) and a systemd race condition between keepalived and " "haproxy have been resolved." msgstr "" "Improvements to the keepalived system used in active/standby topologies. " "keepalived is now monitored for health by the amphora agent (previously just " "by the init system) and a systemd race condition between keepalived and " "HAProxy has been resolved." msgid "" "Improves error messages returned to the user, such as errors for attempting " "to add a second health monitor to a pool."
msgstr "" "Improves error messages returned to the user, such as errors for attempting " "to add a second health monitor to a pool." msgid "" "In some enviornments (e.g. OSA) Neutron and Octavia use different queues (at " "least different vhosts) and so if Octavia posts to the Octavia queue and " "Neutron listens on the Neutron queue the events will never make it over." msgstr "" "In some environments (e.g. OSA) Neutron and Octavia use different queues (at " "least different vhosts) and so if Octavia posts to the Octavia queue and " "Neutron listens on the Neutron queue the events will never make it over." msgid "" "Installed drivers need to be enabled for use in the Octavia configuration " "file once you are ready to expose the driver to users." msgstr "" "Installed drivers need to be enabled for use in the Octavia configuration " "file once you are ready to expose the driver to users." msgid "" "It is now possible to completely remove sshd from the amphora image, to " "further lock down access and increase security. If this is set, providing an " "`amp_ssh_key_name` in config will install the key, but ssh access will not " "be possible as sshd will not be running." msgstr "" "It is now possible to completely remove SSHd from the Amphora image, to " "further lock down access and increase security. If this is set, providing an " "`amp_ssh_key_name` in config will install the key, but ssh access will not " "be possible as SSHd will not be running." msgid "" "It is now possible to completely update a pool's member list as a batch " "operation. Using a PUT request on the base member endpoint of a pool, you " "can specify a list of member objects and the service will perform any " "necessary creates/deletes/updates as a single operation." msgstr "" "It is now possible to completely update a pool's member list as a batch " "operation. Using a PUT request on the base member endpoint of a pool, you " "can specify a list of member objects and the service will perform any " "necessary creates/deletes/updates as a single operation." msgid "Known Issues" msgstr "Known Issues" msgid "" "Layer 7 policies allow a tenant / user to define actions the load balancer " "may take other than routing requests to the default pool." msgstr "" "Layer 7 policies allow a tenant / user to define actions the load balancer " "may take other than routing requests to the default pool." msgid "" "Layer 7 rules control the logic behind whether a given Layer 7 policy is " "followed." msgstr "" "Layer 7 rules control the logic behind whether a given Layer 7 policy is " "followed." msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "Listeners have four new timeout settings:" msgstr "Listeners have four new timeout settings:" msgid "" "Members have a new boolean option `backup`. When set to `true`, the member " "will not receive traffic until all non-backup members are offline. Once all " "non-backup members are offline, traffic will begin balancing between the " "backup members." msgstr "" "Members have a new boolean option `backup`. When set to `true`, the member " "will not receive traffic until all non-backup members are offline. Once all " "non-backup members are offline, traffic will begin balancing between the " "backup members." msgid "" "Members that are disabled via `admin_state_up=False` are now rendered in the " "HAProxy configuration on the amphora as `disabled`. Previously they were not " "rendered at all. 
This means that disabled members will now appear in health " "messages, and will properly change status to OFFLINE." msgstr "" "Members that are disabled via `admin_state_up=False` are now rendered in the " "HAProxy configuration on the Amphora as `disabled`. Previously they were not " "rendered at all. This means that disabled members will now appear in health " "messages, and will properly change status to OFFLINE." msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "" "Neutron LBaaS was assigning the VIP port it created the user's project-id, " "thus allowing the user to attach Floating-IPs to the VIP port. Octavia, on " "the other hand, was assigning the Octavia project-id to the port, making it " "impossible for the user to attach a Floating IP. This patch brings Octavia's " "behavior in line with Neutron LBaaS and assigns the user's project-id to the " "VIP port created by Octavia." msgstr "" "Neutron LBaaS was assigning the VIP port it created the user's project-id, " "thus allowing the user to attach Floating-IPs to the VIP port. Octavia, on " "the other hand, was assigning the Octavia project-id to the port, making it " "impossible for the user to attach a Floating IP. This patch brings Octavia's " "behaviour in line with Neutron LBaaS and assigns the user's project-id to " "the VIP port created by Octavia." msgid "New Features" msgstr "New Features" msgid "" "New Health Monitor type \"TLS-HELLO\" to perform a simple TLS connection." msgstr "" "New Health Monitor type \"TLS-HELLO\" to perform a simple TLS connection." msgid "" "New option `load_balancer_expiry_age` is added to the `house_keeping` config " "section. It defines load balancer expiry age in seconds, the default value " "is 604800." msgstr "" "New option `load_balancer_expiry_age` is added to the `house_keeping` config " "section. It defines load balancer expiry age in seconds, the default value " "is 604800." msgid "" "New option in diskimage-create.sh `-n` to completely disable sshd on the " "amphora." msgstr "" "New option in diskimage-create.sh `-n` to completely disable SSHd on the " "Amphora image." msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "" "Note that while the Octavia v2 API now supports Role Bassed Access Control " "(RBAC), the Octavia v1.0 API does not. The Octavia v1.0 API should not be " "exposed publicly and should only be used internally such as for the neutron-" "lbaas octavia driver. Publicly accessible instances of the Octavia API " "should have the v1.0 API disabled via the Octavia configuration file." msgstr "" "Note that while the Octavia v2 API now supports Role Based Access Control " "(RBAC), the Octavia v1.0 API does not. The Octavia v1.0 API should not be " "exposed publicly and should only be used internally such as for the neutron-" "lbaas Octavia driver. Publicly accessible instances of the Octavia API " "should have the v1.0 API disabled via the Octavia configuration file." msgid "" "Now Octavia API can accept the QoS Policy id from neutron to support the QoS " "requirements towards Load Balancer VIP port when create/update load balancer." msgstr "" "Now Octavia API can accept the QoS Policy id from neutron to support the QoS " "requirements towards Load Balancer VIP port when create/update load balancer." msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Octavia API now supports WSGI deplyment." msgstr "Octavia API now supports WSGI deployment." 
msgid "Octavia Release Notes" msgstr "Octavia Release Notes" msgid "" "Octavia now has a v2 API that can be used as a standalone endpoint. The " "Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API " "and is a superset of the neutron-lbaas v2 API. For more information see the " "Octavia API reference: https://developer.openstack.org/api-ref/load-balancer/" "v2/index.html" msgstr "" "Octavia now has a v2 API that can be used as a standalone endpoint. The " "Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API " "and is a superset of the neutron-lbaas v2 API. For more information see the " "Octavia API reference: https://developer.openstack.org/api-ref/load-balancer/" "v2/index.html" msgid "" "Octavia now has an up to date API reference for the Octavia v2 API. It is " "available at: https://developer.openstack.org/api-ref/load-balancer/" msgstr "" "Octavia now has an up to date API reference for the Octavia v2 API. It is " "available at: https://developer.openstack.org/api-ref/load-balancer/" msgid "" "Octavia now has options to limit the amphora concurrent build rate. This may " "be useful for deployments where nova can get overloaded. Amphora builds will " "be prioritized in the following order: failover, normal, spares pool builds. " "See the configuration guide for more information: https://docs.openstack.org/" "octavia/latest/configuration/configref.html#haproxy_amphora.build_rate_limit" msgstr "" "Octavia now has options to limit the Amphora concurrent build rate. This may " "be useful for deployments where Nova can get overloaded. Amphora builds will " "be prioritised in the following order: failover, normal, spares pool builds. " "See the configuration guide for more information: https://docs.openstack.org/" "octavia/latest/configuration/configref.html#haproxy_amphora.build_rate_limit" msgid "" "Octavia now supports provider drivers. This allows third party load " "balancing drivers to be integrated with the Octavia v2 API. Users select the " "\"provider\" for a load balancer at creation time." msgstr "" "Octavia now supports provider drivers. This allows third party load " "balancing drivers to be integrated with the Octavia v2 API. Users select the " "\"provider\" for a load balancer at creation time." msgid "" "Octavia supports different Keystone APIs and choose authentication mechanism " "based on configuration specified in \"keystone_authtoken\" section of " "octavia.conf file." msgstr "" "Octavia supports different Keystone APIs and chooses the authentication " "mechanism based on the configuration specified in the \"keystone_authtoken\" " "section of the octavia.conf file." msgid "" "Octavia will use the OpenStack service type 'load-balancer'. For more " "information about service types, see the Octavia API reference: https://" "developer.openstack.org/api-ref/load-balancer/v2/index.html#service-endpoints" msgstr "" "Octavia will use the OpenStack service type 'load-balancer'. For more " "information about service types, see the Octavia API reference: https://" "developer.openstack.org/api-ref/load-balancer/v2/index.html#service-endpoints" msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "Policy.json enforcement in Octavia. * Enables verification of privileges on " "specific API command for a specific user role and project_id." msgstr "" "Policy.json enforcement in Octavia. * Enables verification of privileges on " "specific API command for a specific user role and project_id."
msgid "Prelude" msgstr "Prelude" msgid "" "Private keys can no longer be password protected, as PKCS12 does not support " "storing a passphrase in an explicitly defined way. Note that this is not " "noticeably less secure than storing a passphrase protected private key in " "the same place as the passphrase, as was the case with Barbican." msgstr "" "Private keys can no longer be password protected, as PKCS12 does not support " "storing a passphrase in an explicitly defined way. Note that this is not " "noticeably less secure than storing a passphrase protected private key in " "the same place as the passphrase, as was the case with Barbican." msgid "" "Provider of \"octavia\" has been deprecated in favor of \"amphora\" to " "clarify the provider driver supporting the load balancer." msgstr "" "Provider of \"octavia\" has been deprecated in favour of \"amphora\" to " "clarify the provider driver supporting the load balancer." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "Remove duplicated config option 'cert_generator' in [controller_worker]. " "Operators now should set it under [certificates]." msgstr "" "Remove duplicated config option 'cert_generator' in [controller_worker]. " "Operators now should set it under [certificates]." msgid "" "Resolved an issue that could cause provisioning status to become out of sync " "between neutron-lbaas and octavia during high load." msgstr "" "Resolved an issue that could cause provisioning status to become out of sync " "between neutron-lbaas and Octavia during high load." msgid "Resolves an issue with subnets larger than /24" msgstr "Resolves an issue with subnets larger than /24" msgid "Resolves an issue with using encrypted TLS private keys." msgstr "Resolves an issue with using encrypted TLS private keys." msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "Security Issues" msgstr "Security Issues" msgid "" "Session persistence is maintained between the active and standby amphora." msgstr "" "Session persistence is maintained between the active and standby amphora." msgid "" "Several API related variables are moving to their own section " "`api_settings`. bind_host bind_port api_handler allow_pagination " "allow_sorting pagination_max_limit api_base_uri" msgstr "" "Several API related variables are moving to their own section " "`api_settings`. bind_host bind_port api_handler allow_pagination " "allow_sorting pagination_max_limit api_base_uri" msgid "" "Shared pools allow listeners or Layer 7 REDIRECT_TO_POOL policies to share " "back-end pools." msgstr "" "Shared pools allow listeners or Layer 7 REDIRECT_TO_POOL policies to share " "back-end pools." msgid "" "Shared-pools introduces a new ``load_balancer_id`` column into the ``pools`` " "table." msgstr "" "Shared-pools introduces a new ``load_balancer_id`` column into the ``pools`` " "table." msgid "" "Some versions of HAProxy incorrectly reported nodes in DRAIN status as being " "UP, and Octavia code was written around this incorrect reporting. This has " "been fixed in some versions of HAProxy and is now handled properly in " "Octavia as well. Now it is possible for members to be in the status " "DRAINING. Note that this is masked when statuses are forwarded to neutron-" "lbaas in the eventstream, so no compatibility change is necessary." msgstr "" "Some versions of HAProxy incorrectly reported nodes in DRAIN status as being " "UP, and Octavia code was written around this incorrect reporting. 
This has " "been fixed in some versions of HAProxy and is now handled properly in " "Octavia as well. Now it is possible for members to be in the status " "DRAINING. Note that this is masked when statuses are forwarded to neutron-" "lbaas in the event stream, so no compatibility change is necessary." msgid "" "Stale load balancer entries with DELETED provisioning_status are now cleaned-" "up by housekeeper after if they are older than `load_balancer_expiry_age`." msgstr "" "Stale load balancer entries with DELETED provisioning_status are now cleaned " "up by the housekeeper if they are older than `load_balancer_expiry_age`." msgid "Start using reno to manage release notes." msgstr "Start using Reno to manage release notes." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "Support for Keystone token authentication on frontend Octavia API." msgstr "Support for Keystone token authentication on frontend Octavia API." msgid "" "The \"use_upstart\" configuration option is now deprecated because the " "amphora agent can now automatically discover the init system in use in the " "amphora image." msgstr "" "The \"use_upstart\" configuration option is now deprecated because the " "amphora agent can now automatically discover the init system in use in the " "Amphora image." msgid "" "The Octavia API handlers are now deprecated and replaced by the new provider " "driver support. Octavia API handlers will remain in the code to support the " "Octavia v1 API (used for neutron-lbaas)." msgstr "" "The Octavia API handlers are now deprecated and replaced by the new provider " "driver support. Octavia API handlers will remain in the code to support the " "Octavia v1 API (used for neutron-lbaas)." msgid "" "The Octavia project documentation has been reorganized as part of the " "OpenStack documentation migration project. The Octavia project documentation " "is now located at: https://docs.openstack.org/octavia/latest/" msgstr "" "The Octavia project documentation has been reorganised as part of the " "OpenStack documentation migration project. The Octavia project documentation " "is now located at: https://docs.openstack.org/octavia/latest/" msgid "" "The Octavia v2 API now supports Role Based Access Control (RBAC). The " "default rules require users to have a load-balancer_* role to be able to " "access the Octavia v2 API. This can be overriden with the admin_or_owner-" "policy.json sample file provided. See the `Octavia Policies `_ document for more " "information." msgstr "" "The Octavia v2 API now supports Role Based Access Control (RBAC). The " "default rules require users to have a load-balancer_* role to be able to " "access the Octavia v2 API. This can be overridden with the admin_or_owner-" "policy.json sample file provided. See the `Octavia Policies `_ document for more " "information." msgid "" "The amphora haproxy user_group setting is now automatically detected for " "Ubuntu, CentOS, Fedora, or RHEL based amphora." msgstr "" "The Amphora HAProxy user_group setting is now automatically detected for " "Ubuntu, CentOS, Fedora, or RHEL based Amphora." msgid "" "The amphora-agent is now able to distinguish between operating systems and " "choose the right course of action to manage files and networking on each " "Linux flavor." msgstr "" "The Amphora-agent is now able to distinguish between operating systems and " "choose the right course of action to manage files and networking on each " "Linux flavour."
msgid "" "The compute zone (if applicable) is now cached in the database and returned " "in the Amphora API as `cached_zone`. Please note that this is only set at " "the original time of provisioning, and could be stale for various reasons " "(for example, if live-migrations have taken place due to maintenances). We " "recommend it be used for reference only, unless you are absolutey certain it " "is current in your environment. The source of truth is still the system you " "use for compute." msgstr "" "The compute zone (if applicable) is now cached in the database and returned " "in the Amphora API as `cached_zone`. Please note that this is only set at " "the original time of provisioning, and could be stale for various reasons " "(for example, if live-migrations have taken place due to maintenances). We " "recommend it be used for reference only, unless you are absolutely certain " "it is current in your environment. The source of truth is still the system " "you use for compute." msgid "" "The configuration setting auth_strategy is now set to keystone by default." msgstr "" "The configuration setting auth_strategy is now set to Keystone by default." msgid "" "The diskimage-create script now supports generic download mirrors via the " "DIB_DISTRIBUTION_MIRROR environment variable, replacing the existing " "distribution-specific elements" msgstr "" "The diskimage-create script now supports generic download mirrors via the " "DIB_DISTRIBUTION_MIRROR environment variable, replacing the existing " "distribution-specific elements" msgid "" "The diskimage-create script supports different operating system flavors such " "as Ubuntu (the default option), CentOS, Fedora and RHEL. Adaptations were " "made to several elements to ensure all images are operational." msgstr "" "The diskimage-create script supports different operating system flavours " "such as Ubuntu (the default option), CentOS, Fedora and RHEL. Adaptations " "were made to several elements to ensure all images are operational." msgid "" "The fix for the hmac.compare_digest on python3 requires you to upgrade your " "health managers before updating the amphora image. The health manager is " "compatible with older amphora images, but older controllers will reject the " "health heartbeats from images with this fix." msgstr "" "The fix for the hmac.compare_digest on python3 requires you to upgrade your " "health managers before updating the amphora image. The health manager is " "compatible with older amphora images, but older controllers will reject the " "health heartbeats from images with this fix." msgid "The keepalived improvements require the amphora image to be upgraded." msgstr "The keepalived improvements require the amphora image to be upgraded." msgid "" "The new option `[haproxy_amphora]/connection_logging` will disable logging " "of connection data if set to False which can improve performance of the load " "balancer and might aid compliance." msgstr "" "The new option `[haproxy_amphora]/connection_logging` will disable logging " "of connection data if set to False which can improve performance of the load " "balancer and might aid compliance." msgid "" "The project_id attribute of the POST method on the following objects is now " "deprecated\\: listener, pool, health monitor, and member. These objects will " "use the parent load balancer's project_id. Values passed into the project_id " "on those objects will be ignored until the deprecation cycle has expired, at " "which point they will cause an error." 
msgstr "" "The project_id attribute of the POST method on the following objects is now " "deprecated\\: listener, pool, health monitor, and member. These objects will " "use the parent load balancer's project_id. Values passed into the project_id " "on those objects will be ignored until the deprecation cycle has expired, at " "which point they will cause an error." msgid "" "The provider driver support requires a database migration and follows " "Octavia standard rolling upgrade procedures; database migration followed by " "rolling control plane upgrades. Existing load balancers with no provider " "specified will be assigned \"amphora\" as part of the database migration." msgstr "" "The provider driver support requires a database migration and follows " "Octavia standard rolling upgrade procedures; database migration followed by " "rolling control plane upgrades. Existing load balancers with no provider " "specified will be assigned \"amphora\" as part of the database migration." msgid "" "The quota objects named `health_monitor` and `load_balancer` have been " "renamed to `healthmonitor` and `loadbalancer`, respectively. The old names " "are deprecated, and will be removed in the T cycle." msgstr "" "The quota objects named `health_monitor` and `load_balancer` have been " "renamed to `healthmonitor` and `loadbalancer`, respectively. The old names " "are deprecated, and will be removed in the T cycle." msgid "The value for all of these fields is expected to be in milliseconds." msgstr "The value for all of these fields is expected to be in milliseconds." msgid "There is now an API available to list enabled provider drivers." msgstr "There is now an API available to list enabled provider drivers." msgid "" "These custom distribution mirror elements for the diskimage-script were " "removed: apt-mirror, centos-mirror, fedora-mirror" msgstr "" "These custom distribution mirror elements for the diskimage-script were " "removed: apt-mirror, centos-mirror, fedora-mirror" msgid "" "This adds a way to configure a custom queue for the event streamer thus " "allowing to post messages to the Neutron queue if needed." msgstr "" "This adds a way to configure a custom queue for the event streamer, thus " "allowing messages to be posted to the Neutron queue if needed." msgid "" "This feature add new configuration value \"auth_strategy\" which by default " "is set for \"noauth\"." msgstr "" "This feature adds a new configuration value, \"auth_strategy\", which by " "default is set to \"noauth\"." msgid "" "To enabled encrypted ramfs storage for certificates and keys, you must " "upgrade your amphora image." msgstr "" "To enable encrypted ramfs storage for certificates and keys, you must " "upgrade your amphora image." msgid "To fix the admin-state-up bug you must upgrade your amphora image." msgstr "To fix the admin-state-up bug you must upgrade your Amphora image." msgid "" "To support IPv6 a databse migration and amphora image update are required." msgstr "" "To support IPv6 a database migration and Amphora image update are required." msgid "To support networks without DHCP you must upgrade your amphora image." msgstr "To support networks without DHCP you must upgrade your Amphora image." msgid "" "To use CentOS, Fedora, or RHEL in your amphora image you must set the " "user_group option, located in the [haproxy_amphora] section of the octavia." "conf file to \"haproxy\". This will be made automatic in a future version."
msgstr "" "To use CentOS, Fedora, or RHEL in your amphora image you must set the " "user_group option, located in the [haproxy_amphora] section of the octavia." "conf file to \"haproxy\". This will be made automatic in a future version." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "" "Two new options are included with provider driver support. The " "enabled_provider_drivers option defaults to \"amphora, octavia\" to support " "existing Octavia load balancers. The default_provider_driver option defaults " "to \"amphora\" for all new load balancers that do not specify a provider at " "creation time. These defaults should cover most existing deployments." msgstr "" "Two new options are included with provider driver support. The " "enabled_provider_drivers option defaults to \"amphora, octavia\" to support " "existing Octavia load balancers. The default_provider_driver option defaults " "to \"amphora\" for all new load balancers that do not specify a provider at " "creation time. These defaults should cover most existing deployments." msgid "" "Two new tables are created to handle Layer 7 switching. These are " "``l7policy`` and ``l7rule``." msgstr "" "Two new tables are created to handle Layer 7 switching. These are " "``l7policy`` and ``l7rule``." msgid "Updates load balancer, listener, and amphora tables." msgstr "Updates load balancer, listener, and Amphora tables." msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "Upgrade requires a database migration." msgstr "Upgrade requires a database migration." msgid "" "Users can now use a reference to a single PKCS12 bundle as their " "`default_tls_container_ref` instead of a Barbican container with individual " "secret objects. PKCS12 supports bundling a private key, certificate, and " "intermediates. Private keys can no longer be passphrase protected when using " "PKCS12 bundles. No configuration change is necessary to enable this feature. " "Users may simply begin using this. Any use of the old style containers will " "be detected and automatically fall back to using the old Barbican driver." msgstr "" "Users can now use a reference to a single PKCS12 bundle as their " "`default_tls_container_ref` instead of a Barbican container with individual " "secret objects. PKCS12 supports bundling a private key, certificate, and " "intermediates. Private keys can no longer be passphrase protected when using " "PKCS12 bundles. No configuration change is necessary to enable this feature. " "Users may simply begin using this. Any use of the old style containers will " "be detected and automatically fall back to using the old Barbican driver." msgid "" "When enabled in the configuration file, Octavia will boot an active and " "standby amphora for each load balancer." msgstr "" "When enabled in the configuration file, Octavia will boot an active and " "standby Amphora for each load balancer." msgid "Works for HTTP and TERMINATED_HTTPS listeners." msgstr "Works for HTTP and TERMINATED_HTTPS listeners." msgid "" "You can now provide a certificate revocation list reference for listeners " "using TLS client authentication." msgstr "" "You can now provide a certificate revocation list reference for listeners " "using TLS client authentication." msgid "" "You can now specify a ca_tls_container_ref and crl_container_ref on pools " "for validating backend pool members using TLS." msgstr "" "You can now specify a ca_tls_container_ref and crl_container_ref on pools " "for validating backend pool members using TLS." 
msgid "" "You can now specify a certificate authority certificate reference, on " "listeners, for use with TLS client authentication." msgstr "" "You can now specify a certificate authority certificate reference, on " "listeners, for use with TLS client authentication." msgid "" "You can now specify a tls_container_ref on pools for TLS client " "authentication to pool members." msgstr "" "You can now specify a tls_container_ref on pools for TLS client " "authentication to pool members." msgid "" "You can now update the running configuration of the Octavia control plane " "processes by sending the parent process a \"HUP\" signal. Note: The " "configuration item must support mutation." msgstr "" "You can now update the running configuration of the Octavia control plane " "processes by sending the parent process a \"HUP\" signal. Note: The " "configuration item must support mutation." msgid "" "You cannot mix IPv4 UDP listeners with IPv6 members at this time. This is " "being tracked with this story https://storyboard.openstack.org/#!/" "story/2003329" msgstr "" "You cannot mix IPv4 UDP listeners with IPv6 members at this time. This is " "being tracked with this story https://storyboard.openstack.org/#!/" "story/2003329" msgid "[DEFAULT] api_handler" msgstr "[DEFAULT] api_handler" msgid "[DEFAULT] auth_strategy" msgstr "[DEFAULT] auth_strategy" msgid "[DEFAULT] bind_host" msgstr "[DEFAULT] bind_host" msgid "[DEFAULT] bind_port" msgstr "[DEFAULT] bind_port" msgid "" "``pools.load_balancer_id`` column is populated from ``listeners`` data using " "ETL in the migration." msgstr "" "``pools.load_balancer_id`` column is populated from ``listeners`` data using " "ETL in the migration." msgid "" "`status_update_threads` config option for healthmanager is deprecated " "because it is replaced as `health_update_threads` and `stats_update_threads`." msgstr "" "`status_update_threads` config option for healthmanager is deprecated " "because it is replaced as `health_update_threads` and `stats_update_threads`." msgid "`timeout_client_data`: Frontend client inactivity timeout" msgstr "`timeout_client_data`: Frontend client inactivity timeout" msgid "`timeout_member_connect`: Backend member connection timeout" msgstr "`timeout_member_connect`: Backend member connection timeout" msgid "`timeout_member_data`: Backend member inactivity timeout" msgstr "`timeout_member_data`: Backend member inactivity timeout" msgid "" "`timeout_tcp_inspect`: Time to wait for TCP packets for content inspection" msgstr "" "`timeout_tcp_inspect`: Time to wait for TCP packets for content inspection" msgid "" "agent_server_network_dir is now auto-detected for Ubuntu, CentOS, Fedora and " "RHEL if one is not specified in the configuration file." msgstr "" "agent_server_network_dir is now auto-detected for Ubuntu, CentOS, Fedora and " "RHEL if one is not specified in the configuration file." msgid "" "amp_image_id option is deprecated and will be removed in one of the next " "releases. Operators are adviced to migrate to the new amp_image_tag option." msgstr "" "amp_image_id option is deprecated and will be removed in one of the next " "releases. Operators are advised to migrate to the new amp_image_tag option." msgid "" "haproxy user_group is no longer being used. it is now auto-detected for " "Ubuntu, CentOS, Fedora and RHEL based amphora images." msgstr "" "HAProxy user_group is no longer being used. it is now auto-detected for " "Ubuntu, CentOS, Fedora and RHEL based Amphora images." 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/mitaka.rst0000664000175000017500000000022000000000000021155 0ustar00zuulzuul00000000000000============================== Mitaka Series Release Notes ============================== .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/newton.rst0000664000175000017500000000023200000000000021224 0ustar00zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000020777 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/pike.rst0000664000175000017500000000021700000000000020645 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/queens.rst0000664000175000017500000000022300000000000021212 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000021037 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/stein.rst0000664000175000017500000000022100000000000021032 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/train.rst0000664000175000017500000000017600000000000021036 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/releasenotes/source/unreleased.rst0000664000175000017500000000016000000000000022041 0ustar00zuulzuul00000000000000============================== Current Series Release Notes ============================== .. 
release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/requirements.txt0000664000175000017500000000365700000000000016471 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. alembic>=0.8.10 # MIT cotyledon>=1.3.0 # Apache-2.0 pecan>=1.3.2 # BSD pbr!=2.1.0,>=2.0.0 # Apache-2.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT SQLAlchemy-Utils>=0.30.11 Babel!=2.4.0,>=2.3.4 # BSD futurist>=1.2.0 # Apache-2.0 requests>=2.14.2 # Apache-2.0 rfc3986>=0.3.1 # Apache-2.0 keystoneauth1>=3.4.0 # Apache-2.0 keystonemiddleware>=4.17.0 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 WebOb>=1.8.2 # MIT stevedore>=1.20.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.db>=4.27.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.messaging>=6.3.0 # Apache-2.0 oslo.middleware>=4.0.1 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 oslo.serialization>=2.28.1 # Apache-2.0 oslo.upgradecheck>=0.1.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 pyasn1!=0.2.3,>=0.1.8 # BSD pyasn1-modules>=0.0.6 # BSD PyMySQL>=0.7.6 # MIT License python-barbicanclient>=4.5.2 # Apache-2.0 python-glanceclient>=2.8.0 # Apache-2.0 python-novaclient>=9.1.0 # Apache-2.0 python-cinderclient>=3.3.0 # Apache-2.0 pyOpenSSL>=17.1.0 # Apache-2.0 WSME>=0.8.0 # MIT Jinja2>=2.10 # BSD License (3 clause) taskflow>=4.1.0 # Apache-2.0 diskimage-builder>=2.27.0 # Apache-2.0 castellan>=0.16.0 # Apache-2.0 tenacity>=5.0.4 # Apache-2.0 distro>=1.2.0 # Apache-2.0 jsonschema>=2.6.0 # MIT debtcollector>=1.19.0 # Apache-2.0 octavia-lib>=2.0.0 # Apache-2.0 netaddr>=0.7.19 # BSD simplejson>=3.13.2 # MIT setproctitle>=1.1.10 # BSD #for the amphora api Flask!=0.11,>=0.10 # BSD netifaces>=0.10.4 # MIT cryptography>=2.1 # BSD/Apache-2.0 pyroute2>=0.4.21;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) gunicorn>=19.9.0 # MIT Werkzeug>=0.14.1 # BSD License ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4582167 octavia-6.2.2/setup.cfg0000664000175000017500000001234500000000000015020 0ustar00zuulzuul00000000000000[metadata] name = octavia summary = OpenStack Octavia Scalable Load Balancer as a Service description-file = README.rst author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://docs.openstack.org/octavia/latest/ python-requires = >=3.6 classifier = Development Status :: 5 - Production/Stable Environment :: OpenStack Intended Audience :: Developers Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] packages = octavia data_files = etc/octavia = etc/octavia.conf share/octavia = LICENSE README.rst share/octavia/diskimage-create = diskimage-create/diskimage-create.sh diskimage-create/image-tests.sh diskimage-create/README.rst diskimage-create/requirements.txt diskimage-create/test-requirements.txt diskimage-create/tox.ini diskimage-create/version.txt [entry_points] wsgi_scripts = octavia-wsgi = octavia.api.app:setup_app 
console_scripts = octavia-api = octavia.cmd.api:main octavia-worker = octavia.cmd.octavia_worker:main octavia-health-manager = octavia.cmd.health_manager:main octavia-housekeeping = octavia.cmd.house_keeping:main octavia-db-manage = octavia.db.migration.cli:main octavia-driver-agent = octavia.cmd.driver_agent:main amphora-agent = octavia.cmd.agent:main haproxy-vrrp-check = octavia.cmd.haproxy_vrrp_check:main octavia-status = octavia.cmd.status:main octavia.api.drivers = noop_driver = octavia.api.drivers.noop_driver.driver:NoopProviderDriver noop_driver-alt = octavia.api.drivers.noop_driver.driver:NoopProviderDriver amphora = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver octavia = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver amphorav2 = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver octavia.amphora.drivers = amphora_noop_driver = octavia.amphorae.drivers.noop_driver.driver:NoopAmphoraLoadBalancerDriver amphora_haproxy_rest_driver = octavia.amphorae.drivers.haproxy.rest_api_driver:HaproxyAmphoraLoadBalancerDriver octavia.amphora.health_update_drivers = health_logger = octavia.controller.healthmanager.health_drivers.update_logging:HealthUpdateLogger health_db = octavia.controller.healthmanager.health_drivers.update_db:UpdateHealthDb octavia.amphora.stats_update_drivers = stats_logger = octavia.controller.healthmanager.health_drivers.update_logging:StatsUpdateLogger stats_db = octavia.controller.healthmanager.health_drivers.update_db:UpdateStatsDb octavia.amphora.udp_api_server = keepalived_lvs = octavia.amphorae.backends.agent.api_server.keepalivedlvs:KeepalivedLvs octavia.compute.drivers = compute_noop_driver = octavia.compute.drivers.noop_driver.driver:NoopComputeDriver compute_nova_driver = octavia.compute.drivers.nova_driver:VirtualMachineManager octavia.driver_agent.provider_agents = noop_agent = octavia.api.drivers.noop_driver.agent:noop_provider_agent octavia.network.drivers = network_noop_driver = octavia.network.drivers.noop_driver.driver:NoopNetworkDriver allowed_address_pairs_driver = octavia.network.drivers.neutron.allowed_address_pairs:AllowedAddressPairsDriver containers_driver = octavia.network.drivers.neutron.containers:ContainersDriver octavia.volume.drivers = volume_noop_driver = octavia.volume.drivers.noop_driver.driver:NoopVolumeDriver volume_cinder_driver = octavia.volume.drivers.cinder_driver:VolumeManager octavia.distributor.drivers = distributor_noop_driver = octavia.distributor.drivers.noop_driver.driver:NoopDistributorDriver single_VIP_amphora = octavia.distributor.drivers.single_VIP_amphora.driver:SingleVIPAmpDistributorDriver octavia.cert_generator = local_cert_generator = octavia.certificates.generator.local:LocalCertGenerator octavia.cert_manager = local_cert_manager = octavia.certificates.manager.local:LocalCertManager barbican_cert_manager = octavia.certificates.manager.barbican:BarbicanCertManager castellan_cert_manager = octavia.certificates.manager.castellan_mgr:CastellanCertManager octavia.barbican_auth = barbican_acl_auth = octavia.certificates.common.auth.barbican_acl:BarbicanACLAuth octavia.plugins = hot_plug_plugin = octavia.controller.worker.v1.controller_worker:ControllerWorker octavia.worker.jobboard_driver = redis_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:RedisTaskFlowDriver zookeeper_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:ZookeeperTaskFlowDriver oslo.config.opts = octavia = octavia.opts:list_opts oslo.policy.policies = 
octavia = octavia.policies:list_rules oslo.policy.enforcer = octavia = octavia.common.policy:get_no_context_enforcer oslo.middleware.healthcheck = octavia_db_check = octavia.api.healthcheck.healthcheck_plugins:OctaviaDBHealthcheck [compile_catalog] directory = octavia/locale domain = octavia [update_catalog] domain = octavia output_dir = octavia/locale input_file = octavia/locale/octavia.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = octavia/locale/octavia.pot [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/setup.py0000664000175000017500000000200600000000000014702 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4542167 octavia-6.2.2/specs/0000775000175000017500000000000000000000000014307 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/example.dot0000664000175000017500000000113600000000000016453 0ustar00zuulzuul00000000000000/* This work is licensed under a Creative Commons Attribution 3.0 * Unported License. * * http://creativecommons.org/licenses/by/3.0/legalcode */ digraph G { label="Sample Graph" subgraph cluster_0 { style=filled; color=lightgrey; node [style=filled,color=white]; a0 -> a1 -> a2 -> a3; label = "process #1"; } subgraph cluster_1 { node [style=filled]; b0 -> b1 -> b2 -> b3; label = "process #2"; color=blue } start -> a0; start -> b0; a1 -> b3; b2 -> a3; a3 -> a0; a3 -> end; b3 -> end; start [shape=Mdiamond]; end [shape=Msquare]; } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/skeleton.rst0000664000175000017500000000154700000000000016674 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. 
http://creativecommons.org/licenses/by/3.0/legalcode ========================================== Title of your blueprint ========================================== Problem description =================== Proposed change =============== Alternatives ------------ Data model impact ----------------- REST API impact --------------- Security impact --------------- Notifications impact -------------------- Other end user impact --------------------- Performance Impact ------------------ Other deployer impact --------------------- Developer impact ---------------- Implementation ============== Assignee(s) ----------- Work Items ---------- Dependencies ============ Testing ======= Documentation Impact ==================== References ========== ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/template.rst0000664000175000017500000003545200000000000016665 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ========================================== Example Spec - The title of your blueprint ========================================== Include the URL of your launchpad blueprint: https://blueprints.launchpad.net/octavia/+spec/example Introduction paragraph -- why are we doing anything? A single paragraph of prose that operators can understand. Some notes about using this template: * Your spec should be in ReSTructured text, like this template. * Please wrap text at 80 columns. * The filename in the git repository should match the launchpad URL, for example a URL of: https://blueprints.launchpad.net/octavia/+spec/awesome-thing should be named awesome-thing.rst * Please do not delete any of the sections in this template. If you have nothing to say for a whole section, just write: None * For help with syntax, see http://sphinx-doc.org/rest.html * To test out your formatting, build the docs using tox, or see: http://rst.ninjs.org * If you would like to provide a diagram with your spec, text representations are preferred. http://asciiflow.com/ is a very nice tool to assist with making ascii diagrams. blockdiag is another tool. These are described below. For more complicated diagrams that need "real" graphics, yet still should be in the git revision control system, GraphViz .dot files are acceptable. If you require an image (screenshot) for your BP, attaching that to the BP and checking it in is also accepted. However, text representations are preferred. * Diagram examples asciiflow:: +----------+ +-----------+ +----------+ | A | | B | | C | | +-----+ +--------+ | +----------+ +-----------+ +----------+ blockdiag .. blockdiag:: blockdiag sample { a -> b -> c; } actdiag .. actdiag:: actdiag { write -> convert -> image lane user { label = "User" write [label = "Writing reST"]; image [label = "Get diagram IMAGE"]; } lane actdiag { convert [label = "Convert reST to Image"]; } } nwdiag .. nwdiag:: nwdiag { network dmz { address = "210.x.x.x/24" web01 [address = "210.x.x.1"]; web02 [address = "210.x.x.2"]; } network internal { address = "172.x.x.x/24"; web01 [address = "172.x.x.1"]; web02 [address = "172.x.x.2"]; db01; db02; } } seqdiag .. seqdiag:: seqdiag { browser -> webserver [label = "GET /index.html"]; browser <-- webserver; browser -> webserver [label = "POST /blog/comment"]; webserver -> database [label = "INSERT comment"]; webserver <-- database; browser <-- webserver; } graphviz .. 
graphviz:: digraph G { label="Sample Graph" subgraph cluster_0 { style=filled; color=lightgrey; node [style=filled,color=white]; a0 -> a1 -> a2 -> a3; label = "process #1"; } subgraph cluster_1 { node [style=filled]; b0 -> b1 -> b2 -> b3; label = "process #2"; color=blue } start -> a0; start -> b0; a1 -> b3; b2 -> a3; a3 -> a0; a3 -> end; b3 -> end; start [shape=Mdiamond]; end [shape=Msquare]; } graphviz (external file) .. graphviz:: example.dot Problem description =================== A detailed description of the problem: * For a new feature this might be use cases. Ensure you are clear about the actors in each use case: End User vs Deployer * For a major reworking of something existing it would describe the problems in that feature that are being addressed. Proposed change =============== Here is where you cover the change you propose to make in detail. How do you propose to solve this problem? If this is one part of a larger effort make it clear where this piece ends. In other words, what's the scope of this effort? Alternatives ------------ What other ways could we do this thing? Why aren't we using those? This doesn't have to be a full literature review, but it should demonstrate that thought has been put into why the proposed solution is an appropriate one. Data model impact ----------------- Changes which require modifications to the data model often have a wider impact on the system. The community often has strong opinions on how the data model should be evolved, from both a functional and performance perspective. It is therefore important to capture and gain agreement as early as possible on any proposed changes to the data model. Questions which need to be addressed by this section include: * What new data objects and/or database schema changes is this going to require? * What database migrations will accompany this change. * How will the initial set of new data objects be generated, for example if you need to take into account existing instances, or modify other existing data describe how that will work. REST API impact --------------- Octavia includes several internal APIs (all of which should be versioned). In defining how API(s) are affected by this change, make sure to clearly indicate which API(s) specifically are being altered, which version of the API(s) are being altered, and other pertinent details as described below. While we are not using Neutron's attribute map facility since Octavia is not Neutron, following the tried-and-true guidelines Neutron uses around API changes is a good idea, including defining attribute map tables. For reference: For each API resource to be implemented using Neutron's attribute map facility (see the neutron.api.v2.attributes), describe the resource collection and specify the name, type, and other essential details of each new or modified attribute. 
A table similar to the following may be used: +----------+-------+---------+---------+------------+--------------+ |Attribute |Type |Access |Default |Validation/ |Description | |Name | | |Value |Conversion | | +==========+=======+=========+=========+============+==============+ |id |string |RO, all |generated|N/A |identity | | |(UUID) | | | | | +----------+-------+---------+---------+------------+--------------+ |name |string |RW, all |'' |string |human-readable| | | | | | |name | +----------+-------+---------+---------+------------+--------------+ |color |string |RW, admin|'red' |'red', |color | | | | | |'yellow', or|indicating | | | | | |'green' |state | +----------+-------+---------+---------+------------+--------------+ Here is the other example of the table using csv-table .. csv-table:: CSVTable :header: Attribute Name,Type,Access,Default Value,Validation Conversion,Description id,string (UUID),"RO, all",generated,N/A,identity name,string,"RW, all","''",string,human-readable name color,string,"RW, admin",red,"'red', 'yellow' or 'green'",color indicating state Each API method which is either added or changed that does not use Neutron's attribute map facility should have the following: * Specification for the method * A description of what the method does suitable for use in user documentation * Method type (POST/PUT/GET/DELETE) * Normal http response code(s) * Expected error http response code(s) * A description for each possible error code should be included describing semantic errors which can cause it such as inconsistent parameters supplied to the method, or when an instance is not in an appropriate state for the request to succeed. Errors caused by syntactic problems covered by the JSON schema definition do not need to be included. * URL for the resource * Parameters which can be passed via the url * JSON schema definition for the body data if allowed * JSON schema definition for the response data if any * Example use case including typical API samples for both data supplied by the caller and the response * Discuss any API policy changes, and discuss what things a deployer needs to think about when defining their API policy. This is in reference to the policy.json file. Note that the schema should be defined as restrictively as possible. Parameters which are required should be marked as such and only under exceptional circumstances should additional parameters which are not defined in the schema be permitted (eg additionalProperties should be False). Reuse of existing predefined parameter types such as regexps for passwords and user defined names is highly encouraged. Security impact --------------- Describe any potential security impact on the system. Some of the items to consider include: * Does this change touch sensitive data such as tokens, keys, or user data? * Does this change alter the API in a way that may impact security, such as a new way to access sensitive information or a new way to login? * Does this change involve cryptography or hashing? * Does this change require the use of sudo or any elevated privileges? * Does this change involve using or parsing user-provided data? This could be directly at the API level or indirectly such as changes to a cache layer. * Can this change enable a resource exhaustion attack, such as allowing a single API interaction to consume significant server resources? Some examples of this include launching subprocesses for each connection, or entity expansion attacks in XML. 
For more detailed guidance, please see the OpenStack Security Guidelines as a
reference (https://wiki.openstack.org/wiki/Security/Guidelines). These
guidelines are a work in progress and are designed to help you identify
security best practices. For further information, feel free to reach out to
the OpenStack Security Group at openstack-security@lists.openstack.org.

Notifications impact
--------------------

Please specify any changes to notifications. Be that an extra notification,
changes to an existing notification, or removing a notification.

Other end user impact
---------------------

Aside from the API, are there other ways a user will interact with this
feature? Keep in mind that 'user' in this context could mean either tenant or
operator.

* Does this change have an impact on python-neutronclient? What does the user
  interface there look like?

Performance Impact
------------------

Describe any potential performance impact on the system, for example how
often will new code be called, and is there a major change to the calling
pattern of existing code.

Examples of things to consider here include:

* A periodic task might look like a small addition, but if it calls conductor
  or another service the load is multiplied by the number of nodes in the
  system.

* A small change in a utility function or a commonly used decorator can have
  a large impact on performance.

* Calls which result in database queries (whether direct or via conductor)
  can have a profound impact on performance when called in critical sections
  of the code.

* Will the change include any locking, and if so what considerations are
  there on holding the lock?

Other deployer impact
---------------------

Discuss things that will affect how you deploy and configure OpenStack that
have not already been mentioned, such as:

* What config options are being added? Should they be more generic than
  proposed (for example a flag that other hypervisor drivers might want to
  implement as well)? Are the default values ones which will work well in
  real deployments?

* Is this a change that takes immediate effect after it's merged, or is it
  something that has to be explicitly enabled?

* If this change is a new binary, how would it be deployed?

* Please state anything that those doing continuous deployment, or those
  upgrading from the previous release, need to be aware of. Also describe any
  plans to deprecate configuration values or features. For example, if we
  change the directory name that instances are stored in, how do we handle
  instance directories created before the change landed? Do we move them? Do
  we have a special case in the code? Do we assume that the operator will
  recreate all the instances in their cloud?

Developer impact
----------------

Discuss things that will affect other developers working on OpenStack, such
as:

* If the blueprint proposes a change to the API, discussion of how other
  plugins would implement the feature is required.

Implementation
==============

Assignee(s)
-----------

Who is leading the writing of the code? Or is this a blueprint where you're
throwing it out there to see who picks it up?

If more than one person is working on the implementation, please designate
the primary author and contact.

Primary assignee:

Other contributors:

Work Items
----------

Work items or tasks -- break the feature up into the things that need to be
done to implement it. Those parts might end up being done by different
people, but we're mostly trying to understand the timeline for
implementation.
Dependencies
============

* Include specific references to specs and/or blueprints in octavia, or in
  other projects, that this one either depends on or is related to.

* If this requires functionality of another project that is not currently
  used by Octavia, document that fact.

* Does this feature require any new library dependencies or code otherwise
  not included in OpenStack? Or does it depend on a specific version of a
  library?

Testing
=======

Please discuss how the change will be tested. We especially want to know what
tempest tests will be added. It is assumed that unit test coverage will be
added, so that doesn't need to be mentioned explicitly; however, if you think
unit tests are sufficient and we don't need to add more tempest tests, a
discussion of why needs to be included.

Is this untestable in gate given current limitations (specific hardware /
software configurations available)? If so, are there mitigation plans (3rd
party testing, gate enhancements, etc.)?

Documentation Impact
====================

What is the impact on the docs team of this change? Some changes might
require donating resources to the docs team to have the documentation
updated. Don't repeat details discussed above, but please reference them
here.

References
==========

Please add any useful references here. You are not required to have any
reference. Moreover, this specification should still make sense when your
references are unavailable. Examples of what you could include are:

* Links to mailing list or IRC discussions

* Links to notes from a summit session

* Links to relevant research, if appropriate

* Related specifications as appropriate (e.g. link any vendor documentation)

* Anything else you feel it is worthwhile to refer to
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4542167
octavia-6.2.2/specs/version0.5/0000775000175000017500000000000000000000000016217 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/specs/version0.5/amphora-driver-interface.rst0000775000175000017500000002103600000000000023634 0ustar00zuulzuul00000000000000..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

==========================================
Amphora Driver Interface
==========================================

https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface

This blueprint describes how a driver will interface with the controller. It
will describe the base class and other classes required. It will not describe
the REST interface needed to talk to an amphora, nor how health information
or statistics are gathered from the amphora.

Problem description
===================

The controller needs to talk through a driver to the amphora to allow for
custom APIs and custom rendering of configuration data for different amphora
implementations.

The controller will heavily utilize taskflow [2] to accomplish its goals, so
drivers are highly encouraged to use taskflow to organize their work, too.

Proposed change
===============

Establish a base class to model the desired functionality:

.. code:: python

    class AmphoraLoadBalancerDriver(object):

        def update(self, listener, vip):
            """updates the amphora with a new configuration for the
            listener on the vip.
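
            :param listener: a listener object, as described in the
                Octavia data model (see the note following this class)
            :param vip: a vip object, as described in the Octavia data
                model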
""" raise NotImplementedError def stop(self, listener, vip): """stops the listener on the vip.""" return None def start(self, listener, vip): """starts the listener on the vip.""" return None def delete(self, listener, vip): """deletes the listener on the vip.""" raise NotImplementedError def get_info(self, amphora): """Get detailed information about an amphora returns information about the amphora, e.g. {"Rest Interface": "1.0", "Amphorae": "1.0", "packages":{"ha proxy":"1.5"}, "network-interfaces": {"eth0":{"ip":...}} some information might come from querying the amphora """ raise NotImplementedError def get_diagnostics(self, amphora): """OPTIONAL - Run diagnostics run some expensive self tests to determine if the amphora and the lbs are healthy the idea is that those tests are triggered more infrequent than the heartbeat """ raise NotImplementedError def finalize_amphora(self, amphora): """OPTIONAL - called once an amphora has been build but before any listeners are configured. This is a hook for drivers who need to do additional work before am amphora becomes ready to accept listeners. Please keep in mind that amphora might be kept in am offline pool after this call. """ pass def post_network_plug(self, amphora, port): """OPTIONAL - called after adding a compute instance to a network. This will perform any necessary actions to allow for connectivity for that network on that instance. port is an instance of octavia.network.data_models.Port. It contains information about the port, subnet, and network that was just plugged. """ def post_vip_plug(self, load_balancer, amphorae_network_config): """OPTIONAL - called after plug_vip method of the network driver. This is to do any additional work needed on the amphorae to plug the vip, such as bring up interfaces. amphorae_network_config is a dictionary of objects that include network specific information about each amphora's connections. """ def start_health_check(self, health_mixin): """start check health :param health_mixin: health mixin object :type amphora: object Start listener process and calls HealthMixin to update databases information. """ pass def stop_health_check(self): """stop check health Stop listener process and calls HealthMixin to update databases information. """ pass The referenced listener is a listener object and vip a vip as described in our model. The model is detached from the DB so the driver can't write to the DB. Because our initial goal is to render a whole config no special methods for adding nodes, health monitors, etc. are supported at this juncture. This might be added in later versions. No method for obtaining logs has been added. This will be done in a future blueprint. 
Exception Model
---------------

The driver is expected to raise the following well-defined exceptions:

* NotImplementedError - this functionality is not implemented/not supported
* AmphoraDriverError - a super class for all other exceptions and the catch
  all if no specific exception can be found
* NotFoundError - this amphora couldn't be found or was deleted by nova
* InfoException - gathering information about this amphora failed
* NetworkConfigException - gathering network information failed
* UnauthorizedException - the driver can't access the amphora
* TimeOutException - contacting the amphora timed out
* UnavailableException - the amphora is temporarily unavailable
* SuspendFailed - this load balancer couldn't be suspended
* EnableFailed - this load balancer couldn't be enabled
* DeleteFailed - this load balancer couldn't be deleted
* ProvisioningErrors - those are errors which happen during provisioning

  * ListenerProvisioningError - could not provision Listener
  * LoadBalancerProvisioningError - could not provision LoadBalancer
  * HealthMonitorProvisioningError - could not provision HealthMonitor
  * NodeProvisioningError - could not provision Node

Health and Stat Mixin
---------------------

It has been suggested to gather health and statistics information via UDP
packets emitted from the amphora. This requires each driver to spin up a
thread to listen on a UDP port and then hand the information to the
controller as a mixin to make sense of it.

Here is the mixin definition:

.. code:: python

    class HealthMixIn(object):

        def update_health(self, health):
            # Map: {"amphora-status": HEALTHY,
            #       "loadbalancers": {
            #           "loadbalancer-id": {
            #               "loadbalancer-status": HEALTHY,
            #               "listeners": {
            #                   "listener-id": {
            #                       "listener-status": HEALTHY,
            #                       "nodes": {"node-id": HEALTHY, ...}},
            #                   ...}},
            #           ...}}
            # Only items whose health has changed need to be submitted.
            # Awesome update code goes here.
            pass


    class StatsMixIn(object):

        def update_stats(self, stats):
            # Uses map {"loadbalancer-id": {"listener-id": {
            #     "bytes-in": 123, "bytes_out": 123,
            #     "active_connections": 123,
            #     "total_connections": 123}, ...}
            # Elements are named to keep it extensible for future versions.
            # Awesome update code, and code to send stats to Ceilometer.
            pass

Things a good driver should do:
-------------------------------

* Non-blocking I/O - throw an appropriate exception instead of waiting
  forever; use timeouts on sockets
* We might employ a circuit breaker to insulate driver problems from
  controller problems [1]
* Use appropriate logging
* Use the preferred threading model

This will be demonstrated in the Noop-driver code.

Alternatives
------------

Require all amphora to implement a common REST interface and use that as the
integration point.

Data model impact
-----------------

None

REST API impact
---------------

None

Security impact
---------------

None

Notifications impact
--------------------

None - since initial version

Other end user impact
---------------------

None

Performance Impact
------------------

Minimal

Other deployer impact
---------------------

Deployers need to make sure to bundle compatible versions of the amphora,
driver, and controller.

Developer impact
----------------

Need to write towards this clean interface.

Implementation
==============

Assignee(s)
-----------

German Eichberger

Work Items
----------

* Write abstract interface
* Write Noop driver
* Write tests

Dependencies
============

None

Testing
=======

* Unit tests with tox and Noop-Driver
* tempest tests with Noop-Driver

Documentation Impact
====================

None - we won't document the interface for 0.5.
If that changes, we need to write interface documentation so 3rd party
drivers know what we expect.

References
==========

[1] https://martinfowler.com/bliki/CircuitBreaker.html
[2] https://docs.openstack.org/taskflow/latest/
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/specs/version0.5/amphora-manager-interface.rst0000775000175000017500000001313000000000000023747 0ustar00zuulzuul00000000000000..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

==========================================
Compute Driver Interface
==========================================

https://blueprints.launchpad.net/octavia/+spec/compute-driver-interface

This blueprint describes how a driver will interface with Nova to manage the
creation and deletion of amphora instances. It will describe the base class
and other classes required to create, delete, manage the execution state,
and query the status of amphorae.

Problem description
===================

The controller needs to be able to create, delete, and monitor the status of
amphora instances. The amphorae may be virtual machines, containers,
bare-metal servers, or dedicated hardware load balancers. This interface
should hide the implementation details of the amphorae from the caller to
the maximum extent possible.

This interface will provide means to specify:

- type (VM, Container, bare metal)
- flavor (provides means to specify memory and storage capacity)
- what else?

Proposed change
===============

Establish an abstract base class to model the desired functionality:

.. code:: python

    class AmphoraComputeDriver(object):

        def build(self, amphora_type=VM, amphora_flavor=None,
                  image_id=None, keys=None, sec_groups=None,
                  network_ids=None, config_drive_files=None,
                  user_data=None):
            """Build a new amphora.

            :param amphora_type: The type of amphora to create. For version
                0.5, only VM is supported. In the future this may support
                Container, BareMetal, and HWLoadBalancer.
            :param amphora_flavor: Optionally specify a flavor. The
                interpretation of this parameter will depend upon the
                amphora type and may not be applicable to all types.
            :param image_id: ID of the base image for a VM amphora
            :param keys: Optionally specify a list of ssh public keys
            :param sec_groups: Optionally specify a list of security groups
            :param network_ids: A list of network_ids to attach to the
                amphora
            :param config_drive_files: A dict of files to overwrite on the
                server upon boot. Keys are file names (i.e. /etc/passwd)
                and values are the file contents (either as a string or as
                a file-like object). A maximum of five entries is allowed,
                and each file must be 10k or less.
            :param user_data: user data to be exposed by the metadata
                server; this can be a file-like object or a string
            :returns: The id of the new instance.
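            :raises AmphoraBuildError: an amphora of the specified type
                could not be built (see the Exception Model below)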
""" raise NotImplementedError def delete(self, amphora_id): """ delete the specified amphora """ raise NotImplementedError def status(self, amphora_id): """ Check whether the specified amphora is up :param amphora_id: the ID of the desired amphora :returns: the nova response from the amphora """ raise NotImplementedError def get_amphora(self, amphora_name = None, amphora_id = None): """ Try to find an amphora given its name or id :param amphora_name: the name of the desired amphora :param amphora_id: the id of the desired amphora :returns: the amphora object """ raise NotImplementedError Exception Model --------------- The driver is expected to raise the following well defined exceptions: * NotImplementedError - this functionality is not implemented/not supported * AmphoraComputeError - a super class for all other exceptions and the catch all if no specific exception can be found * AmphoraBuildError - An amphora of the specified type could not be built * DeleteFailed - this amphora couldn't be deleted * InstanceNotFoundError - an instance matching the desired criteria could not be found * NotSuspendedError - resume() attempted on an instance that was not suspended Things a good driver should do: ------------------------------- * Non blocking operations - If an operation will take a long time to execute, perform it asynchronously. The definition of "a long time" is open to interpretation, but a common UX guideline is 200 ms * We might employ a circuit breaker to insulate driver problems from controller problems [1] * Use appropriate logging * Use the preferred threading model This will be demonstrated in the Noop-driver code. Alternatives ------------ Data model impact ----------------- None REST API impact --------------- None Security impact --------------- None Notifications impact -------------------- None - since initial version Other end user impact --------------------- None Performance Impact ------------------ Minimal Other deployer impact --------------------- Deployers need to make sure to bundle the compatible versions of amphora, driver, controller -- Developer impact ---------------- Need to write towards this clean interface. Implementation ============== Assignee(s) ----------- Al Miller Work Items ---------- * Write abstract interface * Write Noop driver * Write tests Dependencies ============ None Testing ======= * Unit tests with tox and Noop-Driver * tempest tests with Noop-Driver Documentation Impact ==================== None - this is an internal interface and need not be externally documented. References ========== [1] http://martinfowler.com/bliki/CircuitBreaker.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/version0.5/base-image.rst0000664000175000017500000001265200000000000020751 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ========================================== Octavia Base Image ========================================== Launchpad blueprint: https://blueprints.launchpad.net/octavia/+spec/base-image Octavia is an operator-grade reference implementation for Load Balancing as a Service (LBaaS) for OpenStack. The component of Octavia that does the load balancing is known as amphora. Amphora may be a virtual machine, may be a container, or may run on bare metal. 
Creating images for bare metal amphora installs is outside the scope of this
0.5 specification, but may be added in a future release.

Amphora will need a base image that can be deployed by Octavia to provide
load balancing.

Problem description
===================

Octavia needs a method for generating base images to be deployed as load
balancing entities.

Proposed change
===============

Leverage the OpenStack diskimage-builder project [1] tools to provide a
script that builds qcow2 images or a tar file suitable for use in creating
containers. This script will be modeled after the OpenStack Sahara [2]
project's diskimage-create.sh script.

This script and associated elements will build Amphora images. Initial
support will be with an Ubuntu OS and HAProxy. The script will be able to
use Fedora or CentOS as a base OS, but these will not initially be tested or
supported. As the project progresses and/or the diskimage-builder project
adds support for additional base OS options, they may become available for
Amphora images. This does not mean that they are necessarily supported or
tested.

The script will use environment variables to customize the build beyond the
Octavia project defaults, such as adding elements.

The initial supported and tested image will be created using the
diskimage-create.sh defaults (no command line parameters or environment
variables set). As the project progresses we may add additional supported
configurations.

Command syntax:

.. line-block::

    $ diskimage-create.sh
        [-a i386 | **amd64** | armhf ]
        [-b **haproxy** ]
        [-c **~/.cache/image-create** | <cache directory> ]
        [-h]
        [-i **ubuntu** | fedora | centos ]
        [-o **amphora-x64-haproxy** | <output image file name> ]
        [-r <root password> ]
        [-s **5** | <image size in GB> ]
        [-t **qcow2** | tar ]
        [-w <working directory> ]

    '-a' is the architecture type for the image (default: amd64)
    '-b' is the backend type (default: haproxy)
    '-c' is the path to the cache directory (default: ~/.cache/image-create)
    '-h' displays the help message
    '-i' is the base OS (default: ubuntu)
    '-o' is the output image file name
    '-r' enable the root account in the generated image (default: disabled)
    '-s' is the image size to produce in gigabytes (default: 5)
    '-t' is the image type (default: qcow2)
    '-w' working directory for image building (default: .)

.. line-block::

    Environment variables supported by the script:
    DIB_DISTRIBUTION_MIRROR - URL to a mirror for the base OS selected (-i).
    DIB_REPO_PATH - Path to the diskimage-builder repository (default:
        ../../diskimage-builder)
    ELEMENTS_REPO_PATH - Path to the /tripleo-image-elements repository
        (default: ../../tripleo-image-elements)
    DIB_ELEMENTS - Override the elements used to build the image
    DIB_LOCAL_ELEMENTS - Elements to add to the build (requires
        DIB_LOCAL_ELEMENTS_PATH be specified)
    DIB_LOCAL_ELEMENTS_PATH - Path to the local elements directory

.. topic:: Container support

    The Docker command line required to import a tar file created with this
    script is [3]:

    .. code:: bash

        $ docker import - image:amphora-x64-haproxy < amphora-x64-haproxy.tar

Alternatives
------------

Deployers can manually create an image or container, but they would need to
make sure the required components are included.

Data model impact
-----------------

None

REST API impact
---------------

None

Security impact
---------------

None

Notifications impact
--------------------

None

Other end user impact
---------------------

None

Performance Impact
------------------

None

Other deployer impact
---------------------

This script will make creating an Octavia Amphora image or container simple.
Developer impact ---------------- None Implementation ============== Assignee(s) ----------- Michael Johnson Work Items ---------- 1. Write diskimage-create.sh script based on Sahara project's script. 2. Identify the list of packages required for Octavia Amphora. 3. Create required elements not provided by the diskimage-builder project. 4. Create unit tests Dependencies ============ This script will depend on the OpenStack diskimage-builder project. Testing ======= Initial testing will be completed using the default settings for the diskimage-create.sh tool. * Unit tests with tox * Validate that the image is the correct size and mounts via loopback * Check that a valid kernel is installed * Check that HAProxy and all required packages are installed * tempest tests Documentation Impact ==================== References ========== .. line-block:: [1] https://github.com/openstack/diskimage-builder [2] https://github.com/openstack/sahara-image-elements [3] https://github.com/openstack/diskimage-builder/blob/master/docs/docker.md ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/version0.5/component-design.rst0000664000175000017500000000452500000000000022230 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ============================================= Octavia v0.5 master component design document ============================================= Problem description =================== We need to define the various components that will make up Octavia v0.5. Proposed change =============== This is the first functional release of Octavia, incorporating a scalable service delivery layer, but not yet concerned with a scalable command and control layer. See doc/source/design/version0.5 for a detailed description of the v0.5 component design. Alternatives ------------ We're open to suggestions, but note that later designs already discussed on the mailing list will incorporate several features of this design. Data model impact ----------------- Octavia 0.5 introduces the main data model which will also be used in subsequent releases. REST API impact --------------- None Security impact --------------- The only sensitive data used in Octavia 0.5 are the TLS private keys used with TERMINATED_HTTPS functionality. However, the back-end storage aspect of these secrets will be handled by Barbican. Octavia amphorae will also need to keep copies of these secrets locally in order to facilitate seamless service restarts. These local stores should be made on a memory filesystem. Notifications impact -------------------- None Other end user impact --------------------- None Performance Impact ------------------ None Other deployer impact --------------------- Operator API and UI may need to be changed as a result of this specification. Developer impact ---------------- None beyond implementing the spec. :) Implementation ============== Assignee(s) ----------- Lots of us will be working on this! Work Items ---------- Again, lots of things to be done here. Dependencies ============ Barbican Testing ======= A lot of new tests will need to be written to test the separate components, their interfaces, and likely failure scenarios. Documentation Impact ==================== This specification largely defines the documentation of the component design. Component design is becoming a part of the project standard documentation. 
References
==========

Mailing list discussion of similar designs earlier this year
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0
octavia-6.2.2/specs/version0.5/controller-worker.rst0000664000175000017500000002026500000000000022450 0ustar00zuulzuul00000000000000..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

==================================
Controller Worker (deploy-worker)
==================================

Launchpad blueprint:

https://blueprints.launchpad.net/octavia/+spec/controller-worker

Octavia is an operator-grade reference implementation for Load Balancing as
a Service (LBaaS) for OpenStack. The component of Octavia that does the load
balancing is known as Amphora. The component of Octavia that provides
command and control of the Amphora is the Octavia controller.

Problem description
===================

Components of the Octavia controller require a shared library that provides
the orchestration of create/update/delete actions for Octavia objects such
as load balancers and listeners. It is expected that this library will be
used by the Queue Consumer to service API requests, by the Housekeeping
Manager to manage the spare Amphora pool, and by the Health Manager to fail
over failed objects.

Proposed change
===============

The Controller Worker will be implemented as a class that provides methods
to facilitate the create/update/delete actions. This class will be
responsible for managing the number of simultaneous operations being
executed by coordinating through the Octavia database.

The Controller Worker will provide a base class that sets up and initializes
the TaskFlow engines required to complete the action. Users of the library
will then call the appropriate method for the action. These methods set up
and launch the appropriate flow. Each flow will be contained in a separate
class for code reuse and supportability.

The Controller Worker library will provide the following methods:

.. code:: python

    def create_amphora(self):
        """Creates an Amphora.

        :returns: amphora_id
        """
        raise NotImplementedError

    def delete_amphora(self, amphora_id):
        """Deletes an existing Amphora.

        :param amphora_id: ID of the amphora to delete
        :returns: None
        :raises AmphoraNotFound: The referenced Amphora was not found
        """
        raise NotImplementedError

    def create_load_balancer(self, load_balancer_id):
        """Creates a load balancer by allocating Amphorae.

        :param load_balancer_id: ID of the load balancer to create
        :returns: None
        :raises NoSuitableAmphora: Unable to allocate an Amphora.
        """
        raise NotImplementedError

    def update_load_balancer(self, load_balancer_id, load_balancer_updates):
        """Updates a load balancer.

        :param load_balancer_id: ID of the load balancer to update
        :param load_balancer_updates: Dict containing updated load balancer
            attributes
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        raise NotImplementedError

    def delete_load_balancer(self, load_balancer_id):
        """Deletes a load balancer by de-allocating Amphorae.

        :param load_balancer_id: ID of the load balancer to delete
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        raise NotImplementedError

    def create_listener(self, listener_id):
        """Creates a listener.
        :param listener_id: ID of the listener to create
        :returns: None
        :raises NoSuitableLB: Unable to find the load balancer
        """
        raise NotImplementedError

    def update_listener(self, listener_id, listener_updates):
        """Updates a listener.

        :param listener_id: ID of the listener to update
        :param listener_updates: Dict containing updated listener attributes
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        raise NotImplementedError

    def delete_listener(self, listener_id):
        """Deletes a listener.

        :param listener_id: ID of the listener to delete
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        raise NotImplementedError

    def create_pool(self, pool_id):
        """Creates a node pool.

        :param pool_id: ID of the pool to create
        :returns: None
        :raises NoSuitableLB: Unable to find the load balancer
        """
        raise NotImplementedError

    def update_pool(self, pool_id, pool_updates):
        """Updates a node pool.

        :param pool_id: ID of the pool to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        raise NotImplementedError

    def delete_pool(self, pool_id):
        """Deletes a node pool.

        :param pool_id: ID of the pool to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        raise NotImplementedError

    def create_health_monitor(self, health_monitor_id):
        """Creates a health monitor.

        :param health_monitor_id: ID of the health monitor to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        raise NotImplementedError

    def update_health_monitor(self, health_monitor_id,
                              health_monitor_updates):
        """Updates a health monitor.

        :param health_monitor_id: ID of the health monitor to update
        :param health_monitor_updates: Dict containing updated health
            monitor attributes
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        raise NotImplementedError

    def delete_health_monitor(self, health_monitor_id):
        """Deletes a health monitor.

        :param health_monitor_id: ID of the health monitor to delete
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        raise NotImplementedError

    def create_member(self, member_id):
        """Creates a pool member.

        :param member_id: ID of the member to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        raise NotImplementedError

    def update_member(self, member_id, member_updates):
        """Updates a pool member.

        :param member_id: ID of the member to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        raise NotImplementedError

    def delete_member(self, member_id):
        """Deletes a pool member.

        :param member_id: ID of the member to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        raise NotImplementedError

    def failover_amphora(self, amphora_id):
        """Fails over an amphora.

        :param amphora_id: ID of the amphora to fail over
        :returns: None
        :raises AmphoraNotFound: The referenced Amphora was not found
        """
        raise NotImplementedError

Alternatives
------------

This code could be included in the Queue Consumer component of the
controller.
Alternatives
------------
This code could be included in the Queue Consumer component of the controller.
However, this would not allow the library to be shared with other components
of the controller, such as the Health Manager.

Data model impact
-----------------

REST API impact
---------------
None

Security impact
---------------

Notifications impact
--------------------

Other end user impact
---------------------

Performance Impact
------------------

Other deployer impact
---------------------

Developer impact
----------------

Implementation
==============

Assignee(s)
-----------
Michael Johnson

Work Items
----------

Dependencies
============
https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface

https://blueprints.launchpad.net/octavia/+spec/neutron-network-driver

https://blueprints.launchpad.net/octavia/+spec/nova-compute-driver

Testing
=======
Unit tests

Documentation Impact
====================
None

References
==========
https://blueprints.launchpad.net/octavia/+spec/health-manager

https://blueprints.launchpad.net/octavia/+spec/housekeeping-manager

https://blueprints.launchpad.net/octavia/+spec/queue-consumer

octavia-6.2.2/specs/version0.5/controller.dot
/*
 * Copyright 2014 Hewlett-Packard Development Company, L.P.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

digraph G {
  subgraph cluster0 {
    style=filled;
    color=gray75;
    label = "Controller";
    queue  [label="Queue\nConsumer", fontcolor=white, color=forestgreen, style=filled];
    health [label="Health\nManager", fontcolor=white, color=forestgreen, style=filled];
    house  [label="Housekeeping\n(Spares/Cleanup)\nManager", fontcolor=white, color=forestgreen, style=filled];
    ctrl   [label="Controller\nWorker", fontcolor=white, color=forestgreen, style=filled, shape=hexagon];
    proxy  [label="Services\nProxy", fontcolor=white, color=forestgreen, style=filled];
    subgraph cluster1 {
      style=filled;
      color=gray90;
      fontcolor=black;
      label = "Amphora Driver";
      msg    [label="Message\nHandler", fontcolor=white, color=forestgreen, style=filled];
      config [label="Config\nHandler", fontcolor=white, color=forestgreen, style=filled];
      stats  [label="Stats\nHandler", fontcolor=white, color=forestgreen, style=filled];
      log    [label="Log\nHandler", fontcolor=black, color=forestgreen, style=dashed];
    }
    health -> msg;
  }
  db      [label="Database", fontcolor=white, color=dodgerblue, style=filled];
  api     [label="APIs", fontcolor=white, color=forestgreen, style=filled];
  oslo    [label="Oslo\nMessaging", fontcolor=white, color=dodgerblue, style=filled];
  nova    [label="Nova", fontcolor=white, color=dodgerblue, style=filled];
  neutron [label="Neutron", fontcolor=white, color=dodgerblue, style=filled];
  cert    [label="Certificate\nLibrary", fontcolor=white, color=dodgerblue, style=filled];
  bbq     [label="Barbican", fontcolor=white, color=dodgerblue, style=filled];
  swift   [label="SWIFT", fontcolor=white, color=dodgerblue, style=filled];
  ceilo   [label="Ceilometer", fontcolor=white, color=dodgerblue, style=filled];
  amp     [label="Amphorae", fontcolor=black, color=coral2, style=filled];

  ctrl -> queue [dir="both"];
  db -> api -> oslo -> queue [dir="both"];
  db -> ctrl [dir="both"];
  db -> queue [dir="both"];
  db -> health [dir="both"];
  db -> house [dir="both"];
  db -> msg [dir="both"];
  nova -> ctrl [dir="both"];
  nova -> house [dir="both"];
  neutron -> ctrl [dir="both"];
  neutron -> house [dir="both"];
  proxy -> swift [dir="both"];
  proxy -> amp [dir="both"];
  cert -> ctrl [dir="both"];
  cert -> bbq [dir="both"];
  stats -> ceilo [dir="both"];
  msg -> amp [ltail=cluster1];
}

octavia-6.2.2/specs/version0.5/controller.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License. http://creativecommons.org/licenses/by/3.0/legalcode

==================
Octavia Controller
==================

Launchpad blueprint:

https://blueprints.launchpad.net/octavia/+spec/controller

Octavia is an operator-grade reference implementation for Load Balancing as a
Service (LBaaS) for OpenStack.  The component of Octavia that does the load
balancing is known as Amphora.  The component of Octavia that provides command
and control of the Amphora is the Octavia controller.
Problem description
===================
Octavia requires a controller component that provides the following
capabilities:

* Processing Amphora configuration updates and making them available to the
  Amphora driver
* Providing certificate information to the Amphora driver
* Deploying Amphora instances
* Managing the Amphora spares pool
* Cleaning up Amphora instances that are no longer needed
* Monitoring the health of Amphora instances
* Processing alerts and messages from the Amphora (example "member down")
* Respecting colocation / apolocation / flavor requirements of the Amphora
* Processing statistical data from the Amphora including communicating with
  metering services, such as Ceilometer
  (https://blueprints.launchpad.net/ceilometer/+spec/ceilometer-meter-lbaas)
* Responding to API requests sent by the API processes
* Proxying Amphora data to other OpenStack services such as Swift for log
  file archival

Proposed change
===============
The Octavia controller will consist of the following components:

* Amphora Driver
* Queue Consumer
* Certificate Library
* Compute Driver
* Controller Worker
* Health Manager
* Housekeeping Manager
* Network Driver
* Services Proxy

.. graphviz:: controller.dot

The manager and proxy components should be implemented as independent
processes to provide a level of autonomy to these controller functions.

The highly available database will provide the persistent "brain" for the
Octavia controller.  Octavia controller processes will share state and
information about the Amphorae, load balancers, and listeners via the
database.  It is expected that the Octavia controller and Amphora driver will
directly interact with the database, but the Amphorae will never directly
access the database.

By using a highly available database, Octavia controllers themselves do not
directly keep any stateful information on Amphorae.  Because of this,
Amphorae are not assigned to any specific controller.  Any controller is able
to service monitoring, heartbeat, API, and other requests coming to or from
Amphorae.

**Amphora Driver**

The Amphora driver abstracts the backend implementation of an Amphora.  The
controller will interact with Amphorae via the Amphora driver.  This
interface is defined in the amphora-driver-interface specification.

**Queue Consumer**

The Queue Consumer is event driven and tasked with servicing requests from
the API components via Oslo messaging.  It is also the primary lifecycle
management component for Amphorae.

To service requests the Queue Consumer will spawn a Controller Worker
process.  Spawning a separate process makes sure that the Queue Consumer can
continue to service API requests while the longer-running deployment process
is progressing.

Messages received via Oslo messaging will include the load balancer ID,
requested action, and configuration update data.  Passing the configuration
update data via Oslo messaging allows the deploy worker to roll back to a
"last known good" configuration should there be a problem with the
configuration update.  The spawned worker will use this information to access
the Octavia database to gather any additional details that may be required to
complete the requested action.

**Compute Driver**

The Compute Driver abstracts the implementation of instantiating the virtual
machine, container, appliance, or device that the Amphora will run in.

**Controller Worker**

The Controller Worker is spawned from the Queue Consumer or the Health
Manager.
It interfaces with the compute driver (in some deployment scenarios), network
driver, and Amphora driver to activate Amphora instances, load balancers, and
listeners.

When a request for a new instance or failover is received, the Controller
Worker will have responsibility for connecting the appropriate networking
ports to the Amphora via the network driver and triggering a configuration
push via the Amphora driver.  This will include validating that the targeted
Amphora has the required networks plumbed to it.

The Amphora configured by the Controller Worker may be an existing Amphora
instance, a new Amphora from the spares pool, or a newly created Amphora.
This determination will be made based on the apolocation requirements of the
load balancer, the load balancer count on the existing Amphora, and the
availability of ready spare Amphorae in the spares pool.

The Controller Worker will be responsible for passing in the required
metadata via config drive when deploying an Amphora.  This metadata will
include: a list of controller IP addresses, the controller certificate
authority certificate, and the Amphora certificate and key file.

The main flow of the Controller Worker is described in the
amphora-lifecycle-management specification as the Activate Amphora sequence.

**Certificate Library**

The Certificate Library provides an abstraction for workers to access
security data stored in OpenStack Barbican from the Amphora Driver.  It will
provide a short-term (one minute) cache of the security contents to
facilitate the efficient startup of a large number of listeners sharing
security content.

**Health Manager**

The Health Manager is tasked with checking for missing or unhealthy Amphorae
stored in the highly available database.  The amphora-lifecycle-management
specification details the health monitoring sequence.

The health monitor will have a separate thread that checks the heartbeat
timestamps recorded in the database on a configurable interval, to see
whether an Amphora has not provided a heartbeat in the required amount of
time, which is another configurable setting.  Should an Amphora fail to
report a heartbeat in the configured interval, the Health Manager will
initiate a failover of the Amphora by spawning a deploy worker and will
update the status of the listener in the database.

The Health Manager will have to be aware of the load balancer associated with
the failed listener to decide if it needs to fail over additional listeners
to migrate the failed listener to a new Amphora.
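As a rough illustration of the heartbeat-expiry check just described, here is
a minimal sketch.  The repository method (``get_stale_amphorae``), the
``failover`` callable, and the two interval values are hypothetical
placeholders for the configurable settings mentioned above.

.. code:: python

    # A minimal sketch of the Health Manager polling thread described above.
    # The repository API and the config values here are hypothetical.
    import datetime
    import time

    HEALTH_CHECK_INTERVAL = 3   # seconds between scans (configurable)
    HEARTBEAT_TIMEOUT = 10      # seconds without a heartbeat before failover

    def health_check_loop(repo, failover):
        while True:
            cutoff = (datetime.datetime.utcnow() -
                      datetime.timedelta(seconds=HEARTBEAT_TIMEOUT))
            # Any amphora whose last heartbeat predates the cutoff is
            # considered failed; spawn a deploy worker to fail it over.
            for amphora in repo.get_stale_amphorae(cutoff):
                failover(amphora.id)
            time.sleep(HEALTH_CHECK_INTERVAL)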
**Housekeeping Manager**

The Housekeeping Manager will manage the spare Amphora pool and the teardown
of Amphorae that are no longer needed.  On a configurable interval the
Housekeeping Manager will check the Octavia database to identify the required
cleanup and maintenance actions.  The amphora-lifecycle-management
specification details the Create, Spare, and Delete Amphora sequences the
Housekeeping Manager will follow.

The operator can specify a number of Amphora instances to be held in a spares
pool.  Building Amphora instances can take a long time, so the Housekeeping
Manager will spawn threads to manage the number of Amphorae in the spares
pool.

The Housekeeping Manager will interface with the compute driver, network
driver, and the Certificate Manager to accomplish the create and delete
actions.

**Network Driver**

The Network Driver abstracts the implementation of connecting an Amphora to
the required networks.

**Services Proxy**

The Services Proxy enables Amphorae to reach other cloud services directly
over the Load Balancer Network where the controller may need to provide
authentication tokens on behalf of the Amphora, such as when archiving load
balancer traffic logs into customer Swift containers.

Alternatives
------------

Data model impact
-----------------

REST API impact
---------------

Security impact
---------------

Notifications impact
--------------------

Other end user impact
---------------------

Performance Impact
------------------

Other deployer impact
---------------------

Developer impact
----------------

Implementation
==============

Assignee(s)
-----------
Michael Johnson

Work Items
----------

Dependencies
============

Testing
=======

Documentation Impact
====================

References
==========
| Amphora lifecycle management: https://review.opendev.org/#/c/130424/
| LBaaS metering:
| https://blueprints.launchpad.net/ceilometer/+spec/ceilometer-meter-lbaas

octavia-6.2.2/specs/version0.5/haproxy-amphora-api.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License. http://creativecommons.org/licenses/by/3.0/legalcode

===================
HAProxy Amphora API
===================

https://blueprints.launchpad.net/octavia/+spec/appliance-api

The reference implementation of Octavia is going to make use of an
haproxy-based amphora.  As such, there will be an haproxy reference driver
that speaks a well-defined protocol to the haproxy-based amphora.  This
document is meant to be a foundation of this interface, outlining in
sufficient detail the various commands that will definitely be necessary.
This design should be iterated upon as necessary going forward.

Problem description
===================
This API specification is necessary in order to fully develop the haproxy
reference driver, both to ensure this interface is well documented, and so
that different people can work on different parts of bringing Octavia to
fruition.

Proposed change
===============
Note that this spec does not yet attempt to define the following, though
these may follow shortly after this initial spec is approved:

* Method for bi-directional authentication between driver and amphora.
* Bootstrapping process of amphora
* Transition process from "spare" to "active" amphora and other amphora
  lifecycle transitions

This spec does attempt to provide an initial foundation for the following:

* RESTful interface exposed on amphora management

Alternatives
------------
None

Data model impact
-----------------
None (yet)

REST API impact
---------------
Please note that the proposed changes in this spec do NOT affect either the
publicly-exposed user or operator APIs, nor really anything above the haproxy
reference driver.

Please see doc/main/api/haproxy-amphora-api.rst

Security impact
---------------
None yet, though bi-directional authentication between driver and amphora
needs to be addressed.
Notifications impact
--------------------
None

Other end user impact
---------------------
None

Performance Impact
------------------
None

Other deployer impact
---------------------
None

Developer impact
----------------
None

Implementation
==============

Assignee(s)
-----------
stephen-balukoff
david-lenwell

Work Items
----------

Dependencies
============
haproxy reference driver

Testing
=======
Unit tests

Documentation Impact
====================
None

References
==========
None

octavia-6.2.2/specs/version0.5/housekeeping-manager-interface.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License. http://creativecommons.org/licenses/by/3.0/legalcode

==================================
Housekeeping Manager Specification
==================================

https://blueprints.launchpad.net/octavia/+spec/housekeeping-manager

Problem description
===================
The Housekeeping Manager will manage the spare amphora pool and the teardown
of amphorae that are no longer needed.  On a configurable interval the
Housekeeping Manager will check the Octavia database to identify the cleanup
and maintenance actions required.  The amphora-lifecycle-management
specification details the Create and Deactivate amphora sequences the
Housekeeping Manager will follow.

Proposed change
===============
The housekeeping manager will run as a daemon process which will perform the
following actions (a sketch of the resulting loop follows this list):

* Read the following from the configuration file

  * housekeeping_interval: The time (in seconds) that the housekeeping
    manager will sleep before running its checks again.
  * spare_amphora_pool_size: The desired number of spare amphorae.
  * maximum_deploying_amphora_count: The maximum number of amphorae that may
    be deployed simultaneously.
  * maximum_preserved_amphora_count: How many deactivated amphorae to
    preserve.  0 means delete, 1 or greater means keep up to that many
    amphorae for future diagnostics.  Only amphorae in the ERROR and PRESERVE
    states are eligible to be preserved.  TODO: Right now there is no
    PRESERVE state; for this to work we would need to define one in the
    amphora spec.
  * preservation_scheme

    * "keep": keep all preserved amphorae
    * "cycle": maintain a queue of preserved amphorae, deleting the oldest
      one when a new amphora is preserved.

  * preservation_method: Preservation must take into account the possibility
    that amphorae instantiated in the future may reuse MAC addresses.

    * "unplug": Disconnect the virtual NICs from the amphora
    * "snapshot": Take a snapshot of the amphora, then stop it

* Get the spare pool size
* Log the spare pool size
* If the spare pool size is less than the spare pool target capacity,
  initiate creation of the appropriate number of amphorae.
* Obtain the list of deactivated amphorae and schedule their removal.  If
  preservation_count > 0, and there are fewer than that many amphorae in the
  preserved pool, preserve the amphora.  After the preserved pool size
  reaches preservation_count, use preservation_scheme to determine whether to
  keep newly failed amphorae.
* Sleep for the time specified by housekeeping_interval.
* Return to the top
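The sketch below shows how that loop could look in code.  It is purely
illustrative and assumes a ``HousekeepingManager`` implementation as defined
further down; ``get_deactivated_amphora_ids`` is a hypothetical helper that is
not part of the interface below.

.. code:: python

    # A minimal sketch of the housekeeping daemon loop described above.
    # The manager object implements the interface defined below; the
    # get_deactivated_amphora_ids helper and interval value are hypothetical.
    import logging
    import time

    LOG = logging.getLogger(__name__)

    HOUSEKEEPING_INTERVAL = 60  # seconds between runs (configurable)

    def housekeeping_loop(manager):
        while True:
            target = manager.get_spare_amphora_size()
            ready = manager.get_ready_spare_amphora_count()
            LOG.info('Spare pool: %d ready of %d target', ready, target)
            if ready < target:
                manager.create_amphora(num_to_create=target - ready)
            # Schedule removal (or preservation) of deactivated amphorae.
            manager.remove_amphora(manager.get_deactivated_amphora_ids())
            time.sleep(HOUSEKEEPING_INTERVAL)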
Establish a base class to model the desired functionality:

.. code:: python

    class HousekeepingManager(object):
        """Class to manage the spare amphora pool.

        This class should do very little actual work; its main job is to
        monitor the spare pool and schedule creation of new amphorae and
        removal of used amphorae.  By default, used amphorae will be deleted,
        but they may optionally be preserved for future analysis.
        """

        def get_spare_amphora_size(self):
            """Return the target capacity of the spare pool."""
            raise NotImplementedError

        def get_ready_spare_amphora_count(self):
            """Return the number of available amphorae in the spare pool."""
            raise NotImplementedError

        def create_amphora(self, num_to_create=1):
            """Schedule the creation of the specified number of amphorae
            to be added to the spare pool."""
            raise NotImplementedError

        def remove_amphora(self, amphora_ids):
            """Schedule the removal of the amphorae specified by
            amphora_ids."""
            raise NotImplementedError

Exception Model
---------------
The manager is expected to raise or pass along the following well-defined
exceptions:

* NotImplementedError - this functionality is not implemented/not supported
* AmphoraDriverError - a super class for all other exceptions and the
  catch-all if no specific exception can be found
* NotFoundError - this amphora couldn't be found / was deleted by nova
* UnauthorizedException - the driver can't access the amphora
* UnavailableException - the amphora is temporarily unavailable
* DeleteFailed - this load balancer couldn't be deleted

Alternatives
------------

Data model impact
-----------------
Requires the addition of the housekeeping_interval,
spare_amphora_pool_size, maximum_deploying_amphora_count,
maximum_preserved_amphora_count, preservation_scheme, and
preservation_method options to the config.

REST API impact
---------------
None.

Security impact
---------------
Must follow standard practices for database access.

Notifications impact
--------------------

Other deployer impact
---------------------

Other end user impact
---------------------
There should be no end-user-visible impact.

Performance Impact
------------------
The housekeeping_interval and spare_amphora_pool_size parameters will be
adjustable by the operator in order to balance resource usage against
performance.

Developer impact
----------------
Developers of other modules need to be aware that amphorae may be created,
deleted, or saved for diagnosis by this daemon.

Implementation
==============

Assignee(s)
-----------
Al Miller

Work Items
----------
* Write abstract interface
* Write Noop driver
* Write tests

Dependencies
============
Amphora driver
Config manager

Testing
=======
* Unit tests with tox and Noop-Driver
* tempest tests with Noop-Driver

Documentation Impact
====================
None - we won't document the interface for 0.5.  If that changes we need to
write interface documentation so 3rd party drivers know what we expect.

References
==========

octavia-6.2.2/specs/version0.5/network-driver-interface.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License. http://creativecommons.org/licenses/by/3.0/legalcode

========================
Network Driver Interface
========================

Launchpad blueprint:

https://blueprints.launchpad.net/octavia/+spec/network-driver-interface

We need a generic interface in which to create networking resources.  This is
to allow implementations that can support different networking
infrastructures that accomplish frontend and backend connectivity.
Problem description
===================
There is a need to define a generic interface for a networking service.  An
Octavia controller should not know what networking infrastructure is being
used underneath.  It should only know an interface.  This interface is needed
to support differing networking infrastructures.

Proposed change
===============
In order to make the network driver as generically functional as possible, it
is broken down into methods that Octavia will need at a high level to
accomplish frontend and backend connectivity.  Implementing these methods may
require multiple requests to the networking service to accomplish the end
result.  The interface is meant to promote stateless implementations and
suffer no issues being run in parallel.

In the future we would like to create a common module that implementations of
this interface can call to set up a TaskFlow engine, to promote using a
common TaskFlow configuration.  That, however, can be added once this has had
time to mature.

Existing data model:

* class VIP

  * load_balancer_id
  * ip_address
  * network_id - (neutron subnet)
  * port_id - (neutron port)

* class Amphora

  * load_balancer_id
  * compute_id
  * lb_network_ip
  * status
  * vrrp_ip - in an active/passive topology, this is the ip where the vrrp
    communication between peers happens
  * ha_ip - this is the highly available IP.  In an active/passive topology
    it most likely exists on the MASTER amphora and on failure it will be
    raised on the SLAVE amphora.  In an active/active topology it may exist
    on both amphorae.  In the end, it is up to the amphora driver to decide
    how to use this.

New data models:

* class Interface

  * id
  * network_id - (neutron subnet)
  * amphora_id
  * fixed_ips

* class Delta

  * amphora_id
  * compute_id
  * add_nics
  * delete_nics

* class Network

  * id
  * name
  * subnets - (list of subnet ids)
  * tenant_id
  * admin_state_up
  * provider_network_type
  * provider_physical_network
  * provider_segmentation_id
  * router_external
  * mtu

* class Subnet

  * id
  * name
  * network_id
  * tenant_id
  * gateway_ip
  * cidr
  * ip_version

* class Port

  * id
  * name
  * device_id
  * device_owner
  * mac_address
  * network_id
  * status
  * tenant_id
  * admin_state_up
  * fixed_ips - list of FixedIP objects

* class FixedIP

  * subnet_id
  * ip_address

* class AmphoraNetworkConfig

  * amphora - Amphora object
  * vip_subnet - Subnet object
  * vip_port - Port object
  * vrrp_subnet - Subnet object
  * vrrp_port - Port object
  * ha_subnet - Subnet object
  * ha_port - Port object

New Exceptions defined in the octavia.network package:

* NetworkException - Base Exception
* PlugVIPException
* UnplugVIPException
* PluggedVIPNotFound
* AllocateVIPException
* DeallocateVIPException
* PlugNetworkException
* UnplugNetworkException
* VIPInUse
* PortNotFound
* SubnetNotFound
* NetworkNotFound
* AmphoraNotFound

This class defines the methods for a fully functional network driver (a
skeletal code sketch of the interface follows the method list below).
Implementations of this interface can expect a rollback to occur if any of
the non-nullipotent methods raise an exception.

class AbstractNetworkDriver

* plug_vip(loadbalancer, vip)

  * Sets up the routing of traffic from the vip to the load balancer and its
    amphorae.
  * loadbalancer = instance of data_models.LoadBalancer

    * this is to keep the parameters as generic as possible so different
      implementations can use different properties of a load balancer.  In
      the future we may want to just take in a list of amphora compute ids
      and the vip data model.
  * vip = instance of a VIP
  * returns list of Amphora
  * raises PlugVIPException, PortNotFound

* unplug_vip(loadbalancer, vip)

  * Removes the routing of traffic from the vip to the load balancer and its
    amphorae.
  * loadbalancer = instance of a data_models.LoadBalancer
  * vip = instance of a VIP
  * returns None
  * raises UnplugVIPException, PluggedVIPNotFound

* allocate_vip(loadbalancer)

  * Allocates a virtual ip and reserves it for later use as the frontend
    connection of a load balancer.
  * loadbalancer = instance of a data_models.LoadBalancer
  * returns VIP instance
  * raises AllocateVIPException, PortNotFound, SubnetNotFound

* deallocate_vip(vip)

  * Removes any resources that reserved this virtual ip.
  * vip = VIP instance
  * returns None
  * raises DeallocateVIPException, VIPInUse

* plug_network(compute_id, network_id, ip_address=None)

  * Connects an existing amphora to an existing network.
  * compute_id = id of an amphora in the compute service
  * network_id = id of the network to attach
  * ip_address = ip address to attempt to be assigned to interface
  * returns Interface instance
  * raises PlugNetworkException, AmphoraNotFound, NetworkNotFound

* unplug_network(compute_id, network_id, ip_address=None)

  * Disconnects an existing amphora from an existing network.  If ip_address
    is not specified then all interfaces on that network will be unplugged.
  * compute_id = id of an amphora in the compute service to unplug
  * network_id = id of network to unplug amphora
  * ip_address = ip address of interface to unplug
  * returns None
  * raises UnplugNetworkException, AmphoraNotFound, NetworkNotFound,
    NetworkException

* get_plugged_networks(compute_id)

  * Retrieves the current plugged networking configuration
  * compute_id = id of an amphora in the compute service
  * returns = list of Interface instances

* update_vip(loadbalancer)

  * Hook for the driver to update the VIP information based on the state of
    the passed in loadbalancer
  * loadbalancer = instance of a data_models.LoadBalancer

* get_network(network_id)

  * Retrieves the network from network_id
  * network_id = id of a network to retrieve
  * returns = Network data model
  * raises NetworkException, NetworkNotFound

* get_subnet(subnet_id)

  * Retrieves the subnet from subnet_id
  * subnet_id = id of a subnet to retrieve
  * returns = Subnet data model
  * raises NetworkException, SubnetNotFound

* get_port(port_id)

  * Retrieves the port from port_id
  * port_id = id of a port to retrieve
  * returns = Port data model
  * raises NetworkException, PortNotFound

* failover_preparation(amphora)

  * Prepare an amphora for failover
  * amphora = amphora data model
  * returns = None
  * raises PortNotFound
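The following is a skeletal sketch, not a normative definition, of how the
methods listed above could be expressed as a Python abstract base class; only
a few representative methods are shown.

.. code:: python

    # A non-normative sketch of the abstract driver described above,
    # showing a few of the listed methods as Python abstract methods.
    import abc


    class AbstractNetworkDriver(abc.ABC):

        @abc.abstractmethod
        def plug_vip(self, loadbalancer, vip):
            """Set up routing of traffic from the vip to the load balancer.

            :returns: list of Amphora
            :raises: PlugVIPException, PortNotFound
            """

        @abc.abstractmethod
        def allocate_vip(self, loadbalancer):
            """Allocate and reserve a virtual IP for the load balancer.

            :returns: VIP instance
            :raises: AllocateVIPException, PortNotFound, SubnetNotFound
            """

        @abc.abstractmethod
        def plug_network(self, compute_id, network_id, ip_address=None):
            """Connect an existing amphora to an existing network.

            :returns: Interface instance
            :raises: PlugNetworkException, AmphoraNotFound, NetworkNotFound
            """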
Alternatives
------------
* Straight Neutron Interface (networks, subnets, ports, floatingips)
* Straight Nova-Network Interface (network, fixed_ips, floatingips)

Data model impact
-----------------
* The Interface data model defined above will just be a class.  We may later
  decide that it needs to be stored in the database, but we can optimize on
  that in a later review if needed.

REST API impact
---------------
None

Security impact
---------------
None

Notifications impact
--------------------
None

Other end user impact
---------------------
None

Performance Impact
------------------
None

Other deployer impact
---------------------
Need a service account to own the resources these methods create.

Developer impact
----------------
This will be creating an interface against which other code will create
network resources.

Implementation
==============

Assignee(s)
-----------
brandon-logan

Work Items
----------
Define interface

Dependencies
============
None

Testing
=======
None

Documentation Impact
====================
Just docstrings on methods.

References
==========
None

octavia-6.2.2/specs/version0.5/nova-compute-driver.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License. http://creativecommons.org/licenses/by/3.0/legalcode

===================
Nova Compute Driver
===================

Blueprint:

https://blueprints.launchpad.net/octavia/+spec/nova-compute-driver

Octavia needs to interact with Nova for creation of VMs for this version.
This spec will flesh out all the methods described in the
compute-driver-interface with Nova VM specific commands.

Problem description
===================
This spec details operations for creating, updating, and modifying the
amphorae that will hold the actual load balancer.  It will utilize the nova
client Python API version 3 for the Nova-specific requests and commands.

Proposed change
===============
Expose Nova operations:

- Build: Will need to build a virtual machine according to configuration
  parameters

  - Will leverage the nova client ServerManager method "create" to build a
    server

- Get: Will need to retrieve details of the virtual machine from Nova

  - Will leverage the nova client ServerManager method "get" to retrieve a
    server, and return an amphora object

- Delete: Will need to remove a virtual machine

  - Will leverage the nova client ServerManager method "delete" for removal
    of the server

- Status: Will need to retrieve the status of the virtual machine

  - Will leverage the aforementioned get call to retrieve the status of the
    server

Alternatives
------------
None

Data model impact
-----------------
Add fields to the existing Amphora object

REST API impact
---------------
None

Security impact
---------------
None

Notifications impact
--------------------
None

Other end user impact
---------------------
None

Performance Impact
------------------
None

Other deployer impact
---------------------
None

Developer impact
----------------
Will need a Nova service account and the necessary credentials stored in the
config

Implementation
==============

Assignee(s)
-----------
trevor-vardeman

Work Items
----------
Expose Nova operations

Dependencies
============
compute-driver-interface

Testing
=======
Unit tests
Functional tests

Documentation Impact
====================
None

References
==========
https://blueprints.launchpad.net/octavia/+spec/nova-compute-driver

https://docs.openstack.org/python-novaclient/latest/reference/api/index.html

octavia-6.2.2/specs/version0.5/operator-api.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License. http://creativecommons.org/licenses/by/3.0/legalcode

===============================
Octavia Operator API Foundation
===============================

https://blueprints.launchpad.net/octavia/+spec/operator-api

Octavia needs the foundation of the Operator API created.  This spec is not
meant to address all functionality needed in the operator API, only to create
a solid foundation to iterate on in the future.
Problem description
===================
This is needed because this will be the mechanism to actually communicate
with Octavia.  Doing CRUD operations on all entities will be needed ASAP so
that the system can be thoroughly tested.

Proposed change
===============
Expose Pecan resources

- Defined explicitly below in the REST API impact section

Create WSME types

- These will be responsible for request validation and deserialization, and
  also response serialization

Setup paste deploy

- This will be used in the future to interact with keystone and other
  middleware; however, at first this will not have any authentication, so
  tenant_ids will just have to be made-up UUIDs.

Create a handler interface and a noop logging implementation (a sketch of
this abstraction follows this list)

- A handler interface will be created.  This abstraction layer is needed
  because calling the controller in the resource layer will work for 0.5,
  but 1.0 will be sending it off to a queue.  With this abstraction layer we
  can easily swap out a 0.5 controller with a 1.0 controller.

Call database repositories

- Most if not all resources will make a call to the database

Call handler

- Only create, update, and delete operations should call the handler
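Here is a minimal sketch, not part of this specification, of what that
handler abstraction could look like: an interface plus a no-op logging
implementation that a 0.5 controller (or a 1.0 queue producer) could later
replace.  The class and method names are illustrative only.

.. code:: python

    # A minimal sketch of the handler interface and noop logging
    # implementation described above; names here are hypothetical.
    import abc
    import logging

    LOG = logging.getLogger(__name__)


    class BaseHandler(abc.ABC):

        @abc.abstractmethod
        def create(self, model):
            """Begin provisioning of the given model object."""

        @abc.abstractmethod
        def update(self, model, updates):
            """Begin applying the given delta to the model object."""

        @abc.abstractmethod
        def delete(self, model):
            """Begin deprovisioning of the given model object."""


    class NoopLoggingHandler(BaseHandler):
        """Stand-in used before a real controller/queue handler exists."""

        def create(self, model):
            LOG.info('create requested for %s', model)

        def update(self, model, updates):
            LOG.info('update requested for %s: %s', model, updates)

        def delete(self, model):
            LOG.info('delete requested for %s', model)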
Alternatives
------------
None

Data model impact
-----------------
Will need to add some methods to the database repository

REST API impact
---------------
Exposed Resources and Methods

POST /loadbalancers

* Successful Status Code - 202
* JSON Request Body Attributes

** vip - another JSON object with one required attribute from the following
*** net_port_id - uuid
*** subnet_id - uuid
*** floating_ip_id - uuid
*** floating_ip_network_id - uuid
** tenant_id - string - optional - default "0" * 36 (for now)
** name - string - optional - default null
** description - string - optional - default null
** enabled - boolean - optional - default true

* JSON Response Body Attributes

** id - uuid
** vip - another JSON object
*** net_port_id - uuid
*** subnet_id - uuid
*** floating_ip_id - uuid
*** floating_ip_network_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

PUT /loadbalancers/{lb_id}

* Successful Status Code - 202
* JSON Request Body Attributes

** name - string
** description - string
** enabled - boolean

* JSON Response Body Attributes

** id - uuid
** vip - another JSON object
*** net_port_id - uuid
*** subnet_id - uuid
*** floating_ip_id - uuid
*** floating_ip_network_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

DELETE /loadbalancers/{lb_id}

* Successful Status Code - 202
* No response or request body

GET /loadbalancers/{lb_id}

* Successful Status Code - 200
* JSON Response Body Attributes

** id - uuid
** vip - another JSON object
*** net_port_id - uuid
*** subnet_id - uuid
*** floating_ip_id - uuid
*** floating_ip_network_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

GET /loadbalancers?tenant_id

* Successful Status Code - 200
* tenant_id is an optional query parameter to filter by tenant_id
* returns a list of load balancers

POST /loadbalancers/{lb_id}/listeners

* Successful Status Code - 202
* JSON Request Body Attributes

** protocol - string enum - (TCP, HTTP, HTTPS) - required
** protocol_port - integer - required
** connection_limit - integer - optional
** default_tls_container_id - uuid - optional
** tenant_id - string - optional - default "0" * 36 (for now)
** name - string - optional - default null
** description - string - optional - default null
** enabled - boolean - optional - default true

* JSON Response Body Attributes

** id - uuid
** protocol - string enum - (TCP, HTTP, HTTPS)
** protocol_port - integer
** connection_limit - integer
** default_tls_container_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

PUT /loadbalancers/{lb_id}/listeners/{listener_id}

* Successful Status Code - 202
* JSON Request Body Attributes

** protocol - string enum
** protocol_port - integer
** connection_limit - integer
** default_tls_container_id - uuid
** name - string
** description - string
** enabled - boolean

* JSON Response Body Attributes

** id - uuid
** protocol - string enum - (TCP, HTTP, HTTPS)
** protocol_port - integer
** connection_limit - integer
** default_tls_container_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

DELETE /loadbalancers/{lb_id}/listeners/{listener_id}

* Successful Status Code - 202
* No response or request body

GET /loadbalancers/{lb_id}/listeners/{listener_id}

* Successful Status Code - 200
* JSON Response Body Attributes

** id - uuid
** protocol - string enum - (TCP, HTTP, HTTPS)
** protocol_port - integer
** connection_limit - integer
** default_tls_container_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

GET /loadbalancers/{lb_id}/listeners

* Successful Status Code - 200
* A list of listeners on load balancer lb_id
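As a purely illustrative example of driving the listener endpoint above, the
following sketch uses the ``requests`` library; the bind host/port and the
load balancer ID are invented for the example and are not defined by this
spec.

.. code:: python

    # Hypothetical call against the API described above; host, port, and
    # IDs are made up for illustration.
    import requests

    lb_id = '00000000-0000-0000-0000-000000000001'  # hypothetical
    resp = requests.post(
        'http://localhost:9876/loadbalancers/%s/listeners' % lb_id,
        json={'protocol': 'HTTP', 'protocol_port': 80,
              'name': 'web-listener', 'enabled': True})
    print(resp.status_code)  # expect 202; body echoes the listener
                             # plus provisioning/operating statuses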
POST /loadbalancers/{lb_id}/listeners/{listener_id}/pools

* Successful Status Code - 202
* JSON Request Body Attributes

** protocol - string enum - (TCP, HTTP, HTTPS) - required
** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, RANDOM) -
   required
** session_persistence - JSON object - optional
*** type - string enum - (SOURCE_IP, HTTP_COOKIE) - required
*** cookie_name - string - required for HTTP_COOKIE type
** tenant_id - string - optional - default "0" * 36 (for now)
** name - string - optional - default null
** description - string - optional - default null
** enabled - boolean - optional - default true

* JSON Response Body Attributes

** id - uuid
** protocol - string enum - (TCP, HTTP, HTTPS)
** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, RANDOM)
** session_persistence - JSON object
*** type - string enum - (SOURCE_IP, HTTP_COOKIE)
*** cookie_name - string
** name - string
** description - string
** enabled - boolean
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

PUT /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}

* Successful Status Code - 202
* JSON Request Body Attributes

** protocol - string enum - (TCP, HTTP, HTTPS)
** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, RANDOM)
** session_persistence - JSON object
*** type - string enum - (SOURCE_IP, HTTP_COOKIE)
*** cookie_name - string
** name - string
** description - string
** enabled - boolean

* JSON Response Body Attributes

** id - uuid
** protocol - string enum - (TCP, HTTP, HTTPS)
** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, RANDOM)
** session_persistence - JSON object
*** type - string enum - (SOURCE_IP, HTTP_COOKIE)
*** cookie_name - string
** name - string
** description - string
** enabled - boolean
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

DELETE /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}

* Successful Status Code - 202
* No request or response body

GET /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}

* Successful Status Code - 200
* JSON Response Body Attributes

** id - uuid
** protocol - string enum - (TCP, HTTP, HTTPS)
** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, RANDOM)
** session_persistence - JSON object
*** type - string enum - (SOURCE_IP, HTTP_COOKIE)
*** cookie_name - string
** name - string
** description - string
** enabled - boolean
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

GET /loadbalancers/{lb_id}/listeners/{listener_id}/pools

* Successful Status Code - 200
* Returns a list of pools

POST /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/healthmonitor

* Successful Status Code - 202
* JSON Request Body Attributes

** type - string enum - (HTTP, HTTPS, TCP) - required
** delay - integer - required
** timeout - integer - required
** fall_threshold - integer - required
** rise_threshold - integer - required
** http_method - string enum - (GET, POST, PUT, DELETE) - required for
   HTTP(S)
** url_path - string - required for HTTP(S)
** expected_codes - comma delimited string - required for HTTP(S)
** enabled - boolean - required - default true

* JSON Response Body Attributes

** type - string enum - (HTTP, HTTPS, TCP)
** delay - integer
** timeout - integer
** fall_threshold - integer
** rise_threshold - integer
** http_method - string enum - (GET, POST, PUT, DELETE)
** url_path - string
** expected_codes - comma delimited string
** enabled - boolean

PUT /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/healthmonitor

* Successful Status Code - 202
* JSON Request Body Attributes

** type - string enum - (HTTP, HTTPS, TCP)
** delay - integer
** timeout - integer
** fall_threshold - integer
** rise_threshold - integer
** http_method - string enum - (GET, POST, PUT, DELETE)
** url_path - string
** expected_codes - comma delimited string
** enabled - boolean

* JSON Response Body Attributes

** type - string enum - (HTTP, HTTPS, TCP)
** delay - integer
** timeout - integer
** fall_threshold - integer
** rise_threshold - integer
** http_method - string enum - (GET, POST, PUT, DELETE)
** url_path - string
** expected_codes - comma delimited string
** enabled - boolean

DELETE /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/healthmonitor

* Successful Status Code - 202
* No request or response body
GET /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/healthmonitor

* Successful Status Code - 200
* JSON Response Body Attributes

** type - string enum - (HTTP, HTTPS, TCP)
** delay - integer
** timeout - integer
** fall_threshold - integer
** rise_threshold - integer
** http_method - string enum - (GET, POST, PUT, DELETE)
** url_path - string
** expected_codes - comma delimited string
** enabled - boolean

POST /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/members

* Successful Status Code - 202
* JSON Request Body Attributes

** ip_address - IP Address - required
** protocol_port - integer - required
** weight - integer - optional
** subnet_id - uuid - optional
** tenant_id - string - optional - default "0" * 36 (for now)
** enabled - boolean - optional - default true

* JSON Response Body Attributes

** id - uuid
** ip_address - IP Address
** protocol_port - integer
** weight - integer
** subnet_id - uuid
** tenant_id - string
** enabled - boolean
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

PUT /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/members/{member_id}

* Successful Status Code - 202
* JSON Request Body Attributes

** protocol_port - integer - required
** weight - integer - optional
** enabled - boolean - optional - default true

* JSON Response Body Attributes

** id - uuid
** ip_address - IP Address
** protocol_port - integer
** weight - integer
** subnet_id - uuid
** tenant_id - string
** enabled - boolean
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

DELETE /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/members/{member_id}

* Successful Status Code - 202
* No request or response body

GET /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/members/{member_id}

* Successful Status Code - 200
* JSON Response Body Attributes

** id - uuid
** ip_address - IP Address
** protocol_port - integer
** weight - integer
** subnet_id - uuid
** tenant_id - string
** enabled - boolean
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)

GET /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id}/members

* Successful Status Code - 200
* Returns a list of members

Security impact
---------------
No authentication with Keystone

Notifications impact
--------------------
None

Other end user impact
---------------------
Not ready for end users

Performance Impact
------------------
None

Other deployer impact
---------------------
None

Developer impact
----------------
None

Implementation
==============

Assignee(s)
-----------
brandon-logan

Work Items
----------
Expose Pecan resources

Create WSME types

Setup paste deploy

Create a handler interface and a noop logging implementation

Call database repositories

Call handler

Dependencies
============
db-repositories

Testing
=======
Unit tests

Documentation Impact
====================
None

References
==========
None

octavia-6.2.2/specs/version0.5/queue-consumer.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License. http://creativecommons.org/licenses/by/3.0/legalcode

==============
Queue Consumer
==============

https://blueprints.launchpad.net/octavia/+spec/queue-consumer

This blueprint describes how Oslo messages are consumed, processed and
delegated from the API-controller queue to the controller worker component of
Octavia.
The component that is responsible for these activities is called the Queue
Consumer.

Problem description
===================
Oslo messages need to be consumed by the controller and delegated to the
proper controller worker.  Something needs to interface with the
API-controller queue and spawn the controller workers.  That "something" is
what we are calling the Queue Consumer.

Proposed change
===============
The major component of the Queue Consumer will be a class that acts as a
consumer of Oslo messages.  It will be responsible for configuring and
starting a server that is then able to receive messages.  There will be a
one-to-one mapping between API methods and consumer methods (see the code
snippet below).  Corresponding controller workers will be spawned depending
on which consumer methods are called.

The threading will be handled by Oslo messaging using the 'eventlet'
executor.  Using the 'eventlet' executor will allow for message throttling
and removes the need for the controller workers to manage threads.  The
benefit of using the 'eventlet' executor is that the Queue Consumer will not
have to spawn threads at all, since every message received will be in its own
thread already.  This means that the Queue Consumer doesn't spawn a
controller worker; rather, it just starts the execution of the deploy code.

An 'oslo_messaging' configuration section will need to be added to
octavia.conf for Oslo messaging options.  For the Queue Consumer, the
'rpc_thread_pool_size' config option will need to be added.  This option will
determine how many consumer threads will be able to read from the queue at
any given time (per consumer instance) and serve as a throttling mechanism
for message consumption.  For example, if 'rpc_thread_pool_size' is set to 1
thread then only one controller worker will be able to conduct work.  When
that controller worker completes its task then a new message can be consumed
and a new controller worker flow started.
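For orientation, here is a minimal sketch of how such an RPC server could be
started with the 'eventlet' executor on the **OCTAVIA_PROV** topic, assuming
the oslo.messaging API roughly as it stood around this spec; the endpoint
class and server name are hypothetical.

.. code:: python

    # A minimal sketch of starting the Queue Consumer's RPC server.
    # ConsumerEndpoint and the server name are hypothetical placeholders.
    from oslo_config import cfg
    import oslo_messaging as messaging


    class ConsumerEndpoint(object):
        def create_load_balancer(self, context, load_balancer_id):
            pass  # the controller worker deploy code starts here


    def main():
        transport = messaging.get_transport(cfg.CONF)
        target = messaging.Target(topic='OCTAVIA_PROV',
                                  server='octavia-controller')
        server = messaging.get_rpc_server(
            transport, target, [ConsumerEndpoint()], executor='eventlet')
        server.start()
        server.wait()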
Below are the planned interface methods for the queue consumer.  The Queue
Consumer will be listening on the **OCTAVIA_PROV** (short for octavia
provisioning) topic.  The *context* parameter will be supplied along with an
identifier such as a load balancer id, listener id, etc. relevant to the
particular interface method.  The *context* parameter is a dictionary and is
reserved for metadata.  For example, the Neutron LBaaS agent leverages this
parameter to send additional request information.

Additionally, update methods include a *\*_updates* parameter that includes
the changes that need to be made.  Thus, the controller workers responsible
for the update actions will need to query the database to retrieve the old
state and combine it with the updates to provision appropriately.  If a
rollback or exception occurs, then the controller worker will only need to
update the provisioning status to **ERROR** and will not need to worry about
making database changes to attributes of the object being updated.

.. code:: python

    def create_load_balancer(self, context, load_balancer_id):
        pass

    def update_load_balancer(self, context, load_balancer_updates,
                             load_balancer_id):
        pass

    def delete_load_balancer(self, context, load_balancer_id):
        pass

    def create_listener(self, context, listener_id):
        pass

    def update_listener(self, context, listener_updates, listener_id):
        pass

    def delete_listener(self, context, listener_id):
        pass

    def create_pool(self, context, pool_id):
        pass

    def update_pool(self, context, pool_updates, pool_id):
        pass

    def delete_pool(self, context, pool_id):
        pass

    def create_health_monitor(self, context, health_monitor_id):
        pass

    def update_health_monitor(self, context, health_monitor_updates,
                              health_monitor_id):
        pass

    def delete_health_monitor(self, context, health_monitor_id):
        pass

    def create_member(self, context, member_id):
        pass

    def update_member(self, context, member_updates, member_id):
        pass

    def delete_member(self, context, member_id):
        pass

Alternatives
------------
There are a variety of ways to consume from Oslo messaging.  For example,
instead of having a single consumer on the controller we could have multiple
consumers (i.e. one for CREATE messages, one for UPDATE messages, etc.).
However, since we merely need something to pass messages off to controller
workers, other options are overkill.

Data model impact
-----------------
While there is no direct data model impact, it is worth noting that the API
will not be persisting updates to the database.  Rather, delta updates will
pass from the user all the way to the controller worker.  Thus, when the
controller worker successfully completes the prescribed action, only then
will it persist the updates to the database.  No API changes are necessary
for create and update actions.

REST API impact
---------------
None

Security impact
---------------
None

Notifications impact
--------------------
None

Other end user impact
---------------------
None

Performance Impact
------------------
The only performance-related item is queue throttling.  This is done by
design so that operators can safely throttle incoming messages dependent on
their specific needs.

Other deployer impact
---------------------
Configuration options will need to be added to octavia.conf.  Please see
above for more details.
Developer impact
----------------
None

Implementation
==============

Assignee(s)
-----------
jorge-miramontes

Work Items
----------
- Implement consumer class
- Add executable queue-consumer.py to bin directory

Dependencies
============
https://blueprints.launchpad.net/octavia/+spec/controller-worker

Testing
=======
Unit tests

Documentation Impact
====================
None

References
==========
None

octavia-6.2.2/specs/version0.5/tls-data-security-1.diag

seqdiag {
  span_height = 10;

  === If Certificate is pre-stored in Barbican ===
  User => Octavia [label="Create LB with TLS (passing tls_certificate_id)", note="HTTPS", return="202/400/401"] {
    Octavia => Barbican [label="Fetch Certificate Container", note="HTTPS", return="Certificate Data"];
  }

  === If Certificate is passed directly to Octavia ===
  User => Octavia [label="Create LB with TLS (passing tls_certificate, tls_private_key, etc)", note="HTTPS", return="202/400/401"] {
    Octavia => Barbican [label="Store Secrets / Certificate Container", note="HTTPS", return="tls_certificate_id"];
  }
  Octavia -> Octavia [label="Store tls_certificate_id"];

  === After certificate handling, in both cases ===
  Octavia -> Octavia [label="Fetch Amphora from Spare Pool"];
  Octavia => "Amphora API" [label="Configure Amphora", note="HTTPS", return="Update LB Status"];
}

octavia-6.2.2/specs/version0.5/tls-data-security-2.diag

seqdiag {
  span_height = 10;
  activation = none;
  Barbican;

  === In Octavia ===
  Octavia -> Octavia [label="Get a new cert/key from CertGenerator"];
  Octavia -> "Compute Driver" [label="Create new Amphora"] {
    "Compute Driver" -> Nova [label="Create instance", note="Ref Impl, ConfigDrive: Octavia Controller certificate and IP, and a generated/signed cert + private key"];
  }
  Octavia => "Compute Driver" [label="Wait for Amphora Ready"];
  loop {
    "Compute Driver" => Nova [label="Poll for ACTIVE Amphora", note="Ref Impl", return="Amphora Management IP"];
  }
  Octavia -> Octavia [label="Store Amphora IP"];
  Octavia => "Amp Driver" [label="Run Amphora Self-Test", return="PASS/FAIL"] {
    "Amp Driver" -> "Amp Driver" [label="Poll DB until first Heartbeat arrives", note="Ref Impl"];
    "Amp Driver" => "Amphora API" [label="Run Self-Test", note="Ref Impl"] {
      === If Self-test passes ===
      Octavia -> Octavia [label="Add Amphora to standby pool"];
      === If Self-test fails ===
      Octavia -> Octavia [label="Delete Amphora"];
    }
  }

  === In the Amphora (Ref Impl) ===
  Amphora -> Amphora [label="Start Services (API, Heartbeat)"];
  "Amp Heartbeat" -> "Amp Driver" [label="Announce", note="UDP"] {
    "Amp Driver" -> "Amp Driver" [label="Verify Amphora by Signed UDP Heartbeat"];
    === If Verification fails ===
    "Amp Driver" -> "Amp Driver" [label="Log and Ignore"];
    === If Verification succeeds ===
    "Amp Driver" -> "Health Manager" [label="Store Heartbeat"];
  }
}

octavia-6.2.2/specs/version0.5/tls-data-security.rst
.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License. http://creativecommons.org/licenses/by/3.0/legalcode

==============================
TLS Data Security and Barbican
==============================

Launchpad blueprint:

https://blueprints.launchpad.net/octavia/+spec/tls-data-security

Octavia will have some need of secure storage for TLS-related data.  This BP
is intended to identify all of the data that needs secure storage, or any
other interaction that will require the use of Barbican or another secure
solution.

Problem description
===================
1. Octavia will support TLS Termination (including SNI), which will require
   us to store and retrieve certificates and private keys from a secure
   repository.

2. Octavia will communicate with its Amphorae using TLS, so each Amphora will
   need a certificate for the controller to validate.

3. Octavia will need TLS data for exposing its own API via HTTPS.

Proposed change
===============
The initial supported implementation for TLS related functions will be
Barbican, but the interface will be generic such that other implementations
could be created later.

.. seqdiag:: tls-data-security-1.diag

1. Create a CertificateManager interface for storing and retrieving
   certificate and private key pairs (and intermediate certs / private key
   passphrase).  Users will pass their TLS data to Octavia in the form of a
   certificate_id, which is a reference to their data in some secure service.
   Octavia will store that certificate_id for each Listener/SNI and will
   retrieve the data when necessary.  (Barbican specific: users will need to
   add Octavia's user account as an authorized user on the Container and all
   Secrets [1] so we can fetch the data on their behalf.)

   We will need to validate the certificate data (including key and
   intermediates) when we initially receive it, and will assume that it
   remains unchanged for the lifespan of the LB (in Barbican the data is
   immutable so this is a safe assumption; I do not know how well this will
   work for other services).  In the case of invalid TLS data, we will reject
   the request with a 400 (if it is an initial create) or else put the LB
   into ERROR status (if it is on a failover event or during some other
   non-interactive scenario).

.. seqdiag:: tls-data-security-2.diag

2. Create a CertificateGenerator interface to generate certificates from
   CSRs.  When the controller creates an Amphora, it will generate a private
   key and a CSR, generate a signed certificate from the CSR, and include the
   private key and signed certificate in a ConfigDrive for the new Amphora.
   It will also include a copy of the Controller's certificate on the
   ConfigDrive.  All future communications with the Amphora will do
   certificate validation based on these certificates.  For the Amphora, this
   will be based on our (private) certificate authority and the CN of the
   Amphora's cert matching the ID of the Amphora.  For the Controller, the
   cert should be a complete match with the version provided.

   (The CertificateManager and CertificateGenerator interfaces are separate
   because while Barbican can perform both functions, future implementations
   may need to use two distinct services to achieve both.)

3. The key/cert for the main Octavia API/controller should be maintained
   manually by the server operators using whatever configuration management
   they choose.  We should not need to use a specific external repo for this.
   The trusted CA Cert will also need to be retrieved from Barbican and
   manually loaded in the config.
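To make the shape of these two interfaces concrete, here is a minimal sketch;
the method names and signatures are illustrative only and are not the
project's final API.

.. code:: python

    # A minimal sketch of the two interfaces described above; names and
    # signatures here are hypothetical.
    import abc


    class CertificateManager(abc.ABC):
        """Stores and retrieves certificate/private key bundles."""

        @abc.abstractmethod
        def store_cert(self, certificate, private_key, intermediates=None,
                       private_key_passphrase=None):
            """Store the TLS data and return a certificate_id reference."""

        @abc.abstractmethod
        def get_cert(self, certificate_id):
            """Fetch the TLS data referenced by certificate_id."""


    class CertificateGenerator(abc.ABC):
        """Signs certificates from CSRs for controller<->amphora TLS."""

        @abc.abstractmethod
        def sign_cert(self, csr, validity):
            """Return a signed certificate for the given CSR."""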
Alternatives
------------

We could skip the interface and just use Barbican directly, but that
would diverge from what seems to be the accepted OpenStack model for
Secret Store integration.

We could also store everything locally or in the DB, but that isn't a
real option for production systems because it is incredibly insecure
(though there will be a "dummy driver" that operates this way for
development purposes).

Data model impact
-----------------

Nothing new; the models for this should already be in place. Some of the
columns/classes might need to be renamed more generically (currently
there is a tls_container_id column, which would become tls_certificate_id
to be more generic).

REST API impact
---------------

None

Security impact
---------------

Using Barbican is considered secure.

Notifications impact
--------------------

None

Other end user impact
---------------------

None

Performance Impact
------------------

Adding an external touchpoint (a certificate signing service) to the
Amphora spin-up workflow will increase the average time for readying an
Amphora. This shouldn't be a huge problem if the standby-pool size is
sufficient for the particular deployment.

Other deployer impact
---------------------

None

Developer impact
----------------

None

Implementation
==============

Assignee(s)
-----------

Adam Harwell (adam-harwell)

Work Items
----------

1. Create CertificateManager interface.
2. Create CertificateGenerator interface.
3. Create BarbicanCertificateManager implementation.
4. Create BarbicanCertificateGenerator implementation.
5. Create unit tests!

Dependencies
============

This spec depends on the OpenStack Barbican project, including some
features that are still only at the blueprint stage.

Testing
=======

Unit tests will cover the new interfaces and their Barbican
implementations.

Documentation Impact
====================

Documentation changes will be primarily internal.

References
==========

.. line-block::

   [1] https://review.opendev.org/#/c/127353/
   [2] https://review.opendev.org/#/c/129048/

octavia-6.2.2/specs/version0.8/active_passive_loadbalancer.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License.

   http://creativecommons.org/licenses/by/3.0/legalcode

=======================================
Active-Standby Amphora Setup using VRRP
=======================================

https://blueprints.launchpad.net/octavia/+spec/activepassiveamphora

This blueprint describes how Octavia implements its Active/Standby
solution. It describes the high-level topology and the code changes,
relative to the currently supported Single topology, proposed to realize
the highly available loadbalancer scenario.

Problem description
===================

A tenant should be able to start highly available loadbalancer(s) for the
tenant's backend services as follows:
* In Active/Standby, two Amphorae shall host a replicated configuration of the load balancing services. Both amphorae will also deploy a Virtual Router Redundancy Protocol (VRRP) implementation [2]. * Upon failure of the master amphora, the backup one shall seamlessly take over the load balancing functions. After the master amphora changes to a healthy status, the backup amphora shall give up the load balancing functions to the master again (see [2] section 3 for details on master election protocol). * Fail-overs shall be seamless to end-users and fail-over time should be minimized. * The following diagram illustrates the Active/Standby topology. asciiflow:: +--------+ | Tenant | |Service | | (1) | +--------+ +-----------+ | +--------+ +----+ Master +----+ | | Tenant | |VIP | Amphora |IP1 | | |Service | +--+-+-----+-----+-+--+ | | (M) | | |MGMT |VRRP | | | +--------+ | | IP | IP1 | | | | Tenant | +--+--++----+ | | | Network | | | | +-----------------+ Floating +---------+ v-v-------------^----+---v-^----v-^-+ Router | IP | | ^---------------+----v-^---+------+-+Floating <-> VIP <----------+ Internet| | Management | | | | | | | | | (MGMT) | | | | +-----------------+ +---------+ | Network | +--+--++----+ | | Paired |MGMT |VRRP | | | | | IP | IP2 | | +-----------+ | +-----+-----+ | | Octavia | ++---+ Backup +-+--+ |Controller | |VIP | Amphora |IP2 | | (s) | +----+-----------+----+ +-----------+ * The newly introduced VRRP IPs shall communicate on the same tenant network (see security impact for more details). * The existing Haproxy Jinja configuration template shall include "peer" setup for state synchronization over the VRRP IP addresses. * The VRRP IP addresses shall work with both IPv4 and IPv6. Proposed change =============== The Active/Standby loadbalancers require the following high level changes: * Add support of VRRP in the amphora base image through Keepalived. * Extend the controller worker to be able to spawn N amphorae associated with the same loadbalancer on N different compute nodes (This takes into account future work on Active/Active topology). The amphorae shall be allowed to use the VIP through "allow address pairing". These amphorae shall replicate the same listeners, and pools configuration. Note: topology is a property of a load balancer and not of one of its amphorae. * Extend the amphora driver interface, the amphora REST driver, and Jinja configuration templates for the newly introduced VRRP service [4]. * Develop a Keepalived driver. * Extend the network driver to become aware of the different loadbalancer topologies and add support of network creation. The network driver shall also pair the different amphorae in a given topology to the same VIP address. * Extend the controller worker to build the right flow/sub-flows according to the given topology. The controller worker is also responsible of creating the correct stores needed by other flow/sub-flows. * Extend the Octavia configuration and Operator API to support the Active/Standby topology. * MINOR: Extend the Health Manager to be aware of the role of the amphora (Master/Backup) [9]. If the health manager decided to spawn a new amphora to replace an unhealthy one (while a backup amphora is already in service), it must replicate the same VRRP priorities, ids, and authentication credentials to keep the loadbalancer in its appropriate configuration. Listeners associated with this load balancer shall be put in a DEGRADED provisioning state. 
Alternatives
------------

We could use heartbeats as an alternative to VRRP, which is also a widely
adopted solution. Heartbeats are better suited to redundant file servers,
filesystems, and databases than to network services such as routers,
firewalls, and loadbalancers. Willy Tarreau, the creator of HAProxy,
provides a detailed view of the major differences between heartbeats and
VRRP in [5].

Data model impact
-----------------

The data model of the Octavia database shall be impacted as follows:

* A new column in the load_balancer table shall indicate its topology.
  The topology field takes one of the values SINGLE or ACTIVE/STANDBY.

* A new column in the amphora table shall indicate an amphora's role in
  the topology. If the topology is SINGLE, the amphora role shall be
  STANDALONE. If the topology is ACTIVE/STANDBY, the amphora role shall
  be either MASTER or BACKUP. This role field will also be of use for the
  Active/Active topology.

* New value tables for the loadbalancer topologies and the amphora roles.

* New columns in the amphora table shall indicate the VRRP priority, the
  VRRP ID, and the VRRP interface of the amphora.

* A new column in the listener table shall indicate the TCP port used for
  listener internal data synchronization.

* VRRP groups define the common VRRP configuration for all listeners on
  an amphora. A new table shall hold each VRRP group's main configuration
  primitives, including at least: VRRP authentication information, role,
  priority, and advertisement interval. Each Active/Standby loadbalancer
  defines one and only one VRRP group.

REST API impact
---------------

**Changes to amphora API: see [11]**

PUT /listeners/{amphora_id}/{listener_id}/haproxy

PUT /vrrp/upload

PUT /vrrp/{action}

GET /interface/{ip_addr}

**Changes to operator API: see [10]**

POST /loadbalancers

* Successful Status Code - 202
* JSON Request Body Attributes
** vip - another JSON object with one required attribute from the
   following
*** net_port_id - uuid
*** subnet_id - uuid
*** floating_ip_id - uuid
*** floating_ip_network_id - uuid
** tenant_id - string - optional - default "0" * 36 (for now)
** name - string - optional - default null
** description - string - optional - default null
** enabled - boolean - optional - default true
* JSON Response Body Attributes
** id - uuid
** vip - another JSON object
*** net_port_id - uuid
*** subnet_id - uuid
*** floating_ip_id - uuid
*** floating_ip_network_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)
** **topology - string enum - (SINGLE, ACTIVE_STANDBY)**

PUT /loadbalancers/{lb_id}

* Successful Status Code - 202
* JSON Request Body Attributes
** name - string
** description - string
** enabled - boolean
* JSON Response Body Attributes
** id - uuid
** vip - another JSON object
*** net_port_id - uuid
*** subnet_id - uuid
*** floating_ip_id - uuid
*** floating_ip_network_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)
** **topology - string enum - (SINGLE, ACTIVE_STANDBY)**

GET /loadbalancers/{lb_id}

* Successful Status Code - 200
* JSON Response Body Attributes
** id - uuid
** vip - another JSON object
*** net_port_id - uuid
*** subnet_id - uuid
*** floating_ip_id - uuid
*** floating_ip_network_id - uuid
** tenant_id - string
** name - string
** description - string
** enabled - boolean
** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
   PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)
** **topology - string enum - (SINGLE, ACTIVE_STANDBY)**
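For example, a POST /loadbalancers response for an Active/Standby load
balancer could look like the following; every value is invented for the
illustration::

    {
        "id": "b8c0f599-3f2f-4f73-8a7c-6d9e2e4f8f11",
        "vip": {
            "net_port_id": "5f4fd7f4-97de-4e44-9f09-6a2b9e1a43b4",
            "subnet_id": "0e94f9f5-9bb4-4a02-9a4a-d29d8a3b1b3f",
            "floating_ip_id": null,
            "floating_ip_network_id": null
        },
        "tenant_id": "8c2a7f0e6db24b26a2c5f9a8e1f3b4c5",
        "name": "web-lb",
        "description": "Active/Standby example",
        "enabled": true,
        "provisioning_status": "PENDING_CREATE",
        "operating_status": "OFFLINE",
        "topology": "ACTIVE_STANDBY"
    }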
Security impact
---------------

* The VRRP driver must automatically add a security group rule to the
  amphora's security group to allow VRRP traffic (protocol number 112) on
  the same tenant subnet.

* The VRRP driver shall automatically add a security group rule to allow
  Authentication Header traffic (protocol number 51).

* The VRRP driver shall support authentication type MD5.

* The HAProxy driver must be updated to automatically add a security
  group rule that allows multiple peers to synchronize their states.

* Currently HAProxy **does not** support peer authentication, and state
  sync messages are in plaintext.

* At this point, VRRP shall communicate on the same tenant network. The
  rationale is to base fail-over decisions on the condition of the same
  network interfaces that the tenant's traffic actually experiences;
  VRRP traffic and sync messages also naturally inherit the same
  protections applied to the tenant network. This may cause spurious
  fail-overs if the tenant network is under unplanned, heavy traffic;
  that is still better than failing over while the master is actually
  able to serve tenant traffic, or not failing over at all when the
  master's services have failed. Additionally, Keepalived shall check the
  health of the HAProxy service.

* In next steps, the following shall be taken into account:

  * Tenant quotas and supported topologies.
  * Protection of VRRP traffic, HAProxy state sync, router IDs, and
    passphrases in both packets and DB.

Notifications impact
--------------------

None.

Other end user impact
---------------------

* The operator shall be able to specify the loadbalancer topology in the
  Octavia configuration file (used by default).

Performance Impact
------------------

The Active/Standby topology can consume up to twice the resources
(storage, network, compute) required by the Single topology.
Nevertheless, only a single amphora is active (i.e., serving end-users)
at any point in time. If the master amphora is healthy, the backup one
shall remain idle until it receives no VRRP advertisements from the
master.

VRRP requires executing health checks in the amphorae at a fine-grained
period. The health checks shall be as lightweight as possible, such that
VRRP is able to execute all check scripts within a predefined interval.
If the check scripts fail to run within this predefined interval, VRRP
may become unstable and may incorrectly alternate the amphora roles
between MASTER and BACKUP.

Other deployer impact
---------------------

* An amphora_topology config option shall be added. The controller worker
  shall change its taskflow behavior according to the requirements of the
  different topologies.

* By default, the amphora_topology is SINGLE, and the ACTIVE/STANDBY
  topology shall be enabled/requested explicitly by operators.

* The Keepalived version deployed in the amphora image must be newer than
  1.2.8 to support unicast VRRP mode.

Developer impact
----------------

None.

Implementation
==============

Assignee(s)
-----------

Sherif Abdelwahab (abdelwas)

Work Items
----------

* Amphora image update to include Keepalived.
* Data model updates.
* Control Worker extensions.
* Keepalived driver.
* Network driver updates.
* Security rules.
* Amphora REST API and Jinja configuration updates.
* Octavia Operator API updates.

Dependencies
============

The Keepalived version deployed in the amphora image must be newer than
1.2.8 to support unicast VRRP mode.

Testing
=======

* Unit tests with tox.
* Functional tests with tox.

Documentation Impact
====================

* Description of the different supported topologies: Single,
  Active/Standby.
* Octavia configuration file changes to enable the Active/Standby
  topology.
* CLI changes to enable the Active/Standby topology.
* Changes shall be introduced to the amphora APIs: see [11].

References
==========

[1] Implementing High Availability Instances with Neutron using VRRP
    http://goo.gl/eP71g7
[2] RFC3768 Virtual Router Redundancy Protocol (VRRP)
[3] https://review.opendev.org/#/c/38230/
[4] http://www.keepalived.org/LVS-NAT-Keepalived-HOWTO.html
[5] http://www.formilux.org/archives/haproxy/1003/3259.html
[6] https://blueprints.launchpad.net/octavia/+spec/base-image
[7] https://blueprints.launchpad.net/octavia/+spec/controller-worker
[8] https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface
[9] https://blueprints.launchpad.net/octavia/+spec/controller
[10] https://blueprints.launchpad.net/octavia/+spec/operator-api
[11] doc/main/api/haproxy-amphora-api.rst

octavia-6.2.2/specs/version0.8/use_glance_tag_to_refer_to_image.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License.

   http://creativecommons.org/licenses/by/3.0/legalcode

===============================================================
Allow to use Glance image tag to refer to desired Amphora image
===============================================================

https://blueprints.launchpad.net/octavia/+spec/use-glance-tags-to-manage-image

Currently, Octavia selects the Glance image used to boot new Amphorae by
an image ID defined in its configuration. This spec suggests another way
to select the desired image: the Glance tagging mechanism.

Problem description
===================

The need to hardcode the image ID in the service configuration file has
drawbacks. Specifically, when an updated image is uploaded into Glance,
the operator is required to orchestrate a configuration file update on
all Octavia nodes and then restart all Octavia workers to apply the
change. This is both complex and error-prone.

Proposed change
===============

This spec suggests an alternative way to configure the desired Glance
image to be used by Octavia: the Glance image tagging feature. Glance
allows an image to be tagged with an arbitrary string value. With the
proposed change, the Octavia operator will be able to tell Octavia to use
an image with the specified tag. Octavia will then talk to Glance to
determine the exact image ID that is marked with the tag before booting a
new Amphora.
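A minimal sketch of the lookup, assuming an authenticated
python-glanceclient v2 client; the helper name, the configuration option
name, and the error handling are illustrative only::

    # 'glance' is an authenticated glanceclient.v2.client.Client;
    # 'amp_image_tag' stands in for the proposed configuration option.

    def get_amphora_image_id(glance, amp_image_tag):
        images = list(glance.images.list(
            filters={'tag': [amp_image_tag], 'status': 'active'},
            sort_key='created_at', sort_dir='desc'))
        if not images:
            raise RuntimeError(
                'No active Glance image tagged %s' % amp_image_tag)
        # Prefer the most recently created image carrying the tag.
        return images[0]['id']

Resolving the tag to the newest active image at boot time is what lets
the operator roll out an updated image by simply re-tagging it, with no
Octavia configuration change or service restart.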
Alternatives
------------

Alternatively, we could make Nova talk to Glance to determine the desired
image ID based on the tag provided by Octavia. This approach is not
supported by the Nova community because they don't want to impose that
complexity on their code base.

Another alternative is to use the image name instead of its ID. Nova is
capable of fetching the right image from Glance by name, as long as the
name is unique. This is not optimal in the case where the operator does
not want to remove the old Amphora image right after a new image is
uploaded (for example, if the operator wants to test the new image before
cleaning up the old one).

Data model impact
-----------------

None.

REST API impact
---------------

None.

Security impact
---------------

Image tags should be managed by the same user that owns the images
themselves.

Notifications impact
--------------------

None.

Other end user impact
---------------------

The proposed change should not break the existing mechanism. To achieve
that, the new mechanism will be guarded by a new configuration option
that will store the desired Glance tag.

Performance Impact
------------------

If the feature is used, Octavia will need to reach out to Glance before
booting a new Amphora. The performance impact is well isolated and is not
expected to be significant.

Other deployer impact
---------------------

The change couples Octavia with Glance. This should not be an issue,
since there is no use case for running Octavia without Glance installed.

The new feature deprecates the amp_image_id option. Operators that still
use the old image referencing mechanism will be advised to switch to the
new option. Eventually, the old mechanism will be removed from the tree.

Developer impact
----------------

None.

Implementation
==============

Assignee(s)
-----------

Primary assignee:
  ihrachys (Ihar Hrachyshka)

Work Items
----------

* introduce glanceclient integration into the nova compute driver
* introduce a new configuration option to store the glance tag
* introduce devstack plugin support to configure the feature
* provide documentation for the new feature

Dependencies
============

None.

Testing
=======

Unit tests will be written to cover the feature. The Octavia devstack
plugin will be switched to using the new glance image referencing
mechanism. Tempest tests will be implemented to test the new feature.

Documentation Impact
====================

The new feature should be documented in operator-visible guides.

References
==========

octavia-6.2.2/specs/version0.9/active-active-distributor.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License.

   http://creativecommons.org/licenses/by/3.0/legalcode

=================================================
Distributor for Active-Active, N+1 Amphorae Setup
=================================================

.. attention::

   Please review the active-active topology blueprint first
   (:doc:`active-active-topology`)

https://blueprints.launchpad.net/octavia/+spec/active-active-topology

Problem description
===================

This blueprint describes how Octavia implements a *Distributor* to
support the *active-active* loadbalancer (LB) solution, as described in
the blueprint linked above. It presents the high-level Distributor design
and suggests high-level code changes to the current code base to realize
this design.

In a nutshell, in an *active-active* topology, an *Amphora Cluster* of
two or more active Amphorae collectively provides the loadbalancing
service.
The solution is designed as a two-step loadbalancing process: first, a
lightweight *distribution* of VIP traffic over an Amphora Cluster; then,
full-featured loadbalancing of traffic over the back-end members. Since a
single loadbalancing service, which is addressable by a single VIP
address, is served by several Amphorae at the same time, there is a need
to distribute incoming requests among these Amphorae -- that is the role
of the *Distributor*.

This blueprint uses terminology defined in the Octavia glossary when
available, and defines new terms to describe new components and features
as necessary.

.. _P2:

**Note:** Items marked with [`P2`_] refer to lower-priority features to
be designed / implemented only after the initial release.

Proposed change
===============

* Octavia shall implement a Distributor to support the active-active
  topology.

* The operator should be able to select and configure the Distributor
  (e.g., through an Octavia configuration file or [`P2`_] through a
  flavor framework).

* Octavia shall support a pluggable design for the Distributor, allowing
  different implementations. In particular, the Distributor shall be
  abstracted through a *driver*, similarly to the current support of
  Amphora implementations.

* Octavia shall support different provisioning types for the Distributor,
  including VM-based (the default, similar to current Amphorae), [`P2`_]
  container-based, and [`P2`_] external (vendor-specific) hardware.

* The operator shall be able to configure the distribution policies,
  including affinity and availability (see below for details).

Architecture
------------

High-level Topology Description
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

* The following diagram illustrates the Distributor's role in an
  active-active topology:

  ::

     [Diagram: multi-tenant Distributor. Each tenant (A, B, C) has a
     floating IP on the GW router mapped to its LB VIP. The Distributor
     answers ``arp`` for each VIP on the corresponding per-tenant
     front-end network and forwards VIP traffic to that tenant's
     Amphorae, which load-balance via back-end IPs onto the tenant
     back-end networks.]

* In the above diagram, several tenants (A, B, C, ...) share the
  Distributor, yet the Amphorae and the front- and back-end (tenant)
  networks are not shared between tenants. (See also "Distributor
  Sharing" below.) Note that in the initial code implementing the
  Distributor, the Distributor will not be shared between tenants, until
  tests verifying the security of a shared Distributor can be
  implemented.

* The Distributor acts as a (one-legged) router, listening on each load
  balancer's VIP and forwarding to one of its Amphorae.

* Each load balancer's VIP is advertised and answered by the Distributor.
  An ``arp`` request for any of the VIP addresses is answered by the
  Distributor; hence, any traffic sent to each VIP is received by the
  Distributor (and forwarded to an appropriate Amphora).
* ARP is disabled on all the Amphorae for the VIP interface.

* The Distributor distributes the traffic of each VIP to an Amphora in
  the corresponding load balancer Cluster.

* An example of high-level data flow:

  1. Internet clients access a tenant service through an externally
     visible floating-IP (IPv4 or IPv6).

  2. The GW router maps the floating IP into a loadbalancer's internal
     VIP on the tenant's front-end network.

  3. (1st packet to VIP only) The GW sends an ``arp`` request on the VIP
     (tenant front-end) network. The Distributor answers the ``arp``
     request with its own MAC address on this network (all the Amphorae
     on the network can serve the VIP, but do not answer the ``arp``).

  4. The GW router forwards the client request to the Distributor.

  5. The Distributor forwards the packet to one of the Amphorae on the
     tenant's front-end network (distributed according to some policy, as
     described below), without changing the destination IP (i.e., still
     using the VIP).

  6. The Amphora accepts the packet and continues the flow on the
     tenant's back-end network, as in the other (non active-active)
     Octavia loadbalancer topologies.

  7. The outgoing response packets from the Amphora are forwarded
     directly to the GW router (that is, they do not pass through the
     Distributor).

Affinity of Flows to Amphorae
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

- Affinity is required to make sure related packets are forwarded to the
  same Amphora. At minimum, since TCP connections are terminated at the
  Amphora, all packets that belong to the same flow must be sent to the
  same Amphora. Enhanced affinity levels can be used to make sure that
  flows with similar attributes are always sent to the same Amphora; this
  may be desired to achieve better performance (see discussion below).

- [`P2`_] The Distributor shall support different modes of
  client-to-Amphora affinity. The operator should be able to select and
  configure the desired affinity level.

- Since the Distributor works on L3 parameters and the "heavy lifting" is
  expected to be done by the Amphorae, this specification proposes
  implementing two practical affinity alternatives (illustrated by the
  sketch after this section). Other affinity alternatives may be
  implemented at a later time.

  *Source IP and source port*
    In this mode, the Distributor must always send packets from the same
    combination of source IP and source port to the same Amphora. Since
    the target IP and target port are fixed per Listener, this mode
    implies that all packets from the same TCP flow are sent to the same
    Amphora. This is the minimal affinity mode, as without it TCP
    connections will break.

    *Note*: related flows (e.g., parallel client calls from the same HTML
    page) will typically be distributed to different Amphorae; however,
    these should still be routed to the same back-end. This could be
    guaranteed by using cookies and/or by synchronizing the stick-tables.
    Also, the Amphorae in the Cluster could be configured to use the same
    hashing parameters (avoiding any random seed) to ensure all make
    similar decisions.

  *Source IP* (default)
    In this mode, the Distributor must always send packets from the same
    source IP to the same Amphora, regardless of port. This mode allows
    TLS session reuse (e.g., through session IDs), where an abbreviated
    handshake can be used to improve latency and computation time. The
    main disadvantage of sending all traffic from the same source IP to
    the same Amphora is that it might lead to poor load distribution for
    large workloads that have the same source IP (e.g., workloads behind
    a single NAT or proxy).
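As a toy illustration of the two affinity modes (this is not the actual
OVS hashing, only the affinity semantics; names and the hash choice are
arbitrary)::

    import hashlib

    def pick_amphora(src_ip, src_port, cluster_size, per_port=False):
        # Affinity key: source IP only (default) or IP + source port.
        key = '%s:%d' % (src_ip, src_port) if per_port else src_ip
        digest = hashlib.sha256(key.encode('utf-8')).digest()
        return int.from_bytes(digest[:8], 'big') % cluster_size

    # With per_port=False, every connection from 198.51.100.7 lands on
    # the same Amphora (so TLS sessions can be reused); with
    # per_port=True, parallel connections from that client spread
    # across the cluster.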
**Note on TLS implications**: In some (typical) TLS sessions, the
additional load incurred for each new session is significantly larger
than the load incurred for each new request or connection on the same
session; namely, the total load on each Amphora will be more affected by
the number of different source IPs it serves than by the number of
connections. Moreover, since the total load on the Cluster incurred by
all the connections depends on the level of session reuse, spreading a
single source IP over multiple Amphorae *increases* the overall load on
the Cluster. Thus, a Distributor that uniformly spreads traffic without
affinity per source IP (e.g., uses per-flow affinity only) might cause an
increase in overall load on the Cluster that is proportional to the
number of Amphorae. For example, in a scale-out scenario (where a new
Amphora is spawned to share the total load), moving some flows to the new
Amphora might increase the overall Cluster load, negating the benefit of
scaling out.

Session reuse helps with the certificate exchange phase. Improvements in
performance with the certificate exchange depend on the type of keys
used, and are greatest with RSA. Session reuse may be less important with
other schemes; shared TLS session tickets are another mechanism that may
circumvent the problem; also, upcoming versions of HAProxy may be able to
obviate this problem by synchronizing TLS state between Amphorae (similar
to the stick-table protocol).

- Per the agreement at the Mitaka mid-cycle, the default affinity shall
  be based on source IP only, and a consistent hashing function (see
  below) shall be used to distribute flows in a predictable manner;
  however, abstraction will be used to allow other implementations at a
  later time.

Forwarding with OVS and OpenFlow Rules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

* The reference implementation of the Distributor shall use OVS for
  forwarding and configure the Distributor through OpenFlow rules.

  - OpenFlow rules can be implemented by a software switch (e.g., OVS)
    that can run on a VM. Thus, the Distributor can be created and
    managed by Octavia similarly to the creation and management of
    Amphora VMs.

  - OpenFlow rules are supported by several HW switches, so the same
    control plane can be used for both SW and HW implementations.

* Outline of Rules (a concrete sketch follows this list)

  - A ``group`` with the ``select`` method is used to distribute IP
    traffic over multiple Amphorae. There is one ``bucket`` per Amphora
    -- adding an Amphora adds a new ``bucket`` and deleting an Amphora
    removes the corresponding ``bucket``.

  - The ``select`` method supports (OpenFlow v1.5) hash-based selection
    of the ``bucket``. The hash can be set up to use different fields,
    including source IP only (default) or source IP and source port.

  - All buckets route traffic back on the in-port (i.e., no forwarding
    between ports). This ensures that the same front-end network is used
    (i.e., the Distributor does not route between front-end networks and,
    therefore, does not mix traffic of different tenants).

  - The ``bucket`` actions re-write the outgoing packets: the destination
    MAC is re-written to that of the specific Amphora, and the source MAC
    is re-written to that of the Distributor interface (together these
    MAC re-writes provide L3 routing functionality). *Note:* alternative
    re-write rules can be used to support other forwarding mechanisms.

  - OpenFlow rules are also used to answer ``arp`` requests on the VIP.
    ``arp`` requests for each VIP are captured, re-written as ``arp``
    replies with the MAC address of the particular front-end interface,
    and sent back on the in-port. Again, there is no routing between
    interfaces.
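To make the outline concrete, the following sketch shows roughly how one
VIP with a two-Amphora cluster could be programmed via ``ovs-ofctl``. The
bridge name, port number, and all addresses are invented for the example,
and the exact group/property syntax varies with the OVS version
(``selection_method=hash`` assumes OpenFlow 1.5 support)::

    # One "select" group per VIP; each bucket re-writes the MACs toward
    # one Amphora and sends the packet back out the port it arrived on.
    ovs-ofctl -O OpenFlow15 add-group br-dist \
      "group_id=1,type=select,selection_method=hash,fields(ip_src),\
    bucket=bucket_id:1,actions=mod_dl_src:fa:16:3e:d1:57:00,\
    mod_dl_dst:fa:16:3e:a1:00:01,output:IN_PORT,\
    bucket=bucket_id:2,actions=mod_dl_src:fa:16:3e:d1:57:00,\
    mod_dl_dst:fa:16:3e:a1:00:02,output:IN_PORT"

    # Steer IP traffic addressed to the VIP into the group.
    ovs-ofctl -O OpenFlow15 add-flow br-dist \
      "in_port=3,ip,nw_dst=203.0.113.10,actions=group:1"

    # Turn ARP requests for the VIP into replies carrying the front-end
    # interface's MAC (the usual OVS ARP-responder pattern).
    ovs-ofctl -O OpenFlow15 add-flow br-dist \
      "in_port=3,arp,arp_tpa=203.0.113.10,arp_op=1,actions=\
    move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],mod_dl_src:fa:16:3e:d1:57:00,\
    load:0x2->NXM_OF_ARP_OP[],move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],\
    move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],\
    load:0xfa163ed15700->NXM_NX_ARP_SHA[],\
    load:0xcb00710a->NXM_OF_ARP_SPA[],output:IN_PORT"

Because every rule both matches and outputs on the same in-port, the
sketch preserves the "no routing between front-end networks" property
stated above.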
* Handling Amphora failure

  - The initial implementation will assume a fixed size for each cluster
    (no elasticity). The hashing will be "consistent" by virtue of never
    changing the number of ``buckets``. If the cluster size is changed on
    the fly (there should not be an API to do so) then there are no
    guarantees on shuffling.

  - If an Amphora fails then remapping cannot be avoided -- all flows of
    the failed Amphora must be remapped to a different one. Rather than
    mapping these flows to other active Amphorae in the cluster, the
    reference implementation will map all flows to the cluster's
    *standby* Amphora (i.e., the "+1" Amphora in this "N+1" cluster).
    This ensures that the cluster size does not change. The only change
    in the OpenFlow rules would be to replace the MAC of the failed
    Amphora with that of the standby Amphora.

  - This implementation is very similar to Active-Standby fail-over:
    there will be a standby Amphora that can serve traffic in case of
    failure. The differences from Active-Standby are that a single
    Amphora acts as a standby for multiple ones; that fail-over
    re-routing is handled through the Distributor (rather than by VRRP);
    and that a whole cluster of Amphorae is active concurrently, to
    enable support of large workloads.

  - The Health Manager will trigger re-creation of a failed Amphora. Once
    the Amphora is ready, it becomes the new *standby* (no changes to the
    OpenFlow rules).

  - [`P2`_] Handle concurrent failure of more than a single Amphora.

* Handling Distributor failover

  - To preserve client-to-Amphora affinity when a Distributor is replaced
    after a catastrophic failure, the Amphora registration process with
    the Distributor should preserve positional information. This should
    ensure that, when a new Distributor is created, Amphorae will be
    assigned to the same buckets to which they were previously assigned.

  - In the reference implementation, we propose making the Distributor
    API return the complete list of Amphorae MAC addresses with
    positional information each time an Amphora is registered or
    unregistered.

Specific proposed changes
-------------------------

**Note:** These are changes on top of the changes described in the
"Active-Active, N+1 Amphorae Setup" blueprint (see
https://blueprints.launchpad.net/octavia/+spec/active-active-topology).

* Create a flow for the creation of an Amphora cluster with N active
  Amphorae and one extra standby Amphora. Set up the Amphora roles
  accordingly.

* Support the creation, connection, and configuration of the various
  networks and interfaces as described in the high-level topology
  diagram. The Distributor shall have a separate interface for each
  loadbalancer and shall not allow any routing between different ports.
  In particular, when a loadbalancer is created, the Distributor should:

  - Attach the Distributor to the loadbalancer's front-end network by
    adding a VIP port to the Distributor (the LB VIP Neutron port).

  - Configure OpenFlow rules: create a group with the desired cluster
    size and with the given Amphora MACs; create rules to answer ``arp``
    requests for the VIP address.

**Notes:**

[`P2`_] It is desirable that the Distributor be considered a router by
Neutron (to handle port security, network forwarding without ``arp``
spoofing, etc.).
This may require changes to Neutron and may also mean that Octavia will
be a privileged user of Neutron.

The Distributor needs to support IPv6 NDP.

[`P2`_] If the Distributor is implemented as a container then
hot-plugging a port for each VIP might not be possible.

If DVR is used then routing rules must be used to forward external
traffic to the Distributor rather than relying on ``arp``. In particular,
DVR messes up ``noarp`` settings.

* Support Amphora failure recovery

  - Modify the HM and failure recovery flows to add tasks to notify the
    ACM when the ACTIVE-ACTIVE topology is in use. If an active Amphora
    fails, it needs to be decommissioned on the Distributor and replaced
    with the standby.

  - Failed Amphorae should be recreated as a standby (in the new
    IN_CLUSTER_STANDBY role). The standby Amphora should also be
    monitored and recovered on failure.

* Distributor driver and Distributor image

  - The Distributor should be supported similarly to an Amphora; namely,
    it should have its own abstract driver.

  - The Distributor image (for the reference implementation) should
    include a recent version of OVS (with OpenFlow >1.5 support) that
    supports hash-based bucket selection. As is done for Amphorae, the
    Distributor image should be installed with public keys to allow
    secure configuration by the Octavia controller.

  - The reference implementation shall spawn a new Distributor VM as
    needed. It shall monitor its health and handle recovery using
    heartbeats sent to the health monitor, in a similar fashion to how
    this is done presently with Amphorae.

    [`P2`_] Spawn a new Distributor if the number of VIPs exceeds a given
    limit (to limit the number of Neutron ports attached to one
    Distributor).

    [`P2`_] Add configuration options and/or Operator API to allow an
    operator to request a dedicated Distributor for a VIP (or per
    tenant).

* Define a REST API for Distributor configuration (no SSH API). See below
  for details.

* Create a data model for the Distributor.

Alternatives
------------

TBD

Data model impact
-----------------

Add table ``distributor`` with the following columns (a SQLAlchemy sketch
of these tables follows the list):

* id ``(sa.String(36), nullable=False)``
  ID of Distributor instance.

* compute_id ``(sa.String(36), nullable=True)``
  ID of compute node running the Distributor.

* lb_network_ip ``(sa.String(64), nullable=True)``
  IP of Distributor on management network.

* status ``(sa.String(36), nullable=True)``
  Provisioning status.

* vip_port_ids (list of ``sa.String(36)``)
  List of Neutron port IDs. New VIFs may be plugged into the Distributor
  when a new LB is created. We may need to store the Neutron port IDs in
  order to support fail-over from one Distributor instance to another.

Add table ``distributor_health`` with the following columns:

* distributor_id ``(sa.String(36), nullable=False)``
  ID of Distributor instance.

* last_update ``(sa.DateTime, nullable=False)``
  Last time a distributor heartbeat was received by a health monitor.

* busy ``(sa.Boolean, nullable=False)``
  Field indicating that a create / delete or other action is being
  conducted on the distributor instance (i.e., to prevent a race
  condition when multiple health managers are in use).

Add table ``amphora_registration`` with the following columns. This
describes which Amphorae are registered with which Distributors and in
which order:

* lb_id ``(sa.String(36), nullable=False)``
  ID of load balancer.

* distributor_id ``(sa.String(36), nullable=False)``
  ID of Distributor instance.

* amphora_id ``(sa.String(36), nullable=False)``
  ID of Amphora instance.

* position ``(sa.Integer, nullable=True)``
  Order in which Amphorae are registered with the Distributor.
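Expressed as SQLAlchemy models, the new tables might look roughly like
the following; the declarative base, the primary-key choices, and the
omission of foreign keys are assumptions, since the spec only lists the
columns::

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()  # stand-in for Octavia's real model base


    class Distributor(Base):
        __tablename__ = 'distributor'

        id = sa.Column(sa.String(36), primary_key=True, nullable=False)
        compute_id = sa.Column(sa.String(36), nullable=True)
        lb_network_ip = sa.Column(sa.String(64), nullable=True)
        status = sa.Column(sa.String(36), nullable=True)
        # vip_port_ids is a list, so it would live in a child table
        # (omitted here for brevity).


    class DistributorHealth(Base):
        __tablename__ = 'distributor_health'

        distributor_id = sa.Column(sa.String(36), primary_key=True,
                                   nullable=False)
        last_update = sa.Column(sa.DateTime, nullable=False)
        busy = sa.Column(sa.Boolean, nullable=False)


    class AmphoraRegistration(Base):
        __tablename__ = 'amphora_registration'

        lb_id = sa.Column(sa.String(36), primary_key=True,
                          nullable=False)
        distributor_id = sa.Column(sa.String(36), primary_key=True,
                                   nullable=False)
        amphora_id = sa.Column(sa.String(36), primary_key=True,
                               nullable=False)
        position = sa.Column(sa.Integer, nullable=True)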
REST API impact
---------------

The Distributor will run its own REST API server. This API will be
secured using two-way SSL authentication, and will use certificate
rotation in the same way this is done with Amphorae today.

The following API calls will be addressed.

1. Post VIP Plug

   Adding a VIP network interface to the Distributor involves tasks which
   run outside the Distributor itself. Once these are complete, the
   Distributor must be configured to use the new interface. This is a
   REST call, similar to what is currently done for Amphorae when
   connecting to a new member network.

   `lb_id`
     An identifier for the particular loadbalancer/VIP. Used for
     subsequent register/unregister of Amphorae.

   `vip_address`
     The IP of the VIP (i.e., the IP for which to answer ``arp``
     requests).

   `subnet_cidr`
     Netmask for the VIP's subnet.

   `gateway`
     Gateway that outbound packets from the VIP address should use.

   `mac_address`
     MAC address of the new interface corresponding to the VIP.

   `vrrp_ip`
     In the case of an HA Distributor, this contains the IP address that
     will be used in setting up the allowed-address-pairs relationship.
     (See Amphora VIP plugging under the ACTIVE-STANDBY topology for an
     example of how this is used.)

   `host_routes`
     List of routes that should be added when the VIP is plugged.

   `alg_extras`
     Extra arguments related to the algorithm that will be used to
     distribute requests to the Amphorae that are part of this load
     balancer configuration. This consists of an algorithm name and an
     affinity type. In the initial release of ACTIVE-ACTIVE, the only
     valid algorithm will be *hash*, and the affinity type may be
     ``Source_IP`` or [`P2`_] ``Source_IP_AND_port``.

2. Pre VIP unplug

   Removing a VIP network interface will involve several tasks on the
   Distributor to gracefully roll back the OVS configuration and other
   details that were set up when the VIP was plugged in.

   `lb_id`
     ID of the VIP's loadbalancer that will be unplugged.

3. Register Amphorae

   This adds Amphorae to the configuration for a given load balancer. The
   Distributor should respond with a new list of all Amphorae registered
   with the Distributor, with positional information.

   `lb_id`
     ID of the loadbalancer with which the Amphorae will be registered.

   `amphorae`
     List of Amphorae MAC addresses, each with an (optional) position
     argument indicating where it should be registered.

4. Unregister Amphorae

   This removes Amphorae from the configuration for a given load
   balancer. The Distributor should respond with a new list of all
   Amphorae registered with the Distributor, with positional information.

   `lb_id`
     ID of the loadbalancer from which the Amphorae will be unregistered.

   `amphorae`
     List of Amphorae MAC addresses that should be unregistered from the
     Distributor.
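For illustration only, a Register Amphorae exchange might look like the
following; the URL shape and the JSON field names are hypothetical, as
the spec above defines only the call semantics::

    PUT /distributor/loadbalancers/{lb_id}/register_amphorae

      {"amphorae": [{"mac_address": "fa:16:3e:a1:00:03", "position": 3}]}

    Response:

      {"amphorae": [
          {"position": 1, "mac_address": "fa:16:3e:a1:00:01"},
          {"position": 2, "mac_address": "fa:16:3e:a1:00:02"},
          {"position": 3, "mac_address": "fa:16:3e:a1:00:03"}
      ]}

Returning the full positional list on every call is what allows a
replacement Distributor to be rebuilt with Amphorae in their original
buckets, as discussed under "Handling Distributor failover".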
Security impact
---------------

The Distributor is designed to be multi-tenant by default. (Note that the
first reference implementation will not be multi-tenant until tests can
be developed to verify the security of a multi-tenant reference
Distributor.) Although each tenant has its own front-end network, the
Distributor is connected to all of them, which might allow leaks between
these networks. The rationale is twofold: first, the Distributor should
be considered a trusted infrastructure component; second, all traffic is
external traffic before it reaches the Amphora. Note that the GW router
has exactly the same attributes; in other words, logically, we can
consider the Distributor to be an extension of the GW (or even use the GW
hardware to implement the Distributor).

This approach might not be considered secure enough for some cases, such
as if LBaaS is used for internal tier-to-tier communication inside a
tenant network. Some tenants may want their loadbalancer's VIP to remain
private and their front-end network to be isolated. In these cases, in
order to accomplish active-active for such a tenant, we would need
separate, dedicated Distributor instance(s).

Notifications impact
--------------------

Other end user impact
---------------------

Performance Impact
------------------

Other deployer impact
---------------------

Developer impact
----------------

Further Discussion
------------------

.. Note:: This section captures some background, ideas, concerns, and
   remarks that were raised by various people. Some of the items here can
   be considered for future/alternative design, and some will hopefully
   make their way into yet-to-be-written related blueprints (e.g.,
   auto-scaled topology).

[`P2`_] Handling changes in Cluster size (manual or auto-scaled)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

- The Distributor shall support different mechanisms for preserving the
  affinity of flows to Amphorae following a *change in the size* of the
  Amphora Cluster.

- The goal is to minimize shuffling of the client-to-Amphora mapping
  during cluster size changes:

  * When an Amphora is removed from the Cluster (e.g., due to failure or
    a scale-down action), all its flows are broken; however, flows to
    other Amphorae should not be affected. Also, if a drain method is
    used to empty the Amphora of client flows (in the case of a graceful
    removal), this should prevent disruption.

  * When an Amphora is *added* to the Cluster (e.g., recovery of a failed
    Amphora), some new flows should be distributed to the new Amphora;
    however, most flows should still go to the same Amphora they were
    distributed to before the new Amphora was added. For example, if the
    affinity of flows to Amphorae is per source IP and a new Amphora was
    just added, then the Distributor should forward packets from this IP
    to one of only two Amphorae: either the same Amphora as before or the
    newly added Amphora.

  Using a simple hash to maintain affinity does not meet this goal. For
  example, suppose we maintain affinity (for a fixed cluster size) using
  a hash (for randomizing key distribution) as
  `chosen_amphora_id = hash(sourceIP # port) mod number_of_amphorae`.
  When an Amphora is added or removed, the number of Amphorae changes;
  thus, a different Amphora will be chosen for most flows.

- Below are a couple of ways to tackle this shuffling problem.

  *Consistent Hashing*
    Consistent hashing is a hashing mechanism (regardless of whether the
    key is based on IP or IP/port) that preserves most hash mappings
    during changes in the size of the Amphora Cluster. In particular, for
    a cluster with N Amphorae that grows to N+1 Amphorae, a consistent
    hashing function ensures that, with high probability, only 1/N of
    input flows will be re-hashed (more precisely, K/N of K keys will be
    rehashed). Note that, even with consistent hashing, some flows will
    be remapped, and there is only a statistical bound on the number of
    remapped flows.

    The "classic" consistent hashing algorithm maps both server IDs and
    keys to hash values and selects for each key the server with the
    closest hash value to the key's hash value. Lookup generally requires
    O(log N) to search for the "closest" server. Achieving good
    distribution requires multiple hashes per server (~10s); although
    these can be pre-computed, there is an ~10s*N memory footprint.
    Other algorithms (e.g., Google's Maglev) have better performance, but
    provide weaker guarantees. There are several consistent hashing
    libraries available. None are supported in OVS.

    * Ketama https://github.com/RJ/ketama
    * OpenStack Swift
      https://docs.openstack.org/swift/latest/ring.html#ring
    * Amazon Dynamo
      http://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf

    We should also strongly consider making any consistent hashing
    algorithm we develop available to all OpenStack components by making
    it part of an Oslo library.

  *Rendezvous hashing*
    This method provides similar properties to consistent hashing (i.e.,
    a hashing function that remaps only 1/N of keys when a cluster with N
    Amphorae grows to N+1 Amphorae). For each server ID, the algorithm
    concatenates the key and server ID and computes a hash. The server
    with the largest hash is chosen. This approach requires O(N) for each
    lookup, but is much simpler to implement and has virtually no memory
    footprint. Through search-tree encoding of the server IDs it is
    possible to achieve O(log N) lookup, but the implementation is harder
    and the distribution is not as good. Another feature is that more
    than one server can be chosen (e.g., the two largest values) to
    handle larger loads -- not directly useful for the Distributor use
    case.
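Because rendezvous (highest-random-weight) hashing is so compact, a
complete sketch fits in a few lines; the key and ID formats here are
arbitrary choices for the example::

    import hashlib

    def rendezvous_pick(flow_key, amphora_ids):
        """Pick the Amphora whose (key, server) hash is largest.

        Every (flow_key, amphora) pair is scored and the maximum wins,
        so when an Amphora joins or leaves, only the keys whose maximum
        changed (about 1/N of them) are remapped.
        """
        def score(amp_id):
            data = ('%s|%s' % (flow_key, amp_id)).encode('utf-8')
            return int.from_bytes(hashlib.sha256(data).digest()[:8],
                                  'big')
        return max(amphora_ids, key=score)

    # A given source IP keeps its Amphora with high probability even
    # after 'amp-4' joins the cluster:
    rendezvous_pick('198.51.100.7', ['amp-1', 'amp-2', 'amp-3'])
    rendezvous_pick('198.51.100.7', ['amp-1', 'amp-2', 'amp-3', 'amp-4'])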
  *Hybrid, Permutation-based approach*
    This is an alternative implementation of consistent hashing that may
    be simpler to implement. Keys are hashed to a set of buckets; each
    bucket is pre-mapped to a random permutation of the server IDs.
    Lookup is done by computing a hash of the key to obtain a bucket and
    then going over the permutation, selecting the first server. If a
    server is marked as "down", the next server in the list is chosen.
    This approach is similar to Rendezvous hashing if each key is
    directly pre-mapped to a random permutation (and, like it, allows
    more than one server selection). If the number of failed servers is
    small, then lookup is about O(1); memory is O(N * #buckets), where
    the granularity of distribution is improved by increasing the number
    of buckets.

    The permutation-based approach is useful to support clusters of fixed
    size that need to handle a few nodes going down and then coming back
    up. If there is an assumption on the number of failures, then memory
    can be reduced to O(max_failures * #buckets). This approach seems to
    suit the Distributor active-active use case for non-elastic
    workloads.

- Flow tracking is required, even with the above hash functions, to
  handle the (relatively few) remapped flows. If an existing flow is
  remapped, its TCP connection would break. This is acceptable when an
  Amphora goes down and its flows are mapped to a new one. On the other
  hand, it may be unacceptable when an Amphora is added to the cluster
  and 1/N of existing flows are remapped. The Distributor may support
  different modes, as follows.

  *None / Stateless*
    In this mode, the Distributor applies its most recent forwarding
    rules, regardless of previous state. Some existing flows might be
    remapped to a different Amphora and would be broken. The client would
    have to recover and establish a connection with the new Amphora (it
    would still be mapped to the same back-end, if possible). Combined
    with consistent (or similar) hashing, this may be good enough for
    many web applications that are built for failure anyway and can
    restore their state upon reconnect.

  *Full flow Tracking*
    In this mode, the Distributor tracks existing flows to provide full
    affinity, i.e., only new flows can be remapped to different Amphorae.
    Linux connection tracking may be used (e.g., through IPTables or
    through OpenFlow); however, this might not scale well. Alternatively,
    the Distributor can use an independent mechanism similar to HAProxy
    stick-tables to track the flows. Note that the Distributor only needs
    to track the mapping per source IP and source port (unlike Linux
    connection tracking, which follows the TCP state and related
    connections).

  *Use Ryu*
    Ryu is a well-supported and well-tested Python library for issuing
    OpenFlow commands. Especially since Neutron recently moved to using
    it for many of the things it does, using it in the Distributor might
    make sense for Octavia as well.

Forwarding Data-path Implementation Alternatives
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The current design uses L2 forwarding based only on L3 parameters and
uses Direct Return routing (one-legged). The rationale behind this
approach is to keep the Distributor as light as possible and have the
Amphorae do the bulk of the work. This allows one (or a few) Distributor
instance(s) to serve all traffic, even for very large workloads. Other
approaches are possible.

2-legged Router
_______________

- The Distributor acts as a router, being in-path in both directions.
- New network between the Distributor and the Amphorae -- only the
  Distributor is on the VIP subnet.
- No need to use MAC forwarding -- use routing rules.

LVS
___

Use LVS for the Distributor.

DNS
___

Use DNS for the Distributor.

- Use DNS to map to particular Amphorae. Distribution will be by domain
  name rather than by VIP.
- No problem with per-flow affinity, as a client will use the same IP for
  the entire TCP connection.
- Needs a different public IP for each Amphora (no VIP).

Pure SDN
________

- Implement the OpenFlow rules directly in the network, without a
  Distributor instance.
- If the network infrastructure supports this, then the Distributor can
  become more robust and very lightweight, making it practical to have a
  dedicated Distributor per VIP (only the rules will be dedicated, as the
  network and SDN controller are shared resources).

Distributor Sharing
^^^^^^^^^^^^^^^^^^^

- The initial implementation of the Distributor will not be shared
  between tenants until tests can be written to verify the security of
  this solution.

- The implementation should support different Distributor sharing and
  cardinality configurations. This includes a single shared Distributor,
  multiple dedicated Distributors, and multiple shared Distributors. In
  particular, an abstraction layer should be used, and the data model
  should include an association between the load balancer and the
  Distributor.
- The implementation should support plugging-in a hardware-based implementation of the Distributor that may have its own high-availability support. - In order to preserve client to Amphora affinity in the case of a failover, a VRRP-like HA Distributor has several options. We could potentially push Amphora registrations to the standby Distributor with the position arguments specified, in order to guarantee the active and standby Distributor always have the same configuration. Or, we could invent and utilize a synchronization protocol between the active and standby Distributors. This will be explored and decided when an HA Distributor specification is written and approved. Implementation ============== Assignee(s) ----------- Work Items ---------- Dependencies ============ Testing ======= * Unit tests with tox. * Function tests with tox. Documentation Impact ==================== References ========== https://blueprints.launchpad.net/octavia/+spec/base-image https://blueprints.launchpad.net/octavia/+spec/controller-worker https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface https://blueprints.launchpad.net/octavia/+spec/controller https://blueprints.launchpad.net/octavia/+spec/operator-api :doc:`../../api/haproxy-amphora-api` https://blueprints.launchpad.net/octavia/+spec/active-active-topology ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/version0.9/active-active-topology.rst0000664000175000017500000007165300000000000023367 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ================================= Active-Active, N+1 Amphorae Setup ================================= https://blueprints.launchpad.net/octavia/+spec/active-active-topology Problem description =================== This blueprint describes how Octavia implements an *active-active* loadbalancer (LB) solution that is highly-available through redundant Amphorae. It presents the high-level service topology and suggests high-level code changes to the current code base to realize this scenario. In a nutshell, an *Amphora Cluster* of two or more active Amphorae collectively provide the loadbalancing service. The Amphora Cluster shall be managed by an *Amphora Cluster Manager* (ACM). The ACM shall provide an abstraction that allows different types of active-active features (e.g., failure recovery, elasticity, etc.). The initial implementation shall not rely on external services, but the abstraction shall allow for interaction with external ACMs (to be developed later). This blueprint uses terminology defined in Octavia glossary when available, and defines new terms to describe new components and features as necessary. .. _P2: **Note:** Items marked with [`P2`_] refer to lower priority features to be designed / implemented only after initial release. Proposed change =============== A tenant should be able to start a highly-available, loadbalancer for the tenant's backend services as follows: * The operator should be able to configure an active-active topology through an Octavia configuration file or [`P2`_] through a Neutron flavor, which the loadbalancer shall support. Octavia shall support active-active topologies in addition to the topologies that it currently supports. * In an active-active topology, a cluster of two or more amphorae shall host a replicated configuration of the load-balancing services. 
Octavia will manage this *Amphora Cluster* as a highly-available service using a pool of active resources. * The Amphora Cluster shall provide the load-balancing services and support the configurations that are supported by a single Amphora topology, including L7 load-balancing, SSL termination, etc. * The active-active topology shall support various Amphora types and implementations; including, virtual machines, [`P2`_] containers, and bare-metal servers. * The operator should be able to configure the high-availability requirements for the active-active load-balancing services. The operator shall be able to specify the number of healthy Amphorae that must exist in the load-balancing Amphora Cluster. If the number of healthy Amphorae drops under the desired number, Octavia shall automatically and seamlessly create and configure a new Amphora and add it to the Amphora Cluster. [`P2`_] The operator should be further able to define that the Amphora Cluster shall be allocated on separate physical resources. * An Amphora Cluster will collectively act to serve as a single logical loadbalancer as defined in the Octavia glossary. Octavia will seamlessly distribute incoming external traffic among the Amphorae in the Amphora Cluster. To that end, Octavia will employ a *Distributor* component that will forward external traffic towards the managed amphora instances. Conceptually, the Distributor provides an extra level of load-balancing for an active-active Octavia application, albeit a simplified one. Octavia should be able to support several Distributor implementations (e.g., software-based and hardware-based) and different affinity models (at minimum, flow-affinity should be supported to allow TCP connectivity between clients and Amphorae). * The detailed design of the Distributor component will be described in a separate document (see "Distributor for Active-Active, N+1 Amphorae Setup", active-active-distributor.rst). High-level Topology Description ------------------------------- Single Tenant ~~~~~~~~~~~~~ * The following diagram illustrates the active-active topology: :: Front-End Back-End Internet Network Network (world) (tenant) (tenant) ║ ║ ║ ┌─╨────┐ floating IP ║ ║ ┌────────┐ │Router│ to LB VIP ║ ┌────┬─────────┬────┐ ║ │ Tenant │ │ GW ├──────────────►╫◄─┤ IP │ Amphora │ IP ├─►╫◄─┤Service │ └──────┘ ║ └┬───┤ (1) │back│ ║ │ (1) │ ║ │VIP├─┬──────┬┴────┘ ║ └────────┘ ║ └───┘ │ MGMT │ ║ ┌────────┐ ╓◄───────────────────║─────────┤ IP │ ║ │ Tenant │ ║ ┌─────────┬────┐ ║ └──────┘ ╟◄─┤Service │ ║ │ Distri- │ IP├►╢ ║ │ (2) │ ║ │ butor ├───┬┘ ║ ┌────┬─────────┬────┐ ║ └────────┘ ║ └─┬──────┬┤VIP│ ╟◄─┤ IP │ Amphora │ IP ├─►╢ ┌────────┐ ║ │ MGMT │└─┬─┘ ║ └┬───┤ (2) │back│ ║ │ Tenant │ ╟◄────┤ IP │ └arp►╢ │VIP├─┬──────┬┴────┘ ╟◄─┤Service │ ║ └──────┘ ║ └───┘ │ MGMT │ ║ │ (3) │ ╟◄───────────────────║─────────┤ IP │ ║ └────────┘ ║ ┌───────────────┐ ║ └──────┘ ║ ║ │ Octavia LBaaS │ ║ ••• ║ • ╟◄─┤ Controller │ ║ ┌────┬─────────┬────┐ ║ • ║ └┬─────────────┬┘ ╙◄─┤ IP │ Amphora │ IP ├─►╢ ║ │ Amphora │ └┬───┤ (k) │back│ ║ ┌────────┐ ║ │ Cluster Mgr.│ │VIP├─┬──────┬┴────┘ ║ │ Tenant │ ║ └─────────────┘ └───┘ │ MGMT │ ╙◄─┤Service │ ╟◄─────────────────────────────┤ IP │ │ (m) │ ║ └──────┘ └────────┘ ║ Management Amphora Cluster Back-end Pool Network 1..k 1..m * An example of high-level data-flow: 1. Internet clients access a tenant service through an externally visible floating-IP (IPv4 or IPv6). 2. 
If IPv4, a gateway router maps the floating IP into a loadbalancer's internal VIP on the tenant's front-end network. 3. The (multi-tenant) Distributor receives incoming requests to the loadbalancer's VIP. It acts as a one-legged direct return LB, answering ``arp`` requests for the loadbalancer's VIP (see Distributor spec.). 4. The Distributor distributes incoming connections over the tenant's Amphora Cluster, by forwarding each new connection opened with a loadbalancer's VIP to a front-end MAC address of an Amphora in the Amphora Cluster (layer-2 forwarding). *Note*: the Distributor may implement other forwarding schemes to support more complex routing mechanisms, such as DVR (see Distributor spec.). 5. An Amphora receives the connection and accepts traffic addressed to the loadbalancer's VIP. The front-end IPs of the Amphorae are allocated on the tenant's front-end network. Each Amphora accepts VIP traffic, but does not answer ``arp`` request for the VIP address. 6. The Amphora load-balances the incoming connections to the back-end pool of tenant servers, by forwarding each external request to a member on the tenant network. The Amphora also performs SSL termination if configured. 7. Outgoing traffic traverses from the back-end pool members, through the Amphora and directly to the gateway (i.e., not through the Distributor). Multi-tenant Support ~~~~~~~~~~~~~~~~~~~~ * The following diagram illustrates the active-active topology with multiple tenants: :: Front-End Back-End Internet Networks Networks (world) (tenant) (tenant) ║ B A A ║ floating IP ║ ║ ║ ┌────────┐ ┌─╨────┐ to LB VIP A ║ ║ ┌────┬─────────┬────┐ ║ │Tenant A│ │Router├───────────────║─►╫◄─┤A IP│ Amphora │A IP├─►╫◄─┤Service │ │ GW ├──────────────►╢ ║ └┬───┤ (1) │back│ ║ │ (1) │ └──────┘ floating IP ║ ║ │VIP├─┬──────┬┴────┘ ║ └────────┘ to LB VIP B ║ ║ └───┘ │ MGMT │ ║ ┌────────┐ ╓◄───────────────────║──║─────────┤ IP │ ║ │Tenant A│ ║ ║ ║ └──────┘ ╟◄─┤Service │ M B A ┌────┬─────────┬────┐ ║ │ (2) │ ║ ║ ╟◄─┤A IP│ Amphora │A IP├─►╢ └────────┘ ║ ║ ║ └┬───┤ (2) │back│ ║ ┌────────┐ ║ ║ ║ │VIP├─┬──────┬┴────┘ ║ │Tenant A│ ║ ║ ║ └───┘ │ MGMT │ ╟◄─┤Service │ ╟◄───────────────────║──║─────────┤ IP │ ║ │ (3) │ ║ ║ ║ └──────┘ ║ └────────┘ ║ B A ••• B • ║ ┌─────────┬────┐ ║ ║ ┌────┬─────────┬────┐ ║ • ║ │ │IP A├─╢─►╫◄─┤A IP│ Amphora │A IP├─►╢ ┌────────┐ ║ │ ├───┬┘ ║ ║ └┬───┤ (k) │back│ ║ │Tenant A│ ║ │ Distri- │VIP├─arp►╜ │VIP├─┬──────┬┴────┘ ╙◄─┤Service │ ║ │ butor ├───┘ ║ └───┘ │ MGMT │ │ (m) │ ╟◄─ │ │ ─────║────────────┤ IP │ └────────┘ ║ │ ├────┐ ║ └──────┘ ║ │ │IP B├►╢ tenant A ║ │ ├───┬┘ ║ = = = = = = = = = = = = = = = = = = = = = ║ │ │VIP│ ║ ┌────┬─────────┬────┐ B tenant B ║ └─┬──────┬┴─┬─┘ ╟◄────┤B IP│ Amphora │B IP├─►╢ ┌────────┐ ║ │ MGMT │ └arp►╢ └┬───┤ (1) │back│ ║ │Tenant B│ ╟◄────┤ IP │ ║ │VIP├─┬──────┬┴────┘ ╟◄─┤Service │ ║ └──────┘ ║ └───┘ │ MGMT │ ║ │ (1) │ ╟◄───────────────────║────────────┤ IP │ ║ └────────┘ ║ ┌───────────────┐ ║ └──────┘ ║ M │ Octavia LBaaS │ B ••• B • ╟◄─┤ Controller │ ║ ┌────┬─────────┬────┐ ║ • ║ └┬─────────────┬┘ ╙◄────┤B IP│ Amphora │B IP├─►╢ ║ │ Amphora │ └┬───┤ (q) │back│ ║ ┌────────┐ ║ │ Cluster Mgr.│ │VIP├─┬──────┬┴────┘ ║ │Tenant B│ ║ └─────────────┘ └───┘ │ MGMT │ ╙◄─┤Service │ ╟◄────────────────────────────────┤ IP │ │ (r) │ ║ └──────┘ └────────┘ ║ Management Amphora Clusters Back-end Pool Network A(1..k), B(1..q) A(1..m),B(1..r) * Both tenants A and B share the Distributor, but each has a different front-end network. 
  The Distributor listens on both loadbalancers' VIPs and forwards to either
  A's or B's Amphorae.

* The Amphorae and the back-end (tenant) networks are not shared between
  tenants.

Problem Details
---------------

* Octavia should support different Distributor implementations, similar to
  its support for different Amphora types. The operator should be able to
  configure different types of algorithms for the Distributor. All
  algorithms should provide flow-affinity to allow TLS termination at the
  amphora. See :doc:`active-active-distributor` for details.

* The Octavia controller shall seamlessly configure any newly created
  Amphora ([`P2`_] including peer state synchronization, such as
  sticky-tables, if needed) and shall reconfigure the other solution
  components (e.g., Neutron) as needed. The controller shall further manage
  all Amphora life-cycle events.

* Since it is impractical at scale for peer state synchronization to occur
  between all Amphorae that are part of a single load balancer, Amphorae
  that are all part of a single load balancer configuration need to be
  divided into smaller peer groups (consisting of 2 or 3 Amphorae) with
  which they should synchronize state information.

Required changes
----------------

The active-active loadbalancers require the following high-level changes:

Amphora related changes
~~~~~~~~~~~~~~~~~~~~~~~

* Update the Amphora image to support the active-active topology. The
  front-end still has both a unique IP (to allow direct addressing on the
  front-end network) and a VIP; however, it should not answer ARP requests
  for the VIP address (all Amphorae in a single Amphora Cluster concurrently
  serve the same VIP). Amphorae should continue to have a management IP on
  the LB Network so Octavia can configure them. Amphorae should also
  generally support hot-plugging interfaces into back-end tenant networks as
  they do in the current implementation. [`P2`_] Finally, the Amphora
  configuration may need to be changed to randomize the member list, in
  order to prevent synchronized decisions by all Amphorae in the Amphora
  Cluster.

* Extend the data model to support active-active Amphorae. This is somewhat
  similar to active-passive (VRRP) support. Each Amphora needs to store its
  IP and port on its front-end network (similar to ha_ip and ha_port_id in
  the current model) and its role should indicate it is in a cluster. The
  provisioning status should be interpreted as referring to an Amphora only
  and not to the load-balancing service. The status of the load balancer
  should correspond to the number of ``ONLINE`` Amphorae in the Cluster. If
  all Amphorae are ``ONLINE``, the load balancer is also ``ONLINE``. If a
  small number of Amphorae are not ``ONLINE``, then the load balancer is
  ``DEGRADED``. If enough Amphorae are not ``ONLINE`` (past a threshold),
  then the load balancer is ``DOWN``.

* Rework some of the controller worker flows to support creation and
  deletion of Amphorae by the ACM in an asynchronous manner. The compute
  node may be created/deleted independently of the corresponding Amphora
  flow, triggered as events by the ACM logic (e.g., node update). The flows
  do not need much change (beyond those implied by the changes in the data
  model), since the post-creation/pre-deletion configuration of each Amphora
  is unchanged. This is also similar to the failure recovery flow, where a
  recovery flow is triggered asynchronously.

* Create a flow (or task) for the controller worker for (de-)registration of
  Amphorae with the Distributor.
  The Distributor has to be aware of the current ``ONLINE`` Amphorae, to
  which it can forward traffic. [`P2`_] The Distributor can do very basic
  monitoring of the Amphorae health (primarily to make sure network
  connectivity between the Distributor and Amphorae is working). Monitoring
  pool member health will remain the purview of the pool health monitors.

* All the Amphorae in the Amphora Cluster shall replicate the same
  listeners, pools, and TLS configuration, as they do now. We assume all
  Amphorae in the Amphora Cluster can perform exactly the same
  load-balancing decisions and can be treated as equivalent by the
  Distributor (except for affinity considerations).

* Extend the Amphora (REST) API and/or the *Plug VIP* task to allow
  disabling of ``arp`` on the VIP.

* In order to prevent losing session_persistence data in the event of an
  Amphora failure, the Amphorae will need to be configured to share
  session_persistence data (via stick tables) with a subset of other
  Amphorae that are part of the same load balancer configuration (i.e., a
  peer group).

Amphora Cluster Manager driver for the active-active topology (*new*)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Add an active-active topology to the topology types.

* Add a new driver to support creation/deletion of an Amphora Cluster via an
  ACM. This will re-use existing controller-worker flows as much as
  possible. The reference ACM will call the existing drivers to create
  compute nodes for the Amphorae and configure them.

* The ACM shall orchestrate creation and deletion of Amphora instances to
  meet the availability requirements. Amphora failover will utilize the
  existing health monitor flows, with hooks to notify the ACM when the
  ACTIVE-ACTIVE topology is used. [`P2`_] The ACM shall handle graceful
  amphora removal via draining (delay actual removal until existing
  connections are terminated or some timeout has passed).

* Change the flow of LB creation. The ACM driver shall create an Amphora
  Cluster instance for each new loadbalancer. It should maintain the desired
  number of Amphorae in the Cluster and meet the high-availability
  configuration given by the operator. *Note*: basic functionality is
  already supported by the Health Manager; it may be enough to support a
  fixed or dynamic cluster size. In any case, existing flows to manage the
  Amphora life cycle will be re-used in the reference ACM driver.

* The ACM shall be responsible for providing health, performance, and
  life-cycle management at the Cluster-level rather than at the
  Amphora-level. Maintaining the loadbalancer status (as described above) by
  some function of the collective status of all Amphorae in the Cluster is
  one example. Other examples include tracking configuration changes,
  providing Cluster statistics, monitoring and maintaining compute nodes for
  the Cluster, etc. The ACM abstraction would also support pluggable ACM
  implementations that may provide more advanced capabilities (e.g.,
  elasticity, AZ-aware availability, etc.). The reference ACM driver will
  re-use existing components and/or code which currently handle health,
  life-cycle, etc. management for other load balancer topologies.

* New data model for an Amphora Cluster which has a one-to-one mapping with
  the loadbalancer. This defines the common properties of the Amphora
  Cluster (e.g., id, min. size, desired size, etc.) and additional
  properties for the specific implementation.

* Add configuration file options to support configuration of an
  active-active Amphora Cluster. Add a default configuration.
  [`P2`_] Add an Operator API.

* Add or update documentation for new components added and new or changed
  functionality.

* Communication between the ACM and Distributors should be secured using
  two-way SSL certificate authentication, much the same way this is
  accomplished between other Octavia controller components and Amphorae
  today.

Network driver changes
~~~~~~~~~~~~~~~~~~~~~~

* Support the creation, connection, and configuration of the various
  networks and interfaces as described in the 'high-level topology' diagram.

* Adding a new loadbalancer requires attaching the Distributor to the
  loadbalancer's front-end network, adding a VIP port to the Distributor,
  and configuring the Distributor to answer ``arp`` requests for the VIP.
  The Distributor shall have a separate interface for each loadbalancer and
  shall not allow any routing between different ports; in particular,
  Amphorae of different tenants must not be able to communicate with each
  other. In the reference implementation, this will be accomplished by using
  separate OVS bridges per load balancer.

* Adding a new Amphora requires attaching it to the front-end and back-end
  networks (similar to the current implementation), adding the VIP (but with
  ``arp`` disabled), and registering the Amphora with the Distributor. The
  tenant's front-end and back-end networks must allow attachment of
  dynamically created Amphorae by involving the ACM (e.g., when the health
  monitor replaces a failed Amphora). ([`P2`_] Extend the LBaaS API to allow
  specifying an address range for new Amphorae usage, e.g., a subnet pool.)

Amphora health-monitoring support
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Modify the Health Manager to manage the health of an Amphora Cluster
  through the ACM; namely, forward Amphora health change events to the ACM,
  so it can decide when the Amphora Cluster is considered to be in a healthy
  state. This should be done in addition to managing the health of each
  Amphora. [`P2`_] Monitor the Amphorae also on their front-end network
  (i.e., from the Distributor).

Distributor support
~~~~~~~~~~~~~~~~~~~

* **Note:** as mentioned above, the detailed design of the Distributor
  component is described in a separate document. Some design considerations
  are highlighted below.

* The Distributor should be supported similarly to an Amphora; namely, it
  should have its own abstract driver.

* For a reference implementation, add support for a Distributor image.

* Define a REST API for Distributor configuration (no SSH API). The API
  shall support:

  - Add and remove a VIP (loadbalancer) and specify distribution parameters
    (e.g., affinity, algorithm, etc.).
  - Registration and de-registration of Amphorae.
  - Status
  - [`P2`_] Macro-level stats

* Spawn Distributors (if using on-demand Distributor compute nodes) and/or
  attach to existing ones as needed. Manage the health and life-cycle of the
  Distributor(s). Create, connect, and configure Distributor networks as
  necessary.

* Create a data model for the Distributor.

* Add a Distributor driver and flows to (re-)configure the Distributor on
  creation/destruction of a new loadbalancer (add/remove the loadbalancer
  VIP) and [`P2`_] configure the distribution algorithm for the
  loadbalancer's Amphora Cluster.

* Add flows to Octavia to (re-)configure the Distributor on adding/removing
  Amphorae from the Amphora Cluster.

Packaging
~~~~~~~~~

* Extend the Octavia installation scripts to create an image for the
  Distributor.

Alternatives
------------

* Use external services to manage the cluster directly.
  This utilizes functionality that already exists in OpenStack (e.g., Heat
  and Ceilometer) rather than replicating it. This approach would also
  benefit from future extensions to these services. On the other hand, this
  adds undesirable dependencies on other projects (and their corresponding
  teams), complicates handling of failures, and requires defensive coding
  around service calls. Furthermore, these services cannot handle the
  LB-specific control configuration.

* Implement a nested Octavia.

  Use another layer of Octavia to distribute traffic across the Amphora
  Cluster (i.e., the Amphorae in the Cluster are back-end members of another
  Octavia instance). This approach has the potential to provide greater
  flexibility (e.g., provide NAT and/or more complex distribution
  algorithms). It also potentially reuses existing code. However, we do not
  want the Distributor to proxy connections, so HAProxy cannot be used.
  Furthermore, this approach might significantly increase the overhead of
  the solution.

Data model impact
-----------------

* loadbalancer table

  - `cluster_id`: associated Amphora Cluster (no changes to the table, 1-1
    relationship from the Cluster data-model)

* lb_topology table

  - new value: ``ACTIVE_ACTIVE``

* amphora_role table

  - new value: ``IN_CLUSTER``

* Distributor table (*new*): Distributor information, similar to Amphora.
  See :doc:`active-active-distributor`

* Cluster table (*new*): an extension to the loadbalancer (i.e., one-to-one
  mapping to the load-balancer)

  - `id` (primary key)
  - `cluster_name`: identifier of the Cluster instance for the Amphora
    Cluster Manager
  - `desired_size`: required number of Amphorae in the Cluster. Octavia will
    create this many active-active Amphorae in the Amphora Cluster.
  - `min_size`: the number of ``ACTIVE`` Amphorae in the Cluster must be
    above this number for the Amphora Cluster status to be ``ACTIVE``
  - `cooldown`: cooldown period between successive add/remove Amphora
    operations (to avoid thrashing)
  - `load_balancer_id`: 1:1 relationship to the loadbalancer
  - `distributor_id`: N:1 relationship to the Distributor. Support multiple
    Distributors
  - `provisioning_status`
  - `operating_status`
  - `enabled`
  - `cluster_type`: type of the Amphora Cluster implementation

REST API impact
---------------

* Distributor REST API -- This is a new internal API that will be secured
  via two-way SSL certificate authentication. See
  :doc:`active-active-distributor`

* Amphora REST API -- support configuration of disabling ``arp`` on the VIP.

* [`P2`_] LBaaS API -- support configuration of the desired availability,
  perhaps by selecting a flavor (e.g., gold is a minimum of 4 Amphorae,
  platinum is a minimum of 10 Amphorae).

* Operator API --

  - Topology to use
  - Cluster type
  - Default availability parameters for the Amphora Cluster

Security impact
---------------

* See :doc:`active-active-distributor` for the Distributor related security
  impact.

Notifications impact
--------------------

None.

Other end user impact
---------------------

None.

Performance Impact
------------------

ACTIVE-ACTIVE should be able to deliver significantly higher performance
than the SINGLE or ACTIVE-STANDBY topology. It will consume more resources
to deliver this higher performance.

Other deployer impact
---------------------

The reference ACM becomes a new process that is part of the Octavia control
components (like the controller worker, health monitor, and housekeeper). If
the reference implementation is used, a new Distributor image will need to
be created and stored in glance much the same way the Amphora image is
created and stored today.
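Referring back to the *Data model impact* section above, the following is a
minimal sketch of how the proposed Cluster table could look as an SQLAlchemy
model in the style Octavia uses elsewhere. The class name, column types, and
foreign-key targets are illustrative assumptions; only the column list
itself comes from this spec.

.. code:: python

    import sqlalchemy as sa
    from sqlalchemy.ext import declarative

    Base = declarative.declarative_base()


    class Cluster(Base):
        """Illustrative model for the proposed Cluster table (names assumed)."""

        __tablename__ = 'cluster'

        id = sa.Column(sa.String(36), primary_key=True)
        # Identifier of the Cluster instance for the Amphora Cluster Manager
        cluster_name = sa.Column(sa.String(255), nullable=True)
        desired_size = sa.Column(sa.Integer, nullable=False)
        min_size = sa.Column(sa.Integer, nullable=False)
        # Cooldown period (seconds) between add/remove Amphora operations
        cooldown = sa.Column(sa.Integer, nullable=False)
        # One-to-one extension of the loadbalancer
        load_balancer_id = sa.Column(sa.String(36),
                                     sa.ForeignKey('load_balancer.id'),
                                     nullable=False, unique=True)
        # N:1 -- several Clusters may share one Distributor
        distributor_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('distributor.id'),
                                   nullable=True)
        provisioning_status = sa.Column(sa.String(16), nullable=False)
        operating_status = sa.Column(sa.String(16), nullable=False)
        enabled = sa.Column(sa.Boolean, nullable=False)
        cluster_type = sa.Column(sa.String(36), nullable=False)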
Developer impact
----------------

None.

Implementation
==============

Assignee(s)
-----------

@TODO

Work Items
----------

@TODO

Dependencies
============

@TODO

Testing
=======

* Unit tests with tox.
* Functional tests with tox.
* Scenario tests.

Documentation Impact
====================

Need to document all new APIs and API changes, the new ACTIVE-ACTIVE
topology design and features, and new instructions for operators seeking to
deploy Octavia with the ACTIVE-ACTIVE topology.

References
==========

https://blueprints.launchpad.net/octavia/+spec/base-image
https://blueprints.launchpad.net/octavia/+spec/controller-worker
https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface
https://blueprints.launchpad.net/octavia/+spec/controller
https://blueprints.launchpad.net/octavia/+spec/operator-api
:doc:`../../api/haproxy-amphora-api`
https://blueprints.launchpad.net/octavia/+spec/active-active-topology

octavia-6.2.2/specs/version0.9/stats_api.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License.

   http://creativecommons.org/licenses/by/3.0/legalcode

=============================================
Add statistics gathering API for loadbalancer
=============================================

https://blueprints.launchpad.net/octavia/+spec/stats-support

Problem description
===================

Currently, Octavia does not support the gathering of loadbalancer
statistics. This causes inconsistencies between the Octavia and
Neutron-LBaaS APIs. Another point is that the statistics data we get from
the Octavia API for a listener only reflects the first record for the
listener in the Octavia database; since we now support more topologies than
SINGLE, this needs to be fixed as well.

Proposed change
===============

Add a new statistic, 'request_errors', to indicate the number of request
errors for each listener; we can get this data from the HAProxy 'ereq'
statistic.

Add a new module 'stats' to octavia.common with a class 'StatsMixin' to do
the actual statistics calculation for both the listener and the
loadbalancer. Make the mixin class a new base class for
octavia.api.v1.controllers.listener_statistics.ListenerStatisticsController,
to make sure we get correct stats from the Octavia API.

Add a new module 'loadbalancer_statistics' to octavia.api.v1.controllers
with a class LoadbalancerStatisticsController to provide a new REST API for
gathering statistics at the loadbalancer level.

Use the event stream to serialize the statistics messages from Octavia to
neutron-lbaas via oslo_messaging, to keep consistent with the neutron-lbaas
API.

Alternatives
------------

Update the 'stats' method of the octavia driver in neutron-lbaas to allow
neutron-lbaas to get stats from Octavia through a REST API request, keeping
consistent with the neutron-lbaas API.
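To make the proposed loadbalancer-level calculation concrete, here is a
minimal sketch of the aggregation the new 'stats' module would perform,
summing per-listener counters (including the new request_errors) into
loadbalancer totals. The function name and dict-shaped rows are assumptions
for illustration, not the actual StatsMixin code.

.. code:: python

    def get_loadbalancer_stats(listener_stats):
        """Sum per-listener statistics into loadbalancer-level totals.

        listener_stats: iterable of dicts, one per listener, holding the
        counters stored in the listener_statistics table.
        """
        totals = {'bytes_in': 0, 'bytes_out': 0, 'active_connections': 0,
                  'total_connections': 0, 'request_errors': 0}
        for stats in listener_stats:
            for key in totals:
                totals[key] += stats.get(key, 0)
        return totals


    # Example with two listeners on one loadbalancer:
    print(get_loadbalancer_stats([
        {'bytes_in': 100, 'bytes_out': 200, 'active_connections': 1,
         'total_connections': 10, 'request_errors': 2},
        {'bytes_in': 50, 'bytes_out': 75, 'active_connections': 0,
         'total_connections': 4, 'request_errors': 0},
    ]))
    # -> bytes_in: 150, bytes_out: 275, active_connections: 1,
    #    total_connections: 14, request_errors: 2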
Data model impact
-----------------

One new column for the table listener_statistics will be introduced to
represent request errors:

+--------------------+-------------+------+-----+---------+-------+
| Field              | Type        | Null | Key | Default | Extra |
+--------------------+-------------+------+-----+---------+-------+
| request_errors     | bigint(20)  | NO   |     | NULL    |       |
+--------------------+-------------+------+-----+---------+-------+

REST API impact
---------------

Add 'request_errors' in the response of list listener statistics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

**Example List listener statistics: JSON response**

.. code::

    {
        "listener": {
            "bytes_in": 0,
            "bytes_out": 0,
            "active_connections": 0,
            "total_connections": 0,
            "request_errors": 0
        }
    }

Add a new API to list loadbalancer statistics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Lists loadbalancer statistics.

+----------------+------------------------------------------------+
| Request Type   | ``GET``                                        |
+----------------+------------------------------------------------+
| Endpoint       | ``URL/v1/loadbalancers/{lb_id}/stats``         |
+----------------+---------+--------------------------------------+
|                | Success | 200                                  |
| Response Codes +---------+--------------------------------------+
|                | Error   | 401, 404, 500                        |
+----------------+---------+--------------------------------------+

**Example List loadbalancer statistics: JSON response**

.. code::

    {
        "loadbalancer": {
            "bytes_in": 0,
            "bytes_out": 0,
            "active_connections": 0,
            "total_connections": 0,
            "request_errors": 0,
            "listeners": [{
                "id": "uuid",
                "bytes_in": 0,
                "bytes_out": 0,
                "active_connections": 0,
                "total_connections": 0,
                "request_errors": 0
            }]
        }
    }

Security impact
---------------

None

Notifications impact
--------------------

None

Other end user impact
---------------------

None

Performance Impact
------------------

None

Other deployer impact
---------------------

None

Developer impact
----------------

None

Implementation
==============

Assignee(s)
-----------

li, chen

Work Items
----------

* Extend the current stats collection for the listener amphora
* Add the module 'stats'
* Add a new API for gathering statistics at the loadbalancer level
* Update stats to the neutron database

Dependencies
============

None

Testing
=======

Functional tests with tox.

Documentation Impact
====================

Changes shall be introduced to the Octavia APIs: see [1]

References
==========

[1] https://docs.openstack.org/api-ref/load-balancer/v1/octaviaapi.html

octavia-6.2.2/specs/version1.0/flavors.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License.

   http://creativecommons.org/licenses/by/3.0/legalcode

=================================
Provider Flavor Framework
=================================

https://blueprints.launchpad.net/octavia/+spec/octavia-lbaas-flavors

A Provider Flavor framework provides a mechanism for providers to specify
capabilities that are not currently handled via the Octavia API. It allows
operators to enable capabilities that may be unique to a particular provider
or simply not available at the moment within Octavia.
If it is a common feature, it is highly encouraged to have it implemented
via the standard Octavia API. In addition, operators can configure different
flavors from a maintained list of provider capabilities. This framework
enables providers to supply new features with speed to market and provides
operators with an easy-to-use experience.

Problem description
===================

Flavors are used in various services for specifying service capabilities and
other parameters. Having the ability to create loadbalancers with various
capabilities (such as HA, throughput, or DDoS protection) gives users a way
to better plan their LB services and to benefit from LBaaS functions which
are not a part of the Octavia API.

Since Octavia will become the new OpenStack LBaaS API, a new flavors API
should be developed inside Octavia.

As of now, Octavia does not support multiple providers. The ability to
define different LBaaS providers is a mandatory feature for Octavia to be
the OpenStack LBaaS API. Therefore, this spec depends on adding
multi-provider support to Octavia. Service providers will be configured via
the Octavia configuration file.

It is important to mention that adding the flavors capability to Octavia
does not actually depend on completing the LBaaS API spinout from Neutron to
Octavia. This capability can be added to Octavia but not actually used until
the API spinout is complete and Octavia becomes the official OpenStack LBaaS
API.

This spec is based on two existing specs from neutron:

`Service Flavor Framework `__

`Flavor framework - Templates and meta-data `__

However, this is a spec for the first and basic flavors support. The
following capabilities are not part of this spec:

* Providing parameterized metainfo templates for provider profiles.
* Providing meta data for a specific LBaaS object as part of its creation.

Proposed change
===============

The Provider Flavor framework enables the ability to create distinct
provider flavor profiles of supported parameters. Operators will have the
ability to query the provider driver interface for a list of supported
parameters, view said list by provider, and create flavors by selecting one
or many parameters from the list.

The parameters that will be used to enable specific functionality will be of
JSON type, in transit and at rest. This JSON payload is assigned to a
provider and a flavor name. Users then have the option of selecting from any
of the existing flavors and submitting the selected flavor upon the creation
of the load balancer. Example flavor names include, but are not limited to,
dev, stage, prod or bronze, silver, gold. A provider can have many flavor
names, and a flavor name can be used by only one provider. Each
provider/flavor pair is assigned a group of meta-parameters and forms a
flavor profile. The flavor name or id is submitted when creating a load
balancer.

The proposal is to add LBaaS service flavoring to Octavia. This will include
the following aspects:

* Adding a new flavors API to the Octavia API
* Adding flavors models to Octavia
* Adding flavors db tables to the Octavia database
* Adding a DB migration for the new DB objects
* Ensuring backwards compatibility for loadbalancer objects which were
  created before flavors support. This covers both cases: when a
  loadbalancer was created before multi-provider support and when a
  loadbalancer was created with a certain provider.
* Adding default entries to the DB tables representing the default Octavia
  flavor and the default Octavia provider profile.
* Adding a "default" flavor to the devstack plugin.

A sample use case of the operator flavor workflow would be the following:

* The operator queries the provider capabilities
* The operator creates a flavor profile
* The flavor profile is validated with the provider driver
* The flavor profile is stored in the octavia db
* The end user creates a load balancer with the flavor
* The profile is validated against the driver once again, upon every
  lb-create

Alternatives
------------

An alternative is patchset-5 within this very same spec. While the concept
is the same, the design is different. The differences to note with
patchset-5 are primarily in the data schemas. With patchset-5, the metadata
that is passed to the load balancer has a one-to-one relationship with the
provider. Also, key/value pairs are stored in JSON as opposed to normalized
tables, and a list of provider-supported capabilities is not maintained.
That said, this alternative design is an option.

Data model impact
-----------------

DB table 'flavor_profile' introduced to represent the profile that is
created when combining a provider with a flavor.

+--------------------+--------------+------+---------+----------+
| Field              | Type         | Null | Key     | Default  |
+--------------------+--------------+------+---------+----------+
| id                 | varchar(36)  | NO   | PK      | generated|
+--------------------+--------------+------+---------+----------+
| provider_name      | varchar(255) | NO   |         |          |
+--------------------+--------------+------+---------+----------+
| metadata           | varchar(4096)| NO   |         |          |
+--------------------+--------------+------+---------+----------+

.. note:: The provider_name is the name the driver is advertised as via
   setuptools entry points. This will be validated when the operator uploads
   the flavor profile and the metadata is validated.

DB table 'flavor' introduced to represent flavors.

+--------------------+--------------+------+-----+----------+
| Field              | Type         | Null | Key | Default  |
+--------------------+--------------+------+-----+----------+
| id                 | varchar(36)  | NO   | PK  | generated|
+--------------------+--------------+------+-----+----------+
| name               | varchar(255) | NO   | UK  |          |
+--------------------+--------------+------+-----+----------+
| description        | varchar(255) | YES  |     | NULL     |
+--------------------+--------------+------+-----+----------+
| enabled            | tinyint(1)   | NO   |     | True     |
+--------------------+--------------+------+-----+----------+
| flavor_profile_id  | varchar(36)  | NO   | FK  |          |
+--------------------+--------------+------+-----+----------+

DB table attribute 'load_balancer.flavor_id' introduced to link a flavor to
a load_balancer.
+--------------------+--------------+------+-----+----------+ | Field | Type | Null | Key | Default | +--------------------+--------------+------+-----+----------+ | flavor_id | varchar(36) | YES | FK1 | NULL | +--------------------+--------------+------+-----+----------+ REST API impact --------------- FLAVOR(/flavors) +-----------------+-------+---------+---------+------------+-----------------+ |Attribute |Type |Access |Default |Validation/ |Description | |Name | | |Value |Conversion | | +=================+=======+=========+=========+============+=================+ |id |string |RO, admin|generated|N/A |identity | | |(UUID) | | | | | +-----------------+-------+---------+---------+------------+-----------------+ |name |string |RO, admin|'' |string |human-readable | | | | | | |name | +-----------------+-------+---------+---------+------------+-----------------+ |description |string |RO, admin|'' |string |human-readable | | | | | | |description | +-----------------+-------+---------+---------+------------+-----------------+ |enabled |bool |RO, admin|true |bool |toggle | | | | | | | | +-----------------+-------+---------+---------+------------+-----------------+ |flavor_profile_id|string |RO, admin| |string |human-readable | | | | | | |flavor_profile_id| +-----------------+-------+---------+---------+------------+-----------------+ FLAVOR PROFILE(/flavorprofiles) +-----------------+--------+---------+---------+------------+---------------+ |Attribute |Type |Access |Default |Validation/ |Description | |Name | | |Value |Conversion | | +=================+========+=========+=========+============+===============+ |id |string |admin |generated|N/A |identity | | |(UUID) | | | | | +-----------------+--------+---------+---------+------------+---------------+ |name |string |admin |'' |string |human-readable | | | | | | |name | +-----------------+--------+---------+---------+------------+---------------+ |provider-id |string |admin |'' |string |human-readable | | | | | | |provider-id | +-----------------+--------+---------+---------+------------+---------------+ |metadata |string |admin |{} |json |flavor meta | | | | | | |parameters | +-----------------+--------+---------+---------+------------+---------------+ Security impact --------------- The policy.json will be updated to allow all users to query the flavor listing and request details about a specific flavor entry, with the exception of flavor metadata. All other REST points for create/update/delete operations will be admin only. Additionally, the CRUD operations for Provider Profiles will be restricted to administrators. Notifications impact -------------------- N/A Other end user impact --------------------- An existing LB cannot be updated with a different flavor profile. A flavor profile can only be applied upon the creation of the LB. The flavor profile will be immutable. Performance Impact ------------------ There will be a minimal overhead incurred when the logical representation is scheduled onto the actual backend. Once the backend is selected, direct communications will occur via driver calls. IPv6 impact ----------- None Other deployer impact --------------------- The deployer will need to craft flavor configurations that they wish to expose to their users. During migration the existing provider configurations will be converted into basic flavor types. Once migrated, the deployer will have the opportunity to modify the flavor definitions. 
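To illustrate the validation workflow from the *Proposed change* section
(query the driver's supported parameters, then validate a flavor profile's
JSON metadata against them), here is a minimal sketch. The driver class and
method names are assumptions for illustration, not the final provider driver
interface.

.. code:: python

    import json


    class SampleProviderDriver(object):
        """Hypothetical provider driver advertising flavor capabilities."""

        def get_supported_flavor_metadata(self):
            # Parameter name -> human-readable description
            return {'topology': 'LB topology (SINGLE or ACTIVE_STANDBY)',
                    'compute_flavor': 'Compute flavor ID used for amphorae'}

        def validate_flavor(self, metadata):
            supported = set(self.get_supported_flavor_metadata())
            unsupported = set(metadata) - supported
            if unsupported:
                raise ValueError('Unsupported flavor metadata: %s' %
                                 ', '.join(sorted(unsupported)))


    driver = SampleProviderDriver()
    # Validation passes for supported parameters.
    driver.validate_flavor(json.loads('{"topology": "ACTIVE_STANDBY"}'))
    try:
        driver.validate_flavor({'ddos_protection': True})
    except ValueError as exc:
        print(exc)  # Unsupported flavor metadata: ddos_protection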
Developer impact ---------------- The expected developer impact should be minimal as the framework only impacts the initial scheduling of the logical service onto a backend. The driver implementations should remain unchanged except for the addition of the metainfo call. Community impact ---------------- This proposal allows operators to offer services beyond those directly implemented, and to do so in a way that does not increase community maintenance or burden. Provider driver impact ---------------------- The provider driver should have the following abilities: * Provide an interface to describe the available supported metadata options * Provide an interface to validate the flavor metadata * Be able to accept the flavor metadata parameters * Exception handling for non-supported metadata Implementation ============== Assignee(s) ----------- * Evgeny Fedoruk (evgenyf) * Carlos Puga (cpuga) Work Items ---------- * Implement the new models * Implement the REST API Extension (including tests) * Implementation migration script for existing deployments. * Add client API support * Add policies to the Octavia RBAC system Dependencies ============ Depends on provider support and provider drivers that support the validation interface and accept the flavor profile metadata. Testing ======= Tempest Tests Tempest testing including new API and scenario tests to validate new entities. Functional Tests Functional tests will need to be created to cover the API and database changes. API Tests The new API extensions will be tested using functional tests. Documentation Impact ==================== User Documentation User documentation will need be included to describe to users how to use flavors when building their logical topology. Operator Documentation Operator documentation will need to be created to detail how to manage Flavors, Providers and their respective Profiles. Developer Documentation Provider driver implementation documentation will need to be updated to cover the new interfaces expected of provider drivers and the structure of the metadata provided to the driver. API Reference The API reference documentation will need to be updated for the new API extensions. References ========== [1] https://docs.openstack.org/api-ref/load-balancer/v2/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/version1.0/health_ip_port.rst0000664000175000017500000001307600000000000021755 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ========================================== LBaaS Alternative Monitoring IP/Port ========================================== https://blueprints.launchpad.net/octavia/+spec/lbaas-health-monitoring-port In the current state, the health monitor IP address/port pair is derived from a load balancer's pool member's address and protocol port. In some use cases it would be desirable to monitor a different IP address/port pair for the health of a load balanced pool's member than the already specified address and protocol port. Due to the current state this is not possible. Problem description =================== The use case where this would be desirable would be when the End User is making the health monitor application on the member available on a IP/port that is mutually exclusive to the IP/port of the application that is being load balanced on the member. 
The End User would find this advantageous when attempting to limit access to health diagnostic information by not allowing it to be served over the main ingress IP/port of their application. Beyond limiting access to any health APIs, it allows the End Users to design different methods of health monitoring, such as creating distinct daemons responsible for the health of their hosts applications. Proposed change =============== The creation of a pool member would now allow the specification of an IP address and port to monitor health. The process used to assess the health of pool members would now use this new IP address and port to diagnose the member. If a health monitor IP address or port is not specified the default behavior would be to use the IP address and port specified by the member. There would likely need to be some Horizon changes to support this feature, however by maintaining the old behavior as the default we will not create a strong dependency. Alternatives ------------ An alternative is to not allow this functionality, and force all End Users to ensure their health checks are available over the member's load balanced IP address and protocol port. As stated in the *Problem Description* this would force End Users to provide additional security around their health diagnostic information so that they do not expose it to unintended audiences. Pushing this requirement on the End User is a heavier burden and limits their configuration options of the applications they run on Openstack that are load balanced. Data model impact ----------------- The Member data model would gain two new member fields called monitor_port and monitor_address. These two member fields would store the port and IP address, respectively, that the monitor will query for the health of the load balancer's listener's pool member. It is important to have the default behavior fall back on the address and protocol port of the member as this will allow any migrations to not break existing deployments of Openstack. Any Member data models without this new feature would have the fields default to the value of null to signify that Octavia's LBaaS service should use the member's protocol port to assess health status. REST API impact --------------- There are two APIs that will need to be modified, only slightly, to facilitate this change. .. csv-table:: Octavia LBaaS APIs :header: "Method", "URI" :widths: 15, 30 "POST", "/v2.0/lbaas/pools/{pool_id}/members" "PUT", "/v2.0/lbaas/pools/{pool_id}/members/{member_id}" "GET", "/v2.0/lbaas/pools/{pool_id}/members/{member_id}" The POST and PUT calls will need two additional fields added to their JSON body data for the request and the JSON response data. The GET call will need two additional fields as well, however they would only be added to the JSON response data. The fields to be added to each is: .. csv-table:: Added Fields :header: "Attribute Name","Type", "Access", "Default Value","Validation Conversion","Description" monitor_port,int,"RW, all",null,int,health check port (optional) monitor_address,string,"RW, all",null,types.IPAddressType(),health check IP address (optional) Security impact --------------- None Notifications impact -------------------- None Other end user impact --------------------- None Performance Impact ------------------ None Other deployer impact --------------------- None Developer impact ---------------- Other plugins do not have to implement this feature as it is optional due to the default behavior. 
If they decide to implement this feature, they would just need to supply the protocol port in their POSTs and PUTs to the health monitor APIs. Implementation ============== Assignee(s) ----------- Primary assignee: a.amerine Other contributors: None Work Items ---------- - Alter the Member Data Model - Alter Pool Member APIs - Update API reference documentation to reflect changes - Write or Alter Unit, Functional, and Tempest Tests to verify new functionality Dependencies ============ None Testing ======= Integration tests can be written to verify functionality. Generally, it should only require an existing Openstack deployment that is running LBaaS to verify health checks. Documentation Impact ==================== The REST API impact will need to be addressed in documentation so developers moving forward know about the feature and can use it. References ========== - Octavia Roadmap Considerations: Health monitoring on alternate IPs and/or ports (https://wiki.openstack.org/wiki/Octavia/Roadmap) - RFE Port based HealthMonitor in neutron_lbaas (https://launchpad.net/bugs/1541579) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/version1.0/n-lbaas-api-parity.rst0000664000175000017500000000573200000000000022346 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ======================================== Align octavia API With Neutron LBaaS API ======================================== Problem description =================== For the octavia API to truly be standalone, it needs to have capability parity with Neutron LBaaS's API. Neutron LBaaS has the luxury of piggy-backing off of Neutron's API. This gives Neutron LBaaS's API resources many capabilities for free. This document is meant to enumerate those capabilities that the octavia API does not possess at the time of this writing. Proposed change =============== Complete the tasks enumerated in the `Work Items`_ section Alternatives ------------ * Do nothing and keep the status quo Data model impact ----------------- There will be some minor data model changes to octavia in support of this change. REST API impact --------------- This change will have significant impact to the octavia API. Security impact --------------- This change will improve octavia security by adding keystone authentication. Notifications impact -------------------- No expected change. Other end user impact --------------------- Users will be able to use the new octavia API endpoint for LBaaS. Performance Impact ------------------ This change may slightly improve performance by reducing the number of software layers requests will traverse before responding to the request. Other deployer impact --------------------- Over time the neutron-lbaas package will be deprecated and deployers will only require octavia for LBaaS. Developer impact ---------------- This will simplify LBaaS development by reducing the number of databases as well as repositories that require updating for LBaaS enhancements. 
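As a concrete illustration of several capabilities listed under the Work
Items below (filtering lists by query parameter, pagination, and field
selection), the following sketch shows what a client request against the new
API could look like. The endpoint, token handling, and parameter names are
illustrative assumptions.

.. code:: python

    import requests

    # Assumed endpoint and pre-acquired Keystone token, for illustration.
    OCTAVIA_ENDPOINT = 'http://octavia.example.com:9876'
    HEADERS = {'X-Auth-Token': 'gAAAAAB...'}

    # Ask for the id and name of up to 10 ACTIVE load balancers.
    resp = requests.get(
        OCTAVIA_ENDPOINT + '/v2.0/lbaas/loadbalancers',
        headers=HEADERS,
        params={'provisioning_status': 'ACTIVE',   # filtering
                'limit': 10,                       # pagination
                'fields': ['id', 'name']})         # field selection
    for lb in resp.json()['loadbalancers']:
        print(lb['id'], lb['name'])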
Implementation
==============

Assignee(s)
-----------

blogan
diltram
johnsom
rm_you
dougwig

Work Items
----------

Implement the following API Capabilities:

* Keystone Authentication
* Policy Engine
* Pagination
* Quotas
* Filtering lists by query parameter
* Fields by query parameter
* Add the same root API endpoints as n-lbaas
* Support a "provider" option in the API to select a driver to spin up a
  load balancer.
* API Handler layer to become the same as the n-lbaas driver layer and allow
  multiple handlers/drivers.
* Neutron LBaaS V2 driver to Octavia API Handler shim layer

Implement the following additional features that n-lbaas maintains:

* OSC extension via a new repository 'python-octaviaclient'

Other Features to be Considered:

* Notifications for resource creating, updating, and deleting.
* Flavors
* Agent namespace driver or some lightweight functional driver.
* Testing Octavia with all of the above
* REST API Microversioning

Dependencies
============

None

Testing
=======

API tests from neutron-lbaas will be used to validate the new Octavia API.

Documentation Impact
====================

The Octavia API reference will need to be updated.

References
==========

octavia-6.2.2/specs/version1.0/vip-qos-policy-application.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported
   License.

   http://creativecommons.org/licenses/by/3.0/legalcode

==========================
Vip QoS Policy Application
==========================

Problem description
===================

In real deployments, the bandwidth of a VIP should be limited, because the
upstream network resource is provided by an ISP or another organization;
that means it is not free. The OpenStack provider or its users pay for a
limited amount of bandwidth. For example, users may buy 50M of bandwidth
from an ISP for an OpenStack environment to access the Internet; this
bandwidth will also be used by connections from outside of OpenStack to
reach the servers inside OpenStack, and those servers sit behind a load
balancer VIP. We cannot offer the whole bandwidth to those servers, as there
may also be VMs that want to access the external network, so we should apply
a bandwidth limitation to the VIP port. Also, if the upstream network
resource is mostly used up, we still want the backend servers behind the
loadbalancer to be accessible and stable; a minimum bandwidth limitation is
needed for this scenario.

For more QoS functions: in reality, we cannot limit our users or deployers
to the default load balancing drivers, such as the haproxy driver and the
Octavia driver. They may be more concerned about the fields/functions
related to QoS, like DSCP markings, and could integrate third-party drivers
which are concerned about these fields.

Proposed change
===============

This spec introduces the Neutron QoS function to meet the requirements.
Currently, there are 3 ports (at least) in the loadbalancer created by
Octavia. One is from the lb-mgmt-net; the others are from the vip-subnet,
called "loadbalancer-LOADBALANCER_ID" and "octavia-lb-vrrp-LOADBALANCER_ID".
The first one is the VIP port; the second one is for VRRP HA, and it sets
"allowed_address_pairs" toward the VIP fixed_ip. The QoS policy should focus
on the attached port "octavia-lb-vrrp-LOADBALANCER_ID". We could apply the
Neutron QoS policy to the "octavia-lb-vrrp-LOADBALANCER_ID" ports, whether
the topology is active-active or standalone.
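To make the mechanism concrete, the following sketch creates a Neutron QoS
policy with a bandwidth limit rule and attaches it to a port using
openstacksdk. The cloud name, rule values, and port name are illustrative
placeholders, and in the proposed flow it is Octavia, not the end user, that
attaches the policy to the VRRP port.

.. code:: python

    import openstack

    conn = openstack.connect(cloud='devstack-admin')  # assumed cloud entry

    # Create a QoS policy in Neutron with a 50 Mbps bandwidth limit rule.
    policy = conn.network.create_qos_policy(name='vip-qos-50M')
    conn.network.create_qos_bandwidth_limit_rule(
        policy, max_kbps=50000, max_burst_kbps=5000)

    # Attach the policy to the amphora VRRP port ("LOADBALANCER_ID" is a
    # placeholder); Octavia would perform this step in its own flows.
    port = conn.network.find_port('octavia-lb-vrrp-LOADBALANCER_ID')
    conn.network.update_port(port, qos_policy_id=policy.id)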
There are the following changes:

* Extend the vip table with a new column named "qos_policy_id".
* Extend the Octavia API to pass the vip-qos-policy-id, which was created in
  Neutron, into LoadBalancer creation/update.
* Apply the QoS policy to the vip port in the Loadbalancer working flow.

Alternatives
------------

We accept the QoS parameters and implement the QoS function on our own.

Data model impact
-----------------

In this spec, the QoS function will be provided by Neutron, so Octavia
should know the relationship between QoS policies and the vip port of
Loadbalancers. There will be some minor data model changes to Octavia in
support of this change.

* vip table

  - `qos_policy_id`: associate the QoS policy id with the vip port.

REST API impact
---------------

Proposed attribute::

    EXTEND_FIELDS = {
        'vip_qos_policy_id': {'allow_post': True, 'allow_put': True,
                              'validate': {'type:uuid': None},
                              'is_visible': True,
                              'default': None}
    }

The definition in Octavia is like::

    vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())

Some samples of Loadbalancer creation/update follow. Users are allowed to
pass "vip_qos_policy_id".

Create/Update Loadbalancer Request::

    POST/PUT /v2.0/lbaas/loadbalancers

    {
        "loadbalancer": {
            "name": "loadbalancer1",
            "description": "simple lb",
            "project_id": "b7c1a69e88bf4b21a8148f787aef2081",
            "tenant_id": "b7c1a69e88bf4b21a8148f787aef2081",
            "vip_subnet_id": "013d3059-87a4-45a5-91e9-d721068ae0b2",
            "vip_address": "10.0.0.4",
            "admin_state_up": true,
            "flavor": "a7ae5d5a-d855-4f9a-b187-af66b53f4d04",
            "vip_qos_policy_id": "b61f8b45-e888-4056-94f0-e3d5af96211f"
        }
    }

    Response:

    {
        "loadbalancer": {
            "admin_state_up": true,
            "description": "simple lb",
            "id": "a36c20d0-18e9-42ce-88fd-82a35977ee8c",
            "listeners": [],
            "name": "loadbalancer1",
            "operating_status": "ONLINE",
            "provisioning_status": "ACTIVE",
            "project_id": "b7c1a69e88bf4b21a8148f787aef2081",
            "tenant_id": "b7c1a69e88bf4b21a8148f787aef2081",
            "vip_address": "10.0.0.4",
            "vip_subnet_id": "013d3059-87a4-45a5-91e9-d721068ae0b2",
            "flavor": "a7ae5d5a-d855-4f9a-b187-af66b53f4d04",
            "provider": "sample_provider",
            "pools": [],
            "vip_qos_policy_id": "b61f8b45-e888-4056-94f0-e3d5af96211f"
        }
    }

Security impact
---------------

None

Notifications impact
--------------------

No expected change.

Other end user impact
---------------------

Users will be able to specify a qos_policy when creating/updating
Loadbalancers.

Performance Impact
------------------

* Loadbalancer creation will take a very short additional time, as we need
  to validate the input QoS policy.
* The QoS policy on the Neutron side will affect the network performance
  based on the different types of QoS rules.

Other deployer impact
---------------------

None

Developer impact
----------------

TBD.

Implementation
==============

Assignee(s)
-----------

zhaobo
reedip

Work Items
----------

* Add the DB model and extend the table column.
* Extend the Octavia V2 API to accept a QoS policy.
* Add QoS application logic into the Loadbalancer workflow.
* Add API validation code to validate access/existence of the qos_policy
  created in Neutron.
* Add UTs to Octavia.
* Add API tests.
* Update the CLI to accept QoS fields.
* Documentation work.

Dependencies
============

None

Testing
=======

Unit tests, Functional tests, API tests and Scenario tests are necessary.

Documentation Impact
====================

The Octavia API reference will need to be updated.
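As a small sketch of the API validation work item above, checking that a
user-supplied vip_qos_policy_id actually exists in Neutron before the flow
proceeds might look like the following; the client wiring and function name
are assumptions for illustration.

.. code:: python

    import openstack


    def validate_qos_policy_id(conn, vip_qos_policy_id):
        """Raise if the given Neutron QoS policy does not exist."""
        policy = conn.network.find_qos_policy(vip_qos_policy_id)
        if policy is None:
            raise ValueError('QoS policy %s was not found in Neutron' %
                             vip_qos_policy_id)
        return policy


    conn = openstack.connect(cloud='devstack')  # assumed cloud entry
    validate_qos_policy_id(conn, 'b61f8b45-e888-4056-94f0-e3d5af96211f')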
References ========== ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1637052692.4582167 octavia-6.2.2/specs/version1.1/0000775000175000017500000000000000000000000016214 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/specs/version1.1/active-active-l3-distributor.rst0000664000175000017500000007211400000000000024363 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode =================================================== Distributor for L3 Active-Active, N+1 Amphora Setup =================================================== .. attention:: Please review the active-active topology blueprint first ( :doc:`../version0.9/active-active-topology` ) https://blueprints.launchpad.net/octavia/+spec/l3-active-active Problem description =================== This blueprint describes a *L3 active-active* distributor implementation to support the Octavia *active-active-topology*. The *L3 active-active* distributor will leverage the capabilities of a layer 3 Clos network fabric in order to distribute traffic to an *Amphora Cluster* of 1 or more amphoras. Specifically, the *L3 active-active* distributor design will leverage Equal Cost Multipath Load Sharing (ECMP) with anycast routing to achieve traffic distribution across the *Amphora Cluster*. In this reference implementation, the BGP routing protocol will be used to inject anycast routes into the L3 fabric. In order to scale a single VIP address across multiple active amphoras it is required to have a *distributor* to balance the traffic. By leveraging the existing capabilities of a modern L3 network, we can use the network itself as the *distributor*. This approach has several advantages, which include: * Traffic will be routed via the best path to the destination amphora. There is no need to add an additional hop (*distributor*) between the network and the amphora. * The *distributor* is not in the data path and simply becomes a function of the L3 network. * The performance and scale of the *distributor* is the same as the L3 network. * Native support for both IPv4 and IPv6, without customized logic for each address family. .. _P2: **Note:** Items marked with [`P2`_] refer to lower priority features to be designed / implemented only after initial release. Proposed change =============== * Octavia shall implement the *L3 active-active* distributor through a pluggable driver. * The distributor control plane function (*bgp speaker*) will run inside the amphora and leverage the existing amphora lifecycle manager. * Each amphora will run a *bgp speaker* in the default namespace in order to announce the anycast VIP into the L3 fabric. BGP peering and announcements will occur over the lb-mgmt-net network. The anycast VIP will get advertised as a /32 or /128 route with a next-hop of the front-end IP assigned to the amphora instance. The front-end network IPs must be directly routable from the L3 fabric, such as in the provider networking model. * Octavia shall implement the ability to specify an anycast VIP/subnet and front-end subnet (provider network) when creating a new load balancer. The amphora will have ports on three networks (anycast, front-end, management). The anycast VIP will get configured on the loopback interface inside the *amphora-haproxy* network namespace. 
* The operator shall be able to define a *bgp peer profile*, which includes the required metadata for the amphora to establish a bgp peering session with the L3 fabric. The bgp peering information will be passed into the amphora-agent configuration file via config drive during boot. The amphora will use the bgp peering information to establish a BGP peer and announce its anycast VIP. * [`P2`_] Add the option to allow the *bgp speaker* to run on a dedicated amphora instance that is not running the software load balancer (HAProxy). In this model a dedicated *bgp speaker* could advertise anycast VIPs for one or more amphoras. Each BGP speaker (peer) can only announce a single next-hop route for an anycast VIP. In order to perform ECMP load sharing, multiple dedicated amphoras running bgp speakers will be required, each of them would then announce a different next-hop address for the anycast VIP. Each next-hop address is the front-end (provider network) IP of an amphora instance running the software load balancer. * [`P2`_] The *Amphora Cluster* will provide resilient flow handling in order to handle ECMP group flow remapping events and support amphora connection draining. * [`P2`_] Support Floating IPs (FIPs). In order to support FIPs the existing Neutron *floatingips* API would need to be extended. This will be described in more detail in a separate spec in the Neutron project. Architecture ------------ High-level Topology Description ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The below diagram shows the interaction between 2 .. n amphora instances from each tenant and how they interact with the L3 network distributor. :: Management Front-End Internet Network Networks (World) ║ (provider) ║ ║ ┌─────────────────────────────┐ ║ ║ ║ │ Amphora of Tenant A │ ║ ┌──╨──────────┐ ║ ┌────┬┴──────────┬──────────────────┴┬───╨┐ │ │ ╠══════╡MGMT│ns: default│ns: amphora-haproxy│f.e.│ │ │ ║ │ IP ├-----------┼-------------------┤ IP │ │ │ ║ └────┤ BGP │ Anycast VIP ├───╥┘ │ │ ║ │ Speaker │ (loopback) │ ║ │ │ ║ └───────────┴──────────────╥────┘ ║ │ │ ║ | ║ ║ │ │ ║ | ║ ║ │ │ Peering Session 1..* | ║ ║ │ │---------------------------+ ║ ║ │ │ {anycast VIP}/32 next-hop {f.e. IP} ║ ║ │ │ ║ ║ ║ │ │ ║ ┌─────────────────────────╨───┐ ║ │ │ ║ │ Amphora of Tenant B │ ║ │ │ ║ ┌────┬┴──────────┬──────────────────┴┬───╨┐ │ ╞════════╬══════╡MGMT│ns: default│ns: amphora-haproxy│f.e.│ │ │ ║ │ IP ├-----------┼-------------------┤ IP │ │ │ ║ └────┤ BGP │ Anycast VIP ├───╥┘ │ │ ║ │ Speaker │ (loopback) │ ║ │ │ ║ └───────────┴──────────────╥────┘ ║ │ Distributor │ ║ | ║ ║ │ (L3 Network)│ ║ | ║ ║ │ │ Peering Session 1..* | ║ ║ │ │---------------------------+ ║ ║ │ │ {anycast VIP}/32 next-hop {f.e. IP} ║ ║ │ │ ║ ║ ║ │ │ ║ ┌─────────────────────────╨───┐ ║ │ │ ║ │ Amphora of Tenant C │ ║ │ │ ║ ┌────┬┴──────────┬──────────────────┴┬───╨┐ │ │ ╚══════╡MGMT│ns: default│ns: amphora-haproxy│f.e.│ │ │ │ IP ├-----------┼-------------------┤ IP │ │ │ └────┤ BGP │ Anycast VIP ├────┘ │ │ │ Speaker │ (loopback) │ │ │ └───────────┴──────────────╥────┘ │ │ | ║ │ │ | ║ │ │ Peering Session 1..* | ║ │ │---------------------------+ ║ │ │ {anycast VIP}/32 next-hop {f.e. IP} ║ │ │ ║ │ ╞═══════════════════════════════════════════════Anycast └─────────────┘ 1..* Network * Whenever a new active-active amphora is instantiated it will create BGP peering session(s) over the lb-mgmt-net to the L3 fabric. The BGP peer will need to have a neighbor definition in order to allow the peering sessions from the amphoras. 
In order to ease configuration, a neighbor statement allowing peers from the
entire lb-mgmt-net IP prefix range can be defined:
``neighbor 10.10.10.0/24``

* The BGP peer IP can either be a route reflector (RR) or any other network
  device that will redistribute routes learned from the amphora BGP speaker.
  To help with scaling, it is possible to peer with the ToR switch of the rack
  in which the amphora instance is provisioned. The configuration can be
  simplified by creating an ``anycast loopback interface`` on each ToR switch,
  which provides a consistent BGP peer IP regardless of which rack or
  hypervisor is hosting the amphora instance.

* Once a peering session is established between an amphora and the L3 fabric,
  the amphora will need to announce its anycast VIP with a next-hop address of
  its front-end network IP. The front-end network IP (provider) must be
  routable and reachable from the L3 network in order to be used.

* In order to leverage ECMP for distributing traffic across multiple amphoras,
  multiple equal-cost routes must be installed into the network for the
  anycast VIP. This requires the L3 network to have ``Multipath BGP`` enabled,
  so BGP installs multiple paths and does not select a single best path.

* After the amphoras in a cluster are initialized there will be an ECMP group
  with multiple equal-cost routes for the anycast VIP. The data flow for
  traffic is highlighted below:

  1. Traffic will ingress into the L3 network fabric with a destination IP
     address of the anycast VIP.

  2. If this is a new flow, the flow will get hashed to one of the next-hop
     addresses in the ECMP group.

  3. The packet will get sent to the front-end IP address of the amphora
     instance that was selected in the step above.

  4. The amphora will accept the packet and send it to the back-end server
     over the front-end network or a directly attached back-end (tenant)
     network attached to the amphora.

  5. The amphora will receive the response from the back-end server and
     forward it on to the next-hop gateway of the front-end (provider)
     network, using the anycast VIP as the source IP address.

  6. All subsequent packets belonging to the same flow will get routed through
     the same path.

* Adding members to or removing members from an L3 active-active amphora
  cluster will result in flow remapping, as different paths will be selected
  due to rehashing. It is recommended to enable the ``resilient hashing``
  feature on ECMP groups in order to minimize flow remapping.

Distributor (BGP Speaker) Lifecycle
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The below diagram shows the interaction between an amphora instance that is
serving as a distributor and the L3 network. In this example we are peering
with the ToR switch in order to disseminate anycast VIP routes into the L3
network.
:: +------------------------------------------------+ | Initialize Distributor on Amphora | +------------------------------------------------+ | | | +---------------+ +---------------+ | | |1 | |4 | | | | Amphora | | Ready to | | | | (boot) | | announce | | | | | | VIP(s) | | | +-------+-------+ +-------+-------+ | | | ^ | | | | | | | | | | | | | | | | | | v | | | +-------+-------+ +-------+-------+ | | |2 | |3 Establish | | | | Read Config | | BGP connection| | | | Drive +----------->+ to ToR(s) | | | | (BGP Config) | | (BGP Speaker) | | | +---------------+ +---------------+ | | | +------------------------------------------------+ +------------------------------------------------+ | Register AMP to Distributor or Listener Start | +------------------------------------------------+ | | | +---------------+ +---------------+ | | |5 | |8 | | | | Amphora | | Amphora | | | | BGP Speaker | | (Receives VIP | | | |(Announce VIP) | | Traffic) | | | +-------+-------+ +-------+-------+ | | | ^ | | | | | | |BGP Peering | | | |Session(s) | | | | | | | v | | | +-------+-------+ +-------+-------+ | | |6 | |7 | | | | ToR(s) | | L3 Fabric | | | |(Injects Route +----------->+ Accepts Route | | | | into Fabric) | | (ECMP) | | | +---------------+ +---------------+ | | | +------------------------------------------------+ +------------------------------------------------+ | Unregister AMP to Distributor or Listener Stop | +------------------------------------------------+ | | | +---------------+ +---------------+ | | |9 | |12 | | | | Amphora | | Amphora | | | | BGP Speaker | |(No longer sent| | | |(Withdraw VIP) | | VIP traffic) | | | +-------+-------+ +-------+-------+ | | | ^ | | | | | | |BGP Peering | | | |Session(s) | | | | | | | v | | | +-------+-------+ +-------+-------+ | | |10 | |11 | | | | ToR(s) | | L3 Fabric | | | |(Removes Route +----------->+ Removes Route | | | | from Fabric) | | (ECMP) | | | +---------------+ +---------------+ | | | +------------------------------------------------+ 1. The amphora gets created and is booted. In this example, the amphora will perform both the load balancing (HAProxy) and L3 Distributor function (BGP Speaker). 2. The amphora will read in the BGP configuration information from the config drive and configure the BGP Speaker to peer with the ToR switch. 3. The BGP Speaker process will start and establish a BGP peering session with the ToR switch. 4. Once the BGP peering session is active, the amphora is ready to advertise its anycast VIP into the network with a next-hop of its front-end IP address. 5. The BGP speaker will communicate using the BGP protocol and send a BGP "announce" message to the ToR switch in order to announce a VIP route. If the amphora is serving as both a load balancer and distributor the announcement will happen on listener start. Otherwise the announce will happen on a register amphora request to the distributor. 6. The ToR switch will learn this new route and advertise it into the L3 fabric. At this point the L3 fabric will know of the new VIP route and how to reach it (via the ToR that just announced it). 7. The L3 fabric will create an ECMP group if it has received multiple route advertisements for the same anycast VIP. This will result in a single VIP address with multiple next-hop addresses. 8. Once the route is accepted by the L3 fabric, traffic will get distributed to the recently registered amphora (HAProxy). 9. 
The BGP speaker will communicate using the BGP protocol and send a BGP
"withdraw" message to the ToR switch in order to withdraw a VIP route. If the
amphora is serving as both a load balancer and distributor, the withdrawal
will happen on listener stop. Otherwise the withdrawal will happen on an
unregister amphora request to the distributor.

10. The ToR switch will tell the L3 fabric over BGP that the anycast VIP route
    for the amphora being unregistered is no longer valid.

11. The L3 fabric will remove the VIP address with the next-hop address to the
    amphora (HAProxy) being unregistered. It will keep all other existing VIP
    routes to other amphora (HAProxy) instances until they are explicitly
    unregistered.

12. Once the route is removed, the amphora (HAProxy) will no longer receive
    any traffic for the VIP.

Alternatives
------------

TBD

Data model impact
-----------------

Add the following columns to the existing ``vip`` table:

* distributor_id ``(String(36) , nullable=True)`` ID of the distributor
  responsible for distributing traffic for the corresponding VIP.

Add table ``distributor`` with the following columns:

* id ``(String(36) , nullable=False)`` ID of Distributor instance.

* distributor_type ``(String(36) , nullable=False)`` Type of distributor
  ``L3_BGP``.

* status ``(String(36) , nullable=True)`` Provisioning status.

Update the existing table ``amphora``. An amphora can now serve as a
distributor, a load balancer, or both. The vrrp_* columns will be renamed to
frontend_* in order to make the purpose of this interface more apparent and
to better represent other use cases besides active/standby.

* load_balancer_id ``(String(36) , nullable=True)`` This will be set to null
  if this amphora is a dedicated distributor and should not run HAProxy.

* service_type ``(String(36) , nullable=True)`` New field added to the amphora
  table in order to describe the type of amphora. This field is used to
  describe the function (service) the amphora provides. For example, if this
  is a dedicated distributor the service type would be set to "distributor".

* frontend_ip ``(String(64) , nullable=True)`` New name for the former vrrp_ip
  field. This is the primary IP address inside the amphora-haproxy namespace
  used for L3 communication to back-end members.

* frontend_subnet_id ``(String(36) , nullable=True)`` New field added to the
  amphora table, which is the neutron subnet ID of the front-end network
  connected to the amphora.

* frontend_port_id ``(String(36) , nullable=True)`` New name for the former
  vrrp_port_id field. This represents the neutron port ID of a port attached
  to the front-end network. It should no longer be assumed that the front-end
  subnet is the same as the VIP subnet.

* frontend_interface ``(String(16) , nullable=True)`` New name for the former
  vrrp_interface field.

* frontend_id ``(Integer , nullable=True)`` New name for the former vrrp_id
  field.

* frontend_priority ``(Integer , nullable=True)`` New name for the former
  vrrp_priority field.

Use the existing table ``amphora_health`` with the following columns:

* amphora_id ``(String(36) , nullable=False)`` ID of the amphora instance
  running the load balancer and/or implementing the distributor function.

* last_update ``(DateTime , nullable=False)`` Last time an amphora heartbeat
  was received by a health monitor.

* busy ``(Boolean , nullable=False)`` Field indicating that a create / delete
  or other action is being conducted on the amphora instance (i.e. to prevent
  a race condition when multiple health managers are in use).
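For illustration, the sketch below shows how the new ``distributor`` table and
the ``vip.distributor_id`` column described above might be expressed as
SQLAlchemy models. The column names come from this specification; the
declarative base, the foreign key, and the simplified ``vip`` primary key are
assumptions made for the sketch only, not the final Octavia models.

.. code-block:: python

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()  # stand-in for Octavia's model base class


    class Distributor(Base):
        """Sketch of the proposed ``distributor`` table."""
        __tablename__ = 'distributor'

        id = sa.Column(sa.String(36), primary_key=True)
        distributor_type = sa.Column(sa.String(36), nullable=False)
        status = sa.Column(sa.String(36), nullable=True)


    class Vip(Base):
        """Only the proposed new column of the existing table is shown."""
        __tablename__ = 'vip'

        # Primary key simplified for the sketch.
        load_balancer_id = sa.Column(sa.String(36), primary_key=True)
        # New column; the foreign key is an assumption for illustration.
        distributor_id = sa.Column(
            sa.String(36), sa.ForeignKey('distributor.id'), nullable=True)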
Add table ``amphora_registration`` with the below columns. This table
determines the role of the amphora. The amphora can be dedicated as a
distributor, a load balancer, or perform a combined role of load balancing and
distributor. A distributor amphora can be registered to multiple load
balancers.

* amphora_id ``(String(36) , nullable=False)`` ID of Amphora instance.

* load_balancer_id ``(String(36) , nullable=False)`` ID of load balancer.

* distributor_id ``(String(36) , nullable=True)`` ID of Distributor instance.

Add table ``distributor_l3_bgp_speaker`` with the following columns:

* id ``(String(36) , nullable=False)`` ID of the BGP Speaker.

* ip_version ``(Integer , nullable=False)`` Protocol version of the BGP
  speaker. IP version ``4`` or ``6``.

* local_as ``(Integer , nullable=False)`` Local AS number for the BGP speaker.

Add table ``distributor_l3_bgp_peer`` with the following columns:

* id ``(String(36) , nullable=False)`` ID of the BGP peer.

* peer_ip ``(String(64) , nullable=False)`` The IP address of the BGP
  neighbor.

* remote_as ``(Integer , nullable=False)`` Remote AS of the BGP peer.

* auth_type ``(String(16) , nullable=True)`` Authentication type, such as
  ``md5``. An additional parameter will need to be set in the octavia
  configuration file by the admin to set the md5 authentication password that
  will be used with the md5 auth type.

* ttl_hops ``(Integer , nullable=True)`` Number of hops between speaker and
  peer for ttl security ``1-254``.

* hold_time ``(Integer , nullable=True)`` Amount of time in seconds that can
  elapse between messages from the peer.

* keepalive_interval ``(Integer , nullable=True)`` How often to send keepalive
  packets, in seconds.

Add table ``distributor_l3_bgp_peer_registration`` with the following columns:

* distributor_l3_bgp_speaker_id ``(String(36) , nullable=False)`` ID of the
  BGP Speaker.

* distributor_l3_bgp_peer_id ``(String(36) , nullable=False)`` ID of the BGP
  peer.

Add table ``distributor_l3_amphora_bgp_speaker_registration`` with the
following columns:

* distributor_l3_bgp_speaker_id ``(String(36) , nullable=False)`` ID of the
  BGP Speaker.

* amphora_id ``(String(36) , nullable=False)`` ID of the amphora instance that
  the BGP speaker will run on.

Add table ``distributor_l3_amphora_vip_registration`` with the following
columns:

* amphora_id ``(String(36) , nullable=False)`` ID of the distributor amphora
  instance.

* load_balancer_id ``(String(36) , nullable=False)`` The ID of the load
  balancer. This will be used to get the VIP IP address.

* nexthop_ip ``(String(64) , nullable=False)`` The amphora front-end network
  IP used to handle VIP traffic. This is the next-hop address that will be
  advertised for the VIP. This does not have to be an IP address of an
  amphora, as it could be external, such as for UDP load balancing.

* distributor_l3_bgp_peer_id ``(String(36) , nullable=True)`` The BGP peer we
  will announce the anycast VIP to. If not specified, we will announce over
  all peers.

REST API impact
---------------

* Octavia API -- Allow the user to specify a separate VIP/subnet and front-end
  subnet (provider network) when creating a new load balancer. Currently the
  user can only specify the VIP subnet, which results in both the VIP and
  front-end network being on the same subnet.

* Extended Amphora API -- The L3 BGP distributor driver will call the extended
  amphora API in order to implement the control plane (BGP) and advertise new
  anycast VIP routes into the network. The below extended amphora API calls
  will be implemented for amphoras running as a dedicated distributor:
1. ``Register Amphora`` This call will result in the BGP speaker announcing
   the anycast VIP into the L3 network with a next-hop of the front-end IP of
   the amphora being registered. Prior to this call, the load balancing
   amphora will have to configure the anycast VIP on the loopback interface
   inside the amphora-haproxy namespace.

   - amphora_id ID of the amphora running the load balancer to register.

   - vip_ip The VIP IP address.

   - nexthop_ip The amphora's front-end network IP address used to handle
     anycast VIP traffic.

   - peer_id ID of the peer that will be used to announce the anycast VIP. If
     not specified, the VIP will be announced across all peers.

2. ``Unregister Amphora`` The BGP speaker will withdraw the anycast VIP route
   for the specified amphora from the L3 network. After the route is
   withdrawn, the anycast VIP IP will be removed from the loopback interface
   on the load balancing amphora.

   - amphora_id ID of the amphora running the load balancer to unregister.

   - vip_ip The VIP IP address.

   - nexthop_ip The amphora's front-end network IP address used to handle
     anycast VIP traffic.

   - peer_id ID of the peer that will be used to withdraw the anycast VIP. If
     not specified, the route will be withdrawn from all peers.

3. ``List Amphora`` Will return a list of all amphora IDs and their anycast
   VIP routes currently being advertised by the BGP speaker.

4. [`P2`_] ``Drain Amphora`` All new flows will get redirected to other
   members of the cluster and existing flows will be drained. Once the active
   flows have been drained, the BGP speaker will withdraw the anycast VIP
   route from the L3 network and unconfigure the VIP from the lo interface.

5. [`P2`_] ``Register VIP`` This call will be used for registering anycast
   routes for non-amphora endpoints, such as for UDP load balancing.

   - vip_ip The VIP IP address.

   - nexthop_ip The next-hop network IP address used to handle anycast VIP
     traffic.

   - peer_id ID of the peer that will be used to announce the anycast VIP. If
     not specified, the route will be announced from all peers.

6. [`P2`_] ``Unregister VIP`` This call will be used for unregistering anycast
   routes for non-amphora endpoints, such as for UDP load balancing.

   - vip_ip The VIP IP address.

   - nexthop_ip The next-hop network IP address used to handle anycast VIP
     traffic.

   - peer_id ID of the peer that will be used to withdraw the anycast VIP. If
     not specified, the route will be withdrawn from all peers.

7. [`P2`_] ``List VIP`` Will return a list of all non-amphora anycast VIP
   routes currently being advertised by the BGP speaker.

Security impact
---------------

The distributor inherently supports multi-tenancy, as it is simply providing
traffic distribution across multiple amphoras. Network isolation on a
per-tenant basis is handled by the amphoras themselves, as each services only
a single tenant. Further isolation can be provided by defining separate
anycast network(s) on a per-tenant basis. Firewall or ACL policies can then be
built around these prefixes.

To further enhance BGP security, route-maps, prefix-lists, and communities can
be used to control which routes a particular BGP peer is allowed to advertise
into the L3 network. MD5 passwords and GTSM can provide additional security by
limiting unauthorized BGP peers on the L3 network.
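To make the register and unregister operations above concrete, the following
is a minimal, illustrative sketch of the speaker side using the ``BGPSpeaker``
class from Ryu (or its OpenStack fork, os-ken). This spec does not mandate a
particular BGP speaker implementation, and all addresses and AS numbers below
are placeholder values, not requirements.

.. code-block:: python

    from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker

    # The router ID and AS number would come from the bgp peer profile
    # delivered via config drive; these values are placeholders.
    speaker = BGPSpeaker(as_number=64512, router_id='10.10.10.5')

    # Peer with the ToR switch (or route reflector) over the lb-mgmt-net.
    # MD5 authentication and GTSM, when used, would be configured on this
    # peering session; option support varies by speaker implementation.
    speaker.neighbor_add(address='10.10.10.1', remote_as=64512)

    # "Register Amphora": announce the anycast VIP as a /32 route with the
    # amphora's front-end IP as the next-hop.
    speaker.prefix_add(prefix='203.0.113.10/32', next_hop='192.0.2.5')

    # "Unregister Amphora" / listener stop: withdraw the route.
    speaker.prefix_del(prefix='203.0.113.10/32')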
Notifications impact
--------------------

Other end user impact
---------------------

Performance Impact
------------------

Other deployer impact
---------------------

Developer impact
----------------

Implementation
==============

Assignee(s)
-----------

Work Items
----------

Dependencies
============

Testing
=======

* Unit tests with tox.
* Functional tests with tox.

Documentation Impact
====================

The API-Ref documentation will need to be updated for load balancer create. An
additional optional parameter frontend_network_id will be added. If set, this
parameter will result in the primary interface inside the amphora-haproxy
namespace getting created on the specified network. The default behavior is to
provision this interface on the VIP subnet.

References
==========

* :doc:`Active-Active Topology <../version0.9/active-active-topology>`

==============================
Enable Provider Driver Support
==============================

.. contents:: Specification Table of Contents
   :depth: 4
   :backlinks: none

https://storyboard.openstack.org/#!/story/1655768

Provider drivers are implementations that give Octavia operators a choice of
which load balancing systems to use in their Octavia deployment. Currently,
the default Octavia driver is the only one available. Operators may want to
employ other load balancing implementations, including hardware appliances,
in addition to the default Octavia driver.

Problem description
===================

Neutron LBaaS v2 supports a *provider* parameter, giving LBaaS users a way to
direct LBaaS requests to a specific backend driver. The Octavia API includes a
*provider* parameter as well, but currently supports only one provider, the
Octavia driver. Support for other drivers needs to be added. With this in
place, operators can configure load balancers using multiple providers, either
the Octavia default or others.

Proposed change
===============

Available drivers will be enabled by entries in the Octavia configuration
file. Drivers will be loaded via stevedore, and Octavia will communicate with
drivers through a standard class interface defined below. Most driver
functions will be asynchronous to Octavia, and Octavia will provide a library
of functions that give drivers a way to update status and statistics.
Functions that are synchronous are noted below.

Octavia API functions not listed here will continue to be handled by the
Octavia API and will not call into the driver. Examples would be show, list,
and quota requests.

Driver Entry Points
-------------------

Provider drivers will be loaded via
`stevedore <https://docs.openstack.org/stevedore/latest/>`_. Drivers will have
an entry point defined in their setuptools configuration using the Octavia
driver namespace "octavia.api.drivers". This entry point name will be used to
enable the driver in the Octavia configuration file and as the "provider"
parameter users specify when creating a load balancer. An example for the
octavia reference driver would be:

.. code-block:: python

    octavia = octavia.api.drivers.octavia.driver:OctaviaDriver

Octavia Provider Driver API
---------------------------

Provider drivers will be expected to support the full interface described by
the Octavia API, currently v2.0. If a driver does not implement an API
function, the driver should fail the request by raising a
``NotImplementedError`` exception.
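As a brief, non-normative illustration of both points, the sketch below shows
a skeleton provider driver that implements load balancer create but leaves
failover unimplemented, and how Octavia could resolve an enabled driver by its
entry point name with stevedore. The module path, class name, and entry point
name ``example`` are hypothetical; the method names follow the abstract class
definitions later in this document.

.. code-block:: python

    from stevedore import driver as stevedore_driver


    class ExampleDriver(object):
        """Skeleton provider driver (sketch only)."""

        def loadbalancer_create(self, loadbalancer):
            # Hand the request off to the backend load balancing system here.
            pass

        def loadbalancer_failover(self, loadbalancer_id):
            # Unimplemented API functions fail the request as described above.
            raise NotImplementedError()


    # Assuming an installed entry point named "example" in the
    # "octavia.api.drivers" namespace, loading the driver might look like:
    lb_driver = stevedore_driver.DriverManager(
        namespace='octavia.api.drivers',
        name='example',
        invoke_on_load=True).driver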
If a driver implements a function but does not support a particular option
passed in by the caller, the driver should raise an
``UnsupportedOptionError``. It is recommended that drivers use the
`jsonschema <https://pypi.org/project/jsonschema/>`_ package or
`voluptuous <https://pypi.org/project/voluptuous/>`_ to validate the request
against the current driver capabilities. See the `Exception Model`_ below for
more details.

.. note:: Driver developers should refer to the official `Octavia API
          reference <https://docs.openstack.org/api-ref/load-balancer/v2/>`_
          document for details of the fields and expected outcome of these
          calls.

Load balancer
^^^^^^^^^^^^^

* **create**

  Creates a load balancer.

  Octavia will pass in the load balancer object with all requested settings.

  The load balancer will be in the ``PENDING_CREATE`` provisioning_status and
  ``OFFLINE`` operating_status when it is passed to the driver. The driver
  will be responsible for updating the provisioning status of the load
  balancer to either ``ACTIVE`` if successfully created, or ``ERROR`` if not
  created.

  The Octavia API will accept and do basic API validation of the create
  request from the user. The load balancer python object representing the
  request body will be passed to the driver create method as it was received
  and validated, with the following exceptions:

  1. The provider will be removed, as this is used for driver selection.

  2. The flavor will be expanded from the provided ID to be the full
     dictionary representing the flavor metadata.

  **Load balancer object**

  As of the writing of this specification the create load balancer object may
  contain the following:

  +-----------------+--------+-----------------------------------------------+
  | Name            | Type   | Description                                   |
  +=================+========+===============================================+
  | admin_state_up  | bool   | Admin state: True if up, False if down.       |
  +-----------------+--------+-----------------------------------------------+
  | description     | string | A human-readable description for the resource.|
  +-----------------+--------+-----------------------------------------------+
  | flavor          | dict   | The flavor keys and values.                   |
  +-----------------+--------+-----------------------------------------------+
  | listeners       | list   | A list of `Listener objects`_.                |
  +-----------------+--------+-----------------------------------------------+
  | loadbalancer_id | string | ID of load balancer to create.                |
  +-----------------+--------+-----------------------------------------------+
  | name            | string | Human-readable name of the resource.          |
  +-----------------+--------+-----------------------------------------------+
  | pools           | list   | A list of `Pool object`_.                     |
  +-----------------+--------+-----------------------------------------------+
  | project_id      | string | ID of the project owning this resource.       |
  +-----------------+--------+-----------------------------------------------+
  | vip_address     | string | The IP address of the Virtual IP (VIP).       |
  +-----------------+--------+-----------------------------------------------+
  | vip_network_id  | string | The ID of the network for the VIP.            |
  +-----------------+--------+-----------------------------------------------+
  | vip_port_id     | string | The ID of the VIP port.                       |
  +-----------------+--------+-----------------------------------------------+
  |vip_qos_policy_id| string | The ID of the qos policy for the VIP.         |
  +-----------------+--------+-----------------------------------------------+
  | vip_subnet_id   | string | The ID of the subnet for the VIP.             |
  +-----------------+--------+-----------------------------------------------+
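As suggested above, a driver can describe its capabilities as a schema and
validate each incoming object against it. The sketch below is illustrative
only: the schema contents are hypothetical, and the exception class is a
stand-in for the one defined in the `Exception Model`_ section.

.. code-block:: python

    import jsonschema


    class UnsupportedOptionError(Exception):
        """Stand-in for the driver exception described in this spec."""


    # Hypothetical capabilities: this driver only understands a subset of
    # the load balancer options listed above.
    SUPPORTED_LB_SCHEMA = {
        'type': 'object',
        'properties': {
            'admin_state_up': {'type': 'boolean'},
            'loadbalancer_id': {'type': 'string'},
            'name': {'type': 'string'},
            'vip_address': {'type': 'string'},
        },
        'additionalProperties': False,
    }


    def validate_lb_request(lb_dict):
        """Reject requests that use options this driver does not support."""
        try:
            jsonschema.validate(lb_dict, SUPPORTED_LB_SCHEMA)
        except jsonschema.exceptions.ValidationError as e:
            raise UnsupportedOptionError(str(e))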
  The driver is expected to validate that the driver supports the request and
  raise an exception if the request cannot be accepted.

  **VIP port creation**

  Some provider drivers will want to create the Neutron port for the VIP, and
  others will want Octavia to create the port instead. In order to support
  both use cases, the create_vip_port() method will ask provider drivers to
  create a VIP port. If the driver expects Octavia to create the port, the
  driver will raise a NotImplementedError exception. Octavia will call this
  function before calling loadbalancer_create() in order to determine if it
  should create the VIP port.

  Octavia will call create_vip_port() with a loadbalancer ID and a partially
  defined VIP dictionary. Provider drivers that support port creation will
  create the port and return a fully populated VIP dictionary.

  **VIP dictionary**

  +-----------------+--------+-----------------------------------------------+
  | Name            | Type   | Description                                   |
  +=================+========+===============================================+
  | project_id      | string | ID of the project owning this resource.       |
  +-----------------+--------+-----------------------------------------------+
  | vip_address     | string | The IP address of the Virtual IP (VIP).       |
  +-----------------+--------+-----------------------------------------------+
  | vip_network_id  | string | The ID of the network for the VIP.            |
  +-----------------+--------+-----------------------------------------------+
  | vip_port_id     | string | The ID of the VIP port.                       |
  +-----------------+--------+-----------------------------------------------+
  |vip_qos_policy_id| string | The ID of the qos policy for the VIP.         |
  +-----------------+--------+-----------------------------------------------+
  | vip_subnet_id   | string | The ID of the subnet for the VIP.             |
  +-----------------+--------+-----------------------------------------------+

  *Creating a Fully Populated Load Balancer*

  If the "listener" option is specified, the provider driver will iterate
  through the list and create all of the child objects in addition to
  creating the load balancer instance.

* **delete**

  Removes an existing load balancer. Octavia will pass in the load balancer
  object and cascade boolean as parameters.

  The load balancer will be in the ``PENDING_DELETE`` provisioning_status when
  it is passed to the driver. The driver will notify Octavia that the delete
  was successful by setting the provisioning_status to ``DELETED``. If the
  delete failed, the driver will update the provisioning_status to ``ERROR``.

  The API includes an option for cascade delete. When cascade is set to True,
  the provider driver will delete all child objects of the load balancer.

* **failover**

  Performs a failover of a load balancer.

  Octavia will pass in the load balancer ID as a parameter.

  The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when
  it is passed to the driver. The driver will update the provisioning_status
  of the load balancer to either ``ACTIVE`` if successfully failed over, or
  ``ERROR`` if not failed over.

  Failover can mean different things in the context of a provider driver. For
  example, the Octavia driver replaces the current amphora(s) with another
  amphora. For another provider driver, failover may mean failing over from an
  active system to a standby system.

* **update**

  Modifies an existing load balancer using the values supplied in the load
  balancer object.
Octavia will pass in the original load balancer object which is the baseline for the update, and a load balancer object with the fields to be updated. As of the writing of this specification the update load balancer object may contain the following: +-----------------+--------+-----------------------------------------------+ | Name | Type | Description | +=================+========+===============================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------+--------+-----------------------------------------------+ | description | string | A human-readable description for the resource.| +-----------------+--------+-----------------------------------------------+ | loadbalancer_id | string | ID of load balancer to update. | +-----------------+--------+-----------------------------------------------+ | name | string | Human-readable name of the resource. | +-----------------+--------+-----------------------------------------------+ |vip_qos_policy_id| string | The ID of the qos policy for the VIP. | +-----------------+--------+-----------------------------------------------+ The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the load balancer to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not successful. The driver is expected to validate that the driver supports the request. The method will then return or raise an exception if the request cannot be accepted. **Abstract class definition** .. code-block:: python class Driver(object): def create_vip_port(self, loadbalancer_id, vip_dictionary): """Creates a port for a load balancer VIP. If the driver supports creating VIP ports, the driver will create a VIP port and return the vip_dictionary populated with the vip_port_id. If the driver does not support port creation, the driver will raise a NotImplementedError. :param: loadbalancer_id (string): ID of loadbalancer. :param: vip_dictionary (dict): The VIP dictionary. :returns: VIP dictionary with vip_port_id. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: The driver does not support creating VIP ports. """ raise NotImplementedError() def loadbalancer_create(self, loadbalancer): """Creates a new load balancer. :param loadbalancer (object): The load balancer object. :return: Nothing if the create request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: The driver does not support create. :raises UnsupportedOptionError: The driver does not support one of the configuration options. """ raise NotImplementedError() def loadbalancer_delete(self, loadbalancer, cascade=False): """Deletes a load balancer. :param loadbalancer (object): The load balancer object. :param cascade (bool): If True, deletes all child objects (listeners, pools, etc.) in addition to the load balancer. :return: Nothing if the delete request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: if driver does not support request. """ raise NotImplementedError() def loadbalancer_failover(self, loadbalancer_id): """Performs a fail over of a load balancer. :param loadbalancer_id (string): ID of the load balancer to failover. :return: Nothing if the failover request was accepted. :raises DriverError: An unexpected error occurred in the driver. 
:raises: NotImplementedError if driver does not support request. """ raise NotImplementedError() def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): """Updates a load balancer. :param old_loadbalancer (object): The baseline load balancer object. :param new_loadbalancer (object): The updated load balancer object. :return: Nothing if the update request was accepted. :raises DriverError: An unexpected error occurred in the driver. :raises NotImplementedError: The driver does not support request. :raises UnsupportedOptionError: The driver does not support one of the configuration options. """ raise NotImplementedError() Listener ^^^^^^^^ * **create** Creates a listener for a load balancer. Octavia will pass in the listener object with all requested settings. The listener will be in the ``PENDING_CREATE`` provisioning_status and ``OFFLINE`` operating_status when it is passed to the driver. The driver will be responsible for updating the provisioning status of the listener to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. The Octavia API will accept and do basic API validation of the create request from the user. The listener python object representing the request body will be passed to the driver create method as it was received and validated with the following exceptions: 1. The project_id will be removed, if present, as this field is now deprecated. The listener will inherit the project_id from the parent load balancer. 2. The default_tls_container_ref will be expanded and provided to the driver in pkcs12 format. 3. The sni_container_refs will be expanded and provided to the driver in pkcs12 format. .. _Listener objects: **Listener object** As of the writing of this specification the create listener object may contain the following: +----------------------------+--------+-------------------------------------+ | Name | Type | Description | +============================+========+=====================================+ | admin_state_up | bool | Admin state: True if up, False if | | | | down. | +----------------------------+--------+-------------------------------------+ | connection_limit | int | The max number of connections | | | | permitted for this listener. Default| | | | is -1, which is infinite | | | | connections. | +----------------------------+--------+-------------------------------------+ | default_pool | object | A `Pool object`_. | +----------------------------+--------+-------------------------------------+ | default_pool_id | string | The ID of the pool used by the | | | | listener if no L7 policies match. | +----------------------------+--------+-------------------------------------+ | default_tls_container_data | dict | A `TLS container`_ dict. | +----------------------------+--------+-------------------------------------+ | default_tls_container_refs | string | The reference to the secrets | | | | container. | +----------------------------+--------+-------------------------------------+ | description | string | A human-readable description for the| | | | listener. | +----------------------------+--------+-------------------------------------+ | insert_headers | dict | A dictionary of optional headers to | | | | insert into the request before it is| | | | sent to the backend member. See | | | | `Supported HTTP Header Insertions`_.| | | | Keys and values are specified as | | | | strings. | +----------------------------+--------+-------------------------------------+ | l7policies | list | A list of `L7policy objects`_. 
| +----------------------------+--------+-------------------------------------+ | listener_id | string | ID of listener to create. | +----------------------------+--------+-------------------------------------+ | loadbalancer_id | string | ID of load balancer. | +----------------------------+--------+-------------------------------------+ | name | string | Human-readable name of the listener.| +----------------------------+--------+-------------------------------------+ | protocol | string | Protocol type: One of HTTP, HTTPS, | | | | TCP, or TERMINATED_HTTPS. | +----------------------------+--------+-------------------------------------+ | protocol_port | int | Protocol port number. | +----------------------------+--------+-------------------------------------+ | sni_container_data | list | A list of `TLS container`_ dict. | +----------------------------+--------+-------------------------------------+ | sni_container_refs | list | A list of references to the SNI | | | | secrets containers. | +----------------------------+--------+-------------------------------------+ | timeout_client_data | int | Frontend client inactivity timeout | | | | in milliseconds. | +----------------------------+--------+-------------------------------------+ | timeout_member_connect | int | Backend member connection timeout in| | | | milliseconds. | +----------------------------+--------+-------------------------------------+ | timeout_member_data | int | Backend member inactivity timeout in| | | | milliseconds. | +----------------------------+--------+-------------------------------------+ | timeout_tcp_inspect | int | Time, in milliseconds, to wait for | | | | additional TCP packets for content | | | | inspection. | +----------------------------+--------+-------------------------------------+ .. _TLS container: As of the writing of this specification the TLS container dictionary contains the following: +---------------+--------+------------------------------------------------+ | Key | Type | Description | +===============+========+================================================+ | certificate | string | The PEM encoded certificate. | +---------------+--------+------------------------------------------------+ | intermediates | List | A list of intermediate PEM certificates. | +---------------+--------+------------------------------------------------+ | primary_cn | string | The primary common name of the certificate. | +---------------+--------+------------------------------------------------+ | private_key | string | The PEM encoded private key. | +---------------+--------+------------------------------------------------+ .. _Supported HTTP Header Insertions: As of the writing of this specification the Supported HTTP Header Insertions are: +-------------------+------+------------------------------------------------+ | Key | Type | Description | +===================+======+================================================+ | X-Forwarded-For | bool | When True a X-Forwarded-For header is inserted | | | | into the request to the backend member that | | | | specifies the client IP address. | +-------------------+------+------------------------------------------------+ | X-Forwarded-Port | int | A X-Forwarded-Port header is inserted into the | | | | request to the backend member that specifies | | | | the integer provided. Typically this is used to| | | | indicate the port the client connected to on | | | | the load balancer. 
| +-------------------+------+------------------------------------------------+ *Creating a Fully Populated Listener* If the "default_pool" or "l7policies" option is specified, the provider driver will create all of the child objects in addition to creating the listener instance. * **delete** Deletes an existing listener. Octavia will pass the listener object as a parameter. The listener will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. * **update** Modifies an existing listener using the values supplied in the listener object. Octavia will pass in the original listener object which is the baseline for the update, and a listener object with the fields to be updated. As of the writing of this specification the update listener object may contain the following: +----------------------------+--------+-------------------------------------+ | Name | Type | Description | +============================+========+=====================================+ | admin_state_up | bool | Admin state: True if up, False if | | | | down. | +----------------------------+--------+-------------------------------------+ | connection_limit | int | The max number of connections | | | | permitted for this listener. Default| | | | is -1, which is infinite | | | | connections. | +----------------------------+--------+-------------------------------------+ | default_pool_id | string | The ID of the pool used by the | | | | listener if no L7 policies match. | +----------------------------+--------+-------------------------------------+ | default_tls_container_data | dict | A `TLS container`_ dict. | +----------------------------+--------+-------------------------------------+ | default_tls_container_refs | string | The reference to the secrets | | | | container. | +----------------------------+--------+-------------------------------------+ | description | string | A human-readable description for | | | | the listener. | +----------------------------+--------+-------------------------------------+ | insert_headers | dict | A dictionary of optional headers to | | | | insert into the request before it is| | | | sent to the backend member. See | | | | `Supported HTTP Header Insertions`_.| | | | Keys and values are specified as | | | | strings. | +----------------------------+--------+-------------------------------------+ | listener_id | string | ID of listener to update. | +----------------------------+--------+-------------------------------------+ | name | string | Human-readable name of the listener.| +----------------------------+--------+-------------------------------------+ | sni_container_data | list | A list of `TLS container`_ dict. | +----------------------------+--------+-------------------------------------+ | sni_container_refs | list | A list of references to the SNI | | | | secrets containers. | +----------------------------+--------+-------------------------------------+ | timeout_client_data | int | Frontend client inactivity timeout | | | | in milliseconds. | +----------------------------+--------+-------------------------------------+ | timeout_member_connect | int | Backend member connection timeout in| | | | milliseconds. 
|
+----------------------------+--------+-------------------------------------+
| timeout_member_data        | int    | Backend member inactivity timeout in|
|                            |        | milliseconds.                       |
+----------------------------+--------+-------------------------------------+
| timeout_tcp_inspect        | int    | Time, in milliseconds, to wait for  |
|                            |        | additional TCP packets for content  |
|                            |        | inspection.                         |
+----------------------------+--------+-------------------------------------+

The listener will be in the ``PENDING_UPDATE`` provisioning_status when it is
passed to the driver. The driver will update the provisioning_status of the
listener to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the
update was not successful.

The driver is expected to validate that the driver supports the request. The
method will then return or raise an exception if the request cannot be
accepted.

**Abstract class definition**

.. code-block:: python

    class Driver(object):
        def listener_create(self, listener):
            """Creates a new listener.

            :param listener (object): The listener object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

        def listener_delete(self, listener):
            """Deletes a listener.

            :param listener (object): The listener object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def listener_update(self, old_listener, new_listener):
            """Updates a listener.

            :param old_listener (object): The baseline listener object.
            :param new_listener (object): The updated listener object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

Pool
^^^^

* **create**

  Creates a pool for a load balancer.

  Octavia will pass in the pool object with all requested settings.

  The pool will be in the ``PENDING_CREATE`` provisioning_status and
  ``OFFLINE`` operating_status when it is passed to the driver. The driver
  will be responsible for updating the provisioning status of the pool to
  either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

  The Octavia API will accept and do basic API validation of the create
  request from the user. The pool python object representing the request body
  will be passed to the driver create method as it was received and validated,
  with the following exceptions:

  1. The project_id will be removed, if present, as this field is now
     deprecated. The pool will inherit the project_id from the parent load
     balancer.

.. _Pool object:

  **Pool object**

  As of the writing of this specification the create pool object may contain
  the following:

  +-----------------------+--------+------------------------------------------+
  | Name                  | Type   | Description                              |
  +=======================+========+==========================================+
  | admin_state_up        | bool   | Admin state: True if up, False if down.  |
  +-----------------------+--------+------------------------------------------+
  | description           | string | A human-readable description for the     |
  |                       |        | pool.
| +-----------------------+--------+------------------------------------------+ | healthmonitor | object | A `Healthmonitor object`_. | +-----------------------+--------+------------------------------------------+ | lb_algorithm | string | Load balancing algorithm: One of | | | | ROUND_ROBIN, LEAST_CONNECTIONS, or | | | | SOURCE_IP. | +-----------------------+--------+------------------------------------------+ | loadbalancer_id | string | ID of load balancer. | +-----------------------+--------+------------------------------------------+ | listener_id | string | ID of listener. | +-----------------------+--------+------------------------------------------+ | members | list | A list of `Member objects`_. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the pool. | +-----------------------+--------+------------------------------------------+ | pool_id | string | ID of pool to create. | +-----------------------+--------+------------------------------------------+ | protocol | string | Protocol type: One of HTTP, HTTPS, | | | | PROXY, or TCP. | +-----------------------+--------+------------------------------------------+ | session_persistence | dict | Defines session persistence as one of | | | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} | | | | OR | | | | {'type': 'APP_COOKIE', | | | | 'cookie_name': } | +-----------------------+--------+------------------------------------------+ * **delete** Removes an existing pool and all of its members. Octavia will pass the pool object as a parameter. The pool will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. * **update** Modifies an existing pool using the values supplied in the pool object. Octavia will pass in the original pool object which is the baseline for the update, and a pool object with the fields to be updated. As of the writing of this specification the update pool object may contain the following: +-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | description | string | A human-readable description for the | | | | pool. | +-----------------------+--------+------------------------------------------+ | lb_algorithm | string | Load balancing algorithm: One of | | | | ROUND_ROBIN, LEAST_CONNECTIONS, or | | | | SOURCE_IP. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the pool. | +-----------------------+--------+------------------------------------------+ | pool_id | string | ID of pool to update. | +-----------------------+--------+------------------------------------------+ | session_persistence | dict | Defines session persistence as one of | | | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} | | | | OR | | | | {'type': 'APP_COOKIE', | | | | 'cookie_name': } | +-----------------------+--------+------------------------------------------+ The pool will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. 
The driver will update the provisioning_status of the pool to either
``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not
successful.

The driver is expected to validate that the driver supports the request. The
method will then return or raise an exception if the request cannot be
accepted.

**Abstract class definition**

.. code-block:: python

    class Driver(object):
        def pool_create(self, pool):
            """Creates a new pool.

            :param pool (object): The pool object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

        def pool_delete(self, pool):
            """Deletes a pool and its members.

            :param pool (object): The pool object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def pool_update(self, old_pool, new_pool):
            """Updates a pool.

            :param old_pool (object): The baseline pool object.
            :param new_pool (object): The updated pool object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

Member
^^^^^^

* **create**

  Creates a member for a pool.

  Octavia will pass in the member object with all requested settings.

  The member will be in the ``PENDING_CREATE`` provisioning_status and
  ``OFFLINE`` operating_status when it is passed to the driver. The driver
  will be responsible for updating the provisioning status of the member to
  either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

  The Octavia API will accept and do basic API validation of the create
  request from the user. The member python object representing the request
  body will be passed to the driver create method as it was received and
  validated, with the following exceptions:

  1. The project_id will be removed, if present, as this field is now
     deprecated. The member will inherit the project_id from the parent load
     balancer.

.. _Member objects:

  **Member object**

  As of the writing of this specification the create member object may contain
  the following:

  +-----------------------+--------+------------------------------------------+
  | Name                  | Type   | Description                              |
  +=======================+========+==========================================+
  | address               | string | The IP address of the backend member to  |
  |                       |        | receive traffic from the load balancer.  |
  +-----------------------+--------+------------------------------------------+
  | admin_state_up        | bool   | Admin state: True if up, False if down.  |
  +-----------------------+--------+------------------------------------------+
  | backup                | bool   | Is the member a backup? Backup members   |
  |                       |        | only receive traffic when all non-backup |
  |                       |        | members are down.                        |
  +-----------------------+--------+------------------------------------------+
  | member_id             | string | ID of member to create.                  |
  +-----------------------+--------+------------------------------------------+
  | monitor_address       | string | An alternate IP address used for health  |
  |                       |        | monitoring a backend member.
| +-----------------------+--------+------------------------------------------+ | monitor_port | int | An alternate protocol port used for | | | | health monitoring a backend member. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the member. | +-----------------------+--------+------------------------------------------+ | pool_id | string | ID of pool. | +-----------------------+--------+------------------------------------------+ | protocol_port | int | The port on which the backend member | | | | listens for traffic. | +-----------------------+--------+------------------------------------------+ | subnet_id | string | Subnet ID. | +-----------------------+--------+------------------------------------------+ | weight | int | The weight of a member determines the | | | | portion of requests or connections it | | | | services compared to the other members of| | | | the pool. For example, a member with a | | | | weight of 10 receives five times as many | | | | requests as a member with a weight of 2. | | | | A value of 0 means the member does not | | | | receive new connections but continues to | | | | service existing connections. A valid | | | | value is from 0 to 256. Default is 1. | +-----------------------+--------+------------------------------------------+ * **delete** Removes a pool member. Octavia will pass the member object as a parameter. The member will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. * **update** Modifies an existing member using the values supplied in the listener object. Octavia will pass in the original member object which is the baseline for the update, and a member object with the fields to be updated. As of the writing of this specification the update member object may contain the following: +-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | backup | bool | Is the member a backup? Backup members | | | | only receive traffic when all non-backup | | | | members are down. | +-----------------------+--------+------------------------------------------+ | member_id | string | ID of member to update. | +-----------------------+--------+------------------------------------------+ | monitor_address | string | An alternate IP address used for health | | | | monitoring a backend member. | +-----------------------+--------+------------------------------------------+ | monitor_port | int | An alternate protocol port used for | | | | health monitoring a backend member. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the member. | +-----------------------+--------+------------------------------------------+ | weight | int | The weight of a member determines the | | | | portion of requests or connections it | | | | services compared to the other members of| | | | the pool. For example, a member with a | | | | weight of 10 receives five times as many | | | | requests as a member with a weight of 2. 
|
|                       |        | A value of 0 means the member does not   |
|                       |        | receive new connections but continues to |
|                       |        | service existing connections. A valid    |
|                       |        | value is from 0 to 256. Default is 1.    |
+-----------------------+--------+------------------------------------------+

The member will be in the ``PENDING_UPDATE`` provisioning_status when it is
passed to the driver. The driver will update the provisioning_status of the
member to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the
update was not successful.

The driver is expected to validate that the driver supports the request. The
method will then return or raise an exception if the request cannot be
accepted.

* **batch update**

  Sets the state of the members for a pool in one API call. This may include
  creating new members, deleting old members, and updating existing members.
  Existing members are matched based on their address/port combination.

  For example, assume a pool currently has two members. These members have the
  following address/port combinations: '192.0.2.15:80' and '192.0.2.16:80'.
  Now assume a PUT request is made that includes members with address/port
  combinations: '192.0.2.16:80' and '192.0.2.17:80'. The member
  '192.0.2.15:80' will be deleted, because it was not in the request. The
  member '192.0.2.16:80' will be updated to match the request data for that
  member, because it was matched. The member '192.0.2.17:80' will be created,
  because no such member existed.

  The members will be in the ``PENDING_CREATE``, ``PENDING_UPDATE``, or
  ``PENDING_DELETE`` provisioning_status when they are passed to the driver.
  The driver will update the provisioning_status of the members to either
  ``ACTIVE`` or ``DELETED`` if successfully updated, or ``ERROR`` if the
  update was not successful.

  The batch update method will supply a list of `Member objects`_. Existing
  members not in this list should be deleted, existing members in the list
  should be updated, and members in the list that do not already exist should
  be created.

**Abstract class definition**

.. code-block:: python

    class Driver(object):
        def member_create(self, member):
            """Creates a new member for a pool.

            :param member (object): The member object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

        def member_delete(self, member):
            """Deletes a pool member.

            :param member (object): The member object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def member_update(self, old_member, new_member):
            """Updates a pool member.

            :param old_member (object): The baseline member object.
            :param new_member (object): The updated member object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

        def member_batch_update(self, members):
            """Creates, updates, or deletes a set of pool members.

            :param members (list): List of member objects.
            :return: Nothing if the batch update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()
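The address/port matching semantics described above lend themselves to a
simple set-difference computation. The helper below is an illustrative sketch
only: members are shown as plain dicts, whereas a real driver receives member
objects and must also set the resulting provisioning statuses.

.. code-block:: python

    def plan_member_batch(existing_members, requested_members):
        """Partition a batch update into create, update, and delete sets."""
        existing = {(m['address'], m['protocol_port']): m
                    for m in existing_members}
        requested = {(m['address'], m['protocol_port']): m
                     for m in requested_members}

        # Requested members with no matching address/port are new.
        to_create = [m for key, m in requested.items()
                     if key not in existing]
        # Matched members are updated with the request data.
        to_update = [(existing[key], m) for key, m in requested.items()
                     if key in existing]
        # Existing members absent from the request are deleted.
        to_delete = [m for key, m in existing.items()
                     if key not in requested]
        return to_create, to_update, to_delete

Applied to the example above, this yields one create ('192.0.2.17:80'), one
update ('192.0.2.16:80'), and one delete ('192.0.2.15:80').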
Health Monitor
^^^^^^^^^^^^^^

* **create**

Creates a health monitor on a pool. Octavia will pass in the health monitor object with all requested settings. The health monitor will be in the ``PENDING_CREATE`` provisioning_status and ``OFFLINE`` operating_status when it is passed to the driver. The driver will be responsible for updating the provisioning status of the health monitor to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

The Octavia API will accept and do basic API validation of the create request from the user. The healthmonitor python object representing the request body will be passed to the driver create method as it was received and validated with the following exceptions:

1. The project_id will be removed, if present, as this field is now deprecated. The health monitor will inherit the project_id from the parent load balancer.

.. _Healthmonitor object:

**Healthmonitor object**

+-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | delay | int | The interval, in seconds, between health | | | | checks. | +-----------------------+--------+------------------------------------------+ | expected_codes | string | The expected HTTP status codes to get | | | | from a successful health check. This may | | | | be a single value, a list, or a range. | +-----------------------+--------+------------------------------------------+ | healthmonitor_id | string | ID of health monitor to create. | +-----------------------+--------+------------------------------------------+ | http_method | string | The HTTP method that the health monitor | | | | uses for requests. One of CONNECT, | | | | DELETE, GET, HEAD, OPTIONS, PATCH, POST, | | | | PUT, or TRACE. | +-----------------------+--------+------------------------------------------+ | max_retries | int | The number of successful checks before | | | | changing the operating status of the | | | | member to ONLINE. | +-----------------------+--------+------------------------------------------+ | max_retries_down | int | The number of allowed check failures | | | | before changing the operating status of | | | | the member to ERROR. A valid value is | | | | from 1 to 10. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the monitor. | +-----------------------+--------+------------------------------------------+ | pool_id | string | The pool to monitor. | +-----------------------+--------+------------------------------------------+ | timeout | int | The time, in seconds, after which a | | | | health check times out. This value must | | | | be less than the delay value. | +-----------------------+--------+------------------------------------------+ | type | string | The type of health monitor. One of HTTP, | | | | HTTPS, PING, TCP, or TLS-HELLO.
| +-----------------------+--------+------------------------------------------+ | url_path | string | The HTTP URL path of the request sent by | | | | the monitor to test the health of a | | | | backend member. Must be a string that | | | | begins with a forward slash (/). | +-----------------------+--------+------------------------------------------+

* **delete**

Deletes an existing health monitor. Octavia will pass in the health monitor object as a parameter. The health monitor will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``.

* **update**

Modifies an existing health monitor using the values supplied in the health monitor object. Octavia will pass in the original health monitor object which is the baseline for the update, and a health monitor object with the fields to be updated. As of the writing of this specification the update health monitor object may contain the following:

+-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | delay | int | The interval, in seconds, between health | | | | checks. | +-----------------------+--------+------------------------------------------+ | expected_codes | string | The expected HTTP status codes to get | | | | from a successful health check. This may | | | | be a single value, a list, or a range. | +-----------------------+--------+------------------------------------------+ | healthmonitor_id | string | ID of health monitor to update. | +-----------------------+--------+------------------------------------------+ | http_method | string | The HTTP method that the health monitor | | | | uses for requests. One of CONNECT, | | | | DELETE, GET, HEAD, OPTIONS, PATCH, POST, | | | | PUT, or TRACE. | +-----------------------+--------+------------------------------------------+ | max_retries | int | The number of successful checks before | | | | changing the operating status of the | | | | member to ONLINE. | +-----------------------+--------+------------------------------------------+ | max_retries_down | int | The number of allowed check failures | | | | before changing the operating status of | | | | the member to ERROR. A valid value is | | | | from 1 to 10. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the monitor. | +-----------------------+--------+------------------------------------------+ | timeout | int | The time, in seconds, after which a | | | | health check times out. This value must | | | | be less than the delay value. | +-----------------------+--------+------------------------------------------+ | url_path | string | The HTTP URL path of the request sent by | | | | the monitor to test the health of a | | | | backend member. Must be a string that | | | | begins with a forward slash (/). | +-----------------------+--------+------------------------------------------+

The health monitor will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the health monitor to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not successful. The driver is expected to validate that the driver supports the request. The method will then return or raise an exception if the request cannot be accepted.
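Before the abstract definition below, it is worth making the old/new contract concrete: the update object is sparse, so a driver typically merges it onto the baseline object. The following sketch is purely illustrative; the ``to_dict()`` calls and the ``None``-means-unset convention are assumptions made for the example, not requirements of this specification.

.. code-block:: python

    def merge_update(old_healthmonitor, new_healthmonitor):
        """Overlay the sparse update object onto the baseline object."""
        merged = dict(old_healthmonitor.to_dict())
        for field, value in new_healthmonitor.to_dict().items():
            # Fields absent from the update keep their baseline values.
            if value is not None:
                merged[field] = value
        return merged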
**Abstract class definition**

.. code-block:: python

    class Driver(object):
        def health_monitor_create(self, healthmonitor):
            """Creates a new health monitor.

            :param healthmonitor (object): The health monitor object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

        def health_monitor_delete(self, healthmonitor):
            """Deletes a health monitor.

            :param healthmonitor (object): The health monitor object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
            """Updates a health monitor.

            :param old_healthmonitor (object): The baseline health monitor
              object.
            :param new_healthmonitor (object): The updated health monitor
              object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

L7 Policy
^^^^^^^^^

* **create**

Creates an L7 policy. Octavia will pass in the L7 policy object with all requested settings. The L7 policy will be in the ``PENDING_CREATE`` provisioning_status and ``OFFLINE`` operating_status when it is passed to the driver. The driver will be responsible for updating the provisioning status of the L7 policy to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

The Octavia API will accept and do basic API validation of the create request from the user. The l7policy python object representing the request body will be passed to the driver create method as it was received and validated with the following exceptions:

1. The project_id will be removed, if present, as this field is now deprecated. The l7policy will inherit the project_id from the parent load balancer.

.. _L7policy objects:

**L7policy object**

As of the writing of this specification the create l7policy object may contain the following:

+-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | action | string | The L7 policy action. One of | | | | REDIRECT_TO_POOL, REDIRECT_TO_URL, or | | | | REJECT. | +-----------------------+--------+------------------------------------------+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | description | string | A human-readable description for the | | | | L7 policy. | +-----------------------+--------+------------------------------------------+ | l7policy_id | string | The ID of the L7 policy.
| +-----------------------+--------+------------------------------------------+ | listener_id | string | The ID of the listener. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the L7 policy. | +-----------------------+--------+------------------------------------------+ | position | int | The position of this policy on the | | | | listener. Positions start at 1. | +-----------------------+--------+------------------------------------------+ | redirect_pool_id | string | Requests matching this policy will be | | | | redirected to the pool with this ID. | | | | Only valid if action is REDIRECT_TO_POOL.| +-----------------------+--------+------------------------------------------+ | redirect_url | string | Requests matching this policy will be | | | | redirected to this URL. Only valid if | | | | action is REDIRECT_TO_URL. | +-----------------------+--------+------------------------------------------+ | rules | list | A list of l7rule objects. | +-----------------------+--------+------------------------------------------+ *Creating a Fully Populated L7 policy* If the "rules" option is specified, the provider driver will create all of the child objects in addition to creating the L7 policy instance. * **delete** Deletes an existing L7 policy. Octavia will pass in the L7 policy object as a parameter. The l7policy will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. * **update** Modifies an existing L7 policy using the values supplied in the l7policy object. Octavia will pass in the original L7 policy object which is the baseline for the update, and an L7 policy object with the fields to be updated. As of the writing of this specification the update L7 policy object may contain the following: +-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | action | string | The L7 policy action. One of | | | | REDIRECT_TO_POOL, REDIRECT_TO_URL, or | | | | REJECT. | +-----------------------+--------+------------------------------------------+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | description | string | A human-readable description for the | | | | L7 policy. | +-----------------------+--------+------------------------------------------+ | l7policy_id | string | The ID of the L7 policy. | +-----------------------+--------+------------------------------------------+ | name | string | Human-readable name of the L7 policy. | +-----------------------+--------+------------------------------------------+ | position | int | The position of this policy on the | | | | listener. Positions start at 1. | +-----------------------+--------+------------------------------------------+ | redirect_pool_id | string | Requests matching this policy will be | | | | redirected to the pool with this ID. | | | | Only valid if action is REDIRECT_TO_POOL.| +-----------------------+--------+------------------------------------------+ | redirect_url | string | Requests matching this policy will be | | | | redirected to this URL. Only valid if | | | | action is REDIRECT_TO_URL. 
| +-----------------------+--------+------------------------------------------+

The L7 policy will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the L7 policy to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not successful. The driver is expected to validate that the driver supports the request. The method will then return or raise an exception if the request cannot be accepted.

**Abstract class definition**

.. code-block:: python

    class Driver(object):
        def l7policy_create(self, l7policy):
            """Creates a new L7 policy.

            :param l7policy (object): The l7policy object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

        def l7policy_delete(self, l7policy):
            """Deletes an L7 policy.

            :param l7policy (object): The l7policy object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def l7policy_update(self, old_l7policy, new_l7policy):
            """Updates an L7 policy.

            :param old_l7policy (object): The baseline l7policy object.
            :param new_l7policy (object): The updated l7policy object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

L7 Rule
^^^^^^^

* **create**

Creates a new L7 rule for an existing L7 policy. Octavia will pass in the L7 rule object with all requested settings. The L7 rule will be in the ``PENDING_CREATE`` provisioning_status and ``OFFLINE`` operating_status when it is passed to the driver. The driver will be responsible for updating the provisioning status of the L7 rule to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.

The Octavia API will accept and do basic API validation of the create request from the user. The l7rule python object representing the request body will be passed to the driver create method as it was received and validated with the following exceptions:

1. The project_id will be removed, if present, as this field is now deprecated. The L7 rule will inherit the project_id from the parent load balancer.

.. _L7rule objects:

**L7rule object**

As of the writing of this specification the create l7rule object may contain the following:

+-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | compare_type | string | The comparison type for the L7 rule. One | | | | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, | | | | or STARTS_WITH. | +-----------------------+--------+------------------------------------------+ | invert | bool | When True the logic of the rule is | | | | inverted. For example, with invert True, | | | | equal to would become not equal to.
| +-----------------------+--------+------------------------------------------+ | key | string | The key to use for the comparison. For | | | | example, the name of the cookie to | | | | evaluate. | +-----------------------+--------+------------------------------------------+ | l7policy_id | string | The ID of the L7 policy. | +-----------------------+--------+------------------------------------------+ | l7rule_id | string | The ID of the L7 rule. | +-----------------------+--------+------------------------------------------+ | type | string | The L7 rule type. One of COOKIE, | | | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | +-----------------------+--------+------------------------------------------+ | value | string | The value to use for the comparison. For | | | | example, the file type to compare. | +-----------------------+--------+------------------------------------------+ * **delete** Deletes an existing L7 rule. Octavia will pass in the L7 rule object as a parameter. The L7 rule will be in the ``PENDING_DELETE`` provisioning_status when it is passed to the driver. The driver will notify Octavia that the delete was successful by setting the provisioning_status to ``DELETED``. If the delete failed, the driver will update the provisioning_status to ``ERROR``. * **update** Modifies an existing L7 rule using the values supplied in the l7rule object. Octavia will pass in the original L7 rule object which is the baseline for the update, and an L7 rule object with the fields to be updated. As of the writing of this specification the update L7 rule object may contain the following: +-----------------------+--------+------------------------------------------+ | Name | Type | Description | +=======================+========+==========================================+ | admin_state_up | bool | Admin state: True if up, False if down. | +-----------------------+--------+------------------------------------------+ | compare_type | string | The comparison type for the L7 rule. One | | | | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, | | | | or STARTS_WITH. | +-----------------------+--------+------------------------------------------+ | invert | bool | When True the logic of the rule is | | | | inverted. For example, with invert True, | | | | equal to would become not equal to. | +-----------------------+--------+------------------------------------------+ | key | string | The key to use for the comparison. For | | | | example, the name of the cookie to | | | | evaluate. | +-----------------------+--------+------------------------------------------+ | l7rule_id | string | The ID of the L7 rule. | +-----------------------+--------+------------------------------------------+ | type | string | The L7 rule type. One of COOKIE, | | | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | +-----------------------+--------+------------------------------------------+ | value | string | The value to use for the comparison. For | | | | example, the file type to compare. | +-----------------------+--------+------------------------------------------+ The L7 rule will be in the ``PENDING_UPDATE`` provisioning_status when it is passed to the driver. The driver will update the provisioning_status of the L7 rule to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the update was not successful. The driver is expected to validate that the driver supports the request. The method will then return or raise an exception if the request cannot be accepted. **Abstract class definition** .. 
code-block:: python

    class Driver(object):
        def l7rule_create(self, l7rule):
            """Creates a new L7 rule.

            :param l7rule (object): The L7 rule object.
            :return: Nothing if the create request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

        def l7rule_delete(self, l7rule):
            """Deletes an L7 rule.

            :param l7rule (object): The L7 rule object.
            :return: Nothing if the delete request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            """
            raise NotImplementedError()

        def l7rule_update(self, old_l7rule, new_l7rule):
            """Updates an L7 rule.

            :param old_l7rule (object): The baseline L7 rule object.
            :param new_l7rule (object): The updated L7 rule object.
            :return: Nothing if the update request was accepted.
            :raises DriverError: An unexpected error occurred in the driver.
            :raises NotImplementedError: if driver does not support request.
            :raises UnsupportedOptionError: if driver does not support one of
              the configuration options.
            """
            raise NotImplementedError()

Flavor
^^^^^^

Octavia flavors are defined in a separate specification (see References below). Support for flavors will be provided through two provider driver interfaces, one to query supported flavor metadata keys and another to validate that a flavor is supported. Both functions are synchronous.

* **get_supported_flavor_metadata**

Retrieves a dictionary of supported flavor metadata keys and their descriptions.

.. code-block:: python

    {"topology": "The load balancer topology for the flavor. One of: "
                 "SINGLE, ACTIVE_STANDBY",
     "compute_flavor": "The compute driver flavor to use for the load "
                       "balancer instances"}

* **validate_flavor**

Validates that the driver supports the flavor metadata dictionary. The validate_flavor method will be passed a flavor metadata dictionary that the driver will validate. This is used when an operator uploads a new flavor that applies to the driver. The validate_flavor method will either return or raise an ``UnsupportedOptionError`` exception.

Following are interface definitions for flavor support:

.. code-block:: python

    def get_supported_flavor_metadata():
        """Returns a dictionary of flavor metadata keys supported by this
        driver.

        The returned dictionary will include key/value pairs, 'name' and
        'description.'

        :returns: The flavor metadata dictionary
        :raises DriverError: An unexpected error occurred in the driver.
        :raises NotImplementedError: The driver does not support flavors.
        """
        raise NotImplementedError()

.. code-block:: python

    def validate_flavor(flavor_metadata):
        """Validates if driver can support flavor as defined in
        flavor_metadata.

        :param flavor_metadata (dict): Dictionary with flavor metadata.
        :return: Nothing if the flavor is valid and supported.
        :raises DriverError: An unexpected error occurred in the driver.
        :raises NotImplementedError: The driver does not support flavors.
        :raises UnsupportedOptionError: if driver does not support one of the
          configuration options.
        """
        raise NotImplementedError()
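As a concrete illustration of the flavor interfaces, a driver-side ``validate_flavor`` might look like the following minimal sketch. The supported-key tuple is a hypothetical example, and the sketch reuses the ``UnsupportedOptionError`` exception defined under Exception Model below.

.. code-block:: python

    SUPPORTED_FLAVOR_KEYS = ('topology', 'compute_flavor')  # hypothetical

    def validate_flavor(flavor_metadata):
        """Reject flavor metadata keys this driver cannot honor."""
        unsupported = sorted(set(flavor_metadata) - set(SUPPORTED_FLAVOR_KEYS))
        if unsupported:
            raise UnsupportedOptionError(
                user_fault_string='Flavor key(s) %s are not supported.' %
                                  ', '.join(unsupported),
                operator_fault_string='This driver does not support flavor '
                                      'key(s): %s' % ', '.join(unsupported))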
Exception Model
^^^^^^^^^^^^^^^

DriverError
"""""""""""

This is a catch-all exception that drivers can return if there is an unexpected error. An example might be a delete call for a load balancer the driver does not recognize. This exception includes two strings: the user fault string and the optional operator fault string. The user fault string, "user_fault_string", will be provided to the API requester. The operator fault string, "operator_fault_string", will be logged in the Octavia API log file for the operator to use when debugging.

.. code-block:: python

    class DriverError(Exception):
        user_fault_string = _("An unknown driver error occurred.")
        operator_fault_string = _("An unknown driver error occurred.")

        def __init__(self, *args, **kwargs):
            self.user_fault_string = kwargs.pop('user_fault_string',
                                                self.user_fault_string)
            self.operator_fault_string = kwargs.pop(
                'operator_fault_string', self.operator_fault_string)
            super(DriverError, self).__init__(*args, **kwargs)

NotImplementedError
"""""""""""""""""""

Driver implementations may not support all operations, and are free to reject a request. If the driver does not implement an API function, the driver will raise a NotImplementedError exception.

.. code-block:: python

    class NotImplementedError(Exception):
        user_fault_string = _("A feature is not implemented by this driver.")
        operator_fault_string = _("A feature is not implemented by this "
                                  "driver.")

        def __init__(self, *args, **kwargs):
            self.user_fault_string = kwargs.pop('user_fault_string',
                                                self.user_fault_string)
            self.operator_fault_string = kwargs.pop(
                'operator_fault_string', self.operator_fault_string)
            super(NotImplementedError, self).__init__(*args, **kwargs)

UnsupportedOptionError
""""""""""""""""""""""

Provider drivers will validate that they can complete the request -- that all options are supported by the driver. If the request fails validation, drivers will raise an UnsupportedOptionError exception. For example, if a driver does not support a flavor passed as an option to load balancer create(), the driver will raise an UnsupportedOptionError and include a message parameter providing an explanation of the failure.

.. code-block:: python

    class UnsupportedOptionError(Exception):
        user_fault_string = _("A specified option is not supported by this "
                              "driver.")
        operator_fault_string = _("A specified option is not supported by "
                                  "this driver.")

        def __init__(self, *args, **kwargs):
            self.user_fault_string = kwargs.pop('user_fault_string',
                                                self.user_fault_string)
            self.operator_fault_string = kwargs.pop(
                'operator_fault_string', self.operator_fault_string)
            super(UnsupportedOptionError, self).__init__(*args, **kwargs)
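To show how the fault strings are intended to be used, here is a hedged sketch of a driver converting a backend failure into a ``DriverError``; the ``self._backend.remove()`` call is a hypothetical backend API used only for illustration.

.. code-block:: python

    def loadbalancer_delete(self, loadbalancer):
        """Delete a load balancer, mapping backend errors to DriverError."""
        try:
            self._backend.remove(loadbalancer.loadbalancer_id)  # hypothetical
        except KeyError as e:
            raise DriverError(
                user_fault_string='Load balancer %s was not found on the '
                                  'backend.' % loadbalancer.loadbalancer_id,
                operator_fault_string='Backend lookup failed for load '
                                      'balancer %s: %s' %
                                      (loadbalancer.loadbalancer_id, e))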
Driver Support Library
----------------------

Provider drivers need support for updating provisioning status, operating status, and statistics. Drivers will not directly use database operations, and instead will call back to Octavia using a new API.

.. warning:: The methods listed here are the only callable methods for drivers. All other interfaces are not considered stable or safe for drivers to access.

Update provisioning and operating status API
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The update status API defined below can be used by provider drivers to update the provisioning and/or operating status of Octavia resources (load balancer, listener, pool, member, health monitor, L7 policy, or L7 rule).

For the following status API, valid values for provisioning status and operating status parameters are as defined by Octavia status codes. If an existing object is not included in the input parameter, the status remains unchanged.

provisioning_status: status associated with lifecycle of the resource. See `Octavia Provisioning Status Codes `_.

operating_status: the observed status of the resource. See `Octavia Operating Status Codes `_.

The dictionary takes this form:

.. code-block:: python

    {"loadbalancers": [{"id": "123",
                        "provisioning_status": "ACTIVE",
                        "operating_status": "ONLINE"},...],
     "healthmonitors": [],
     "l7policies": [],
     "l7rules": [],
     "listeners": [],
     "members": [],
     "pools": []
    }

.. code-block:: python

    def update_loadbalancer_status(status):
        """Update load balancer status.

        :param status (dict): dictionary defining the provisioning status and
            operating status for load balancer objects, including pools,
            members, listeners, L7 policies, and L7 rules.
        :raises: UpdateStatusError
        :returns: None
        """

Update statistics API
^^^^^^^^^^^^^^^^^^^^^

Provider drivers can update statistics for listeners using the following API. Similar to the status function above, a single dictionary with multiple listener statistics is used to update statistics in a single call. If an existing listener is not included, the statistics for that object will remain unchanged.

The general form of the input dictionary is a list of listener statistics:

.. code-block:: python

    {"listeners": [{"id": "123",
                    "active_connections": 12,
                    "bytes_in": 238908,
                    "bytes_out": 290234,
                    "request_errors": 0,
                    "total_connections": 3530},...]
    }

.. code-block:: python

    def update_listener_statistics(statistics):
        """Update listener statistics.

        :param statistics (dict): Statistics for listeners:
            id (string): ID of the listener.
            active_connections (int): Number of currently active connections.
            bytes_in (int): Total bytes received.
            bytes_out (int): Total bytes sent.
            request_errors (int): Total requests not fulfilled.
            total_connections (int): The total connections handled.
        :raises: UpdateStatisticsError
        :returns: None
        """

Get Resource Support
^^^^^^^^^^^^^^^^^^^^

Provider drivers may need to get information about an Octavia resource. As an example of its use, a provider driver may need to sync with Octavia, and therefore need to fetch all of the Octavia resources it is responsible for managing. Provider drivers can use the existing Octavia API to get these resources. See the `Octavia API Reference `_.

API Exception Model
^^^^^^^^^^^^^^^^^^^

The driver support API will include two Exceptions, one for each of the two API groups:

* UpdateStatusError
* UpdateStatisticsError

Each exception class will include a message field that describes the error and references to the failed record if available.

.. code-block:: python

    class UpdateStatusError(Exception):
        fault_string = _("The status update had an unknown error.")
        status_object = None
        status_object_id = None
        status_record = None

        def __init__(self, *args, **kwargs):
            self.fault_string = kwargs.pop('fault_string', self.fault_string)
            self.status_object = kwargs.pop('status_object', None)
            self.status_object_id = kwargs.pop('status_object_id', None)
            self.status_record = kwargs.pop('status_record', None)
            super(UpdateStatusError, self).__init__(*args, **kwargs)

    class UpdateStatisticsError(Exception):
        fault_string = _("The statistics update had an unknown error.")
        stats_object = None
        stats_object_id = None
        stats_record = None

        def __init__(self, *args, **kwargs):
            self.fault_string = kwargs.pop('fault_string', self.fault_string)
            self.stats_object = kwargs.pop('stats_object', None)
            self.stats_object_id = kwargs.pop('stats_object_id', None)
            self.stats_record = kwargs.pop('stats_record', None)
            super(UpdateStatisticsError, self).__init__(*args, **kwargs)
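Putting the support library pieces together, the following hedged sketch shows how a driver's health-reporting loop might build the status dictionary and handle the library exception. The ``driver_lib`` handle and the ``backend.poll_members()`` call are assumptions made for the example.

.. code-block:: python

    def report_member_health(driver_lib, backend):
        """Push observed member health back to Octavia."""
        members = []
        for member in backend.poll_members():  # hypothetical backend call
            members.append({'id': member.id,
                            'operating_status':
                                'ONLINE' if member.healthy else 'ERROR'})
        status = {'loadbalancers': [], 'healthmonitors': [], 'l7policies': [],
                  'l7rules': [], 'listeners': [], 'members': members,
                  'pools': []}
        try:
            driver_lib.update_loadbalancer_status(status)
        except UpdateStatusError as e:
            # The failed record is available on the exception for debugging.
            print('Status update failed for %s: %s' %
                  (e.status_object_id, e.fault_string))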
Alternatives
------------

**Driver Support Library**

An alternative to this library is a REST interface that drivers use directly. A REST implementation can still be used within the library, but wrapping it in an API simplifies the programming interface.

Data model impact
-----------------

None, the required data model changes are already present.

REST API impact
---------------

None, the required REST API changes are already present.

Security impact
---------------

None.

Notifications impact
--------------------

None.

Other end user impact
---------------------

Users will be able to direct requests to specific backends using the *provider* parameter. Users may want to understand the availability of provider drivers, and can use Octavia APIs to do so.

Performance Impact
------------------

The performance impact on Octavia should be minimal. Driver requests will need to be scheduled, and Octavia will process driver callbacks through a REST interface. As provider drivers are loaded by Octavia, calls into drivers are through direct interfaces.

Other deployer impact
---------------------

Minimal configuration is needed to support provider drivers. The work required is adding a driver name to Octavia's configuration file, and installing provider drivers supplied by third parties.

Developer impact
----------------

The proposal defines interaction between Octavia and backend drivers, so no developer impact is expected.

Implementation
==============

Assignee(s)
-----------

Work Items
----------

* Implement loading drivers defined in the Octavia configuration.
* Implement scheduling requests to drivers.
* Implement validating flavors with provider drivers.
* Implement getting and testing flavors with provider drivers.
* Implement a no-op driver for testing.
* Implement driver support library functions:

  * Update status functions
  * Update statistics functions

* Migrate the existing Octavia reference driver to use this interface.

Dependencies
============

* Octavia API: https://docs.openstack.org/api-ref/load-balancer/
* Flavors: https://docs.openstack.org/octavia/latest/contributor/specs/version1.0/flavors.html

Testing
=======

Tempest tests should be added for testing:

* Scheduling: test that Octavia effectively schedules to drivers besides the default driver.
* Request validation: test request validation API.
* Flavor profile validation: test flavor validation.
* Flavor queries: test flavor queries.
* Statistics updates.

Functional API tests should be updated to test the provider API.

Documentation Impact
====================

A driver developer guide should be created.

References
==========

Octavia API https://docs.openstack.org/api-ref/load-balancer/v2/index.html

Octavia Flavors Specification https://docs.openstack.org/octavia/latest/contributor/specs/version1.0/flavors.html

octavia-6.2.2/specs/version1.1/udp_support.rst

.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode

===========
UDP Support
===========

https://storyboard.openstack.org/#!/story/1657091

Problem description
===================

Currently, the default driver of Octavia (haproxy) only supports TCP, HTTP, HTTPS, and TERMINATED_HTTPS. We need support for load balancing UDP.

For some use-cases, UDP load balancing support is useful. One such case is real-time media streaming applications, which are based on RTSP [#foot1]_.
For the Internet of Things (IoT) [#foot2]_, there are many services or applications that use UDP as their transmission protocol. For example: CoAP [#foot3]_ (Constrained Application Protocol), DDS [#foot4]_ (Data Distribution Service) for real-time systems, and the introduction protocol Thread [#foot5]_. Applications with high real-time demands (like video chatting) run on RUDP [#foot6]_ (Reliable User Datagram Protocol), RTP [#foot7]_ (Real-time Transport Protocol), and UDT [#foot8]_ (UDP-based Data Transfer Protocol). These protocols are also based on UDP.

The API currently offers no option for these protocols; Layer 4 UDP support would provide one. This means customers lack a way to load balance these services, which may be running on VM instances in an OpenStack environment.

Proposed change
===============

This spec extends the LBaaSv2 API to support `UDP` as a protocol in Listener and Pool resource requests. It will require a new load balancing engine to support this feature, as the current haproxy engine only supports TCP-based protocols. If users want a load balancer which supports both TCP and UDP, this need cannot be met by haproxy-based amphora instances alone, so this is a good time to extend Octavia to support more load balancing scenarios.

This spec describes how LVS [#foot9]_ can work alongside haproxy for UDP load balancing. The reason for choosing LVS is that we can easily integrate it with the existing ``keepalived`` service. That means we can configure LVS via ``keepalived``, and check member health as well.

For the current service VM driver implementation, haproxy runs in the amphora-haproxy namespace in an amphora instance, so we also need to configure ``keepalived`` in the same namespace for UDP cases, even in SINGLE topology. For ACTIVE_STANDBY, ``keepalived`` will serve two purposes: UDP and VRRP. So, one instance of ``keepalived`` must be bound in the namespace, along with the LVS instance it configures.

The main idea is to use ``keepalived`` to configure and manage LVS [#foot10]_ and its configuration. We also need to check the members' statuses with ``keepalived`` instead of ``haproxy``, so there must be a different workflow in Octavia resources and deployment topologies. The simplest implementation is LVS in NAT mode, so we will only support this mode to start. If possible we will add other modes in the future.

Currently, a single ``keepalived`` instance can support multiple virtual server configurations, but to minimize the impact of reconfiguration on the existing listeners, it is better not to refresh all of the ``keepalived`` configuration files and restart the instances, because that would block traffic for all listeners if the LVS configuration maintained by ``keepalived`` is removed. This spec proposes that each listener will have its own ``keepalived`` process, but that process won't contain a VRRP instance, just the configuration of the virtual server and real servers. That means if the load balancer service is running with ACTIVE-STANDBY topology, each amphora instance will run multiple ``keepalived`` instances, the count being N+1 (where N is the UDP ``Listener`` count, and +1 is the VRRP instance for HA). The existing ``keepalived`` will be used, but each "UDP Listener keepalived process" will need to be monitored by a health check of the main VRRP keepalived process. Then the VIP could be moved to the BACKUP amphora instance in ACTIVE/STANDBY topology if there is any issue with these UDP keepalived processes. The health check will simply reflect whether the keepalived processes are alive.
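To ground the per-listener design, the sketch below renders the rough shape of the keepalived/LVS configuration the proposed jinja template would produce for one UDP listener with one member. The exact template is a work item of this spec, so the keywords and the ``nc``-based MISC_CHECK shown here are illustrative assumptions, not the final template.

.. code-block:: python

    # Illustrative only: the real template will be a jinja file shipped in
    # the amphora image, not an inline Python string.
    UDP_LISTENER_TEMPLATE = """\
    virtual_server {vip_address} {protocol_port} {{
        lb_algo rr
        lb_kind NAT
        protocol UDP
        persistence_timeout {persistence_timeout}
        delay_loop {delay}

        real_server {member_address} {member_port} {{
            weight {weight}
            MISC_CHECK {{
                misc_path "nc -uz -w {timeout} {member_address} {member_port}"
            }}
        }}
    }}
    """

    print(UDP_LISTENER_TEMPLATE.format(
        vip_address='192.0.2.10', protocol_port=53, persistence_timeout=60,
        delay=10, weight=1, member_address='192.0.2.21', member_port=53,
        timeout=5))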
The workflow for this feature contains:

1. Add a new ``keepalived`` jinja template to support LVS configuration.
2. Add ``netcat`` to the dib elements to support all platforms.
3. Extend the ability of the amphora agent to run ``keepalived`` with LVS configuration in amphora instances, including the init configuration, such as systemd, sysvinit and upstart.
4. Enhance session persistence to work with UDP and enable/disable the "One-Packet-Scheduling" option.
5. Update the database to allow listeners to support both ``tcp`` and ``udp`` on the same port, add ``udp`` as a valid protocol and ``ONE_PACKET_SCHEDULING`` as a valid session_persistence_type in the database.
6. Set up validation code for supported features of UDP load balancing (such as session persistence, types of health monitors, load balancing algorithms, number of L7 policies allowed, etc).
7. Extend the existing LBaaSv2 API in Octavia to allow ``udp`` parameters in the ``Listener`` resource.
8. Extend the Loadbalancer/Listener flows to support UDP load balancers in the supported topologies.

Alternatives
------------

Introduce a new UDP driver based on LVS or another load balancing engine, and then find a way to bridge the gap with the current Octavia data models, which are strongly tied to HTTP over TCP.

Provide a new provider driver framework to change the amphora backend from haproxy to another load balancing engine. For example, if we introduce an LVS driver, we may only support simple L7 functions with LVS, as it is risky to change the provider of existing haproxy-based amphora instances to LVS. If possible, the API would need to reject fields and resources that an LVS backend cannot support, such as "insert_headers" on Listener, L7 policies, L7 rules, and other Layer 7 fields/resources, so that the API matches the real capabilities of the backend. That means any L7 configuration would be ignored or translated to LVS configuration when the backend is LVS. For other load balancing engines which support UDP, such as f5/nginx, we may also need to do this.

Combining the two load balancing engines gives a simple reference implementation: LVS would only support L4 load balancing, and haproxy would provide the more specific and detailed L7 functionality. For other engines like f5/nginx, Octavia can pass the UDP parameters directly to the backend, which is good for the wider ecosystem and allows Octavia to support more powerful and complex load balancing solutions.

Data model impact
-----------------

There should not be any data model changes; this spec just allows a user to specify the ``udp`` protocol when creating/updating ``Listener`` and ``Pool`` resources. So here, we just extend ``SUPPORTED_PROTOCOLS`` with the value ``PROTOCOL_UDP``.

.. code-block:: python

    SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP,
                           PROTOCOL_TERMINATED_HTTPS, PROTOCOL_PROXY,
                           PROTOCOL_UDP)

Also add a record to the ``protocol`` table for ``PROTOCOL_UDP``.

As LVS only operates at Layer 4, there are some conflicts with the current Octavia data models. The limitations are listed below:

1. No L7 policies allowed.

2. For session persistence, this spec will introduce ``persistence_timeout`` (sec) and ``persistence_granularity`` (subnet mask) [#foot11]_ in the virtual server configuration. This functionality will be based on LVS. With no session persistence specified, LVS will be configured with a persistence_timeout of 0. There are two valid session persistence options for UDP (if session persistence is specified), ``SOURCE_IP`` and ``ONE_PACKET_SCHEDULING``.
3. Introduce a ``UDP_CONNECT`` type for UDP in ``healthmonitor``; for simplicity, it only checks that the UDP port is open, using the ``nc`` command. For the current ``healthmonitor`` API, we need to make clear how its options map onto keepalived/LVS, as in the mapping below:

+---------------------+--------------------------+-------------------------+ | Option Mapping | Healthmonitor | Keepalived LVS | | Healthmonitor->LVS | Description | Description | +=====================+==========================+=========================+ | | Set the time in seconds, | Delay timer for service | | delay -> delay_loop | between sending probes | polling. | | | to members. | | +---------------------+--------------------------+-------------------------+ | max_retries_down -> | Set the number of allowed| Number of retries | | retry | check failures before | before fail. | | | changing the operating | | | | status of the member to | | | | ERROR. | | +---------------------+--------------------------+-------------------------+ | timeout -> | Set the maximum time, in | delay before retry | | delay_before_retry | seconds, that a monitor | (default 1 unless | | | waits to connect before | otherwise specified) | | | it times out. This value | | | | must be less than the | | | | delay value. | | +---------------------+--------------------------+-------------------------+

4. For UDP load balancing, we can initially support the same algorithms: SOURCE_IP (sh), ROUND_ROBIN (rr), and LEAST_CONNECTIONS (lc).

REST API impact
---------------

* Allow the ``protocol`` fields to accept ``udp``.
* Allow the ``healthmonitor.type`` field to accept UDP type values.
* Add some fields to ``session_persistence`` that are specific to UDP, through the ``SOURCE_IP`` type and a new type ``ONE_PACKET_SCHEDULING``.

Create/Update Listener Request::

    POST/PUT /v2.0/lbaas/listeners
    {
        "listener": {
            "admin_state_up": true,
            "connection_limit": 100,
            "description": "listener one",
            "loadbalancer_id": "a36c20d0-18e9-42ce-88fd-82a35977ee8c",
            "name": "listener1",
            "protocol": "UDP",
            "protocol_port": 18000
        }
    }

.. note:: It is the same as the current relationships, where one ``listener`` will have only one default ``pool`` for UDP. A ``loadbalancer`` can have multiple ``listeners`` for UDP load balancing on different ports.

Create/Update Pool Request

``SOURCE_IP`` type case::

    POST/PUT /v2.0/lbaas/pools
    {
        "pool": {
            "admin_state_up": true,
            "description": "simple pool",
            "lb_algorithm": "ROUND_ROBIN",
            "name": "my-pool",
            "protocol": "UDP",
            "session_persistence": {
                "type": "SOURCE_IP",
                "persistence_timeout": 60,
                "persistence_granularity": "255.255.0.0"
            },
            "listener_id": "39de4d56-d663-46e5-85a1-5b9d5fa17829"
        }
    }

``ONE_PACKET_SCHEDULING`` type case::

    POST/PUT /v2.0/lbaas/pools
    {
        "pool": {
            "admin_state_up": true,
            "description": "simple pool",
            "lb_algorithm": "ROUND_ROBIN",
            "name": "my-pool",
            "protocol": "UDP",
            "session_persistence": {
                "type": "ONE_PACKET_SCHEDULING"
            },
            "listener_id": "39de4d56-d663-46e5-85a1-5b9d5fa17829"
        }
    }

.. note:: Validation for UDP will only allow setting the specific fields associated with UDP. For example, a user cannot set the ``protocol`` to "udp" and ``insert_headers`` in the same request.
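The note above implies a validation step in the API layer. A minimal, hypothetical sketch of that check follows; the disallowed-field tuple is an example, and the helper itself is illustrative rather than the actual Octavia validation code.

.. code-block:: python

    # Example L7-only listener options that make no sense for UDP.
    UDP_DISALLOWED_LISTENER_FIELDS = ('insert_headers',
                                      'default_tls_container_ref')

    def validate_udp_listener(listener_request):
        """Reject L7-only options on a UDP listener request body."""
        if listener_request.get('protocol') != 'UDP':
            return
        for field in UDP_DISALLOWED_LISTENER_FIELDS:
            if listener_request.get(field) is not None:
                raise ValueError(
                    'Field %s is not allowed for UDP listeners.' % field)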
Create/Update Health Monitor Request::

    POST/PUT /v2.0/lbaas/healthmonitors
    {
        "healthmonitor": {
            "name": "Good health monitor",
            "admin_state_up": true,
            "pool_id": "c5e9e801-0473-463b-a017-90c8e5237bb3",
            "delay": 10,
            "max_retries": 4,
            "max_retries_down": 4,
            "timeout": 5,
            "type": "UDP_CONNECT"
        }
    }

.. note:: Creating a ``healthmonitor`` with any L7 parameters, such as "http_method", "url_path", and "expected_code", is not allowed if the associated ``pool`` supports UDP. The "max_retries" option also maps differently onto keepalived/LVS than the API description suggests: its default is the value of "max_retries_down" if the user specified one, and in general "max_retries_down" should be overridden by "max_retries".

Security impact
---------------

Security group handling is affected: we need to add another neutron security group rule to the existing security group to allow UDP traffic. Otherwise, the security impact is minimal, as keepalived/LVS will be running in the tenant traffic network namespace.

Notifications impact
--------------------

No expected change.

Other end user impact
---------------------

Users will be able to pass "UDP" when creating/updating Listener/Pool resources to configure UDP load balancing.

Performance Impact
------------------

* If the enabled driver is LVS, it will have good performance for L4 load balancing, but lacks any L7 functionality.
* As this spec introduces LVS and haproxy working together, if users update the ``Listener`` or ``Pool`` resources of a ``LoadBalancer`` instance frequently, load balancing may be briefly delayed while the UDP-related LVS configuration is refreshed.
* As a keepalived monitoring process is added for each UDP listener, the RAM usage of amphora VM instances must be considered.

Other deployer impact
---------------------

No expected change.

Developer impact
----------------

No expected change.

Implementation
==============

Assignee(s)
-----------

zhaobo

Work Items
----------

* Add/extend startup script templates for keepalived processes, including configuration.
* Extend the existing amphora agent and driver to generate and control LVS via ``keepalived`` in amphora instances.
* Extend the existing Octavia v2 API to accept the ``udp`` parameter in ``Listener`` and ``Pool`` resources.
* Extend the Loadbalancer/Listener flows to support UDP load balancers in the supported topologies.
* Extend the Octavia v2 API to accept UDP fields.
* Add the required logic to the haproxy amphora agent and the affected resource workflows in Octavia.
* Add API validation code to validate the fields of UDP cases.
* Add unit tests to Octavia.
* Add API functional tests.
* Add scenario tests to the octavia tempest plugin.
* Update the CLI and octavia-dashboard to support UDP field input.
* Documentation work.

Dependencies
============

None

Testing
=======

Unit tests, functional tests, API tests and scenario tests are necessary.

Documentation Impact
====================

The description in the Octavia API reference will need to be updated, and the load balancing cookbook should also be updated to make clear how ``healthmonitor`` behavior differs in UDP cases.

References
==========

.. [#foot1] https://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol
.. [#foot2] https://en.wikipedia.org/wiki/Internet_of_things
.. [#foot3] https://en.wikipedia.org/wiki/Constrained_Application_Protocol
.. [#foot4] https://en.wikipedia.org/wiki/Data_Distribution_Service
.. [#foot5] https://en.wikipedia.org/wiki/Thread_(network_protocol)
.. [#foot6] https://en.wikipedia.org/wiki/Reliable_User_Datagram_Protocol
.. [#foot7] https://de.wikipedia.org/wiki/Real-Time_Transport_Protocol
.. [#foot8] https://en.wikipedia.org/wiki/UDP-based_Data_Transfer_Protocol
.. [#foot9] http://www.linuxvirtualserver.org/
.. [#foot10] https://github.com/acassen/keepalived/blob/master/doc/keepalived.conf.SYNOPSIS#L559
.. [#foot11] http://www.linuxvirtualserver.org/docs/persistence.html

octavia-6.2.2/specs-tests/__init__.py

octavia-6.2.2/specs-tests/test_titles.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import glob

import docutils.core
from docutils.parsers import rst
from docutils.parsers.rst import directives
from docutils.parsers.rst import roles
import testtools


class FakeDirective(rst.Directive):
    has_content = True

    def run(self):
        return []


def fake_role(name, rawtext, text, lineno, inliner, options=None,
              content=None):
    return [], []


directives.register_directive('seqdiag', FakeDirective)
directives.register_directive('blockdiag', FakeDirective)
directives.register_directive('nwdiag', FakeDirective)
directives.register_directive('actdiag', FakeDirective)
directives.register_directive('graphviz', FakeDirective)
roles.register_local_role('doc', fake_role)


class TestTitles(testtools.TestCase):
    def _get_title(self, section_tree):
        section = {
            'subtitles': [],
        }
        for node in section_tree:
            if node.tagname == 'title':
                section['name'] = node.rawsource
            elif node.tagname == 'section':
                subsection = self._get_title(node)
                section['subtitles'].append(subsection['name'])
        return section

    def _get_titles(self, spec):
        titles = {}
        for node in spec:
            if node.tagname == 'section':
                section = self._get_title(node)
                titles[section['name']] = section['subtitles']
        return titles

    def _check_titles(self, titles):
        self.assertEqual(7, len(titles))

        problem = 'Problem description'
        self.assertIn(problem, titles)
        self.assertEqual(0, len(titles[problem]))

        proposed = 'Proposed change'
        self.assertIn(proposed, titles)
        self.assertIn('Alternatives', titles[proposed])
        self.assertIn('Data model impact', titles[proposed])
        self.assertIn('REST API impact', titles[proposed])
        self.assertIn('Security impact', titles[proposed])
        self.assertIn('Notifications impact', titles[proposed])
        self.assertIn('Other end user impact', titles[proposed])
        self.assertIn('Performance Impact', titles[proposed])
        self.assertIn('Other deployer impact', titles[proposed])
        self.assertIn('Developer impact', titles[proposed])

        impl = 'Implementation'
        self.assertIn(impl, titles)
        self.assertEqual(2, len(titles[impl]))
        self.assertIn('Assignee(s)', titles[impl])
        self.assertIn('Work Items', titles[impl])

        deps = 'Dependencies'
        self.assertIn(deps, titles)
        self.assertEqual(0, len(titles[deps]))

        testing = 'Testing'
        self.assertIn(testing, titles)
        self.assertEqual(0, len(titles[testing]))

        docs = 'Documentation Impact'
        self.assertIn(docs, titles)
        self.assertEqual(0, len(titles[docs]))

        refs = 'References'
        self.assertIn(refs, titles)
        self.assertEqual(0, len(titles[refs]))

    def test_template(self):
        files = set(glob.glob('specs/*.rst') + glob.glob('specs/*/*'))
        files = files - set(glob.glob('specs/*/*.dot'))
        files = files - set(glob.glob('specs/*/*.diag'))
        for filename in files:
            self.assertTrue(filename.endswith(".rst"),
                            "spec's file must use 'rst' extension.")
            with open(filename) as f:
                data = f.read()
            spec = docutils.core.publish_doctree(data)
            titles = self._get_titles(spec)
            self._check_titles(titles)

octavia-6.2.2/test-requirements.txt

# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=3.0,<4.1.0 # Apache-2.0
requests-mock>=1.2.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
flake8-import-order==0.12 # LGPLv3
python-subunit>=1.0.0 # Apache-2.0/BSD
oslotest>=3.2.0 # Apache-2.0
pylint>=2.2.0,<=2.5.3 # GPLv2
testrepository>=0.0.18 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
doc8>=0.6.0 # Apache-2.0
bandit!=1.6.0,>=1.1.0 # Apache-2.0
tempest>=17.1.0 # Apache-2.0
# Required for pep8 - doc8 tests
sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
bashate>=0.5.1 # Apache-2.0

octavia-6.2.2/tools/__init__.py

octavia-6.2.2/tools/coding-checks.sh

#!/bin/sh
# This script is copied from neutron and adapted for octavia.
set -eu

usage () {
    echo "Usage: $0 [OPTION]..."
    echo "Run octavia's coding check(s)"
    echo ""
    echo "  -Y, --pylint [] Run pylint check on the entire octavia module or just files changed in basecommit (e.g. HEAD~1)"
    echo "  -h, --help Print this usage message"
    echo
    exit 0
}

join_args() {
    if [ -z "$scriptargs" ]; then
        scriptargs="$opt"
    else
        scriptargs="$scriptargs $opt"
    fi
}

process_options () {
    i=1
    while [ $i -le $# ]; do
        eval opt=\$$i
        case $opt in
            -h|--help) usage;;
            -Y|--pylint) pylint=1;;
            *) join_args;;
        esac
        i=$((i+1))
    done
}

run_pylint () {
    local target="${scriptargs:-all}"
    if [ "$target" = "all" ]; then
        files="octavia"
    else
        case "$target" in
            *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");;
            *) echo "$target is an unrecognized basecommit"; exit 1;;
        esac
    fi
    echo "Running pylint..."
echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint -j 0 --max-nested-blocks 7 --extension-pkg-whitelist netifaces --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1637052636.0 octavia-6.2.2/tools/create_flow_docs.py0000775000175000017500000002257600000000000020225 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import argparse import importlib import os import graphviz from taskflow import engines from octavia.api.drivers import utils from octavia.common import constants from octavia.tests.common import data_model_helpers as dmh def main(): arg_parser = argparse.ArgumentParser( description='Generate graphviz representations of the ' 'Octavia TaskFlow flows.') arg_parser.add_argument('-f', '--flow-list', required=True, help='Path to flow list file') arg_parser.add_argument('-o', '--output-directory', required=True, help='Path to flow list file') args = arg_parser.parse_args() generate(args.flow_list, args.output_directory) def generate(flow_list, output_directory): # Create the diagrams base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir) diagram_list = [] with open(os.path.join(base_path, flow_list), 'r') as flowlist: for row in flowlist: if row.startswith('#'): continue current_tuple = tuple(row.strip().split(' ')) current_class = getattr(importlib.import_module(current_tuple[0]), current_tuple[1]) current_instance = current_class() get_flow_method = getattr(current_instance, current_tuple[2]) if (current_tuple[1] == 'AmphoraFlows' and current_tuple[2] == 'get_failover_amphora_flow'): amp1 = dmh.generate_amphora() amp2 = dmh.generate_amphora() lb = dmh.generate_load_balancer(amphorae=[amp1, amp2]) if 'v2' in current_tuple[0]: lb = utils.lb_dict_to_provider_dict(lb.to_dict()) amp1 = amp1.to_dict() current_engine = engines.load( get_flow_method(amp1, 2)) elif (current_tuple[1] == 'LoadBalancerFlows' and current_tuple[2] == 'get_create_load_balancer_flow'): current_engine = engines.load( get_flow_method( constants.TOPOLOGY_ACTIVE_STANDBY)) elif (current_tuple[1] == 'LoadBalancerFlows' and current_tuple[2] == 'get_delete_load_balancer_flow'): lb = dmh.generate_load_balancer() if 'v2' in current_tuple[0]: lb = utils.lb_dict_to_provider_dict(lb.to_dict()) delete_flow = get_flow_method(lb) else: delete_flow, store = get_flow_method(lb) current_engine = engines.load(delete_flow) elif (current_tuple[1] == 'LoadBalancerFlows' and current_tuple[2] == 'get_cascade_delete_load_balancer_flow'): listeners = [{constants.LISTENER_ID: '368dffc7-7440-4ee0-aca5-11052d001b05'}, {constants.LISTENER_ID: 
            if (current_tuple[1] == 'AmphoraFlows' and
                    current_tuple[2] == 'get_failover_amphora_flow'):
                amp1 = dmh.generate_amphora()
                amp2 = dmh.generate_amphora()
                lb = dmh.generate_load_balancer(amphorae=[amp1, amp2])
                if 'v2' in current_tuple[0]:
                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
                    amp1 = amp1.to_dict()
                current_engine = engines.load(
                    get_flow_method(amp1, 2))
            elif (current_tuple[1] == 'LoadBalancerFlows' and
                  current_tuple[2] == 'get_create_load_balancer_flow'):
                current_engine = engines.load(
                    get_flow_method(
                        constants.TOPOLOGY_ACTIVE_STANDBY))
            elif (current_tuple[1] == 'LoadBalancerFlows' and
                  current_tuple[2] == 'get_delete_load_balancer_flow'):
                lb = dmh.generate_load_balancer()
                if 'v2' in current_tuple[0]:
                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
                    delete_flow = get_flow_method(lb)
                else:
                    delete_flow, store = get_flow_method(lb)
                current_engine = engines.load(delete_flow)
            elif (current_tuple[1] == 'LoadBalancerFlows' and
                  current_tuple[2] ==
                  'get_cascade_delete_load_balancer_flow'):
                listeners = [{constants.LISTENER_ID:
                              '368dffc7-7440-4ee0-aca5-11052d001b05'},
                             {constants.LISTENER_ID:
                              'd9c45ec4-9dbe-491b-9f21-6886562348bf'}]
                pools = [{constants.POOL_ID:
                          '6886a40b-1f2a-41a3-9ece-5c51845a7ac4'},
                         {constants.POOL_ID:
                          '08ada7a2-3eff-42c6-bdd8-b6f2ecd73358'}]
                lb = dmh.generate_load_balancer()
                if 'v2' in current_tuple[0]:
                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
                    delete_flow = get_flow_method(lb, listeners, pools)
                else:
                    delete_flow, store = get_flow_method(lb)
                current_engine = engines.load(delete_flow)
            elif (current_tuple[1] == 'LoadBalancerFlows' and
                  current_tuple[2] == 'get_failover_LB_flow'):
                amp1 = dmh.generate_amphora()
                amp2 = dmh.generate_amphora()
                lb = dmh.generate_load_balancer(
                    amphorae=[amp1, amp2],
                    topology=constants.TOPOLOGY_ACTIVE_STANDBY)
                if 'v2' in current_tuple[0]:
                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
                    flavor = {constants.LOADBALANCER_TOPOLOGY:
                              constants.TOPOLOGY_ACTIVE_STANDBY}
                    lb[constants.FLAVOR] = flavor
                    amp1 = amp1.to_dict()
                    amp2 = amp2.to_dict()
                current_engine = engines.load(
                    get_flow_method([amp1, amp2], lb))
            elif (current_tuple[1] == 'MemberFlows' and
                  current_tuple[2] == 'get_batch_update_members_flow'):
                current_engine = engines.load(
                    get_flow_method([], [], []))
            else:
                current_engine = engines.load(get_flow_method())
            current_engine.compile()

            # We need to render svg and not dot here so we can scale
            # the image in the restructured text page
            src = graphviz.Source(current_engine.compilation.
                                  execution_graph.export_to_dot())
            src.format = 'svg'
            src.render(filename=current_tuple[1] + '-' + current_tuple[2],
                       directory=os.path.join(base_path, output_directory),
                       cleanup=True)

            diagram_list.append((current_tuple[1], current_tuple[2]))
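    # Each flows class gets a single .rst page: the first flow seen for a
    # class writes the page header, and later flows for the same class append
    # a section. The html block links each image to its full-size SVG via
    # :target:, while the latex block omits the link.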
    # Create the class docs
    diagram_list = sorted(diagram_list, key=getDiagKey)
    class_tracker = None
    current_doc_file = None
    for doc_tuple in diagram_list:
        # If we are still working on the same class, append
        if doc_tuple[0] == class_tracker:
            current_doc_file.write('\n')
            current_doc_file.write(doc_tuple[1] + '\n')
            current_doc_file.write('-' * len(doc_tuple[1]) + '\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: html\n')
            current_doc_file.write('\n')
            current_doc_file.write('    .. image:: ' + doc_tuple[0] +
                                   '-' + doc_tuple[1] + '.svg\n')
            current_doc_file.write('       :width: 660px\n')
            current_doc_file.write('       :target: ../../../_images/' +
                                   doc_tuple[0] + '-' +
                                   doc_tuple[1] + '.svg\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: latex\n')
            current_doc_file.write('\n')
            current_doc_file.write('    .. image:: ' + doc_tuple[0] +
                                   '-' + doc_tuple[1] + '.svg\n')
            current_doc_file.write('       :width: 660px\n')
        # First or new class, create the file
        else:
            if current_doc_file is not None:
                current_doc_file.close()
            current_doc_file = open(os.path.join(
                base_path, output_directory, doc_tuple[0] + '.rst'), 'w+')
            class_tracker = doc_tuple[0]
            file_title = constants.FLOW_DOC_TITLES.get(doc_tuple[0],
                                                       'Unknown Flows')
            current_doc_file.write('=' * len(file_title) + '\n')
            current_doc_file.write(file_title + '\n')
            current_doc_file.write('=' * len(file_title) + '\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. contents::\n')
            current_doc_file.write('   :depth: 2\n')
            current_doc_file.write('   :backlinks: top\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: html\n')
            current_doc_file.write('\n')
            current_doc_file.write('    Click on any flow to view full size.\n')
            current_doc_file.write('\n')
            current_doc_file.write(doc_tuple[1] + '\n')
            current_doc_file.write('-' * len(doc_tuple[1]) + '\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: html\n')
            current_doc_file.write('\n')
            current_doc_file.write('    .. image:: ' + doc_tuple[0] +
                                   '-' + doc_tuple[1] + '.svg\n')
            current_doc_file.write('       :width: 660px\n')
            current_doc_file.write('       :target: ../../../_images/' +
                                   doc_tuple[0] + '-' +
                                   doc_tuple[1] + '.svg\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: latex\n')
            current_doc_file.write('\n')
            current_doc_file.write('    .. image:: ' + doc_tuple[0] +
                                   '-' + doc_tuple[1] + '.svg\n')
            current_doc_file.write('       :width: 660px\n')

    current_doc_file.close()


def getDiagKey(item):
    return item[0] + '-' + item[1]


if __name__ == "__main__":
    main()


# File: octavia-6.2.2/tools/flow-list-v2.txt

# List of TaskFlow flows that should be documented
# Some flows are used by other flows, so just list the primary flows here
# Format:
# module class flow
octavia.controller.worker.v2.flows.amphora_flows AmphoraFlows get_create_amphora_flow
octavia.controller.worker.v2.flows.amphora_flows AmphoraFlows get_failover_amphora_flow
octavia.controller.worker.v2.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_failover_LB_flow
octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_create_listener_flow
octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_create_all_listeners_flow
octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_delete_listener_flow
octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_update_listener_flow
octavia.controller.worker.v2.flows.pool_flows PoolFlows get_create_pool_flow
octavia.controller.worker.v2.flows.pool_flows PoolFlows get_delete_pool_flow
octavia.controller.worker.v2.flows.pool_flows PoolFlows get_update_pool_flow
octavia.controller.worker.v2.flows.member_flows MemberFlows get_create_member_flow
octavia.controller.worker.v2.flows.member_flows MemberFlows get_delete_member_flow
octavia.controller.worker.v2.flows.member_flows MemberFlows get_update_member_flow
octavia.controller.worker.v2.flows.member_flows MemberFlows get_batch_update_members_flow
octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_create_health_monitor_flow
octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_delete_health_monitor_flow
octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_update_health_monitor_flow
octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_create_l7policy_flow
octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_delete_l7policy_flow
octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_update_l7policy_flow
octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_create_l7rule_flow
octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_delete_l7rule_flow
octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_update_l7rule_flow
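Each non-comment row in the list above is consumed by tools/create_flow_docs.py as a module/class/flow-getter triple. The following is a minimal, illustrative sketch of that lookup, not part of the repository; it assumes octavia is importable and uses a flow getter that needs no arguments:

import importlib

# One row from the flow list, in "module class flow" form.
row = ('octavia.controller.worker.v2.flows.pool_flows '
       'PoolFlows get_create_pool_flow')
module_name, class_name, getter_name = row.split(' ')

# Mirror the import/getattr steps done in create_flow_docs.generate().
flows_class = getattr(importlib.import_module(module_name), class_name)
get_flow = getattr(flows_class(), getter_name)

# Getters without required arguments can be called directly; the result is
# a taskflow flow that engines.load() can compile and render.
flow = get_flow()
print(type(flow))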
# File: octavia-6.2.2/tools/flow-list.txt

# List of TaskFlow flows that should be documented
# Some flows are used by other flows, so just list the primary flows here
# Format:
# module class flow
octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_create_amphora_flow
octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_failover_amphora_flow
octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow
octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_failover_LB_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_listener_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_all_listeners_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_delete_listener_flow
octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_update_listener_flow
octavia.controller.worker.v1.flows.pool_flows PoolFlows get_create_pool_flow
octavia.controller.worker.v1.flows.pool_flows PoolFlows get_delete_pool_flow
octavia.controller.worker.v1.flows.pool_flows PoolFlows get_update_pool_flow
octavia.controller.worker.v1.flows.member_flows MemberFlows get_create_member_flow
octavia.controller.worker.v1.flows.member_flows MemberFlows get_delete_member_flow
octavia.controller.worker.v1.flows.member_flows MemberFlows get_update_member_flow
octavia.controller.worker.v1.flows.member_flows MemberFlows get_batch_update_members_flow
octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_create_health_monitor_flow
octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_delete_health_monitor_flow
octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_update_health_monitor_flow
octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_create_l7policy_flow
octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_delete_l7policy_flow
octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_update_l7policy_flow
octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_create_l7rule_flow
octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_delete_l7rule_flow
octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_update_l7rule_flow

# File: octavia-6.2.2/tools/misc-sanity-checks.sh

#! /bin/sh
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 YAMAMOTO Takashi
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

TMPDIR=`mktemp -d /tmp/${0##*/}.XXXXXX` || exit 1
export TMPDIR
trap "rm -rf $TMPDIR" EXIT

FAILURES=$TMPDIR/failures

check_pot_files_errors () {
    find octavia -type f -regex '.*\.pot?' \
        -print0|xargs -0 -n 1 --no-run-if-empty msgfmt \
        --check-format -o /dev/null
    if [ "$?" -ne 0 ]; then
        echo "PO files syntax is not correct!" >>$FAILURES
    fi
}

check_identical_policy_files () {
    # For unit tests, we maintain their own policy.json file to make test suite
    # independent of whether it's executed from the octavia source tree or from
    # site-packages installation path. We don't want two copies of the same
    # file to diverge, so checking that they are identical
    diff etc/policy.json octavia/tests/etc/policy.json > /dev/null 2>&1
    if [ "$?" -ne 0 ]; then
        echo "policy.json files must be identical!" >>$FAILURES
    fi
}

check_no_duplicate_api_test_idempotent_ids() {
    # For API tests, an idempotent ID is assigned to each single API test,
    # those IDs should be unique
    output=$(check-uuid --package octavia.tests.tempest)
    if [ "$?" -ne 0 ]; then
        echo "There are duplicate idempotent ids in the API tests" >>$FAILURES
        echo "please, assign unique uuids to each API test:" >>$FAILURES
        echo "$output" >>$FAILURES
    fi
}

# Add your checks here...
check_pot_files_errors
# TODO(johnsom) Uncomment when we have policies to test
# check_identical_policy_files
# TODO(johnsom) Uncomment when we have idempotent ids
# check_no_duplicate_api_test_idempotent_ids

# Fail, if there are emitted failures
if [ -f $FAILURES ]; then
    cat $FAILURES
    exit 1
fi
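# This script takes no arguments and expects to run from the repository root;
# the tox pep8 environment invokes it as "sh ./tools/misc-sanity-checks.sh".
# Each failing check appends a message to $FAILURES, and the script exits
# non-zero only if that file exists once all checks have run.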
# File: octavia-6.2.2/tools/pkcs7_to_pem.py

#!/usr/bin/python
#
# Copyright 2016 IBM. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Converts a PKCS7 certificate bundle in DER or PEM format into
# a sequence of PEM-encoded certificates.

import base64
import sys

from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import rfc2315

PKCS7_BEG = """-----BEGIN PKCS7-----"""
PKCS7_END = """-----END PKCS7-----"""


# Based on pyasn1-modules.pem.readPemBlocksFromFile, but eliminates the need
# to operate on a file handle.
def _read_pem_blocks(data, *markers):
    stSpam, stHam, stDump = 0, 1, 2
    startMarkers = dict(map(lambda x: (x[1], x[0]),
                            enumerate(map(lambda x: x[0], markers))))
    stopMarkers = dict(map(lambda x: (x[1], x[0]),
                           enumerate(map(lambda x: x[1], markers))))
    idx = -1
    state = stSpam
    data = data.decode('utf-8')
    for certLine in data.replace('\r', '').split('\n'):
        if not certLine:
            break
        certLine = certLine.strip()
        if state == stSpam:
            if certLine in startMarkers:
                certLines = []
                idx = startMarkers[certLine]
                state = stHam
                continue
        if state == stHam:
            if certLine in stopMarkers and stopMarkers[certLine] == idx:
                state = stDump
            else:
                certLines.append(certLine)
        if state == stDump:
            yield ''.encode().join([base64.b64decode(x)
                                    for x in certLines])
            state = stSpam


def _process_pkcs7_substrate(substrate):
    contentInfo, _ = der_decoder.decode(substrate,
                                        asn1Spec=rfc2315.ContentInfo())

    contentType = contentInfo.getComponentByName('contentType')

    if contentType != rfc2315.signedData:
        raise Exception

    content, _ = der_decoder.decode(
        contentInfo.getComponentByName('content'),
        asn1Spec=rfc2315.SignedData())

    for blob in content.getComponentByName('certificates'):
        cert = x509.load_der_x509_certificate(der_encoder.encode(blob),
                                              backends.default_backend())
        print(cert.public_bytes(
            encoding=serialization.Encoding.PEM).decode(
                'unicode_escape'), end='')


# Main program code
if len(sys.argv) != 1:
    print('Usage: cat <pkcs7 bundle> | %s' % sys.argv[0])
    sys.exit(-1)

# Need to read in binary bytes in case DER encoding of PKCS7 bundle
data = sys.stdin.buffer.read()

# Look for PEM encoding
if PKCS7_BEG in str(data):
    for substrate in _read_pem_blocks(data, (PKCS7_BEG, PKCS7_END)):
        _process_pkcs7_substrate(substrate)

# If no PEM encoding, assume this is DER encoded and try to decode
else:
    _process_pkcs7_substrate(data)
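For comparison, newer cryptography releases (3.1 and later) ship a native PKCS7 certificate loader that makes the manual pyasn1 decoding above unnecessary. A minimal sketch of the same conversion under that version assumption (illustrative; this is not what the tool above uses):

import sys

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import pkcs7

# Read the bundle from stdin as bytes, since it may be DER encoded.
data = sys.stdin.buffer.read()

# Pick the PEM or DER loader based on the PEM armor marker.
if b'-----BEGIN PKCS7-----' in data:
    certs = pkcs7.load_pem_pkcs7_certificates(data)
else:
    certs = pkcs7.load_der_pkcs7_certificates(data)

# Emit each certificate from the bundle as PEM.
for cert in certs:
    sys.stdout.write(
        cert.public_bytes(serialization.Encoding.PEM).decode())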
# File: octavia-6.2.2/tox.ini

[tox]
minversion = 2.5.0
envlist = docs,py36,py37,functional-py36,functional-py37,pep8,specs
skipsdist = True
ignore_basepython_conflict = True

[testenv]
basepython = python3
usedevelop = True
setenv = VIRTUAL_ENV={envdir}
         PYTHONWARNINGS=default::DeprecationWarning
install_command = pip install {opts} {packages}
whitelist_externals = find
deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/ussuri}
       -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands =
  stestr run {posargs}
  stestr slowest
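# Typical local entry points (for example): "tox -e pep8" for style checks,
# "tox -e py37" for unit tests, "tox -e functional-py37" for functional
# tests, and "tox -e cover" for a combined coverage report.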
[testenv:api-ref]
# This environment is called from CI scripts to test and publish
# the API Ref to docs.openstack.org.
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/ussuri}
  -r{toxinidir}/requirements.txt
  -r{toxinidir}/doc/requirements.txt
whitelist_externals = rm
commands =
  rm -rf api-ref/build
  sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html

[testenv:cover]
whitelist_externals = sh
setenv = {[testenv]setenv}
         PYTHON=coverage run --source octavia --parallel-mode
commands =
  coverage erase
  sh -c 'OS_TEST_PATH={toxinidir}/octavia/tests/unit stestr run {posargs}'
  sh -c 'OS_TEST_PATH={toxinidir}/octavia/tests/functional stestr run {posargs}'
  coverage combine
  # Generate a new HTML report with the combined results
  # otherwise the HTML report will only show partial results
  coverage html -d cover
  coverage xml -o cover/coverage.xml
  coverage report --fail-under=90 --skip-covered

[testenv:functional]
# This will use whatever 'basepython' is set to, so the name is ambiguous.
setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional

[testenv:functional-py36]
basepython = python3.6
setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional

[testenv:functional-py37]
basepython = python3.7
setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional

[testenv:debug]
commands = oslo_debug_helper {posargs}

[testenv:pep8]
commands = flake8
           # RST linter
           doc8 --ignore-path doc/source/contributor/modules specs \
           doc/source octavia CONSTITUTION.rst HACKING.rst README.rst \
           TESTING.rst
           # Run security linter
           {[testenv:bandit]commands}
           # Make sure specs follow our template
           find . -type f -name "*.pyc" -delete
           python -m unittest specs-tests.test_titles
           sh ./tools/misc-sanity-checks.sh
           {toxinidir}/tools/coding-checks.sh --pylint {posargs}
           {[testenv:bashate]commands}
whitelist_externals =
  sh
  find
  bash

[testenv:docs]
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/ussuri}
  -r{toxinidir}/requirements.txt
  -r{toxinidir}/test-requirements.txt
  -r{toxinidir}/doc/requirements.txt
whitelist_externals = rm
commands =
  rm -rf doc/build api-guide/build api-ref/build doc/source/contributor/modules
  sphinx-build -W -b html doc/source doc/build/html
  sphinx-build -W -b html api-ref/source api-ref/build/html

[testenv:pdf-docs]
deps = {[testenv:docs]deps}
whitelist_externals =
  make
  rm
commands =
  rm -rf doc/build/pdf
  sphinx-build -W -b latex doc/source doc/build/pdf
  make -C doc/build/pdf

[testenv:venv]
commands = {posargs}

[testenv:genconfig]
whitelist_externals = mkdir
commands =
         mkdir -p etc/octavia
         oslo-config-generator --output-file etc/octavia/octavia.conf.sample \
         --namespace octavia \
         --namespace oslo.db \
         --namespace oslo.log \
         --namespace oslo.messaging \
         --namespace keystonemiddleware.auth_token \
         --namespace cotyledon

[testenv:genpolicy]
whitelist_externals = mkdir
commands =
         mkdir -p etc/octavia
         oslopolicy-sample-generator \
         --config-file etc/policy/octavia-policy-generator.conf
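# The genconfig and genpolicy environments above regenerate the sample
# configuration and policy files under etc/; they are typically re-run after
# configuration options change.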
[testenv:specs]
whitelist_externals =
  rm
  find
commands =
    find . -type f -name "*.pyc" -delete
    python -m unittest specs-tests.test_titles

[testenv:bandit]
commands = bandit -r octavia -ll -ii -x tests

[flake8]
# [H104]: Empty file with only comments
# [W504]: Line break after binary operator
ignore = H104,W504
show-source = true
builtins = _
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build
import-order-style = pep8
# [H106]: Don't put vim configuration in source files
# [H203]: Use assertIs(Not)None to check for None
# [H204]: Use assert(Not)Equal to check for equality
# [H205]: Use assert(Greater|Less)(Equal) for comparison
# [H904]: Delay string interpolations at logging calls
enable-extensions=H106,H203,H204,H205,H904

[testenv:bashate]
envdir = {toxworkdir}/shared
commands = bash -c "find {toxinidir}             \
         -not \( -type d -name .tox\* -prune \)  \
         -not \( -type d -name .venv\* -prune \) \
         -type f                                 \
         -name \*.sh                             \
# [E005]: File does not begin with #! or have a .sh prefix
# [E006]: Check for lines longer than 79 columns
# [E042]: Local declaration hides errors
# [E043]: Arithmetic compound has inconsistent return semantics
         -print0 | xargs -0 bashate -v -iE006 -eE005,E042,E043"

[hacking]
import_exceptions = octavia.i18n

[flake8:local-plugins]
extension =
  O316 = checks:assert_true_instance
  O318 = checks:assert_equal_or_not_none
  O323 = checks:assert_equal_true_or_false
  O324 = checks:no_mutable_default_args
  O338 = checks:assert_equal_in
  O339 = checks:no_log_warn
  O341 = checks:no_translate_logs
  O342 = checks:check_raised_localized_exceptions
  O345 = checks:check_no_eventlet_imports
  O346 = checks:check_line_continuation_no_backslash
  O347 = checks:revert_must_have_kwargs
  O348 = checks:check_no_logging_imports
paths =
  ./octavia/hacking

[doc8]
max-line-length = 79

[testenv:releasenotes]
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/ussuri}
  -r{toxinidir}/requirements.txt
  -r{toxinidir}/doc/requirements.txt
whitelist_externals = rm
commands =
  rm -rf releasenotes/build
  sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html

[testenv:requirements]
deps =
  -egit+https://opendev.org/openstack/requirements#egg=openstack-requirements
whitelist_externals = sh
commands =
    sh -c '{envdir}/src/openstack-requirements/playbooks/files/project-requirements-change.py --req {envdir}/src/openstack-requirements --local {toxinidir} master'

# File: octavia-6.2.2/zuul.d/jobs.yaml

- job:
    name: publish-openstack-octavia-amphora-image
    parent: publish-openstack-artifacts
    run: playbooks/image-build/run.yaml
    post-run: playbooks/image-build/post.yaml
    required-projects:
      - openstack/diskimage-builder
      - openstack/octavia
      - openstack/octavia-lib

- job:
    name: publish-openstack-octavia-amphora-image-bionic
    parent: publish-openstack-octavia-amphora-image
    description: |
      Publish Ubuntu Bionic (18.04) based amphora image to tarballs.o.o.
    vars:
      amphora_os: ubuntu
      amphora_os_release: bionic

- job:
    name: publish-openstack-octavia-amphora-image-centos8
    parent: publish-openstack-octavia-amphora-image
    description: |
      Publish CentOS 8 based amphora image to tarballs.o.o.
    vars:
      amphora_os: centos
      amphora_image_size: 3
      amphora_os_release: 8
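# The bionic and centos8 jobs above only override image variables; the build
# and upload logic lives in the image-build playbooks of their common parent,
# publish-openstack-octavia-amphora-image.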
- job:
    name: octavia-grenade
    parent: grenade
    required-projects:
      - opendev.org/openstack/grenade
      - opendev.org/openstack/octavia
      - opendev.org/openstack/octavia-lib
      - opendev.org/openstack/octavia-tempest-plugin
      - opendev.org/openstack/python-octaviaclient
    vars:
      devstack_localrc:
        DIB_LOCAL_ELEMENTS: openstack-ci-mirrors
        LIBVIRT_TYPE: kvm
        LIBVIRT_CPU_MODE: host-passthrough
      devstack_plugins:
        octavia: https://opendev.org/openstack/octavia.git
        octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin.git
      devstack_services:
        s-account: false
        s-container: false
        s-object: false
        s-proxy: false
        c-api: false
        c-bak: false
        c-vol: false
        cinder: false
        octavia: true
        o-api: true
        o-cw: true
        o-hm: true
        o-hk: true
      tempest_plugins:
        - octavia-tempest-plugin
      tempest_test_regex: ^octavia_tempest_plugin.*\[.*\bsmoke\b.*\]
      tox_envlist: all
      zuul_copy_output:
        '/var/log/dib-build/': 'logs'
        '/var/log/octavia-tenant-traffic.log': 'logs'
        '/var/log/octavia-amphora.log': 'logs'

- job:
    name: octavia-v2-dsvm-scenario-amphora-v2
    parent: octavia-v2-dsvm-scenario
    vars:
      devstack_localrc:
        OCTAVIA_ENABLE_AMPHORAV2_PROVIDER: True
      devstack_local_conf:
        post-config:
          $OCTAVIA_CONF:
            api_settings:
              default_provider_driver: amphorav2
              enabled_provider_drivers: amphorav2:The v2 amphora driver.
            task_flow:
              jobboard_expiration_time: 100
        test-config:
          "$TEMPEST_CONFIG":
            load_balancer:
              enabled_provider_drivers: amphorav2:The v2 amphora driver.
              provider: amphorav2

- project-template:
    name: octavia-tox-tips
    check:
      jobs:
        - octavia-tox-py37-tips
        - octavia-tox-functional-py37-tips

- job:
    name: octavia-tox-py37-tips
    parent: openstack-tox-py37
    description: |
      Run tox python 3.7 unit tests against master of related libraries.
    vars:
      tox_install_siblings: true
      zuul_work_dir: src/opendev.org/openstack/octavia
    required-projects:
      - openstack/octavia-lib
      - openstack/octavia

- job:
    name: octavia-tox-functional-py37-tips
    parent: openstack-tox-functional-py37
    description: |
      Run tox python 3.7 functional tests against master of related libraries.
    vars:
      tox_install_siblings: true
      zuul_work_dir: src/opendev.org/openstack/octavia
    required-projects:
      - openstack/octavia-lib
      - openstack/octavia
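# In the two "-tips" jobs above, tox_install_siblings: true tells the tox job
# to install the required-projects (such as octavia-lib) from the Zuul
# checkouts rather than from released packages, which is what running
# "against master" means here.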
- job:
    name: octavia-amphora-image-build
    parent: base
    description: |
      Builds the amphora image using the released diskimage-builder version,
      not Git master. This job does not publish the image.
    run: playbooks/image-build/run.yaml
    required-projects:
      - openstack/octavia
      - openstack/octavia-lib
    vars:
      amphora_os: ubuntu
      amphora_os_release: bionic

# File: octavia-6.2.2/zuul.d/projects.yaml

# Note: Some official OpenStack wide jobs are still defined in the
# project-config repository
- project:
    templates:
      - check-requirements
      - periodic-stable-jobs-neutron
      - openstack-cover-jobs
      - openstack-python3-ussuri-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
      - octavia-tox-tips
    check:
      jobs:
        - openstack-tox-functional-py36:
            irrelevant-files:
              - ^.*\.rst$
              - ^api-ref/.*$
              - ^doc/.*$
              - ^etc/.*$
              - ^octavia/tests/unit/.*$
              - ^releasenotes/.*$
        - octavia-v2-dsvm-noop-api:
            irrelevant-files: &irrelevant-files
              - ^.*\.rst$
              - ^api-ref/.*$
              - ^doc/.*$
              - ^octavia/tests/.*$
              - ^releasenotes/.*$
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-scenario:
            irrelevant-files: *irrelevant-files
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-tls-barbican:
            irrelevant-files: *irrelevant-files
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-spare-pool:
            irrelevant-files: *irrelevant-files
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-act-stdby-dsvm-scenario:
            irrelevant-files: *irrelevant-files
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-grenade:
            irrelevant-files:
              - ^.*\.rst$
              - ^api-ref/.*$
              - ^doc/.*$
              - ^octavia/tests/.*$
              - ^releasenotes/.*$
              - ^setup.cfg$
              - ^tools/.*$
              - ^(test-|)requirements.txt$
              - ^tox.ini$
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-cinder-amphora:
            irrelevant-files: *irrelevant-files
            voting: false
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-scenario-two-node:
            irrelevant-files: *irrelevant-files
            voting: false
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-scenario-amphora-v2:
            irrelevant-files: *irrelevant-files
            voting: false
            nodeset: octavia-single-node-ubuntu-bionic
    gate:
      fail-fast: true
      queue: octavia
      jobs:
        - openstack-tox-functional-py36:
            irrelevant-files:
              - ^.*\.rst$
              - ^api-ref/.*$
              - ^doc/.*$
              - ^etc/.*$
              - ^octavia/tests/unit/.*$
              - ^releasenotes/.*$
        - octavia-v2-dsvm-noop-api:
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-scenario:
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-tls-barbican:
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-dsvm-spare-pool:
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-v2-act-stdby-dsvm-scenario:
            nodeset: octavia-single-node-ubuntu-bionic
        - octavia-grenade:
            nodeset: octavia-single-node-ubuntu-bionic
    periodic:
      jobs:
        - publish-openstack-octavia-amphora-image-bionic:
            branches: ^(?!stable/.*).*$
        - publish-openstack-octavia-amphora-image-centos8:
            branches: ^(?!stable/.*).*$
        - octavia-amphora-image-build
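# Note: the shared irrelevant-files list in the check pipeline above is
# defined once with the YAML anchor &irrelevant-files and reused on later
# jobs via the *irrelevant-files alias.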