oslo.messaging-14.9.0
=====================

oslo.messaging-14.9.0/.coveragerc:

[run]
branch = True
source = oslo_messaging
omit = oslo_messaging/tests/*

[report]
ignore_errors = True

oslo.messaging-14.9.0/.pre-commit-config.yaml:

# We from the Oslo project decided to pin repos based on the
# commit hash instead of the version tag to prevent arbitrary
# code from running on developers' machines. To update to a
# newer version, run `pre-commit autoupdate` and then replace
# the newer versions with their commit hash.
default_language_version:
  python: python3
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: 9136088a246768144165fcc3ecc3d31bb686920a  # v3.3.0
    hooks:
      - id: trailing-whitespace
      # Replaces or checks mixed line ending
      - id: mixed-line-ending
        args: ['--fix', 'lf']
        exclude: '.*\.(svg)$'
      # Forbid files which have a UTF-8 byte-order marker
      - id: check-byte-order-marker
      # Checks that non-binary executables have a proper shebang
      - id: check-executables-have-shebangs
      # Check for files that contain merge conflict strings.
      - id: check-merge-conflict
      # Check for debugger imports and py37+ breakpoint()
      # calls in python source
      - id: debug-statements
      - id: check-yaml
        files: .*\.(yaml|yml)$
  - repo: local
    hooks:
      - id: flake8
        name: flake8
        additional_dependencies:
          - hacking>=6.1.0,<=6.2.0
        language: python
        entry: flake8
        files: '^.*\.py$'
        exclude: '^(doc|releasenotes|tools)/.*$'

oslo.messaging-14.9.0/.stestr.conf:

[DEFAULT]
test_path=./oslo_messaging/tests
top_path=./

oslo.messaging-14.9.0/.zuul.yaml:

- job:
    name: oslo.messaging-tox-py310-func-scenario01
    parent: openstack-tox-py310
    vars:
      tox_envlist: py310-func-scenario01
      bindep_profile: rabbit

- job:
    name: oslo.messaging-tox-py310-func-scenario02
    parent: openstack-tox-py310
    vars:
      tox_envlist: py310-func-scenario02
      bindep_profile: rabbit kafka amqp1

# Begin v3 native jobs
# See https://docs.openstack.org/devstack/latest/

- job:
    name: oslo.messaging-devstack-tempest-full-base
    description: |
      Base for all devstack based tempest full testing jobs (with neutron)
    abstract: true
    parent: devstack-tempest
    timeout: 10800
    required-projects:
      - openstack/oslo.messaging
    vars:
      tox_envlist: full

- job:
    name: oslo.messaging-src-dsvm-full-rabbit
    description: |
      Run full tempest tests against rabbitmq
    parent: oslo.messaging-devstack-tempest-full-base

- job:
    name: oslo.messaging-src-dsvm-full-amqp1-hybrid
    description: |
      Run the full tempest tests using the AMQP 1.0 driver for RPC and
      RabbitMQ for Notifications.
    parent: oslo.messaging-devstack-tempest-full-base
    required-projects:
      - openstack/devstack-plugin-amqp1
    vars:
      devstack_localrc:
        AMQP1_SERVICE: qpid-hybrid
      devstack_plugins:
        devstack-plugin-amqp1: https://opendev.org/openstack/devstack-plugin-amqp1
      zuul_copy_output:
        '{{ devstack_log_dir }}/qdrouterd.log': logs

- job:
    name: oslo.messaging-src-dsvm-full-kafka-hybrid
    description: |
      Run the full tempest tests using the AMQP 1.0 driver for RPC and
      Apache Kafka for Notifications.
    parent: oslo.messaging-devstack-tempest-full-base
    required-projects:
      - openstack/devstack-plugin-kafka
    vars:
      devstack_plugins:
        devstack-plugin-kafka: https://opendev.org/openstack/devstack-plugin-kafka
      zuul_copy_output:
        '{{ devstack_log_dir }}/server.log': logs

- job:
    name: oslo.messaging-grenade
    parent: grenade
    timeout: 10800
    required-projects:
      - openstack/oslo.messaging
    irrelevant-files:
      - ^(test-|)requirements.txt$
      - ^setup.cfg$

- job:
    name: oslo.messaging-grenade-multinode
    parent: grenade-multinode
    timeout: 10800
    required-projects:
      - openstack/oslo.messaging
    irrelevant-files:
      - ^(test-|)requirements.txt$
      - ^setup.cfg$

- project:
    templates:
      - check-requirements
      - lib-forward-testing-python3
      - openstack-cover-jobs
      - openstack-python3-jobs
      - periodic-stable-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
      jobs:
        - oslo.messaging-tox-py310-func-scenario01
        - oslo.messaging-tox-py310-func-scenario02:
            voting: false
        - oslo.messaging-src-dsvm-full-rabbit
        - oslo.messaging-src-dsvm-full-amqp1-hybrid:
            voting: false
        - oslo.messaging-src-dsvm-full-kafka-hybrid:
            voting: false
        - oslo.messaging-grenade:
            voting: false
        - oslo.messaging-grenade-multinode:
            voting: false
    gate:
      jobs:
        - oslo.messaging-tox-py310-func-scenario01
        - oslo.messaging-src-dsvm-full-rabbit

oslo.messaging-14.9.0/AUTHORS:

Aaron Rosen Abhijeet Malawade Adam Spiers Ala Rezmerita Alex Holden Alexei Kornienko Alexey Lebedeff Alexi Yelistratov Andras Kovi Andreas Jaeger Andreas Jaeger Andrew Bogott Andrew Smith Andy Smith Anh Tran Arnaud Morin Arnaud Morin Assaf Muller Atsushi SAKAI Ayoub BOUSSELMI BANASHANKAR KALEBELAGUNDI VEERA Balazs Gibizer Balazs Gibizer Ben Nemec Boris Pavlovic Brant Knudson Brian Elliott Chandan Kumar Chang Bo Guo ChangBo Guo(gcb) Charles Short Chenjun Shen Chet Burgess Ching Kuo Chris Dent Christian Berendt Christian Strack Clark Boylan Claudiu Belu Clint Byrum Corey Bryant Corey Wright Cyril Roelandt Daisuke Fujita Dan Prince Dan Smith Daniel Alvarez Daniel Bengtsson Davanum Srinivas (dims) Davanum Srinivas Davanum Srinivas David Medberry Dina Belova Dirk Mueller Dmitriy Rabotyagov Dmitriy Ukhlov Dmitry Mescheryakov Dmitry Tantsur Dong Ma Doug Hellmann Doug Hellmann Doug Royal Dougal Matthews Edan David Edu Alcaniz Elancheran Subramanian Elena Ezhova Eric Brown Eric Guo Eyal Fei Long Wang Flaper Fesp Flavio Percoco Frode Nordahl Gabriele Gauvain Pocentek George Silvis, III Gevorg Davoian Ghanshyam Mann Gordon Sim Gorka Eguileor Gregory Haynes Guillaume Espanel Guillaume Espanel Haifeng.Yan Hanxi Liu Hervé Beraud Hiroyasu.OHYAMA Hu Yupeng Ian Wienand Ihar Hrachyshka Ildar Svetlov Ilya Pekelny Ilya Shakhat Ilya Shakhat Ilya Tyaptin Iswarya_Vakati James Carey James E. Blair
James Page Jamie Lennox Javeme Jay Faulkner Jens Rosenboom Jeremy Hanmer Jeremy Liu Jeremy Stanley JiaJunsu Jian Wen Jie Li Jim Rollenhagen Joe Gordon Joe Harrison John Eckersberg John L. Villalovos Jorhson Deng Joshua Harlow Joshua Harlow Joshua Harlow Juan Antonio Osorio Robles Julien Danjou Kenneth Giusti Kevin Benton Kirill Bespalov Komei Shimamura Konstantin Kalin Kui Shi LIU Yulong Lance Bragstad Li Ma Li-zhigang Lukas Bezdicka Luong Anh Tuan Mark McLoughlin Matt Riedemann Matt Riedemann Matthew Booth Mehdi Abaakouk Mehdi Abaakouk Mehdi Abaakouk Michal Arbet Mitsuhiro SHIGEMATSU Moisés Guimarães de Medeiros Monty Taylor Nejc Saje Nguyen Hung Phuong Nicolas Simonds Nikhil Manchanda Nikita Kalyanov Nikola Dipanov Numan Siddique Oleg Bondarev Oleksii Zamiatin OpenStack Release Bot Oscar Huang Paul Michali Paul Vinciguerra Pierre Riteau Pierre Riteau QingchuanHao Rajath Agasthya Ronald Bradford Ruby Loo Russell Bryant Ryan Rossiter Sairam Vengala Sandy Walsh Sean Dague Sean McGinnis Sean McGinnis Sean Mooney Sergey Lukjanov Sergey Vilgelm Shahar Lev Slawek Kaplonski Stanislav Kudriashev Stanisław Pitucha Stephen Finucane Stephen Finucane Steve Kowalik Swapnil Kulkarni (coolsvap) Takashi Kajinami Takashi NATSUME Thomas Bechtold Thomas Goirand Thomas Goirand Thomas Herve Thomas Herve Tobias Urdin Tobias Urdin TommyLike Tony Breeds Tovin Seven Victor Sergeyev Victor Stinner Victor Stinner Vincent Untz Vu Cong Tuan William Henry Wonil Choi Xavier Queralt XiaBing Yao YAMAMOTO Takashi Yaguo Zhou Yulia Portnova ZhangHongtao Zhao Lei Zhen Qin Zhi Kun Liu ZhiQiang Fan ZhijunWei ZhongShengping Zhongyue Luo armando-migliaccio avnish blue55 caoyuan chenxing damani42 dengzhaosen dparalen dukhlov ericxiett frankming gecong1973 gord chung gordon chung gtt116 hamza alqtaishat howardlee hussainchachuliya jacky06 jazeltq jinxingfang jolie joyce julien.cosmao kbespalov kgriffs lidong lihong7313 <38098369@qq.com> lingyongxu liu-lixiu liusheng liuyamin loooosy lqslan maoshuai mb melissaml ozamiatin pengyuesheng ricolin root shenjiatong sonu.kumar tengqm ting.wang ushen venkatamahesh viktor-krivak wanglmopenstack weiweigu wu.shiming xuanyandong yan.haifeng yangyawei zhang-shaoman zhangboye zhangjl zhangshengping2012 zhangxuanyuan zhiCHang1990

oslo.messaging-14.9.0/CONTRIBUTING.rst:

If you would like to contribute to the development of oslo's libraries,
first take a look at this page:

https://specs.openstack.org/openstack/oslo-specs/specs/policy/contributing.html

If you would like to contribute to the development of OpenStack, you must
follow the steps on this page:

https://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack should be
submitted for review via the Gerrit tool, following the workflow documented
at:

https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:

https://bugs.launchpad.net/oslo.messaging

oslo.messaging-14.9.0/ChangeLog:

CHANGES
=======

14.9.0
------

* Reduce log level to DEBUG for new messages
* Deprecate the option heartbeat\_in\_pthread
* Add RPC incoming and reply log

14.8.1
------

* Handle NotFound exception when declaring a queue
* rabbit: Make PreconditionFailed warning log more informative

14.8.0
------

* reno: Update master for unmaintained/zed
* Remove old excludes
* Make oslo.messaging, magnum, and zaqar reproducible
* Fix incorrect desc of rabbit\_stream\_fanout option
* Update master for stable/2024.1
* reno: Update master for unmaintained/xena
* reno: Update master for unmaintained/wallaby
* reno: Update master for unmaintained/victoria
* kafka: Fix invalid hostaddr format for IPv6 address
* Use StopWatch timer when waiting for message
* Restore read stream queues from last known offset

14.7.0
------

* Display coverage report
* reno: Update master for unmaintained/yoga
* Display the reply queue's name in timeout logs
* Bump hacking (again)
* Bump hacking
* Remove scenario 03/04 tests from CI
* Drop unused function from scenario test script
* Utilize the new RequestContext redacted\_copy method
* cleanup amqp1 scenarios remnants setups
* Add an option to use rabbitmq stream for fanout queues
* Update the python search path for extra qdrouter modules
* Update python classifier in setup.cfg

14.6.0
------

* Remove translation sections from setup.cfg
* Fix clearing of the fake RPC Exchange
* Add QManager to amqp driver
* Enable use of quorum queues for transient messages
* Auto-delete the failed quorum rabbit queues
* Allow creating transient queues with no expire

14.5.0
------

* Add some logs when sending RPC messages
* Move integration jobs to Ubuntu 22.04 (Jammy)
* Imported Translations from Zanata
* test py311 job
* Add is\_admin to safe fields list for notifications
* Update master for stable/2023.2

14.4.0
------

* Add Python3 antelope unit tests
* Only allow safe context fields in notifications
* Set default heartbeat\_rate to 3
* Imported Translations from Zanata
* Bump bandit and make oslo.messaging compatible with latest rules

14.3.1
------

* Increase ACK\_REQUEUE\_EVERY\_SECONDS\_MAX to exceed default kombu\_reconnect\_delay

14.3.0
------

* Disable greenthreads for RabbitDriver "listen" connections
* Update master for stable/2023.1
* Fix typo in quorum-related variables for RabbitMQ

14.2.0
------

* Support overriding class for get\_rpc\_\* helper functions
* tox cleanups

14.1.0
------

* Implement get\_rpc\_client function
* Warn when we force creating a non durable exchange
* Deprecate the amqp1 driver and Remove qpid functional tests
* Update master for stable/zed

14.0.0
------

* Change default value of "heartbeat\_in\_pthread" to False
* Remove logging from ProducerConnection.\_produce\_message

13.0.0
------

* update hacking pin to support flake8 3.8.3
* Drop python3.6/3.7 support in testing runtime

12.14.0
-------

* Add EXTERNAL as rabbit login method
* Add quorum queue control configurations
* Bump bandit
* tests: Fix test failures with kombu >= 5.2.4
* Add Python3 zed unit tests
* Update master for stable/yoga

12.13.0
-------

* Adding support for rabbitmq quorum queues

12.12.0
-------

* [rabbit] use retry parameters during notification sending
* Update python testing classifier
* Force creating non durable control exchange when a precondition failed
* Reproduce bug 1917645

12.11.1
-------

* amqp1: fix race when reconnecting
* Add a new option to enforce the OpenSSL FIPS mode

12.11.0
-------

* Remove deprecation of heartbeat\_in\_pthread

12.10.0
-------

* rabbit: move stdlib\_threading bits into \_utils
* Add Python3 yoga unit tests
* Update master for stable/xena
* use message id cache for RPC listener

12.9.1
------

* amqp1: Do not reuse \_socket\_connection on reconnect
* amqp1: re-organize TestFailover to be reused by TestSSL
* Revert "Disable AMQP 1.0 SSL unit tests"

12.9.0
------

* limit maximum timeout in the poll loop
* Add Support For oslo.metrics
* Changed minversion in tox to 3.18.0
* Upgrade the pre-commit-hooks version
* setup.cfg: Replace dashes with underscores
* Remove the oslo\_utils.fnmatch

12.8.0
------

* Remove references to 'sys.version\_info'
* Fix formatting of release list
* Move flake8 as a pre-commit local target
* Add Python3 xena unit tests
* Update master for stable/wallaby

12.7.1
------

* Remove lower constraints
* Correctly handle missing RabbitMQ queues

12.7.0
------

* Deprecate the mandatory flag

12.6.1
------

* remove unicode from code
* Remove six
* Fix type of direct\_mandatory\_flag opt
* Dropping lower constraints testing
* Move jobs to py38
* fix variable name
* Fix doc title rendering
* Use TOX\_CONSTRAINTS\_FILE
* Use py3 as the default runtime for tox

12.6.0
------

* Python 3.9: isAlive is removed
* add min of 1 to rpc\_conn\_pool\_size
* Adding pre-commit
* Add Python3 wallaby unit tests
* Update master for stable/victoria

12.5.0
------

* [goal] Migrate testing to ubuntu focal

12.4.0
------

* Run rabbitmq heartbeat in python thread by default
* Add a ping endpoint to RPC dispatcher

12.3.0
------

* Cancel consumer if queue down
* Bump bandit version

12.2.2
------

* Move legacy grenade jobs to Zuul v3
* Catch ConnectionForced Exception

12.2.1
------

* tests: Resolves issues with kombu > 4.6.8
* Simplify tools/test-setup.sh
* Drop a python 2 exception management
* Fix pygments style
* bindep: Add 'librdkafka-dev' dependency

12.2.0
------

* Fix hacking min version to 3.0.1
* Switch to newer openstackdocstheme and reno versions
* Remove the unused coding style modules
* Print warning message when connection running out
* Remove six usage
* Remove monotonic usage
* Align contributing doc with oslo's policy
* Bump default tox env from py37 to py38
* Add py38 package metadata
* Add release notes links to doc index
* Imported Translations from Zanata
* Add Python3 victoria unit tests
* Update master for stable/ussuri
* Fix some typos

12.1.0
------

* Update hacking for Python3

12.0.0
------

* Remove the deprecated blocking executor
* remove outdated header
* reword releasenote for py27 support dropping
* Setup backend scenarios for functional tests

11.0.0
------

* [ussuri][goal] Drop python 2.7 support and testing
* Don't log NoSuchMethod for special non-existing methods
* Add support for kafka SSL autentication
* Adding debug logs on AMQPListener poll
* tox: Trivial cleanup

10.5.0
------

* Ignore releasenote cache within git untracked files
* Do not use threading.Event
* Removed unused variable pools

10.4.1
------

* Revert "Add RPC incoming and reply log"
* Remove telemetry checks

10.4.0
------

* Migrate grenade jobs to py3
* Make sure minimum amqp is 2.5.2

10.3.0
------

* Remove unused variable WAKE\_UP
* Switch to Ussuri jobs
* tox: Keeping going with docs
* Add RPC incoming and reply log
* Modify some comments to make them clickable
* Fix spacing in help message
* Update the constraints url
* Align message serialization between drivers
* Update master for stable/train
* Fix help text for heartbeat\_in\_pthread option

10.2.0
------

* Add the mandatory flag for direct send

10.1.0
------

* Introduce RabbitMQ driver documentation
* Allow users run the rabbitmq heartbeat inside a standard pthread
* Fix nits on kafka compression help text

10.0.0
------

* Correcting typo in acknowledge spelling
* Bump the openstackdocstheme extension to 1.20

9.8.0
-----

* Add Python 3 Train unit tests
* Blacklist sphinx 2.1.0 (autodoc bug)
* Use default exchange for direct messaging
* doc: Cleanup admin docs
* Implement mandatory flag for RabbitMQ driver
* Implement the transport options
* Add the "transport\_options" parameter to the amqp1 and kafka drivers
* Support kafka message compression
* fix typos
* Add transport\_options parameter
* Download kafka from archive.apache.org

9.7.2
-----

9.7.1
-----

* Add thread name to the RabbitMQ heartbeat thread

9.7.0
-----

* Cap Bandit below 1.6.0 and update Sphinx requirement
* Fix switch connection destination when a rabbitmq cluster node disappear
* Replace git.openstack.org URLs with opendev.org URLs
* Remove log translation and i18n

9.6.0
-----

* OpenDev Migration Patch
* Dropping the py35 testing
* Consider the topic parameter as an array in client-notify
* Handle collections.abc deprecations
* Retry to declare a queue after internal error
* Unmark RabbitMQ heartbeat as experimental
* Replace openstack.org git:// URLs with https://
* Explain why Listener connections cannot be pooled
* Update master for stable/stein
* Clarify the documentation for pooled Notification Listeners
* Update messaging intermediaries for amqp1 tests

9.5.0
-----

* Handle unexpected failures during call monitor heartbeat
* add python 3.7 unit test job
* Mark telemetry tests nv and remove from gate
* Bump amqp requirement version to >=2.4.1
* Change python3.5 job to python3.7 job on Stein+

9.4.0
-----

* Bump amqp requirement version to >= 2.4.0
* Kafka driver deployment guide
* Update hacking version

9.3.1
-----

* Avoid unnecessary use of items()

9.3.0
-----

* Update mailinglist from dev to discuss
* Switch driver to confluent-kafka client library
* Don't use monotonic with Python >=3.3

9.2.1
-----

* Use ensure\_connection to prevent loss of connection error logs

9.2.0
-----

* Add a test for rabbit URLs lacking terminating '/'

9.1.1
-----

* Use '/' for the vhost if the transport\_url has no trailing '/'

9.1.0
-----

* Using pip as a python module
* doc: Remove crud from conf.py file
* Clean up .gitignore references to personal tools
* Allow transport\_url initialization in ConfFixture constructor

9.0.1
-----

* Fix oslo.messaging default transport
* always build universal wheels
* Use default exchange for direct messaging

9.0.0
-----

* Refactor GetTransportSadPathTestCase
* Add release note about deprecated option removals
* Remove rpc\_backend and ConfFixture.transport\_driver
* Remove deprecated rabbit options
* Use templates for cover and lower-constraints
* Remove deprecated amqp1 options
* Remove rabbit\_durable\_queues deprecated option
* Remove default\_{host,port} deprecated options
* Remove the deprecated ZeroMQ driver
* Fix the coverage tox tests
* Avoid logging passwords on connection events
* add lib-forward-testing-python3 test job
* add python 3.6 unit test job
* import zuul job settings from project-config
* Call listener stop only if listener is initialized
* Update reno for stable/rocky
* Remove setting of DEVSTACK\_GATE\_EXERCISES

8.1.0
-----

* Bump py-amqp to >= 2.3.0
* Issue blocking ACK for RPC requests from the consumer thread

8.0.0
-----

* Do not access the connection's socket during error callback
* Fix debug log message - missing argument
* py37: deal with Exception repr changes
* py37: drop use of 'async' as parameter name
* Remove transport aliases support
* Moving stestr to correct package order in test-requirements.txt
* Switch to stestr
* No longer allow redundant calls to server start()
* Fix the bandit security linter test

7.0.0
-----

* Replace 'raise StopIteration' with 'return'
* Remove fake\_rabbit configuration option
* Add release notes link to README
* Add ZeroMQ deprecation release note

6.5.0
-----

* Fix oslo messaging gating
* Enable RPC call monitoring in AMQP 1.0 driver
* Mark the ZeroMQ driver deprecated

6.4.1
-----

* fix tox python3 overrides
* Add warning output if failed to rebuild exception when deserialize
* Correct usage of transport\_url in example

6.4.0
-----

* Add ConfFixture.transport\_url

6.3.0
-----

* Convert legacy zuul jobs to v3
* [rabbitmq] Implement active call monitoring
* Make oslo.messaging-devstack-amqp1 job non-voting
* Remove stale pip-missing-reqs tox test
* Add a skeleton for a v3-native devstack job
* Add heartbeat() method to RpcIncomingMessage
* Trivial: Update pypi url to new url
* Add kafka for python 3 functional test

6.2.0
-----

* Move requirements for the optional drivers (amqp1, kafka)
* set default python to python3
* fix lower constraints and uncap eventlet

6.1.0
-----

* Revert "rabbit: Don't prefetch when batch\_size is set"
* Update kafka and dsvm jobs
* add lower-constraints job
* remove zmq tests
* Updated from global requirements

6.0.0
-----

* Remove the deprecated Pika driver
* update configuration for qdrouter v1.0.0
* Updated from global requirements
* Add restart() method to DecayingTimer

5.36.0
------

* Imported Translations from Zanata
* Add rabbitmq-server for platform:rpm into bindep.txt
* Restore devstack project name in amqp1 test
* Switch from pip\_missing\_reqs to pip\_check\_reqs
* Add kafka config options for security (ssl/sasl)
* Zuul: Remove project name
* Modify grammatical errors
* Fixed telemetry integration zuul jobs
* Zuul: Remove project name
* Updated from global requirements
* Imported Translations from Zanata
* Update reno for stable/queens
* Updated from global requirements
* Imported Translations from Zanata
* Add support for synchronous commit
* Update telemetry integration playbooks
* Follow the new PTI for document build

5.35.0
------

* Add kafka driver vhost emulation
* Updated from global requirements
* Create doc/requirements.txt
* Update kafka functional test
* Imported Translations from Zanata
* Updated from global requirements

5.34.1
------

* Imported Translations from Zanata
* Avoid tox\_install.sh for constraints support
* rabbitmq: don't wait for message ack/requeue
* Provide bindep\_profile in openstack-tox job setup
* Updated from global requirements
* Add zmq packages that are no longer in bindep-fallback
* don't convert generator to list unless required
* sort when using groupby

5.34.0
------

* Remove setting of version/release from releasenotes
* Updated from global requirements
* Updated from global requirements
* Catch socket.timeout when doing heartbeat\_check
* Updated from global requirements
* fix batch handling
* Remove stable/newton from zuul settings
* Zuul: add file extension to playbook path

5.33.1
------

* Move legacy zuulv3 tests into oslo.messaging repo
* Imported Translations from Zanata
* Flesh out transport\_url help
* Fix typo in contributor docs title

5.33.0
------

* Fix default value of RPC dispatcher access\_policy
* Fix wrong transport warnings in functional tests
* Updated from global requirements

5.32.0
------

* Updated from global requirements
* Warn when wrong transport instance is used
* Fix some reST field lists in docstrings
* Remove pbr version from setup.py
* Suppress excessive debug logs when consume rabbit
* Fix use of print function on python3

5.31.0
------

* Remove envelope argument from driver send() interface
* Imported Translations from Zanata
* Updated from global requirements
* Update amqp 1.0 driver deployment guide
* Prevent rabbit from raising unexpected exceptions
* Updated from global requirements
* Remove unnecessary setUp function in testcase
* Add licenses and remove unused import in doc/source/conf.py
* Ensure RPC endpoint target attribute is correct
* Fix a typo
* Update links in README
* Updated from global requirements
* Class-level \_exchanges in FakeExchangeManager
* fix 'configration' typo
* Update reno for stable/pike
* Add support for virtual hosts
* Remove the test that counts kombu connect calls

5.30.0
------

* Updated from global requirements
* Update URLs in documents according to document migration
* Add monkey\_patch to demo code

5.29.0
------

* switch from oslosphinx to openstackdocstheme
* update the docs url in the readme
* rearrange content to fit the new standard layout
* Updated from global requirements
* Enable some off-by-default checks

5.28.0
------

* Updated from global requirements
* Add kafka\_driver directory

5.27.0
------

* Updated from global requirements
* Fix html\_last\_updated\_fmt for Python3
* Add note for blocking executor deprecation
* Fix rabbitmq driver with blocking executor
* Build universal wheels
* Updated from global requirements
* Fix serializer tests
* deprecated blocking executor

5.26.0
------

* Updated from global requirements
* Clean up the TransportURL documentation
* Mark the Pika driver as deprecated

5.25.0
------

* Updated from global requirements
* Updated from global requirements
* Add missing {posargs:} to AMQP 1.0 functional tests
* rabbit: restore synchronous ack/requeue

5.24.2
------

* Updated from global requirements
* [AMQP 1.0] Properly shut down test RPC server

5.24.1
------

* Updated from global requirements
* Fix the amqp1 SSL test CA certificate
* Add get\_rpc\_transport call
* Disable AMQP 1.0 SSL unit tests

5.24.0
------

5.23.0
------

* Fix notification tests not unmocking logging
* Remove use of mox stubs
* Fix aliases deprecation
* tests: fix MultiStrOpt value
* Retry support for oslo\_messaging\_notifications driver

5.22.0
------

* [AMQP 1.0] Add default SASL realm setting
* Updated from global requirements
* Remove usage of parameter enforce\_type

5.21.0
------

* Optimize the link address
* [AMQP 1.0] if RPC call is configured as presettled ignore acks
* Mock 'oslo\_messaging.notify.\_impl\_routing.LOG' in notifier tests
* Updated from global requirements
* Add "ssl" option for amqp driver
* Refactor logic of getting exector's executor\_thread\_pool\_size
* remove all kombu<4.0.0 workarounds

5.20.0
------

* serializer: remove deprecated RequestContextSerializer
* Try to fix TestSerializer.test\_call\_serializer failed randomly
* Updated from global requirements
* Deprecate username/password config options in favor of TRANSPORT\_URL
* Add HACKING.rst
* Break user credentials from host at the rightmost '@'
* [zmq] Prevent access to rpc\_response\_timeout
* [zmq] pass a dummy TransportURL to register\_opts
* Fix simulator's use of Notifier - use 'topics' not 'topic'
* Trivial: Add executor 'threading' in docstring
* Deprecate parameter aliases
* Use Sphinx 1.5 warning-is-error
* tox: Build docs with Python 2.7

5.19.0
------

* Updated from global requirements
* Remove self.mox
* Move decorator updated\_kwarg\_default\_value to right place

5.18.0
------

* Remove old messaging notify driver alias
* [Fix gate]Update test requirement
* Updated from global requirements
* Allow checking if notifier is enabled
* RabbitMQ: Standardize SSL parameter names
* drop topic keyword from Notifier
* Validate the transport url query string
* drivers: use common.ConfigOptsProxy everywhere
* Stop using oslotest.mockpatch
* tests: don't run functional tests in parallel
* rabbit: make ack/requeue thread-safe
* Fix releasenotes
* Remove mox3 from test-requirements.txt
* Updated from global requirements
* [zmq] Update configurations documentation
* Fix type of the kafka\_consumer\_timeout option
* [zmq] Dynamic connections send failure
* support kombu4
* Test:Use unittest.mock on Python 3
* Fix the typo
* pbr.version.VersionInfo needs package name (oslo.xyz and not oslo\_xyz)
* [zmq] Properly analyse \`use\_dynamic\_connections\` option
* [zmq] Dummy add value aging mechanism
* kafka: skip multiple servers tests
* kafka: ensure topics are created
* kafka: fix python3 exception
* kafka: Remove testing hack for kafka
* [zmq] Failure of dynamic connections fanout
* Update reno for stable/ocata
* Return list of addresses for IPV4 and IPV6

5.17.0
------

* [zmq] Dynamic connections failover
* [zmq] Fix py35 gate
* [zmq] Use more stable configuration in voting job
* Remove references to Python 3.4
* [AMQP 1.0] Fix SSL client authentication
* [zmq] Support py35 testenv
* [zmq] Distinguish Round-Robin/Fanout socket sending mode
* tests: cleanup monkey path
* [AMQP 1.0] Resend messages that are released or modified
* gate: Remove useless files
* [zmq] Redis TTL for values
* eventlet is no more a hard dependency
* [AMQP 1.0] Propagate authentication errors to caller
* ensure we set channel in lock
* tox: use already installed kafka if present
* kafka: remove no really implemented feature
* kafka: return to poller when timeout is reach
* kafka: Don't hide unpack/unserialize exception
* kafka: timeout is in milliseconds
* kafka: disable batch for functional tests
* kafka: Remove Producer singleton
* Moving driver to new kafka-python version
* tox: rename zeromq target
* tests: make rabbit failover failure more helpful
* [zmq] Refactor make \`zmq\_address.target\_to\_key\` a universal method
* Updated from global requirements
* [zmq] Restore static direct connections
* reject when skipping failed messages
* fix one typo
* [AMQP 1.0] Setup the amqp1 test environment on ubuntu
* test\_rabbitmq: remove dead code

5.16.0
------

* Updated from global requirements
* Replace mox with mock
* tests: fix test-setup.sh
* tests: remove useless debug
* [rabbit] Log correct topic on cast/call

5.15.0
------

* Updated from global requirements
* kafka separate unit/functionnal tests
* Add bindep.txt/test-setup.sh to prepare the system
* [zmq] Matchmaker redis available time

5.14.0
------

* [AMQP 1.0] Simplify the I/O event loop code
* [zmq] Support message versions for rolling upgrades
* [zmq] Fix non voting gate jobs
* Fix transport url with empty port
* Remove ordering assumption from functional test
* Periodically purge sender link cache
5.13.0
------

* Remove small job timeout
* Register opts if we're going to check conf.transport\_url in parse()
* [doc] Fix three typos
* [zmq] Fix zmq-specific f-tests from periodic hangs
* [zmq] Fix functional gates proxy/pub-sub
* Show team and repo badges on README
* [zmq] Send fanouts without pub/sub in background
* Use assertGreater(len(x), 0) instead of assertTrue(len(x) > 0)
* Add Constraints support
* Replace six.iteritems() with .items()
* [zmq] Fix configuration for functional gate job
* Document the transport backend driver interface
* Fix a docstring typo in impl\_pika.py
* [sentinel] Move master/slave discovering from \_\_init\_\_
* rabbit: on reconnect set socket timeout after channel is set
* Updated from global requirements
* [zmq] Don't create real matchmaker in unit tests
* update srouce doc pika\_driver.rst the charactor then to than
* Remove useless logging import statements
* rabbit: Avoid busy loop on epoll\_wait with heartbeat+eventlet
* [zmq] Refactor receivers
* [zmq] Cleanup changes to zmq-specific f-tests
* Updated from global requirements
* This patch cleans up the 'notification\_listener.rst' documetion by removing some class which don't exist and adding some function which exist in current source
* Remove nonexistent functions from documentation
* Replace retrying with tenacity

5.12.0
------

* Updated from global requirements
* Updated from global requirements
* Remove the temporary hack in code
* Using assertIsNone() instead of assertEqual(None)
* Change assertTrue(isinstance()) by optimal assert
* [zmq] Don't fallback to topic if wrong server specified
* [TrivialFix] Replace old style assertions with new style assertions
* [TrivialFix] Fix typo in oslo.messaging
* [simulator] Fix transport\_url usage
* [simulator] Fix a message length generator usage
* Update .coveragerc after the removal of respective directory
* [sentinels] Fix hosts extracting and slaves usage
* [zmq] SUB-PUB local proxy

5.11.0
------

* Fix typos in addressing.py and setup.cfg
* Updated from global requirements
* Record length of queues for ReplyWaiters
* rabbit: Don't prefetch when batch\_size is set
* [AMQP 1.0] Avoid unnecessary thread switch on ack
* [zmq] Fix issues with broken messages on proxies
* [zmq] Maintain several redis hosts
* Removed redundant 'the'
* Fix a typo in server.py
* [document] The example which is written in the developer guide of 'Notification Listener' doesn't work
* Enable release notes translation
* cast() and RPC replies should not block waiting for endpoint to ack
* [simulator] Automatic stopping of rpc-servers
* Fix whitespace formatting issue
* Properly deserializes built-in exceptions
* [zmq] Fix send\_cast in AckManager
* Remove debug logs from fast path
* [zmq] Routing table refactoring, dynamic direct connections
* Fix simulator bool command line args
* Replace 'the' with 'to' in docstring
* Remove default=None when set value in Config
* [zmq] Add acks from proxy for PUB/SUB messages
* [zmq] Refactor consumers and incoming messages
* [zmq] Make second ROUTER socket optional for proxy
* Use method fetch\_current\_thread\_functor from oslo.utils
* [zmq] Fix ZmqSocket.send\_string
* [zmq] Remove unused methods from executors
* [zmq] Added a processing to handle ImportError in Redis plugin of Matchmaker
* modify the home-page info with the developer documentation
* Set the valid choices for the rabbit login methods
* [zmq] Unify delimeters
* [zmq] Fix fanout without PUB/SUB
* [zmq] Send immediate ack after message receiving
* Corrects documentation typo
* [zmq] Remove unnecessary subscriptions from SubConsumer
* Fixups to the inline documentation
* Fix consuming from unbound reply queue
* Add configurable serialization to pika
* [zmq] Remove ZmqSocket.close\_linger attribute
* [zmq] Make ZMQ TCP keepalive options configurable
* [zmq] Fix TestZmqAckManager periodic failure
* [zmq] Make ThreadingPoller work with ZmqSocket
* Fix notify filter when data item is None
* [zmq] Rename rpc\_cast\_timeout option
* [AMQP 1.0] Update setup test environment dispatch router backend
* Allow dispatcher to restrict endpoint methods
* [AMQP 1.0] Add Acknowledgement and Batch Notification Topics
* Update reno for stable/newton
* [kafka] invoke TypeError exception when 'listen()' method of KafkaDriver is called
* [zmq] Proxy has to skip broken multi-part message
* Add Documentation String for PikaDriver
* [zmq] Implement retries for unacknowledged CALLs

5.10.0
------

* [AMQP 1.0] Make the default settlement behavior configurable
* [zmq] Eliminate GreenPool from GreenPoller
* Avoid sending cast after server shutdown in functional test
* [zmq] Update ZMQ-driver documentation
* Updated from global requirements

5.9.0
-----

* [zmq] Add --log-file option to zmq-proxy
* Updated from global requirements
* [zmq] Host name and target in socket identity

5.8.0
-----

* [zmq] Make zmq\_immediate configurable
* Fix calculating of duration in simulator.py
* [zmq] Redis unavailability is not critical
* [zmq] Discover new publisher proxy
* Clean outdated docstring and comment
* [AMQP 1.0] small fixes to improve timer scalability
* Add docstring for get\_notification\_transport
* Add warning when credential is not specified for each host
* Updated from global requirements
* [zmq] Implement retries for unacknowledged CASTs
* Fix the help info format

5.7.0
-----

* Move zmq driver options into its own group
* Log a warning when connected to a routable message bus
* Updated from global requirements
* [AMQP 1.0] Add link credit configuration options
* Updated from global requirements
* [AMQP 1.0] AMQP 1.0 Driver User Guide Document update
* AMQP 1.0 Driver Architecture Overview Document
* Remove the max\_send\_retries option

5.6.0
-----

* Fix pika functional tests
* [zmq] Use zmq.IMMEDIATE option for round-robin
* fix a typo in impl\_rabbit.py
* Updated from global requirements
* [AMQP 1.0] Cancel response treatment for detached link
* Fix syntax error on notification listener docs
* Delete fanout queues on gracefully shutdown
* Set the default link property to allow message acks
* Properly cleanup listener and driver on simulator exit
* Fix a timer leak in the AMQP 1.0 driver
* [zmq] Let proxy serve on a static port numbers
* Introduce TTL for idle connections
* Fix parameters of assertEqual are misplaced
* Fix misstyping issue
* Updated from global requirements
* Updated from global requirements
* notify: add a CLI tool to manually send notifications
* Add deprecated relnote for max\_retries rabbit configuration option
* [zmq] Add py34 configuration for functional tests
* [zmq] Merge publishers
* Add Python 3.5 classifier and venv
* Replace assertEqual(None, \*) with assertIsNone in tests
* Updated from global requirements
* [zmq] Use json/msgpack instead of pickle
* [AMQP 1.0] Add configuration parameters for send message deadline
* [zmq] Refactor publishers
* Re-factor the AMQP 1.0 addressing semantics
* Add Python 3.4 functional tests for AMQP 1.0 driver
* tests: allow to override the functionnal tests suite args
* [zmq] Additional configurations for f-tests
* Remove discover from test-requirements
* tests: rabbitmq failover tests
* [AMQP 1.0] Add acknowledge and requeue handling for incoming message
* Imported Translations from Zanata
* Updated from global requirements
* Remove rabbitmq max\_retries
* Config: no need to set default=None

5.5.0
-----

* [zmq] Fix message sending when using proxy and not using PUB/SUB
* AMQP 1.0 - create only one Cyrus SASL configuration for the tests
* Updated from global requirements
* Refactor AMQP 1.0 command task to support timers
* [zmq] Remove redundant Envelope class
* [zmq] Properly stop ZmqServer
* Refactor link management to support link recovery
* [Trival] fix a typo nit
* [zmq] Fix backend router port for proxy

5.4.0
-----

* [zmq] Remove unused Request.close method
* Add query paramereters to TransportURL
* Fix temporary problems with pika unit tests
* [zmq] Periodic updates of endpoints connections

5.3.0
-----

* Improve the impl\_rabbit logging
* Modify info of default\_notification\_exchange
* Imported Translations from Zanata
* [zmq] Remove rpc\_zmq\_concurrency option
* [zmq] Fix timeout in ThreadingPoller.poll
* Fix typo: 'olso' to 'oslo'
* Updated from global requirements
* [zmq] Don't skip non-direct message types
* [zmq] Refactoring of zmq client
* [impl\_rabbit] Remove deprecated get\_expiration method

5.2.0
-----

* Updated from global requirements
* [AMQP 1.0] Randomize host list connection attempts
* Modify the TransportURL's docstrings
* Fix problems after refactoring RPC client
* deprecate usage of transport aliases
* Documents recommended executor
* kafka: Deprecates host, port options
* Updated from global requirements
* Add reno for releasenotes management
* Remove logging from serialize\_remote\_exception
* [kafka] Add several bootstrap servers support
* Add the proper branch back to .gitreview
* Fix consuming from missing queues
* Fix bug with version\_cap and target.version in RPCClient
* Make TransportURL.parse aware of transport\_url
* rabbit: Deprecates host, port, auth options
* Remove deprecated localcontext
* zeromq: Deprecates host, port options
* Reorganize the AMQP 1.0 driver source files
* Implements configurable connection factory
* The need to wait for a given time is no longer valid in 3.2+
* [zmq] Reduce object serialization on router proxy
* Updated from global requirements
* [zmq] Add backend ROUTER to increase bandwidth
* [zmq] Add Sentinel instructions to deployment guide
* Rabbit driver: failure of rpc-calls with float timeout

5.1.0
-----

* Use eventletutils to check is\_monkey\_patched
* remove feature branch from master .gitreview
* [zmq] Second router proxy doesn't dispatch messages properly
* Add parse.unquote to transport\_url
* Fix simulator stat printing
* Use single producer and to avoid an exchange redeclaration
* [zmq] Redesign router proxy
* Add feature branch to .gitreview file
* Remove Beta development status from classifiers

5.0.0
-----

* Updated from global requirements
* Fixes sumulator.py signal\_handler logic
* Refactor RPC client
* Send notify if notify=True passed
* Improves exception handling and logging
* Implements pika thread safe connection
* Fix incorrect parameters order in assertIn call
* Update the RPC cast() documentation
* Fix unstable work of cast func tests
* [zmq] Reduce threading from python proxy
* Imported Translations from Zanata
* use thread safe fnmatch
* Refactor base interfaces
* Gracefully handle missing TCP\_USER\_TIMEOUT
* Simulator: handle SIGINT and SIGTERM signals
* Updated from global requirements
* Log the unique\_id in listener than msg\_id
* serializer: deprecate RequestContextSerializer
* Don't set html\_last\_updated\_fmt without git
* Amqp driver send method temporary work-around
* Updated from global requirements
* Updated from global requirements
* Allow simulator to be launched from arbitrary directory
* [zmq] Fix cast message loss in simulator
* Make transport\_url config option secret
* Fix oslo.messaging for Mac OS X
* Refactor driver's listener interface
* [kafka] Do not remove kafka\_client during reset
* Updated from global requirements
* Replace expriration\_time by timer
* [zmq] Reduce number of connections
* Move server related logic from dispatchers
* Fix typos in Oslo.messaging files
* Fix Break in Windows platforms
* [py34] replace file() with open()
* Claim python3 compatability for Newton onwards
* Simulator: collect error stats
* Simulator: make parameter wait\_after\_msg float
* Update CheckForLoggingIssues hacking rule from keystone
* Simulator: align stats to whole seconds
* Support python3 in simulator.py
* Fix typo passend should be passenv
* Always set all socket timeouts
* Add a py34 functional test for rabbit
* Small fixes
* Use only unique topics for the Kafka driver
* [zmq] Refactoring consumer side
* [Kafka] Ensure a topics before consume messages
* Fix problems during unstable network
* Missing version parameter in can\_send\_version()
* Bump rabbit\_transient\_queues\_ttl to 30 mins
* Explicitly exclude tests from bandit scan
* Fix Notification listener blocking behavior
* Pika: fix sending fanout messages
* Revert "Ensure the json result type is bytes on Python 3"
* Replace deprecated LOG.warn with LOG.warning
* Simulator: store results in JSON format
* Simulator: calculate message latency statistics
* Fix the driver shutdown/failover logic
* Always delete exc\_info tuple, even if reply fails
* Do not leak Listeners on failover
* Simulator: always use random messages for time-bound tests
* Fallback if git is absent
* Simulator: implement own random generator instead of scipy
* Simulator: fix batch-notify-server command
* Work with kombu from upstream
* Fail quickly if there on bad password
* [zmq] Dynamic port range is ignored
* [zmq] Implement Response and Envelope classes
* [kafka] Use notification priority
* Make simulator more asynchronous
* Adds exhange declaration on sender's side
* Updated from global requirements

4.5.0
-----

* amqp: log time elapsed between receiving a message and replying
* [zmq] Matchmaker redis set instead of list
* Allow Notifier to have multiple topics
* Fix a minor syntax error in a log statement
* Use PortOpt on kafka\_default\_port
* Added duration to notify server/client
* Ensure the json result type is bytes on Python 3
* Improves logging
* Use more efficient mask\_dict\_password to mask password
* Improves poller's stop logic
* Typos of 'recieve' instead of 'receive'
* [zmq] Support transport URL
* Get kafka notifications to work with kafka-python 0.9.5
* Move server's logic from executors
* Avoid hardcoding the notification topic and specify driver
* [zmq] Fix cinder create volume hangs
* Py3: Replace filter()/map() if a list is needed
* Py3: Switch json to oslo\_serialization
* Updated from global requirements

4.4.0
-----

* Updated from global requirements
* Option rpc\_response\_timeout should not be used in zmq driver
* Remove duplicate requirements
* Reduce number of rabbitmq consumer tag used
* Documents the mirror queue policy of RabbitMQ 3.0
* fix override\_pool\_size
* Remove executor callback
* Log format change in simulator.py
* Fix kombu accept different TTL since version 3.0.25
* .testr.conf: revert workaround of testtools bug
* Remove aioeventlet executor

4.3.0
-----

* simulator.py improvements
* rabbit: improvements to QoS
* Updated from global requirements
* Remove server queue creating if target's server is empty
* Updated from global requirements
* Correctly set socket timeout for publishing
* Updated from global requirements
* Use more secure yaml.safe\_load() instead of yaml.load()
* [kombu] Implement experimental message compression
* [zmq] Multithreading access to zmq sockets
* [zmq] ZMQ\_LINGER default value
* Remove matchmaker\_redis configs from [DEFAULT]
* Refactors base classes

4.2.0
-----

* Switches pika driver to eager connection to RabbitMQ
* Remove bandit.yaml in favor of defaults
* [zmq] Use PUSH/PULL for direct CAST
* Updated from global requirements
* support ability to set thread pool size per listener
* Fix misspellings
* [zmq] RPC timeout for CAST
* Enable pep8 on oslo\_messaging/tests

4.1.0
-----

* [zmq] Fix slow down
* Update translation setup
* Let PikaDriver inherit base.BaseDriver
* Improve simulator.py
* Fixed some warnings about imports and variable
* test: Don't test message's reply timeout
* Updated from global requirements
* Adds document and configuration guide
* [zmq] Support KeyboardInterrupt for broker
* [zmq] Reduce proxy for direct messaging
* Fixed a couple of pep8 errors/warnings
* assertEquals is deprecated, use assertEqual
* Updated from global requirements
* Updated from global requirements
* Trivial: Remove unused logging import
* replace string format arguments with function parameters
* Adds params field to BlockingConnection object
* Python 3 deprecated the logger.warn method in favor of warning
* Fix URL in warning message
* [zmq] Implement background redis polling from the client-side
* rabbit: Add option to configure QoS prefetch count
* rabbit: making interval\_max configurable
* Imported Translations from Zanata
* Updated from global requirements
* Logging rpc client/server targets
* Updated from global requirements
* Topic/server arguments changed in simulator.py
* [zmq] Update zmq-guide with new options
* [zmq] Listeners management cleanup
* Drop H237,H402,H904 in flake8 ignore list
* Replace deprecated library function os.popen() with subprocess
* py3: Replaces xrange() with six.moves.range()
* Kombu: make reply and fanout queues expire instead of auto-delete
* fix .gitreview - bad merge from pika branch
* Explicitly add pika dependencies
* Add duration option to simulator.py
* [zmq] Added redis sentinel HA implementation to zmq driver
* rabbit: set interval max for auto retry
* [zmq] Add TTL to redis records
* Updated from global requirements
* make enforce\_type=True in CONF.set\_override
* Use assertTrue/False instead of assertEqual(T/F)
* Improvement of logging acorrding to oslo.i18n guideline
* Updated from global requirements
* rabbit: fix unit conversion error of expiration
* list\_opts: update the notification options group
* rabbit: Missing to pass parameter timeout to next
* Fix formatting of code blocks in zmq docs
* Adds unit tests for pika\_poll module
* Updated from global requirements
* [zmq] Switch notifications to PUB/SUB pattern
* Optimize sending of a reply in RPC server
* Optimize simulator.py for better throughput
* Remove stale directory synced from oslo-incubator
* Fix wrong bugs report URL in CONTRIBUTING
* zmq: Don't log error when can't import zmq module

4.0.0
-----

* assertIsNone(val) instead of assertEqual(None,val)
* Adds tests for pika\_message.py
* [zmq] PUB-SUB pipeline
* Updated from global requirements
* Fixes conflicts after merging master
* Updated from global requirements
* Move to debug a too verbose log
* Cleanup parameter docstrings
* Removes MANIFEST.in as it is not needed explicitely by PBR
* Revert "default of kombu\_missing\_consumer\_retry\_timeout"
* Don't trigger error\_callback for known exc
* Adds comment for pika\_pooler.py
* Improves comment
* Fix reconnection when heartbeat is missed
* Revert "serializer: deprecate RequestContextSerializer"
* Fix notifier options registration
* notif: Check the driver features in dispatcher
* batch notification listener
* Updated from global requirements
* Adds comment, updates pika-pool version
* Preparations for configurable serialization
* creates a dispatcher abstraction
* Remove unnecessary quote
* Fix multiline strings with missing spaces
* Properly skip zmq tests without ZeroMQ being installed
* kombu: remove compat of folsom reply format
* Follow the plan about the single reply message

3.1.0
-----

* default of kombu\_missing\_consumer\_retry\_timeout
* rename kombu\_reconnect\_timeout option
* Skip Cyrus SASL tests if proton does not support Cyrus SASL
* setUp/tearDown decorator for set/clear override
* Adds comments and small fixes
* Support older notifications set\_override keys
* Don't hold the connection when reply fail
* doc: explain rpc call/cast expection
* Add a driver for Apache Kafka
* Option group for notifications
* Move ConnectionPool and ConnectionContext outside amqp.py
* Use round robin failover strategy for Kombu driver
* Revert "serializer: remove deprecated RequestContextSerializer"
* Updated from global requirements
* [zmq] Random failure with ZmqPortRangeExceededException
* [zmq] Driver optimizations for CALL
* Updated from global requirements
* Use oslo\_config new type PortOpt for port options
* serializer: remove deprecated RequestContextSerializer
* Add log info for AMQP client
* Updated from global requirements
* Provide missing parts of error messages
* Add Warning when we cannot notify
* ignore .eggs directory
* serializer: deprecate RequestContextSerializer
* middleware: remove oslo.context usage
* Removes additional select module patching
* Fix delay before host reconnecting

3.0.0
-----

* Remove qpidd's driver from the tree
* Provide alias to oslo\_messaging.notify.\_impl\_messaging
* make pep8 faster
* Updated from global requirements
* Robustify locking in MessageHandlingServer
* Updated from global requirements
* cleanup tox.ini

2.9.0
-----

* [zmq] Add config options to specify dynamic ports range
* [zmq] Make bind address configurable
* [zmq][matchmaker] Distinguish targets by listener types
* [zmq] Update zmq-deployment guide according to the new driver
* Implements more smart retrying
* Make "Connect(ing|ed) to AMQP server" log messages DEBUG level
* Updated from global requirements
* Decouple transport for RPC and Notification
* Fixing the server example code Added server.stop() before server.wait()

2.8.1
-----

* Revert "Robustify locking in MessageHandlingServer"
* Splits pika driver into several files
* Fixes and improvements after testing on RabbitMQ cluster:
* Move supported messaging drivers in-tree

2.8.0
-----

* Add a "bandit" target to tox.ini
* Fix fanout exchange name pattern
* Updated from global requirements
* Remove a useless statement
* Robustify locking in MessageHandlingServer
* Use "secret=True" for password-related options
* Imported Translations from Zanata
* Modify simulator.py tool
* Fix target resolution mismatch in neutron, nova, heat
* Use yaml.safe\_load instead of yaml.load
* Trivial locking cleanup in test\_listener
* Remove unused event in ServerThreadHelper
* Fix a race calling blocking MessageHandlingServer.start()
* Fix assumptions in test\_server\_wait\_method
* Rename MessageHandlingServer.\_executor for readability
* Implements rabbit-pika driver
* bootstrap branch
* Updated from global requirements

2.7.0
-----

* Updated from global requirements
* Some executors are not async so update docstring to reflect that
* Updated from global requirements
* Updated from global requirements
* Small grammar messaging fix
* Use a condition (and/or a dummy one) instead of a lock
* Updated from global requirements

2.6.1
-----

* Fix failures when zmq is not available

2.6.0
-----

* AMQP1.0: Turn off debug tracing when running tox
* Fix typo in rpc/server.py and notify/listener.py
* Fix a typo in server.py
* Use the hostname from the Transport for GSSAPI Authentication
* Adapt functional tests to pika-driver
* ConfFixture should work even when zmq/redis is not present
* Added matchmaker timeouts and retries
* AMQP 1.0: Properly initialize AMQP 1.0 configuration options
* Updated from global requirements
* Workaround test stream corruption issue
* Skip Redis specific tests when it is not installed
* Port the AMQP 1.0 driver to Python 3
* rabbit: shuffle hosts before building kombu URL
* Updated from global requirements
* Remove unnecessary rpc\_zmq\_port option
* Non-blocking outgoing queue was implemented
* Allow custom notification drivers
* Fix the home-page value with Oslo wikipage
* Include changelog/history in docs
* Fix spelling typo in output
* Change ignore-errors to ignore\_errors
* Unsubscribe target listener when leaving
* Add SASL configuration options for AMQP 1.0 driver
* Updated from global requirements
* Fix a few leaks in the AMQP 1.0 driver
* Disable ACL if authentication cannot be performed
* Imported Translations from Zanata
* Enhance start/stop concurrency race condition fix
* Updated from global requirements
* Extend logging in amqpdriver
* Remove useless additional requirement file
* Fix AMQP 1.0 functional and unit test failures
* Provide the executor 'wait' function a timeout and use it

2.5.0
-----

* Imported Translations from Transifex
* Update path to subunit2html in post\_test\_hook
* Fix typos in a document and a comment
* Updated from global requirements
* Imported Translations from Transifex
* Updated from global requirements
* Port the AMQP1 driver to new Pyngus SASL API
* Updated from global requirements
* Imported Translations from Transifex
* Updated from global requirements
* Add config options to the documentation
* Updated from global requirements

2.4.0
-----

* Mask passwords when logging messages
* Updated from global requirements
* Use proper translating helper for logging
* Improve simulator.py

2.3.0
-----

* Imported Translations from Transifex
* Added trace logging for debuggability
* Log warning instead of raising RuntimeError
* Use pickle instead of jsonutils for serialization
* Updated from global requirements
* Acknowledgements implementation
* Replace 'M' with 'Mitaka'
* Add if condition for random.shuffle
* Fix message missing after duplicated message error
* Fix fork-related issues
* FIx CPU time consuming in green\_poller poll()
* Documenting main driver classes
* Notifier implementation
* Imported Translations from Transifex
* Fix BaseDriver.listen\_for\_notifications() signature
* ZMQ: Minor matchmaker improvement
* Imported Translations from Transifex
* Updated from global requirements
* Add unit tests for zmq\_async

2.2.0
-----

* Imported Translations from Transifex
* ZMQ: \`Lazify\` driver code
* Ensures that some assumptions are true
* Remove oslo namespace package
* Register matchmaker\_redis\_opts in RedisMatchMaker
* Imported Translations from Transifex
* Updated from global requirements
* ZMQ: Removed unused code and tests
* ZMQ: Run more functional tests
* Get rid of proxy process in zmq
* Fully use futurist code-base to abstract concurrent.futures away

2.1.0
-----

* Imported Translations from Transifex
* Updated from global requirements
* Close sockets properly
* add plugin documentation for executors and notifiers
* Allows to change defaults opts
* Target direct usage
* Move zmq tests into a subdirectory

2.0.0
-----

* Allow a forward slash as a part of the user/password
* Update 'impl\_eventlet' docstring to reflect actual impl
* Updated from global requirements
* tests: adjusts an expected time for gate
* Updated from global requirements
* Ensure callback variable capture + cleanup is done correctly
* Remove oslo namespace package
* ZMQ: Initial matchmaker implementation
* Updated from global requirements
* Fix threading zmq poller and proxy
* Don't install pyngus on Python 3
* Fix amqp connection pool leak in ConnectionContext
* Executor docstring & attribute tweaks

1.17.1
------

* Use the warn\_eventlet\_not\_patched util function
* Drop use of 'oslo' namespace package

1.17.0
------

* Updated from global requirements
* Add unit tests for zmq\_serializer
* Updated from global requirements
* Fix work with timeout in CallRequest.receive\_reply()
* Fix mock use for mock 1.1.0
* Make heartbeat the default
* ZMQ: Allow to raise remote exception
* Local Fanout implementation
* Drop use of 'oslo' namespace package
* Use oslo.log in the zmq receiver
* Imported Translations from Transifex
* Remove usage of contentmanager for executors
* Verify that version in 'prepare' is valid

1.16.0
------

* Fix qpid's functional gate
* Don't reply when we known that client is gone
* Remove py26 artefacts from oslo.messaging code
* Remove 2.6 classifier
* Imported Translations from Transifex
* Add WebOb and greenlet to requirements
* Use ServiceBase from oslo.service as a parent class
* Manual update the requirements
* Deprecated impl\_qpid
* Add a missed \`raise\` statement
* Remove qpid-config call
* Initial commit for new zmq driver implementation
* Add tox target to find missing requirements
* Fix qpid's functional gate
* Imported Translations from Transifex
* fix typo
* Correct RPCVersionCapError message

1.15.0
------

* Drop use of 'oslo' namespace package
* Update .gitreview for feature/zmq
* Use \`inferred=True\` by default
* Enable amqp's protocol unit tests everywhere
* Switch badges from 'pypip.in' to 'shields.io'
* Don't use devstack to setup our functional env
* Switch to warnings module instead of versionutils
* Updated from global requirements
* Get mox from mox3, not from six.moves
* rabbit: Add logging on blocked connection
* Provide better detection of failures during message send

1.14.0
------

* Reduce \`magic\` conf attribute usage
* Imported Translations from Transifex
* Remove leftover oslo.config reference
* replace rpc\_response\_timeout use in rabbit driver
* Enable \`fanout\_target\` scenarios in test\_impl\_rabbit
* Add drivers to the documentation

1.13.0
------

* Ensure rpc\_response\_timeout is registered before using it
* rabbit: test for new reply behavior
1.12.0 ------ * Fix condition in \_publish\_and\_retry\_on\_missing\_exchange() * Set places to 0 in self.assertAlmostEqual() * Allow to remove second \_send\_reply() call * Don't create a new channel in RabbitMQ Connection.reset() * Imported Translations from Transifex * Adding Publisher Acknowledgements/confirms * Fix deprecated\_group of rpc\_conn\_pool\_size * Refactor processing reply in ReplyWaiter * rabbit: doc fixes * consumer connections not closed properly 1.11.0 ------ * rabbit: smart timeout on missing exchange * rabbit: Fix message ttl not work * rabbit: remove publisher classes * rabbit: Set timeout on the underlying socket * Remove stale copy of context.py * Add one more functional test for MessagingTimeout * Fix list\_opts test to not check all deps * make it possible to import amqp driver without dependencies * Remove outdated release notes * rabbit: smarter declaration of the notif. queue * rabbit: redeclare consumers when ack/requeue fail * Bump kombu and amqp requirements * Updated from global requirements * rabbit: fix exception path in queue redeclaration * rabbit: fix consumers declaration * rabbit: remove unused consumer interfaces * rabbit: remove unused code * rabbit: Remove unused stuffs from publisher * Remove support for Python 3.3 * Updated from global requirements * Add RequestContextSerializer * Updated from global requirements * rabbit: fixes a logging issue * rabbit/qpid: simplify the consumer loop * Updated from global requirements * Imported Translations from Transifex * Fix missing space in help text * zmq: Add support for ZmqClient pooling * Enable eventlet dependency on Python 3 * Add JsonPayloadSerializer serializer * Fix test\_matchmaker\_redis on Python 3 * Disable and mark heartbeat as experimental 1.10.0 ------ * Uncap library requirements for liberty * Port ZMQ driver to Python 3 * Use unittest.mock on Python 3 * Enable redis test dependency on Python 3 * Remove amqp driver 'unpacked content' logging * Updated from global requirements * Add pypi download + version badges * Fix TypeError caused by err\_msg formatting * Fix typo in oslo\_messaging/\_drivers/protocols/amqp/opts.py * Document notification\_driver possible values * Do not skip functional test for amqp driver * Add functional test for notify.logger * Properly deserialize received AMQP 1.0 messages * Make notify driver messaging play well with publish\_errors * Imported Translations from Transifex 1.9.0 ----- * Use the oslo\_utils stop watch in decaying timer * Updated from global requirements * Remove 'UNIQUE\_ID is %s' logging * Sync with latest oslo-incubator * rabbit: fix ipv6 support * Create a unique transport for each server in the functional tests * Publish tracebacks only on debug level * Add pluggability for matchmakers * Make option [DEFAULT]amqp\_durable\_queues work * Reconnect on connection lost in heartbeat thread * Don't raise Timeout on no-matchmaker results * Imported Translations from Transifex * cleanup connection pool return * rabbit: Improves logging * fix up verb tense in log message * rabbit: heartbeat implementation * Fix changing keys during iteration in matchmaker heartbeat * Minor improvement * ZeroMQ deployment guide * Fix a couple typos to make it easier to read * Tiny problem with notify-server in simulator * Fix coverage report generation * Add support for multiple namespaces in Targets * tools: add simulator script * Deprecates the localcontext API * Update to oslo.context * Remove obsolete cross tests script * Fix the bug redis do not delete the 
expired keys 1.8.0 ----- * Updated from global requirements * NotifyPublisher need handle amqp\_auto\_delete * Fix matchmaker\_redis ack\_alive fails with KeyError * Properly distinguish between server index zero and no server 1.7.0 ----- * Add FAQ entry for notifier configuration * rabbit: Fix behavior of rabbit\_use\_ssl * amqp1: fix functional tests deps * Skip functional tests that fail due to a qpidd bug * Use import of zmq package for test skip * Remove unnecessary log messages from amqp1 unit tests * Include missing parameter in call to listen\_for\_notifications * Fix the import of the driver by the unit test * Add a new aioeventlet executor * Add missing unit test for a recent commit * Add the threading executor setup.cfg entrypoint * Move each drivers options into its own group * Refactor the replies waiter code * Imported Translations from Transifex * Fix notifications broken with ZMQ driver * Gate functionnal testing improvements * Treat sphinx warnings as errors * Move gate hooks to the oslo.messaging tree * Set the password used in gate * Update README.rst format to match expectations 1.6.0 ----- * Declare DirectPublisher exchanges with passive=True * Updated from global requirements * Expose \_impl\_test for designate * Update Oslo imports to remove namespace package * Speedup the rabbit tests * Fix functionnal tests * kombu: fix driver loading with kombu+qpid scheme * Fixed docstring for Notifier * zmq: Refactor test case shared code * Add more private symbols to the old namespace package * Updated from global requirements * Adjust tests for the new namespace * Fixes test\_two\_pools\_three\_listener * Add TimerTestCase missing tests case * Ensure kombu channels are closed * fix qpid test issue with eventlet monkey patching * Make setup.cfg packages include oslo.messaging * Upgrade to hacking 0.10 * Implements notification-dispatcher-filter * Add oslo.messaging.\_drivers.common for heat tests * Port zmq driver to Python 3 * Make sure zmq can work with redis * fix qpid test issue with eventlet monkey patching * Move files out of the namespace package * Add a info log when a reconnection occurs * rabbit: fix timeout timer when duration is None * Don't log each received messages * Fix some comments in a backporting review session * Enable IPv6-support in libzmq by default * Add a thread + futures executor based executor * safe\_log Sanitize Passwords in List of Dicts * Updated from global requirements * rabbit: add some tests when rpc\_backend is set * Warns user if thread monkeypatch is not done * Add functional and unit 0mq driver tests * The executor doesn't need to set the timeout * qpid: honor iterconsume timeout * rabbit: more precise iterconsume timeout * Workflow documentation is now in infra-manual * Touch up grammar in warning messages 1.5.1 ----- * Reintroduces fake\_rabbit config option * Make the RPCVersionCapError message clearer * Doc: 'wait' releases driver connection, not 'stop' * Don't allow call with fanout target * Imported Translations from Transifex * Add an optional executor callback to dispatcher 1.5.0 ----- * Rabbit: Fixes debug message format * Rabbit: iterconsume must honor timeout * Don't use oslo.cfg to set kombu in-memory driver * Don't share connection pool between driver object * Show what the threshold is being increased to * Wait for expected messages in listener pool test * Dispath messages in all listeners in a pool * Reduces the unit tests run times * Set correctly the messaging driver to use in tests * Always use a poll timeout in the 
executor * Have the timeout decrement inside the wait() method * Warn user if needed when the process is forked * Renamed PublishErrorsHandler * Fix reconnect race condition with RabbitMQ cluster * Create a new connection when a process fork has been detected * Add more TLS protocols to rabbit impl * Remove the use of PROTOCOL\_SSLv3 * Add qpid and amqp 1.0 tox targets * Updated from global requirements * Imported Translations from Transifex * rabbit: uses kombu instead of builtin stuffs * Allows to overriding oslotest environ var * Create ZeroMQ Context per socket * Remove unuseful param of the ConnectionContext * Updated from global requirements * Add basic tests for 0mq matchmakers * Notification listener pools * Updated from global requirements * Fix tiny typo in server.py * Switch to oslo.middleware * Updated from global requirements * Activate pep8 check that \_ is imported * Enable user authentication in the AMQP 1.0 driver * Documentation anomaly in TransportURL parse classmethod * Don't put the message payload into warning log * Updated from global requirements * Fix incorrect attribute name in matchmaker\_redis * Add pbr to installation requirements * Updated from global requirements * Add driver independent functional tests * Imported Translations from Transifex * zmq: Remove dead code * Updated from global requirements * Finish transition to oslo.i18n * Imported Translations from Transifex * Imported Translations from Transifex * qpid: Always auto-delete queue of DirectConsumer * Updated from global requirements * Imported Translations from Transifex * Enable oslo.i18n for oslo.messaging * Switch to oslo.serialization * Cleanup listener after stopping rpc server * Updated from global requirements * Track the attempted method when raising UnsupportedVersion * fix memory leak for function \_safe\_log * Stop using importutils from oslo-incubator * Add missing deprecated group amqp1 * Updated from global requirements * Stop using intersphinx * Add documentation explaining how to use the AMQP 1.0 driver * Imported Translations from Transifex * Construct ZmqListener with correct arguments * Message was send to wrong node with use zmq as rpc\_backend * Work toward Python 3.4 support and testing * Ensure the amqp options are present in config file * Add contributing page to docs * Import notifier middleware from oslo-incubator * Let oslotest manage the six.move setting for mox 1.4.1 ----- * Imported Translations from Transifex * Add square brackets for ipv6 based hosts * An initial implementation of an AMQP 1.0 based messaging driver * warn against sorting requirements * Improve help strings * Switch to oslo.utils * Fix Python 3 testing * Import oslo-incubator context module * Import oslo-incubator/middleware/base * Should not send replies for cast messages * Port to Python 3 * Sync jsonutils from oslo-incubator * Add parameter to customize Qpid receiver capacity * Make tests pass with random python hashseed * Set sample\_default for rpc\_zmq\_host * Enable PEP8 check E714 * Enable PEP8 check E265 * Enable PEP8 check E241 * Fix error in example of an RPC server * Replace lambda method \_ * Enable check for E226 * Updated from global requirements * Add release notes for 1.4.0.0a4 * Add release notes for stable/icehouse 1.3.1 release 1.4.0.0a4 --------- * Enabled hacking checks H305 and H307 * Bump hacking to version 0.9.2 * Fixes incorrect exchange lock in fake driver * Imported Translations from Transifex 1.4.0.0a3 --------- * Add release notes for 1.4.0.0a2/a3 * Fix AMQPListener for 
polling with timeout * Replaced 'e.g.' with 'for example' * Use assertEqual instead of assertIs for strings 1.4.0.0a2 --------- * Fix structure of unit tests in oslo.messaging (part 3 last) * Fix structure of unit tests in oslo.messaging (part 2) * Fix slow notification listener tests * encoding error in file * Fix info method of ListenerSetupMixin 1.4.0.0a1 --------- * Add release notes for 1.4.0.0a1 * Fix formatting of TransportURL.parse() docs * Remove duplicate docs for MessageHandlingServer * Add missing docs for list\_opts() * Add 'docs' tox environment * Replace usage of str() with six.text\_type * Fix structure of unit tests in oslo.messaging (part 1) * Synced jsonutils and its dependencies from oslo-incubator * Ensures listener queues exist in fake driver * RPC server doc: use the blocking executor * Fix the notifier example * Removes the use of mutables as default args * Set correct group for matchmaker\_redis options * replace string format arguments with function parameters * Removes contextlib.nested * Transport reconnection retries for notification * Disable connection pool in qpid interfaces tests * Updated from global requirements * Add check credentials to log message if rabbmitmq closes socket * Fix the notify method of the routing notifier * Handle unused allowed\_remote\_exmods in \_multi\_send * rabbit/qpid: remove the args/kwargs from ensure() * Add an example usage of RPCClient retry parameter * Add transport reconnection retries * Add an optional timeout parameter to Listener.poll * Bump hacking to 0.9.x series * Removes unused config option * fixed pep8 issue E265 * Setup for translation * Updated from global requirements * Remove amqp default exchange hack * remove default=None for config options * Cleaned up references to executor specific RPCServer types * Make the TransportUrl hashable * debug level logs should not be translated * Explicitly name subscription queue for responses * Fix passing envelope variable as timeout * Updated from global requirements * Synced jsonutils from oslo-incubator * Remove str() from LOG.\* and exceptions * Remove dependent module py3kcompat * Enable log messages to handle exceptions containing unicode * Updated from global requirements * Fix typo in docstring of notify/notifier * Full support of multiple hosts in transport url * Logical error in blockless fanout of zmq * Select AMQP message broker at random * Use a for loop to set the defaults for \_\_call\_\_ params * Update ensure()/reconnect() to catch MessagingError * Remove old drivers dead code * Import run\_cross\_tests.sh from oslo-incubator * Remove rendundant parentheses of cfg help strings * zmq: switch back to not using message envelopes * Trival:Fix assertEqual arguments order * Oslo-messaging-zmq-receiver cannot recive any messages 1.3.0 ----- * Add release notes for 1.3.0 * Ensure routing key is specified in the address for a direct producer * Fix wrong parameter description in docstring * Fixed inconsistent EventletContextManagerSpawnTest failures * Use messaging\_conf fixture configuration by default * Fixed possible pep8 failure due to pyflakes bug * Refactor AMQP message broker selection * Add unit test to check the order of Qpid hosts on reconnect * Fixed the issue for pop exception * Clean up for qpid tests * Add kombu driver library to requirements.txt * Use driver's notify\_send() method again * Remove vim header * Updated from global requirements * Fixed spelling error - runnung to running * Build log\_handler documentation * Add release notes up to 1.3.0a9 
1.3.0a9 ------- * Remove use of sslutils 1.3.0a8 ------- * Expose PublishErrorsHandler through oslo.messaging * Use mock's call assert methods over call\_args\_list * notify listener: document the metadata callback parameter * Add missing data into the notif. endpoint callback * notification listener: add allow\_requeue param * Adds unit test cases to impl\_qpid * Do not leak \_unique\_id out of amqp drivers * Add multiple exchange per listerner in fake driver * Allow to requeue the notification message * Slow down Kombu reconnect attempts * Don't run python 3 tests by default * Gracefully handle consumer cancel notifications * Updated from global requirements * Convert to oslo.test * Add log\_handler to oslo.messaging * Add a link to the docs from the README * Pass the session to QpidMessage constructor * User a more accurate max\_delay for reconnects * Make the dispatcher responsible of the message ack * Don't reply to notification message * Abstract the acknowledge layer of a message * Implements notification listener and dispatcher * Switch over to oslosphinx * Improve help strings * Update ExpectedException handling * Ignore any egg and egg-info directories * Qpid: advance thru the list of brokers on reconnect * RabbitMQ: advance thru the list of brokers on reconnect 1.3.0a7 ------- * Make the dispatcher responsible to listen() * Allow fake driver to consume multiple topics * Allow different login methods to be used with kombu connections 1.3.0a6 ------- * Use stevedore's make\_test\_instance * Expose an entry point to list all config options * Fix test case name typo * Fix UnboundLocalError error 1.3.0a5 ------- * Fix help strings * Add release notes for 1.3.0a3 * python3: Switch to mox3 instead of mox * Remove dependencies on pep8, pyflakes and flake8 * Routing notifier 1.3.0a4 ------- * Removes use of timeutils.set\_time\_override * Fix spelling errors in comments * Fix test\_notifier\_logger for Python 3 * Minor Python 3 fixes * Remove copyright from empty files * Fix duplicate topic messages for Qpid topology=2 * Replace dict.iteritems() with six.iteritems() * Remove unused eventlet/greenlet from qpid/rabbit * fix test\_rabbit for Python 3 * Fix try/except syntax for Python 3 * Fix exception deserialiation on Python 3 * Add Sample priority * sysnchronize oslo-incubator modules * Remove eventlet related code in amqp driver * Fix syntax of relative imports for Python3 * Updated from global requirements * Updated from global requirements * Unify different names between Python2 and Python3 * Replace data structures' attribute with six module * Avoid creating qpid connection twice in initialization * Use six.moves.queue instead of Queue * Add transport aliases * Remove the partial implementation of ack\_on\_error * Fixed misspellings of common words * Add release notes for 1.3.0a2 * Unify different names between Python2/3 with six.moves * Remove vim header * Ensure context type is handled when using to\_dict * Refactors boolean returns 1.3.0a2 ------- * Simplify common base exception prototype * Properly reconnect subscribing clients when QPID broker restarts * Remove useless global vars / import * Avoid storing configuration in notifier * Implement a log handler using notifier * notifier: add audit level * Add 'warning' as an alias to 'warn' * Decouple from Oslo uuidutils module * Supply missing argument to raise\_invalid\_topology\_version() * Support a new qpid topology * Remove hosts as property in TransportURL * Remove property on virtual\_host in TransportURL * Updated from 
global requirements * Fix some typos and adjust capitalization * Changes driver method for notifications 1.3.0a1 ------- * Properly handle transport URL config on the client * Updated from global requirements * Updated from global requirements * Replace assertEquals with assertEqual * Properly handle transport:///vhost URL * Updated from global requirements * Make rpc\_backend default to 'rabbit' * Apply six for metaclass * Add third element to RPC versions for backports * Fix rpc client docs * Updated from global requirements * Remove cruft from setup.cfg * Updated from global requirements * Fixes a typo in the address string syntax * Implement the server side of ZmqDriver * Add zmq-receiver * Implement the client side of ZmqDriver * Import zmq driver code with minimal modifications 1.2.0a11 -------- * Fix race-condition in rabbit reply processing * Fix error message if message handler fails * Don't include msg\_id or reply\_q in casts * Remove check\_for\_lock support in RPCClient 1.2.0a10 -------- * Add a Notifier.prepare() method 1.2.0a9 ------- * Fix dictionary changed size during iteration 1.2.0a8 ------- * Fix transport URL parsing bug 1.2.0a7 ------- * Fix rabbit driver handling of None, etc. replies 1.2.0a6 ------- * Remove ConfFixture from toplevel public API * Fix fake driver handling of failure replies * Bumps hacking to 0.7.0 * Fix transport URL ipv6 parsing support 1.2.0a5 ------- * Fix handling of None, etc. replies 1.2.0a4 ------- 1.2.0a3 ------- * Add a unit testing configuration fixture * Add a TransportURL class to the public API 1.2.0a2 ------- * Ensure namespace package is installed 1.2.0a1 ------- * Add transport URL support to rabbit driver * Kill ability to specify exchange in transport URL * Fix capitalization, it's OpenStack * Fix handling expected exceptions in rabbit driver * Add thread-local store of request context * Add a context serialization hook * Removes a redundant version\_is\_compatible function * Document how call() handles remote exceptions * Add a per-transport allow\_remote\_exmods API * Expose RemoteError exception in the public API * Implement failure replies in the fake driver * Add API for expected endpoint exceptions * Add a driver method specifically for sending notifications * Enforce target preconditions outside of drivers * Add comments to ReplyWaiter.wait() * Remove some FIXMEs and debug logging * Remove unused IncomingMessage.done() * Implement wait\_for\_reply timeout in rabbit driver * Use testtools.TestCase assertion methods * Implement failure replies in rabbit driver * Add test with multiple waiting sender threads * Fix race condition in ReplyWaiters.wake\_all() * Add rabbit unit test for sending and receiving replies * Add some docs on target version numbers * Add tests for rabbit driver wire protcol * Pop \_unique\_id when checking for duplicates * Add a transport cleanup() method * Remove my notes and test scripts * Add initial qpid driver * Move most new rabbit driver code into amqpdriver * Move rpc\_conn\_pool\_size into amqp * Add simple rabbit driver unit test * Temporarily add eventlet to requirements * Add missing gettextutils * Add unit tests for object pool * Remove only\_free param to Pool.get() * Connection pool bugfix * Remove unused file * Add exception serialization tests * Don't call consume() each time iterconsume() is called * Add test code for the rabbit driver * Remove use of gettextutils * Add initial rabbit driver * Remove use of openstack.common.local * Use stdlib logging * Don't register options with 
cfg.CONF at module import * Port away from some eventlet infrastructure * Adjust imports in rabbit/qpid drivers * Import some needed modules from oslo-incubator * Add oslo-incubator code unmodified * Make executor threads more robust * Allow use of hacking 0.6.0 and fix min version * Include docstrings in published docs * Use oslo.sphinx and remove local copy of doc theme * Add some notes * Unit tests for notifier * Make test notifier useful * Use lowercase priority in log notifier * Use lowercase priority in notifications topic * Handle case where no drivers configured * Fix buglet in v2 messaging notifier * Make LOG private in notifier * Require a transport to construct a Notifier * Add serializer support to notifier * Rename context to ctxt in serializer API * Rename context to ctxt in notify API * Make Notifier public at top-level * Docstrings for notifier API * Fix notify drivers namespace * Remove backwards compat entry point aliases * Simplify public symbol exports * Use assertEqual() rather than assertEquals() * Remove accidental use of messaging.rpc\_server * Make exchange\_from\_url() use parse\_url() * Unit tests for URL parsing code * Fix parse\_urls() buglets * Move url utils into messaging.\_urls * Don't use common logging * Update example scripts for recent API changes * Fix fake driver with eventlet * Use log.warning() instead of log.warn() * Fix some pep8 issues * Don't translate exception messages * Knock off a few TODOs * Add can\_send\_version() to RPCClient * Check start() does nothing on a running server * Remove unused statements in base serializer * Fix thinko in exchange\_from\_url() * Call wait() in server tests * Add docstrings for base executor class * Remove a fixed fixme * Add a client call timeout test * Don't raise a driver-specific error on send * Add some docstrings to driver base * Test a bunch more RPC server scenarios * Make it possible to call prepare() on call context * Rework how queues get created in fake driver * Use testscenarios * Move files to new locations for oslo.messaging * Import stuff from oslo-incubator * Add oslo.messaging project infrastructure * Add some RPC server tests * More gracefully handle "no listeners" in fake driver * Better error handling in server.start() * Re-work server API to eliminate server subclasses * Add license header to \_executors/\_\_init\_\_.py * Add RPCDispatcher tests * Check for return value in client serializer test * Add note about can\_send\_version() * More client unit tests * Make RPCClient.check\_for\_lock a callable * Apply version cap check when casting * Make RPCVersionCapError extend base exception * Remove a bogus param from client.prepare() docs * pep8 fixes for serializer code * Simple RPCClient test * Unit tests * Move some stuff into doc/ * Implement Target.\_\_eq\_\_() * Fix bug in exchange\_from\_url() * pep8 fixes for fake driver * Make utils.parse\_url() docstring pep8 compliant * Don't translate exceptions * Misc pep8 fixes * pep8 fixes for toplevel package * Some error handling improvements * Recommend wrapping the client class rather than subclassing * Document how to use RPCClient directly * Document the public RPC API * Fix defaults for client.prepare() args * Fix client.cast() typo * Fix version\_cap typo * Allow all target attributes in client.prepare() * Expose Serializer from top-level namespace * Allow specifying a serializer when creating a server * Make endpoint.target optional * Dispatch methods in their own greenthreads * Make rpc.dispatcher private * Make the base RPCServer 
class private * Fix typo with the serializer work * Update use of stevedore * Require topics and target in notify driver constructors * Add generic serialization support * Support namespace in RPCClient.prepare() * Add parse\_url to \_utils * Remove entry point lists from the public API * Support capping message versions in the client * Fix RPCClient check\_for\_lock() * First cut at the notifier API * Add some notes * Add IncomingMessage abstraction * Pass a context dict * Fix docstring * Implement a fake driver * Adding reply infrastructure * Add some exceptions * Fix buglet with default timeout * Fix target/namespace target buglet * Fix rpc client buglets * Fix 'Blockinging' typos * Missing self parameter to server start() * Fix default\_exchange typo * Add forgotten piece of eventlet executor * It's \_executors not \_executor * Make poll() just return the message * Make drivers list public again * Add top-level convenience aliases * Prefix the executors module with underscore * Prefix the messaging.server module with an underscore * Prefix the drivers module with an underscore * Make transport methods private * Fix little typo in server exception class name * Add missing utils module * Add convenience RPC server classes * Update changes.txt for recent API changes * Use : for loading classes in entry\_points * Split the dispatcher from the executor and server * Make driver and transport methods public * Pass the driver instance to the listener instead of config * Try out replacing "executor" for "dispatcher" * Fix host vs server typo * Initial framework ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/HACKING.rst0000664000175000017500000000017000000000000016354 0ustar00zuulzuul00000000000000Style Commandments ================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/LICENSE0000664000175000017500000002665200000000000015600 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. --- License for python-keystoneclient versions prior to 2.1 --- All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of this project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1546743 oslo.messaging-14.9.0/PKG-INFO0000664000175000017500000000426500000000000015664 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: oslo.messaging Version: 14.9.0 Summary: Oslo Messaging API Home-page: https://docs.openstack.org/oslo.messaging/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.messaging.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. 
Change things from this point on Oslo Messaging Library ====================== .. image:: https://img.shields.io/pypi/v/oslo.messaging.svg :target: https://pypi.org/project/oslo.messaging/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.messaging.svg :target: https://pypi.org/project/oslo.messaging/ :alt: Downloads The Oslo messaging API supports RPC and notifications over a number of different messaging transports. * License: Apache License, Version 2.0 * Documentation: https://docs.openstack.org/oslo.messaging/latest/ * Source: https://opendev.org/openstack/oslo.messaging * Bugs: https://bugs.launchpad.net/oslo.messaging * Release notes: https://docs.openstack.org/releasenotes/oslo.messaging/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.8 Provides-Extra: amqp1 Provides-Extra: kafka Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/README.rst0000664000175000017500000000171500000000000016253 0ustar00zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.messaging.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on Oslo Messaging Library ====================== .. image:: https://img.shields.io/pypi/v/oslo.messaging.svg :target: https://pypi.org/project/oslo.messaging/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.messaging.svg :target: https://pypi.org/project/oslo.messaging/ :alt: Downloads The Oslo messaging API supports RPC and notifications over a number of different messaging transports. 
* License: Apache License, Version 2.0 * Documentation: https://docs.openstack.org/oslo.messaging/latest/ * Source: https://opendev.org/openstack/oslo.messaging * Bugs: https://bugs.launchpad.net/oslo.messaging * Release notes: https://docs.openstack.org/releasenotes/oslo.messaging/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/bindep.txt0000664000175000017500000000143600000000000016566 0ustar00zuulzuul00000000000000# common dpkg gettext [platform:dpkg] # For releasenotes job build-essential [platform:dpkg] libffi-dev [platform:dpkg] # common rpm gcc [platform:rpm] gcc-c++ [platform:rpm] make [platform:rpm] pkgconfig [platform:rpm] libffi-devel [platform:rpm] # RabbitMQ message broker rabbitmq-server [platform:dpkg rabbit] rabbitmq-server [platform:rpm rabbit] # AMQP1 dpkg # This needs qpid/testing, will be installed by tools/test-setup.sh # qdrouterd [platform:dpkg amqp1 test] sasl2-bin [platform:dpkg amqp1 test] uuid-dev [platform:dpkg amqp1 test] swig [platform:dpkg amqp1 test] libsasl2-modules [platform:dpkg amqp1 test] default-jdk [platform:dpkg amqp1 test !platform:debian] # kafka dpkg default-jdk [platform:dpkg kafka] librdkafka1 [platform:dpkg kafka] librdkafka-dev [platform:dpkg kafka] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1146717 oslo.messaging-14.9.0/doc/0000775000175000017500000000000000000000000015325 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/requirements.txt0000664000175000017500000000043300000000000020611 0ustar00zuulzuul00000000000000openstackdocstheme>=2.2.0 # Apache-2.0 sphinx>=2.0.0 # BSD reno>=3.1.0 # Apache-2.0 # imported when the source code is parsed for generating documentation: fixtures>=3.0.0 # Apache-2.0/BSD confluent-kafka>=0.11.6 # Apache-2.0 pyngus>=2.2.0 # Apache-2.0 tenacity>=3.2.1 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1146717 oslo.messaging-14.9.0/doc/source/0000775000175000017500000000000000000000000016625 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1146717 oslo.messaging-14.9.0/doc/source/admin/0000775000175000017500000000000000000000000017715 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/admin/AMQP1.0.rst0000664000175000017500000005412300000000000021431 0ustar00zuulzuul00000000000000========================================= AMQP 1.0 Protocol Driver Deployment Guide ========================================= .. currentmodule:: oslo_messaging Introduction ------------ The AMQP 1.0 Protocol Driver is a messaging transport backend supported in oslo.messaging. The driver maps the base *oslo.messaging* capabilities for RPC and Notification message exchange onto version 1.0 of the Advanced Message Queuing Protocol (AMQP 1.0, ISO/IEC 19464). The driver is intended to support any messaging intermediary (e.g. broker or router) that implements version 1.0 of the AMQP protocol. More detail regarding the AMQP 1.0 Protocol is available from the `AMQP specification`__. More detail regarding the driver's implementation is available from the `oslo specification`__. 

__ http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-overview-v1.0-os.html
__ https://opendev.org/openstack/oslo-specs/src/branch/master/specs/juno/amqp10-driver-implementation.rst

Abstract
--------

The AMQP 1.0 driver is one of a family of *oslo.messaging* backend
drivers. It currently supports two types of message intermediaries. The
first type is an AMQP 1.0 messaging broker and the second type is an
AMQP 1.0 message router. The driver should support additional
intermediary types in the future, but may require additions to the
driver configuration parameters in order to do so.

+--------------+-----------+------------+------------+-----------+
| Intermediary | RPC       | Notify     | Message    | Topology  |
| Type         | Pattern   | Pattern    | Treatment  |           |
+--------------+-----------+------------+------------+-----------+
| Message      | Yes       | `Limited`_ | Direct     | Single or |
| Router       |           |            | Messaging  | Mesh      |
+--------------+-----------+------------+------------+-----------+
| Message      | Yes       | Yes        | Store and  | Single or |
| Broker       |           |            | Forward    | Cluster   |
+--------------+-----------+------------+------------+-----------+

Direct Messaging
~~~~~~~~~~~~~~~~

The RPC messaging pattern is a synchronous exchange between client and
server that is temporally bracketed. The direct messaging capabilities
provided by the message router are optimal for the RPC messaging
pattern. The driver readily scales from a single instance of a message
router to a large-scale routed mesh interconnect topology.

Store and Forward
~~~~~~~~~~~~~~~~~

The Notification messaging pattern is an asynchronous exchange from a
notifier to a listener (e.g. consumer). The listener need not be
present when the notification is sent. Thus, the store-and-forward
capabilities provided by the message broker are required for the
Notification messaging pattern. This driver is able to work with a
single instance of a message broker or a clustered broker deployment.

.. _Limited:

It is recommended that the message router intermediary not be used for
the Notification messaging pattern, because notification messages will
be dropped when there is no active consumer. The message router does
not provide durability or store-and-forward capabilities for
notification messages.

Hybrid Messaging Backends
~~~~~~~~~~~~~~~~~~~~~~~~~

Oslo.messaging provides a mechanism to configure separate backends for
RPC and Notification communications. This is supported through the
specification of separate RPC and Notification `transport urls`_ in the
service configuration. This capability enables the optimal alignment of
messaging patterns to messaging backends and allows different messaging
backend types to be deployed. This document provides deployment and
configuration information for use of this driver in hybrid messaging
configurations.

Addressing
~~~~~~~~~~

A new address syntax was added to the driver to support efficient
direct message routing. This new syntax will also work with a broker
intermediary backend, but is not compatible with the address syntax
previously used by the driver. In order to allow backward
compatibility, the driver will attempt to identify the intermediary
type for the backend in use and will automatically select the 'legacy'
syntax for broker-based backends or the new 'routable' syntax for
router-based backends. An `address mode`_ configuration option is
provided to override this dynamic behavior and force the use of either
the legacy or routable address syntax, as shown in the sketch below.
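
As a minimal sketch of the hybrid and addressing controls just
described, the following configuration fragment directs RPC to an
AMQP 1.0 router while sending notifications to a RabbitMQ broker, and
forces the routable address syntax. The host names and credentials are
illustrative assumptions, not defaults; the ``addressing_mode`` value
names follow the legacy/routable terminology used above:

.. code-block:: ini

    [DEFAULT]
    # RPC traffic is carried by an AMQP 1.0 router
    # (host name and credentials are examples)
    transport_url = amqp://username:password@routerhostname:5672

    [oslo_messaging_notifications]
    # Notifications are carried by a RabbitMQ broker instead
    transport_url = rabbit://username:password@brokerhostname:5672

    [oslo_messaging_amqp]
    # Skip intermediary auto-detection and force the new routable
    # syntax; 'legacy' would force the older broker-compatible syntax
    addressing_mode = routable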

Message Acknowledgement
~~~~~~~~~~~~~~~~~~~~~~~

A primary functional difference between a router and a broker
intermediary type is when message acknowledgement occurs. The router
does not "store" the message, hence it does not generate an
acknowledgement. Instead, the consuming endpoint is responsible for
message acknowledgement and the router forwards the acknowledgement
back to the sender. This is known as 'end-to-end' acknowledgement. In
contrast, a broker stores then forwards the message, so that message
acknowledgement is performed in two stages. In the first stage, a
message acknowledgement occurs between the broker and the Sender. In
the second stage, an acknowledgement occurs between the Server and the
broker. This difference affects how long the Sender waits for the
message transfer to complete.

::

                                                  +dispatch+
                                                  |  (3)   |
                                                  |        |
                                                  |        v
    +--------------+  (1)  +----------+  (2)  +--------------+
    |    Client    |------>|  Router  |------>|    Server    |
    |   (Sender)   |<------| (Direct) |<------|  (Listener)  |
    +--------------+  (5)  +----------+  (4)  +--------------+

For example, when a router intermediary is used, the following
sequence occurs:

1. The message is sent to the router
2. The router forwards the message to the Server
3. The Server dispatches the message to the application
4. The Server indicates the acknowledgement via the router
5. The router forwards the acknowledgement to the Sender

In this sequence, a Sender waits for the message acknowledgement until
step (5) occurs.

::

                                                  +dispatch+
                                                  |  (4)   |
                                                  |        |
                                                  |        v
    +--------------+  (1)  +----------+  (3)  +--------------+
    |    Client    |------>|  Broker  |------>|    Server    |
    |   (Sender)   |<------| (Queue)  |<------|  (Listener)  |
    +--------------+  (2)  +----------+  (5)  +--------------+

And when a broker intermediary is used, the following sequence occurs:

1. The message is sent to the broker
2. The broker stores the message and acknowledges the message to the
   Sender
3. The broker sends the message to the Server
4. The Server dispatches the message to the application
5. The Server indicates the acknowledgement to the broker

In this sequence, a Sender waits for the message acknowledgement until
step (2) occurs. Therefore the broker-based Sender receives the
acknowledgement earlier in the transfer than in the routed case.
However, in the brokered case, receipt of the acknowledgement does not
signify that the message has been (or will ever be) received by the
Server.

Batched Notifications **Note Well**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

While the use of a router intermediary for oslo.messaging Notification
is currently not recommended, it should be noted that the use of a
router intermediary with batched notifications may exacerbate the
acknowledgement wait time for a Sender. For example, when a batched
notification configuration is used where the batch size is set to 100,
the Server will wait until 100 notification messages are buffered (or a
timeout occurs) before dispatching the notifications to the application
for message acknowledgement. Since each notifier client can have at
most one message outstanding (e.g. pending acknowledgement), if the
total number of notifying clients is less than 100 the batch limit will
never be met. This effectively pauses all notifying clients until the
batch timeout expires.

Prerequisites
-------------

Protocol Engine
~~~~~~~~~~~~~~~

This driver uses the Apache QPID `Proton`__ AMQP 1.0 protocol engine.
This engine consists of a platform-specific library and a Python
binding.
The driver does not directly interface with the engine API, as the API
is a very low-level interface to the AMQP protocol. Instead, the driver
uses the pure Python `Pyngus`__ client API, which is layered on top of
the protocol engine. In order to run the driver, the Proton Python
bindings, the Proton library, the Proton header files, and Pyngus must
be installed.

__ http://qpid.apache.org/proton/index.html
__ https://github.com/kgiusti/pyngus

Source packages for the `Pyngus client API`__ are available via PyPI.

__ https://pypi.org/project/pyngus

Pyngus depends on the Proton Python bindings. Source packages for the
`Proton Python bindings`__ are also available via PyPI.

__ https://pypi.org/project/python-qpid-proton

Since the AMQP 1.0 driver is an optional extension to Oslo.Messaging,
these packages are not installed by default. Use the 'amqp1' extras tag
when installing Oslo.Messaging in order to pull in these extra
packages:

.. code-block:: shell

    $ python -m pip install oslo.messaging[amqp1]

The Proton package includes a C extension that links to the Proton
library. The C extension is built locally when the Proton source
packages are installed from PyPI. In order to build the Proton C source
locally, there are a number of tools and libraries that need to be
present on the system:

* The tools and libraries necessary for Python C development
* The `SWIG`__ wrapper generator
* The `OpenSSL`__ development libraries and headers
* The `Cyrus SASL`__ development libraries and headers

**Note well**: Currently the Proton PyPI package only supports building
the C extension on Linux systems.

Pre-built packages for both Pyngus and the Proton engine are available
for various Linux distributions (see `packages`_ below). It is
recommended to use the pre-built packages if they are available for
your platform.

__ http://www.swig.org/index.php
__ https://www.openssl.org
__ https://cyrusimap.org

Router Intermediary
~~~~~~~~~~~~~~~~~~~

This driver supports a *router* intermediary that supports version 1.0
of the AMQP protocol. The direct messaging capabilities provided by
this intermediary type are recommended for oslo.messaging RPC.

The driver has been tested with the `qpid-dispatch-router`__ router in
a `devstack`_ environment. The version of qpid-dispatch-router **must**
be at least 0.7.0. The qpid-dispatch-router also uses the Proton engine
for its AMQP 1.0 support, so the Proton library must be installed on
the system hosting the qpid-dispatch-router daemon. Pre-built packages
for the router are available. See `packages`_ below.

__ http://qpid.apache.org/components/dispatch-router/

Broker Intermediary
~~~~~~~~~~~~~~~~~~~

This driver supports a *broker* intermediary that supports version 1.0
of the AMQP protocol. The store-and-forward capabilities provided by
this intermediary type are recommended for *oslo.messaging*
Notifications.

The driver has been tested with the `qpidd`__ broker in a `devstack`_
environment. The version of qpidd **must** be at least 0.34. qpidd also
uses the Proton engine for its AMQP 1.0 support, so the Proton library
must be installed on the system hosting the qpidd daemon. Pre-built
packages for the broker are available. See `packages`_ below.

See the `oslo specification`__ for additional information regarding
testing done on the driver.

__ http://qpid.apache.org/components/cpp-broker/index.html
__ https://opendev.org/openstack/oslo-specs/src/branch/master/specs/juno/amqp10-driver-implementation.rst

Configuration
-------------

.. _transport urls:

Transport URL Enable
~~~~~~~~~~~~~~~~~~~~

In oslo.messaging, the ``transport_url`` parameters define the
OpenStack service backends for RPC and Notify. The URL is of the
form::

    transport://user:pass@host1:port[,hostN:portN]/virtual_host

where the transport value specifies the RPC or notification backend as
one of **amqp**, rabbit, kafka, etc.

To specify and enable the AMQP 1.0 driver for RPC, in the ``[DEFAULT]``
section of the service configuration file, specify the
``transport_url`` parameter:

.. code-block:: ini

    [DEFAULT]
    transport_url = amqp://username:password@routerhostname:5672

To specify and enable the AMQP 1.0 driver for Notify, in the
``[oslo_messaging_notifications]`` section of the service
configuration file, specify the ``transport_url`` parameter:

::

    [oslo_messaging_notifications]
    transport_url = amqp://username:password@brokerhostname:5672

Note that if a ``transport_url`` parameter is not specified in the
``[oslo_messaging_notifications]`` section, the ``[DEFAULT]``
transport_url will be used for both RPC and Notify backends.

Driver Options
~~~~~~~~~~~~~~

It is recommended that the default configuration options provided by
the AMQP 1.0 driver be used. The configuration options can be modified
in the :oslo.config:group:`oslo_messaging_amqp` section of the service
configuration file.

Connection Options
^^^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_amqp.idle_timeout`
- :oslo.config:option:`oslo_messaging_amqp.connection_retry_interval`
- :oslo.config:option:`oslo_messaging_amqp.connection_retry_backoff`
- :oslo.config:option:`oslo_messaging_amqp.connection_retry_interval_max`

Message Send Options
^^^^^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_amqp.pre_settled`
- :oslo.config:option:`oslo_messaging_amqp.link_retry_delay`
- :oslo.config:option:`oslo_messaging_amqp.default_reply_timeout`
- :oslo.config:option:`oslo_messaging_amqp.default_send_timeout`
- :oslo.config:option:`oslo_messaging_amqp.default_notify_timeout`

.. _address mode:

Addressing Options
^^^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_amqp.addressing_mode`
- :oslo.config:option:`oslo_messaging_amqp.server_request_prefix`
- :oslo.config:option:`oslo_messaging_amqp.broadcast_prefix`
- :oslo.config:option:`oslo_messaging_amqp.group_request_prefix`
- :oslo.config:option:`oslo_messaging_amqp.rpc_address_prefix`
- :oslo.config:option:`oslo_messaging_amqp.notify_address_prefix`
- :oslo.config:option:`oslo_messaging_amqp.multicast_address`
- :oslo.config:option:`oslo_messaging_amqp.unicast_address`
- :oslo.config:option:`oslo_messaging_amqp.anycast_address`
- :oslo.config:option:`oslo_messaging_amqp.default_notification_exchange`
- :oslo.config:option:`oslo_messaging_amqp.default_rpc_exchange`

SSL Options
^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_amqp.ssl`
- :oslo.config:option:`oslo_messaging_amqp.ssl_ca_file`
- :oslo.config:option:`oslo_messaging_amqp.ssl_cert_file`
- :oslo.config:option:`oslo_messaging_amqp.ssl_key_file`
- :oslo.config:option:`oslo_messaging_amqp.ssl_key_password`

SASL Options
^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_amqp.sasl_mechanisms`
- :oslo.config:option:`oslo_messaging_amqp.sasl_config_dir`
- :oslo.config:option:`oslo_messaging_amqp.sasl_config_name`
- :oslo.config:option:`oslo_messaging_amqp.sasl_default_realm`

AMQP Generic Options (**Note Well**)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The AMQP 1.0 driver currently does **not** support the generic *amqp*
options used by pre-1.0 drivers such as *amqp_durable_queues* or
*amqp_auto_delete*.
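
For illustration only, the sketch below shows how a few of the options
listed above would appear in a service configuration file if the
defaults did need to be overridden. Only the option names are taken
from the lists above; every value shown (and the CA file path) is an
arbitrary example, not a recommendation:

.. code-block:: ini

    [oslo_messaging_amqp]
    # Connection tuning (example values only)
    connection_retry_interval = 2
    connection_retry_backoff = 2
    connection_retry_interval_max = 30

    # Message send timeouts, in seconds (example values only)
    default_reply_timeout = 30
    default_send_timeout = 30
    default_notify_timeout = 30

    # SSL/SASL settings (the CA path is hypothetical)
    ssl = true
    ssl_ca_file = /etc/ssl/certs/messaging-ca.pem
    sasl_mechanisms = PLAIN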
qpid-dispatch-router
~~~~~~~~~~~~~~~~~~~~

First, verify that the Proton library has been installed and is imported by
the ``qpid-dispatch-router`` intermediary. This can be checked by running::

    $ qdrouterd --help

and looking for references to ``qpid-dispatch`` include and config path
options in the help text. If no ``qpid-dispatch`` information is listed,
verify that the Proton libraries are installed and that the version of
``qdrouterd`` is greater than or equal to 0.7.0.

Second, configure the address patterns used by the driver. This is done by
adding the following to ``/etc/qpid-dispatch/qdrouterd.conf``.

If the legacy syntax for the addressing mode is required, include the
following::

    address {
        prefix: unicast
        distribution: closest
    }

    address {
        prefix: exclusive
        distribution: closest
    }

    address {
        prefix: broadcast
        distribution: multicast
    }

For the routable syntax addressing mode, include the following::

    address {
        prefix: openstack.org/om/rpc/multicast
        distribution: multicast
    }

    address {
        prefix: openstack.org/om/rpc/unicast
        distribution: closest
    }

    address {
        prefix: openstack.org/om/rpc/anycast
        distribution: balanced
    }

    address {
        prefix: openstack.org/om/notify/multicast
        distribution: multicast
    }

    address {
        prefix: openstack.org/om/notify/unicast
        distribution: closest
    }

    address {
        prefix: openstack.org/om/notify/anycast
        distribution: balanced
    }

**Note well**: For any customization of the `address mode`_ and syntax
used, the address entity configurations in
``/etc/qpid-dispatch/qdrouterd.conf`` must be updated to match.

qpidd
~~~~~

First, verify that the Proton library has been installed and is imported by
the qpidd broker. This can be checked by running:

.. code-block:: shell

    $ qpidd --help

and looking for the AMQP 1.0 options in the help text. If no AMQP 1.0
options are listed, verify that the Proton libraries are installed and that
the version of qpidd is greater than or equal to 0.34.

Second, configure the default address patterns used by the driver for a
broker-based backend. This is done by adding the following to
``/etc/qpid/qpidd.conf``:

.. code-block:: ini

    queue-patterns=exclusive
    queue-patterns=unicast
    topic-patterns=broadcast

These patterns, *exclusive*, *unicast*, and *broadcast*, are the legacy
addressing values used by the driver. These can be overridden via the
driver configuration options if desired (see above). If manually
overridden, update the ``qpidd.conf`` values to match.

.. _devstack:

DevStack Support
----------------

The plugin for the AMQP 1.0 oslo.messaging driver is supported by DevStack.
The plugin supports the deployment of several different message bus
configurations.

In the ``[localrc]`` section of ``local.conf``, the
`devstack-plugin-amqp1`__ plugin repository must be enabled. For example:

.. code-block:: ini

    [[local|localrc]]
    enable_plugin amqp1 https://opendev.org/openstack/devstack-plugin-amqp1

Set the username and password variables if needed for the configuration:

.. code-block:: shell

    AMQP1_USERNAME=queueuser
    AMQP1_PASSWORD=queuepassword

The AMQP1_SERVICE variable identifies the message bus configuration that
will be used. In addition to configurations where the AMQP 1.0 driver is
used for both the RPC and Notification messaging communications, the plugin
supports a hybrid configuration that deploys AMQP 1.0 for the RPC backend
and the *oslo.messaging* rabbit driver for the Notification backend.
Additionally, the plugin supports a setting for a pre-provisioned messaging
bus that prevents the plugin from creating the messaging bus.
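Putting these settings together, a ``local.conf`` fragment for the hybrid
configuration might look like the following sketch (the credentials are
illustrative):

.. code-block:: ini

    [[local|localrc]]
    enable_plugin amqp1 https://opendev.org/openstack/devstack-plugin-amqp1
    # qpid-hybrid deploys qdrouterd for RPC and rabbitmq for Notifications
    AMQP1_SERVICE=qpid-hybrid
    AMQP1_USERNAME=queueuser
    AMQP1_PASSWORD=queuepassword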
The setting of the AMQP1_SERVICE variable will select which messaging
intermediary will be used for the RPC and Notification messaging backends:

+---------------+------------------+------------------+
| AMQP1_SERVICE | RPC Backend      | Notify Backend   |
+---------------+------------------+------------------+
| qpid          | qpidd broker     | qpidd broker     |
+---------------+------------------+------------------+
| qpid-dual     | qdrouterd router | qpidd broker     |
+---------------+------------------+------------------+
| qpid-hybrid   | qdrouterd router | rabbitmq broker  |
+---------------+------------------+------------------+
| external      | pre-provisioned  | pre-provisioned  |
|               | message bus      | message bus      |
+---------------+------------------+------------------+

__ https://github.com/openstack/devstack-plugin-amqp1.git

.. _packages:

Platforms and Packages
----------------------

PyPI
~~~~

Packages for `Pyngus`__ and the `Proton`__ engine are available on PyPI.

__ https://pypi.org/project/pyngus
__ https://pypi.org/project/python-qpid-proton

RHEL and Fedora
~~~~~~~~~~~~~~~

Packages exist in EPEL for RHEL/CentOS 7 and 8, and Fedora 26+.

The following packages must be installed on the system running the
``qdrouterd`` daemon:

- ``qpid-dispatch-router``
- ``python-qpid-proton``

The following packages must be installed on the system running the
``qpidd`` daemon:

- ``qpid-cpp-server`` (version 0.26+)
- ``qpid-proton-c``

The following packages must be installed on the systems running the
services that use the new driver:

- Proton libraries: ``qpid-proton-c-devel``
- Proton python bindings: ``python-qpid-proton``
- ``pyngus`` (via PyPI)

Debian and Ubuntu
~~~~~~~~~~~~~~~~~

.. todo:: Is this still true?

Packages for the Proton library, headers, and Python bindings are available
in the Debian/Testing repository. Proton packages are not yet available in
the Ubuntu repository. The version of qpidd on both platforms is too old
and does not support AMQP 1.0. Until the proper package versions arrive,
the latest packages can be pulled from the `Apache Qpid PPA`__ on
Launchpad:

.. code-block:: shell

    $ sudo add-apt-repository ppa:qpid/released

The following packages must be installed on the system running the
``qdrouterd`` daemon:

- ``qdrouterd`` (version 0.8.0+)

The following packages must be installed on the system running the
``qpidd`` daemon:

- ``qpidd`` (version 0.34+)

The following packages must be installed on the systems running the
services that use the new driver:

- Proton libraries: ``libqpid-proton2-dev``
- Proton python bindings: ``python-qpid-proton``
- ``pyngus`` (via PyPI)

__ https://launchpad.net/~qpid/+archive/ubuntu/released

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/admin/drivers.rst0000664000175000017500000000016300000000000022125 0ustar00zuulzuul00000000000000
===================
 Available Drivers
===================

.. list-plugins:: oslo.messaging.drivers
   :detailed:

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/admin/index.rst0000664000175000017500000000017300000000000021557 0ustar00zuulzuul00000000000000
================
Deployment Guide
================
.. toctree::
   :maxdepth: 2

   drivers
   AMQP1.0
   kafka
   rabbit

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/admin/kafka.rst0000664000175000017500000002171400000000000021531 0ustar00zuulzuul00000000000000
=============================
Kafka Driver Deployment Guide
=============================

Introduction
------------

The Kafka Driver is an experimental messaging transport backend in
*oslo.messaging*. The driver maps the base *oslo.messaging* capabilities
for notification message exchange onto v2.0 of the Apache Kafka distributed
streaming platform. More detail regarding the Apache Kafka server is
available from the `Apache Kafka website`__. More detail regarding the
driver's implementation is available from the `adding kafka driver
specification`__ and the `update kafka driver specification`__.

__ https://kafka.apache.org/
__ https://opendev.org/openstack/oslo-specs/src/branch/master/specs/liberty/adding-kafka-support.rst
__ https://opendev.org/openstack/oslo-specs/src/branch/master/specs/queens/update-kafka-support.rst

Overview
--------

The Kafka driver **only** supports use for sending and receiving
*oslo.messaging* notifications. Specifically, the Kafka driver does not
support *oslo.messaging* RPC transfers.

Communication between the driver and the Kafka server backend uses a
`binary protocol over TCP`__ that defines all APIs as request-response
message pairs. The Kafka driver integrates the ``confluent-kafka`` Python
client for full protocol support and utilizes the Producer API to publish
notification messages and the Consumer API for notification listener
subscriptions. The driver is able to work with a single instance of a
Kafka server or a clustered Kafka server deployment.

__ https://kafka.apache.org/protocol.html

Hybrid Messaging Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~

*Oslo.messaging* provides a mechanism to configure separate messaging
backends for RPC and notification communications. This is supported through
the definition of separate RPC and notification `transport urls`__ in the
service configuration. When the Kafka driver is deployed for
*oslo.messaging* notifications, a separate driver and messaging backend
must be deployed for RPC communications. For these hybrid messaging
configurations, either the `rabbit`__ or `amqp`__ drivers can be deployed
for *oslo.messaging* RPC.

__ https://docs.openstack.org/oslo.messaging/latest/reference/transport.html
__ https://docs.openstack.org/oslo.messaging/latest/admin/drivers.html#rabbit
__ https://docs.openstack.org/oslo.messaging/latest/admin/AMQP1.0.html

Topics and vhost Support
~~~~~~~~~~~~~~~~~~~~~~~~

The Kafka topic is the feed name to which records are published. Topics in
Kafka are multi-subscriber such that a topic can have zero, one or many
consumers that subscribe to the data written to it. In *oslo.messaging*, a
notification listener subscribes to a topic in a supplied target that is
directly mapped by the driver to the Kafka topic.

The Kafka server architecture does not natively support vhosts. In order to
support the presence of a vhost in the transport URL provided to the
driver, the topic created on the Kafka server will be appended with the
virtual host name. This creates a unique topic per virtual host, but
**note** that there is otherwise no access control or isolation provided by
the Kafka server.
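For example, the following notification transport URL (the host name and
credentials are illustrative) carries the vhost ``cloud1`` in its path; the
topics created on the Kafka server for this deployment will incorporate
that vhost name as described above:

.. code-block:: ini

    [oslo_messaging_notifications]
    transport_url = kafka://username:password@kafkahostname:9092/cloud1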
Listener Pools
~~~~~~~~~~~~~~

The Kafka driver provides support for listener pools. This capability is
realized by mapping the listener pool name to a Kafka server *consumer
group* name. Each record published to a topic will be delivered to one
consumer instance within each subscribing pool (e.g. *consumer group*). If
a listener pool name is not assigned to the notification listener, a single
default *consumer group* will be used by the Kafka driver. All listeners
will be assigned to that group, and the messages will effectively be load
balanced across the competing listener instances.

Synchronous Commit
~~~~~~~~~~~~~~~~~~

A primary functional difference between a Kafka server and a classic broker
queue is that the offset or position of the message read from the commit
log is controlled by the listener (e.g. consumer). The driver will advance
the offset it maintains linearly as it reads message records from the
server. To ensure that duplicate messages are not generated during downtime
or communication interruption, the driver will *synchronously* commit the
consumed messages prior to the notification listener dispatch. Due to this,
the driver does not support the re-queue operation and cannot replay
messages from a Kafka partition.

Prerequisites
-------------

In order to run the driver, the ``confluent-kafka`` Python client must be
installed. The Kafka driver integrates a Python client based on
`librdkafka`__ for full protocol support and utilizes the Producer API to
publish notification messages and the Consumer API for notification
listener subscriptions.

__ https://github.com/confluentinc/confluent-kafka-python

Source packages for the ``confluent-kafka`` library are available via
`PyPI`__. Since the Kafka driver is an optional extension to
*oslo.messaging* these packages are not installed by default. Use the
``kafka`` extras tag when installing *oslo.messaging* in order to pull in
these extra packages:

.. code-block:: shell

    $ python -m pip install oslo.messaging[kafka]

__ https://pypi.org/project/confluent-kafka/

Configuration
-------------

Transport URL Enable
~~~~~~~~~~~~~~~~~~~~

In *oslo.messaging*, the ``transport_url`` parameters define the OpenStack
service backends for RPC and Notify. The URL is of the form::

    transport://user:pass@host1:port[,hostN:portN]/virtual_host

Where the transport value specifies the RPC or notification backend as one
of ``amqp``, ``rabbit``, ``kafka``, etc.

To specify and enable the Kafka driver for notifications, in the section
``[oslo_messaging_notifications]`` of the service configuration file,
specify the ``transport_url`` parameter::

    [oslo_messaging_notifications]
    transport_url = kafka://username:password@kafkahostname:9092

Note that if a ``transport_url`` parameter is not specified in the
``[oslo_messaging_notifications]`` section, the value of ``[DEFAULT]
transport_url`` will be used for both RPC and notification backends.

Driver Options
~~~~~~~~~~~~~~

It is recommended that the default configuration options provided by the
Kafka driver be used. The configuration options can be modified in the
:oslo.config:group:`oslo_messaging_kafka` section of the service
configuration file.
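For example, a sketch that overrides two of the listener options described
below in order to pin the *consumer group* name and disable automatic
offset commits (the group name is illustrative, and the defaults are
recommended):

.. code-block:: ini

    [oslo_messaging_kafka]
    # Illustrative values only.
    consumer_group = oslo_notification_pool
    enable_auto_commit = false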
Notification Listener Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_kafka.kafka_max_fetch_bytes`
- :oslo.config:option:`oslo_messaging_kafka.kafka_consumer_timeout`
- :oslo.config:option:`oslo_messaging_kafka.consumer_group`
- :oslo.config:option:`oslo_messaging_kafka.enable_auto_commit`
- :oslo.config:option:`oslo_messaging_kafka.max_poll_records`

Notifier Options
^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_kafka.producer_batch_timeout`
- :oslo.config:option:`oslo_messaging_kafka.producer_batch_size`

compression_codec
    The compression codec for all data generated by the producer; valid
    values are none, gzip, snappy, lz4 and zstd. Note that the legal values
    depend on the Kafka version; please refer to the `kafka
    documentation`_.

.. _kafka documentation: https://kafka.apache.org/documentation/

Security Options
^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_kafka.security_protocol`
- :oslo.config:option:`oslo_messaging_kafka.sasl_mechanism`
- :oslo.config:option:`oslo_messaging_kafka.ssl_cafile`

DevStack Support
----------------

The plugin for the Kafka *oslo.messaging* driver is supported by DevStack.
As the Kafka driver can only be deployed for notifications, the plugin
supports the deployment of several message bus configurations.

In the ``[localrc]`` section of ``local.conf``, the
`devstack-plugin-kafka`__ plugin repository must be enabled. For example:

.. code-block:: ini

    [[local|localrc]]
    enable_plugin kafka https://opendev.org/openstack/devstack-plugin-kafka

Set the Kafka and Scala version and location variables if needed for the
configuration:

.. code-block:: shell

    KAFKA_VERSION=2.0.0
    KAFKA_BASEURL=http://archive.apache.org/dist/kafka
    SCALA_VERSION=2.12
    SCALA_BASEURL=http://www.scala-lang.org/files/archive

The ``RPC_`` and ``NOTIFY_`` variables will define the message bus
configuration that will be used. The hybrid configurations will allow for
the *rabbit* and *amqp* drivers to be used for the RPC transports while the
*kafka* driver will be used for the notification transport. The setting of
the service variables will select which messaging intermediary is enabled
for the configuration:

+------------+--------------------+--------------------+
|            | RPC                | NOTIFY             |
|            +-----------+--------+-----------+--------+
|            | SERVICE   | PORT   | SERVICE   | PORT   |
+------------+-----------+--------+-----------+--------+
| Config 1   | rabbit    | 5672   | kafka     | 9092   |
+------------+-----------+--------+-----------+--------+
| Config 2   | amqp      | 5672   | kafka     | 9092   |
+------------+-----------+--------+-----------+--------+

__ https://github.com/openstack/devstack-plugin-kafka.git

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/admin/rabbit.rst0000664000175000017500000002412200000000000021713 0ustar00zuulzuul00000000000000
================================
RabbitMQ Driver Deployment Guide
================================

Introduction
------------

The RabbitMQ Driver is a messaging transport backend in *oslo.messaging*.
The driver maps the base *oslo.messaging* capabilities for notification
message exchange onto the RabbitMQ distributed message broker. More detail
regarding the RabbitMQ server is available from the `RabbitMQ website`__.

__ https://www.rabbitmq.com/

Abstract
--------

The RabbitMQ Driver is a messaging transport backend supported in
*oslo.messaging*.
Communication between the driver and the RabbitMQ server backend uses the
`AMQP 0-9-1 protocol`_ (Advanced Message Queuing Protocol), an open
application layer protocol that allows client applications to communicate
with messaging middleware brokers in a standard way. AMQP defines the APIs
to request messages from a message queue or to publish messages to an
exchange. The RabbitMQ driver integrates the kombu Python client for full
protocol support and utilizes the `Producer API`_ to publish notification
messages and the `Consumer API`_ for notification listener subscriptions.
The driver is able to work with a single instance of a RabbitMQ server or a
clustered RabbitMQ server deployment.

.. _AMQP 0-9-1 protocol: https://www.rabbitmq.com/protocol.html
.. _Consumer API: https://kombu.readthedocs.io/en/stable/userguide/consumers.html
.. _Producer API: https://kombu.readthedocs.io/en/stable/userguide/producers.html

Exchange
~~~~~~~~

An `Exchange is an AMQP mechanism`_ designed to dispatch messages, acting
as a kind of proxy. Messages are always published to exchanges. An exchange
can:

- receive messages from producers
- push messages to `queues`_

Exchanges can distribute message copies to one or many queues using rules
called bindings. The AMQP protocol defines different types of exchanges:

- direct
- topic
- headers
- fanout

An exchange can live without any binding. By default, no exception is
raised if the message is not redirected to any queue, `unless the mandatory
flag is used`_. *oslo.messaging* allows you to send and consume messages in
a reliable manner through the *Connection* class. With the mandatory flag,
RabbitMQ triggers a callback if the message is not routed to any queue.
This callback is used to loop until a timeout expires, giving the sender a
chance to recover.

.. _Exchange is an AMQP mechanism: https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchanges
.. _queues: https://www.rabbitmq.com/queues.html
.. _unless the mandatory flag is used: https://www.rabbitmq.com/reliability.html#routing

Queue
~~~~~

The `AMQP queue`_ is the message store; it can keep messages in memory or
persist them to disk. The queue is bound to the exchange through one or
more bindings. Consumers can consume messages from the queue. Queues have
names so that applications can reference them, and properties that define
how they behave.

.. _AMQP queue: https://www.rabbitmq.com/tutorials/amqp-concepts.html#queues

Routing-Key
~~~~~~~~~~~

The routing key is part of the AMQP envelope of the message. It is set by
the producer to route the message being sent. When a message is received,
the exchange tries to match the message routing key against the binding key
of all the queues bound to it. If no match exists, the message is ignored;
otherwise the message is routed to the queue whose binding key matches.

Exchange types
~~~~~~~~~~~~~~

``direct``
^^^^^^^^^^

A direct exchange routes messages to queues based on the message routing
key: the message is delivered directly to the queue whose binding key
corresponds to the routing key. *direct* is a type of exchange, so the
RabbitMQ backend does not store the data.

``topic``
^^^^^^^^^

The RabbitMQ topic is an `exchange type`_. In RabbitMQ, messages sent to a
`topic exchange`_ can't have an arbitrary ``routing_key`` - it must be a
list of words, delimited by dots. The words can be anything, but usually
they specify some features connected to the message.
A few valid routing key examples: "blue.orange.yellow", "cars.bikes",
"quick.orange.rabbit". There can be as many words in the routing key as you
like, up to the limit of 255 bytes.

In *oslo.messaging*, a notification listener subscribes to a topic in a
supplied target that is directly mapped by the driver to the RabbitMQ
topic.

.. _exchange type: https://www.rabbitmq.com/tutorials/tutorial-three-python.html
.. _topic exchange: https://www.rabbitmq.com/tutorials/tutorial-five-python.html

``fanout``
^^^^^^^^^^

The fanout exchange broadcasts all messages it receives to all the queues
it knows.

``headers``
^^^^^^^^^^^

A headers exchange routes messages to queues using the message header
content instead of the routing key described previously. In this case the
producer adds header values as key-value pairs to the message and sends it
via the headers exchange. The exchange tries to match all or any (based on
the value of ``x-match``) header values of the received message against the
binding values of all the queues bound to it; the value of the ``x-match``
header entry determines how the bound queues are matched. If no match is
found, the message is ignored; otherwise it is routed to the queue whose
binding values match.

Health check with heartbeat frames
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The RabbitMQ driver of *oslo.messaging* allows you to detect dead TCP
connections with heartbeats and TCP keepalives. The heartbeat function of
the driver is built on the `heartbeat_check feature of kombu client`_ and
on the `AMQP 0.9.1 heartbeat feature`_ implemented by RabbitMQ.

Heartbeating is a technique designed to undo one of TCP/IP's features,
namely its ability to recover from a broken physical connection by closing
only after a quite long time-out. In some scenarios we need to know very
rapidly if a peer is disconnected or not responding for other reasons (e.g.
it is looping). Since heartbeating can be done at a low level, AMQP 0.9.1
implements this as a special type of frame that peers exchange at the
transport level, rather than as a class method. Heartbeats also defend
against certain network equipment which may terminate "idle" TCP
connections when there is no activity on them for a certain period of time.

The driver will always run the heartbeat in a native Python thread,
avoiding inheriting the execution model from the parent process, i.e. it
will not use green threads.

.. _heartbeat_check feature of kombu client: http://docs.celeryproject.org/projects/kombu/en/stable/reference/kombu.html?highlight=heartbeat#kombu.Connection.heartbeat_check
.. _AMQP 0.9.1 heartbeat feature: https://www.rabbitmq.com/heartbeats.html

Prerequisites
-------------

In order to run the driver, the kombu Python client must be installed. The
RabbitMQ driver integrates a `Python client based on kombu`_ and `on
py-amqp`_ for full protocol support and utilizes the Producer API to
publish notification messages and the Consumer API for notification
listener subscriptions.

.. _Python client based on kombu: https://github.com/celery/kombu
.. _on py-amqp: https://github.com/celery/py-amqp

Source packages for the `kombu library`_ are available via PyPI. Since the
RabbitMQ driver is not an optional extension to *oslo.messaging*, these
packages are installed by default.
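As an example of tuning the heartbeat behaviour described above, the two
heartbeat options listed in the Configuration section below may be
overridden; the values here are illustrative only, not recommendations:

.. code-block:: ini

    [oslo_messaging_rabbit]
    # Illustrative values only -- the defaults are recommended.
    heartbeat_timeout_threshold = 60
    heartbeat_rate = 2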
.. _kombu library: https://pypi.org/project/kombu/

Configuration
-------------

Transport URL Enable
~~~~~~~~~~~~~~~~~~~~

In *oslo.messaging*, the ``transport_url`` parameter defines the OpenStack
service backends for RPC and notifications. The URL is of the form::

    transport://user:pass@host1:port[,hostN:portN]/virtual_host

Where the transport value specifies the RPC or notification backend as one
of ``amqp``, ``rabbit``, ``kafka``, etc.

To specify and enable the RabbitMQ driver for notifications, in the section
``[oslo_messaging_notifications]`` of the service configuration file,
specify the ``transport_url`` parameter::

    [oslo_messaging_notifications]
    transport_url = rabbit://username:password@rabbithostname:5672

Note that if a ``transport_url`` parameter is not specified in the
``[oslo_messaging_notifications]`` section, the ``[DEFAULT]
transport_url`` option will be used for both RPC and notifications
backends.

Driver Options
~~~~~~~~~~~~~~

It is recommended that the default configuration options provided by the
RabbitMQ driver be used. The configuration options can be modified in the
:oslo.config:group:`oslo_messaging_rabbit` section of the service
configuration file.

Publishing Options
^^^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_rabbit.kombu_compression`
- :oslo.config:option:`oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout`

Consuming Options
^^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_rabbit.rabbit_ha_queues`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_quorum_queue`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_quorum_delivery_limit`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_quorum_max_memory_length`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_quorum_max_memory_bytes`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_transient_queues_ttl`

Connection Options
^^^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_rabbit.kombu_reconnect_delay`
- :oslo.config:option:`oslo_messaging_rabbit.kombu_failover_strategy`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_retry_interval`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_retry_backoff`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_interval_max`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_qos_prefetch_count`

Heartbeat Options
^^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_rabbit.heartbeat_timeout_threshold`
- :oslo.config:option:`oslo_messaging_rabbit.heartbeat_rate`

Security Options
^^^^^^^^^^^^^^^^

- :oslo.config:option:`oslo_messaging_rabbit.ssl`
- :oslo.config:option:`oslo_messaging_rabbit.ssl_version`
- :oslo.config:option:`oslo_messaging_rabbit.ssl_key_file`
- :oslo.config:option:`oslo_messaging_rabbit.ssl_cert_file`
- :oslo.config:option:`oslo_messaging_rabbit.rabbit_login_method`
- :oslo.config:option:`oslo_messaging_rabbit.ssl_enforce_fips_mode`

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/conf.py0000664000175000017500000000355400000000000020131 0ustar00zuulzuul00000000000000
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Project information ------------------------------------------------------ # General information about the project. copyright = '2018, Oslo Contributors' # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'openstackdocstheme', 'stevedore.sphinxext', 'oslo_config.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/oslo.messaging' openstackdocs_bug_project = 'oslo.messaging' openstackdocs_bug_tag = '' # The master toctree document. master_doc = 'index' # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1146717 oslo.messaging-14.9.0/doc/source/configuration/0000775000175000017500000000000000000000000021474 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/configuration/conffixture.rst0000664000175000017500000000023600000000000024563 0ustar00zuulzuul00000000000000---------------------- Testing Configurations ---------------------- .. currentmodule:: oslo_messaging.conffixture .. autoclass:: ConfFixture :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/configuration/index.rst0000664000175000017500000000014000000000000023330 0ustar00zuulzuul00000000000000============= Configuration ============= .. toctree:: :maxdepth: 2 opts conffixture ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/configuration/opts.rst0000664000175000017500000000053500000000000023216 0ustar00zuulzuul00000000000000======================= Configuration Options ======================= oslo.messaging uses oslo.config to define and manage configuration options to allow the deployer to control how an application uses the underlying messaging system. .. show-options:: oslo.messaging API === .. currentmodule:: oslo_messaging.opts .. 
autofunction:: list_opts ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724686539.118672 oslo.messaging-14.9.0/doc/source/contributor/0000775000175000017500000000000000000000000021177 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/contributor/contributing.rst0000664000175000017500000000012400000000000024435 0ustar00zuulzuul00000000000000============== Contributing ============== .. include:: ../../../CONTRIBUTING.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/contributor/driver-dev-guide.rst0000664000175000017500000000315100000000000025073 0ustar00zuulzuul00000000000000--------------------------------------- Guide for Transport Driver Implementors --------------------------------------- .. currentmodule:: oslo_messaging .. automodule:: oslo_messaging._drivers.base ============ Introduction ============ This document is a *best practices* guide for the developer interested in creating a new transport driver for Oslo.Messaging. It should also be used by maintainers as a reference for proper driver behavior. This document will describe the driver interface and prescribe the expected behavior of any driver implemented to this interface. **Note well:** The API described in this document is internal to the oslo.messaging library and therefore **private**. Under no circumstances should this API be referenced by code external to the oslo.messaging library. ================ Driver Interface ================ The driver interface is defined by a set of abstract base classes. The developer creates a driver by defining concrete classes from these bases. The derived classes embody the logic that is specific for the messaging back-end that is to be supported. These base classes are defined in the *base.py* file in the *_drivers* subdirectory. =============== IncomingMessage =============== .. autoclass:: IncomingMessage :members: ================== RpcIncomingMessage ================== .. autoclass:: RpcIncomingMessage :members: ======== Listener ======== .. autoclass:: Listener :members: ================= PollStyleListener ================= .. autoclass:: PollStyleListener :members: ========== BaseDriver ========== .. autoclass:: BaseDriver :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/contributor/index.rst0000664000175000017500000000027700000000000023046 0ustar00zuulzuul00000000000000============================== Contributing to oslo.messaging ============================== .. toctree:: :maxdepth: 2 contributing driver-dev-guide supported-messaging-drivers ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/contributor/supported-messaging-drivers.rst0000664000175000017500000000416100000000000027407 0ustar00zuulzuul00000000000000============================= Supported Messaging Drivers ============================= RabbitMQ may not be sufficient for the entire community as the community grows. Pluggability is still something we should maintain, but we should have a very high standard for drivers that are shipped and documented as being supported. 
This document defines a very clear policy as to the requirements for
drivers to be carried in oslo.messaging and thus supported by the OpenStack
community as a whole. We will deprecate any drivers that do not meet the
requirements, and announce said deprecations in any appropriate channels to
give users time to signal their needs. Deprecation will last for two
release cycles before removing the code. We will also review and update
documentation to annotate which drivers are supported and which are
deprecated given these policies.

Policy
------

Testing
~~~~~~~

* Must have unit and/or functional test coverage of at least 60% as
  reported by coverage report. Unit tests must be run for all versions of
  python oslo.messaging currently gates on.

* Must have integration testing including at least 3 popular
  oslo.messaging dependents, preferably at the minimum a devstack-gate job
  with Nova, Cinder, and Neutron.

* All testing above must be voting in the gate of oslo.messaging.

Documentation
~~~~~~~~~~~~~

* Must have a reasonable amount of documentation including documentation
  in the official OpenStack deployment guide.

Support
~~~~~~~

* Must have at least two individuals from the community committed to
  triaging and fixing bugs, and responding to test failures in a timely
  manner.

Prospective Drivers
~~~~~~~~~~~~~~~~~~~

* Drivers that intend to meet the requirements above, but that do not yet
  meet them will be given one full release cycle, or 6 months, whichever is
  longer, to comply before being marked for deprecation. Their use,
  however, will not be supported by the community. This will prevent a
  chicken and egg problem for new drivers.

.. note:: This work is licensed under a Creative Commons Attribution 3.0
   Unported License. http://creativecommons.org/licenses/by/3.0/legalcode

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/index.rst0000664000175000017500000000076700000000000020470 0ustar00zuulzuul00000000000000
==============
oslo.messaging
==============

The Oslo messaging API supports RPC and notifications over a number of
different messaging transports.

.. toctree::
   :maxdepth: 1

   contributor/index
   configuration/index
   admin/index
   user/index
   reference/index

Release Notes
=============

Read also the `oslo.messaging Release Notes
<https://docs.openstack.org/releasenotes/oslo.messaging/>`_.

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724686539.118672 oslo.messaging-14.9.0/doc/source/reference/0000775000175000017500000000000000000000000020563 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/exceptions.rst0000664000175000017500000000100400000000000023471 0ustar00zuulzuul00000000000000
----------
Exceptions
----------

.. currentmodule:: oslo_messaging

.. autoexception:: ClientSendError
.. autoexception:: DriverLoadFailure
.. autoexception:: ExecutorLoadFailure
.. autoexception:: InvalidTransportURL
.. autoexception:: MessagingException
.. autoexception:: MessagingTimeout
.. autoexception:: MessagingServerError
.. autoexception:: NoSuchMethod
.. autoexception:: RPCDispatcherError
.. autoexception:: RPCVersionCapError
.. autoexception:: ServerListenError
.. autoexception:: UnsupportedVersion
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/executors.rst0000664000175000017500000000207600000000000023343 0ustar00zuulzuul00000000000000
=========
Executors
=========

Executors control how a received message is scheduled for processing by a
Server. This scheduling can be *synchronous* or *asynchronous*.

A synchronous executor will process the message on the Server's thread.
This means the Server can process only one message at a time. Other
incoming messages will not be processed until the current message is done
processing. For example, in the case of an RPCServer only one method call
will be invoked at a time. A synchronous executor guarantees that messages
complete processing in the order that they are received.

An asynchronous executor will process received messages concurrently. The
Server thread will not be blocked by message processing and can continue to
service incoming messages. There are no ordering guarantees - message
processing may complete in a different order than they were received. The
executor may be configured to limit the maximum number of messages that are
processed at once.

Available Executors
===================

.. list-plugins:: oslo.messaging.executors
   :detailed:

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/index.rst0000664000175000017500000000033400000000000022424 0ustar00zuulzuul00000000000000
.. _using:

=========
Reference
=========

.. toctree::
   :maxdepth: 2

   transport
   executors
   target
   server
   rpcclient
   notifier
   notification_driver
   notification_listener
   serializer
   exceptions

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/notification_driver.rst0000664000175000017500000000042300000000000025355 0ustar00zuulzuul00000000000000
-------------------
Notification Driver
-------------------

.. automodule:: oslo_messaging.notify.messaging

.. autoclass:: MessagingDriver

.. autoclass:: MessagingV2Driver

.. currentmodule:: oslo_messaging.notify.notifier

.. autoclass:: Driver
   :members:
   :noindex:

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/notification_listener.rst0000664000175000017500000000036500000000000025714 0ustar00zuulzuul00000000000000
---------------------
Notification Listener
---------------------

.. automodule:: oslo_messaging.notify.listener

.. currentmodule:: oslo_messaging

.. autofunction:: get_notification_listener

.. autofunction:: get_batch_notification_listener

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/notifier.rst0000664000175000017500000000052400000000000023135 0ustar00zuulzuul00000000000000
==========
Notifier
==========

.. currentmodule:: oslo_messaging

.. autoclass:: Notifier
   :members:

.. autoclass:: LoggingNotificationHandler
   :members:

.. autoclass:: LoggingErrorNotificationHandler
   :members:

Available Notifier Drivers
==========================

.. list-plugins:: oslo.messaging.notify.drivers
   :detailed:
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/rpcclient.rst0000664000175000017500000000021300000000000023274 0ustar00zuulzuul00000000000000
----------
RPC Client
----------

.. currentmodule:: oslo_messaging

.. autoclass:: RPCClient
   :members:

.. autoexception:: RemoteError

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/serializer.rst0000664000175000017500000000021300000000000023462 0ustar00zuulzuul00000000000000
----------
Serializer
----------

.. currentmodule:: oslo_messaging

.. autoclass:: Serializer
   :members:

.. autoclass:: NoOpSerializer

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/server.rst0000664000175000017500000000074200000000000022626 0ustar00zuulzuul00000000000000
----------
RPC Server
----------

.. automodule:: oslo_messaging.rpc.server

.. currentmodule:: oslo_messaging

.. autofunction:: get_rpc_server

.. autoclass:: RPCAccessPolicyBase

.. autoclass:: LegacyRPCAccessPolicy

.. autoclass:: DefaultRPCAccessPolicy

.. autoclass:: ExplicitRPCAccessPolicy

.. autoclass:: RPCDispatcher

.. autoclass:: MessageHandlingServer
   :members:

.. autofunction:: expected_exceptions

.. autofunction:: expose

.. autoexception:: ExpectedException

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/target.rst0000664000175000017500000000427200000000000022610 0ustar00zuulzuul00000000000000
------
Target
------

.. currentmodule:: oslo_messaging

.. autoclass:: Target

===============
Target Versions
===============

Target version numbers take the form Major.Minor. For a given message with
version X.Y, the server must be marked as able to handle messages of
version A.B, where A == X and B >= Y.

The Major version number should be incremented for an almost completely new
API. The Minor version number would be incremented for backwards compatible
changes to an existing API. A backwards compatible change could be
something like adding a new method, adding an argument to an existing
method (but not requiring it), or changing the type for an existing
argument (but still handling the old type as well).

If no version is specified it defaults to '1.0'.

In the case of RPC, if you wish to allow your server interfaces to evolve
such that clients do not need to be updated in lockstep with the server,
you should take care to implement the server changes in a backwards
compatible manner and have the clients specify which interface version they
require for each method.

Adding a new method to an endpoint is a backwards compatible change and the
version attribute of the endpoint's target should be bumped from X.Y to
X.Y+1. On the client side, the new RPC invocation should have a specific
version specified to indicate the minimum API version that must be
implemented for the method to be supported. For example::

    def get_host_uptime(self, ctxt, host):
        cctxt = self.client.prepare(server=host, version='1.1')
        return cctxt.call(ctxt, 'get_host_uptime')

In this case, version '1.1' is the first version that supported the
get_host_uptime() method.

Adding a new parameter to an RPC method can be made backwards compatible.
The endpoint version on the server side should be bumped.
The implementation of the method must not expect the parameter to be
present.

::

    def some_remote_method(self, arg1, arg2, newarg=None):
        # The code needs to deal with newarg=None for cases
        # where an older client sends a message without it.
        pass

On the client side, the same changes should be made as in example 1. The
minimum version that supports the new parameter should be specified.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/reference/transport.rst0000664000175000017500000000121400000000000023347 0ustar00zuulzuul00000000000000
---------
Transport
---------

.. currentmodule:: oslo_messaging

.. autoclass:: Transport

.. autoclass:: TransportURL
   :members:

.. autoclass:: TransportHost

.. autofunction:: set_transport_defaults

Forking Processes and oslo.messaging Transport objects
------------------------------------------------------

oslo.messaging can't ensure that forking a process that shares the same
transport object is safe for the library consumer, because it relies on
different 3rd party libraries that don't ensure that. In certain cases,
with some drivers, it does work:

* rabbit: works only if no connection has already been established.
* amqp1: works

././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724686539.118672 oslo.messaging-14.9.0/doc/source/user/0000775000175000017500000000000000000000000017603 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/user/FAQ.rst0000664000175000017500000000417200000000000020750 0ustar00zuulzuul00000000000000
============================
Frequently Asked Questions
============================

I don't need notifications on the message bus. How do I disable them?
=====================================================================

Notification messages can be disabled using the ``noop`` notify driver. Set
``driver = noop`` in your configuration file under the
[oslo_messaging_notifications] section.

Why does the notification publisher create queues, too? Shouldn't the subscriber do that?
=========================================================================================

The notification messages are meant to be used for integration with
external services, including services that are not part of OpenStack. To
ensure that the subscriber does not miss any messages if it starts after
the publisher, ``oslo.messaging`` ensures that subscriber queues exist when
notifications are sent.

How do I change the queue names where notifications are published?
==================================================================

Notifications are published to the configured exchange using a topic built
from a base value specified in the configuration file and the notification
"level". The default topic is ``notifications``, so an info-level
notification is published to the topic ``notifications.info``. A subscriber
queue of the same name is created automatically for each of these topics.
To change the queue names, change the notification topic using the
``topics`` configuration option in ``[oslo_messaging_notifications]``. The
option accepts a list of values, so it is possible to publish to multiple
topics.
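For example, the following sketch publishes each notification to two topic
bases (the second name is illustrative); info-level notifications would
then appear on both ``notifications.info`` and ``monitor.info``:

.. code-block:: ini

    [oslo_messaging_notifications]
    topics = notifications,monitor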
What are the other choices of notification drivers available?
==============================================================

- messaging
    Send notifications using the 1.0 message format.

- messagingv2
    Send notifications using the 2.0 message format (with a message
    envelope).

- routing
    Configurable routing notifier (by priority or event_type).

- log
    Publish notifications via Python logging infrastructure.

- test
    Store notifications in memory for test verification.

- noop
    Disable sending notifications entirely.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/user/history.rst0000664000175000017500000000004000000000000022030 0ustar00zuulzuul00000000000000
.. include:: ../../../ChangeLog

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/doc/source/user/index.rst0000664000175000017500000000021700000000000021444 0ustar00zuulzuul00000000000000
====================
Using oslo.messaging
====================

.. toctree::
   :maxdepth: 2

   FAQ

.. toctree::
   :maxdepth: 1

   history

././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724686539.118672 oslo.messaging-14.9.0/etc/0000775000175000017500000000000000000000000015333 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/etc/routing_notifier.yaml.sample0000664000175000017500000000134300000000000023066 0ustar00zuulzuul00000000000000
# Setting a priority AND an event means both have to be satisfied.
#
# However, defining different sets for the same driver allows you
# to do OR operations.
#
# See how this logic is modelled below:
#
# if (priority in info, warn or error) or
#    (event == compute.scheduler.run_instance)
# send to messaging driver ...
#
# if priority == 'poll' and
#    event == 'bandwidth.*'
# send to poll driver

group_1:
    messaging:
        accepted_priorities: ['info', 'warn', 'error']
    poll:
        accepted_priorities: ['poll']
        accepted_events: ['bandwidth.*']
    log:
        accepted_events: ['compute.instance.exists']
group_2:
    messaging:
        accepted_events: ['compute.scheduler.run_instance.*']

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1226723 oslo.messaging-14.9.0/oslo.messaging.egg-info/0000775000175000017500000000000000000000000021202 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686538.0 oslo.messaging-14.9.0/oslo.messaging.egg-info/PKG-INFO0000664000175000017500000000426500000000000022306 0ustar00zuulzuul00000000000000
Metadata-Version: 2.1
Name: oslo.messaging
Version: 14.9.0
Summary: Oslo Messaging API
Home-page: https://docs.openstack.org/oslo.messaging/latest/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
License: UNKNOWN
Description: ========================
        Team and repository tags
        ========================

        .. image:: https://governance.openstack.org/tc/badges/oslo.messaging.svg
            :target: https://governance.openstack.org/tc/reference/tags/index.html

        .. Change things from this point on

        Oslo Messaging Library
        ======================

        .. image:: https://img.shields.io/pypi/v/oslo.messaging.svg
            :target: https://pypi.org/project/oslo.messaging/
            :alt: Latest Version

        .. image:: https://img.shields.io/pypi/dm/oslo.messaging.svg
            :target: https://pypi.org/project/oslo.messaging/
            :alt: Downloads

        The Oslo messaging API supports RPC and notifications over a number of
        different messaging transports.
* License: Apache License, Version 2.0 * Documentation: https://docs.openstack.org/oslo.messaging/latest/ * Source: https://opendev.org/openstack/oslo.messaging * Bugs: https://bugs.launchpad.net/oslo.messaging * Release notes: https://docs.openstack.org/releasenotes/oslo.messaging/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.8 Provides-Extra: amqp1 Provides-Extra: kafka Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686539.0 oslo.messaging-14.9.0/oslo.messaging.egg-info/SOURCES.txt0000664000175000017500000002127200000000000023072 0ustar00zuulzuul00000000000000.coveragerc .pre-commit-config.yaml .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/admin/AMQP1.0.rst doc/source/admin/drivers.rst doc/source/admin/index.rst doc/source/admin/kafka.rst doc/source/admin/rabbit.rst doc/source/configuration/conffixture.rst doc/source/configuration/index.rst doc/source/configuration/opts.rst doc/source/contributor/contributing.rst doc/source/contributor/driver-dev-guide.rst doc/source/contributor/index.rst doc/source/contributor/supported-messaging-drivers.rst doc/source/reference/exceptions.rst doc/source/reference/executors.rst doc/source/reference/index.rst doc/source/reference/notification_driver.rst doc/source/reference/notification_listener.rst doc/source/reference/notifier.rst doc/source/reference/rpcclient.rst doc/source/reference/serializer.rst doc/source/reference/server.rst doc/source/reference/target.rst doc/source/reference/transport.rst doc/source/user/FAQ.rst doc/source/user/history.rst doc/source/user/index.rst etc/routing_notifier.yaml.sample oslo.messaging.egg-info/PKG-INFO oslo.messaging.egg-info/SOURCES.txt oslo.messaging.egg-info/dependency_links.txt oslo.messaging.egg-info/entry_points.txt oslo.messaging.egg-info/not-zip-safe oslo.messaging.egg-info/pbr.json oslo.messaging.egg-info/requires.txt oslo.messaging.egg-info/top_level.txt oslo_messaging/__init__.py oslo_messaging/_utils.py oslo_messaging/conffixture.py oslo_messaging/dispatcher.py oslo_messaging/exceptions.py oslo_messaging/opts.py oslo_messaging/serializer.py oslo_messaging/server.py oslo_messaging/target.py oslo_messaging/transport.py oslo_messaging/version.py oslo_messaging/_drivers/__init__.py oslo_messaging/_drivers/amqp.py oslo_messaging/_drivers/amqpdriver.py oslo_messaging/_drivers/base.py oslo_messaging/_drivers/common.py oslo_messaging/_drivers/impl_amqp1.py oslo_messaging/_drivers/impl_fake.py oslo_messaging/_drivers/impl_kafka.py oslo_messaging/_drivers/impl_rabbit.py oslo_messaging/_drivers/pool.py oslo_messaging/_drivers/amqp1_driver/__init__.py oslo_messaging/_drivers/amqp1_driver/addressing.py 
oslo_messaging/_drivers/amqp1_driver/controller.py oslo_messaging/_drivers/amqp1_driver/eventloop.py oslo_messaging/_drivers/amqp1_driver/opts.py oslo_messaging/_drivers/amqp1_driver/oslo_messaging_amqp_driver_overview.rst oslo_messaging/_drivers/kafka_driver/__init__.py oslo_messaging/_drivers/kafka_driver/kafka_options.py oslo_messaging/_metrics/__init__.py oslo_messaging/_metrics/client.py oslo_messaging/hacking/__init__.py oslo_messaging/hacking/checks.py oslo_messaging/notify/__init__.py oslo_messaging/notify/_impl_log.py oslo_messaging/notify/_impl_noop.py oslo_messaging/notify/_impl_routing.py oslo_messaging/notify/_impl_test.py oslo_messaging/notify/dispatcher.py oslo_messaging/notify/filter.py oslo_messaging/notify/listener.py oslo_messaging/notify/log_handler.py oslo_messaging/notify/logger.py oslo_messaging/notify/messaging.py oslo_messaging/notify/middleware.py oslo_messaging/notify/notifier.py oslo_messaging/rpc/__init__.py oslo_messaging/rpc/client.py oslo_messaging/rpc/dispatcher.py oslo_messaging/rpc/server.py oslo_messaging/rpc/transport.py oslo_messaging/tests/__init__.py oslo_messaging/tests/test_config_opts_proxy.py oslo_messaging/tests/test_exception_serialization.py oslo_messaging/tests/test_expected_exceptions.py oslo_messaging/tests/test_fixture.py oslo_messaging/tests/test_opts.py oslo_messaging/tests/test_target.py oslo_messaging/tests/test_transport.py oslo_messaging/tests/test_urls.py oslo_messaging/tests/test_utils.py oslo_messaging/tests/utils.py oslo_messaging/tests/drivers/__init__.py oslo_messaging/tests/drivers/test_amqp_driver.py oslo_messaging/tests/drivers/test_impl_kafka.py oslo_messaging/tests/drivers/test_impl_rabbit.py oslo_messaging/tests/drivers/test_pool.py oslo_messaging/tests/functional/__init__.py oslo_messaging/tests/functional/test_functional.py oslo_messaging/tests/functional/test_rabbitmq.py oslo_messaging/tests/functional/utils.py oslo_messaging/tests/functional/notify/__init__.py oslo_messaging/tests/functional/notify/test_logger.py oslo_messaging/tests/notify/__init__.py oslo_messaging/tests/notify/test_dispatcher.py oslo_messaging/tests/notify/test_listener.py oslo_messaging/tests/notify/test_log_handler.py oslo_messaging/tests/notify/test_logger.py oslo_messaging/tests/notify/test_middleware.py oslo_messaging/tests/notify/test_notifier.py oslo_messaging/tests/rpc/__init__.py oslo_messaging/tests/rpc/test_client.py oslo_messaging/tests/rpc/test_dispatcher.py oslo_messaging/tests/rpc/test_server.py releasenotes/notes/RPC-call-monitoring-7977f047d069769a.yaml releasenotes/notes/add-enable_cancel_on_failover-22ac472b93dd3a23.yaml releasenotes/notes/add-ping-endpoint.yaml releasenotes/notes/add-quorum-control-configurations-beed79811ff97ba2.yaml releasenotes/notes/add-ssl-support-for-kafka.yaml releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml releasenotes/notes/adding_support_for_quorum_queues-3101d055b492289e.yaml releasenotes/notes/allow-transient-no-expire-ce7ae9d8c9d15751.yaml releasenotes/notes/auto-deleted-failed-quorum-ca6a3923c3ed999a.yaml releasenotes/notes/blocking-executor-deprecated-895146c1c3bf2f51.yaml releasenotes/notes/blocking-executor-support-dropped-a3bc74c6825863f0.yaml releasenotes/notes/bug-1917645-rabbit-use-retry-parameter-for-notifications-3f7c508ab4437579.yaml releasenotes/notes/bug-1981093-kafka-dont-log-in-tpool-execute-fa50ceee2d55ebae.yaml releasenotes/notes/bug-1993149-e8b231791b65e938.yaml releasenotes/notes/bug-2068630-6ff92f213bc4eca0.yaml 
releasenotes/notes/bump-amqp-version-due-to-tls-issue-e877b152eb101c15.yaml releasenotes/notes/connection_ttl-2cf0fe6e1ab8c73c.yaml releasenotes/notes/declare_fallback_durable_exchange-0db677de4fdf1e78.yaml releasenotes/notes/deprecate-ZeroMQ-driver-a8af25aaba867c5b.yaml releasenotes/notes/deprecate-the-option-heartbeat_in_pthread-from-rabbit-driver-5757adb83701caa5.yaml releasenotes/notes/deprecated-amqp1-driver-4bf57449bc2b7aad.yaml releasenotes/notes/disable-mandatory-flag-a6210a534f3853f0.yaml releasenotes/notes/do-not-run-heartbeat-in-pthread-by-default-42e1299f59b841f8.yaml releasenotes/notes/drop-python27-support-5ef2f365d8930483.yaml releasenotes/notes/enforce_fips_mode-07dd259eb8a73c2b.yaml releasenotes/notes/fix-access_policy-deafult-a6954a147cb002b0.yaml releasenotes/notes/get-rpc-client-0b4aa62160864b29.yaml releasenotes/notes/get-rpc-helpers-cls-8911826ac08aef2a.yaml releasenotes/notes/get_rpc_transport-4aa3511ad9754a60.yaml releasenotes/notes/handle-missing-queue-553a803f94976be7.yaml releasenotes/notes/heartbeat-rate-3-7ada9edbccc11a3f.yaml releasenotes/notes/kafka-client-library-change-fe16d5a34550db7f.yaml releasenotes/notes/no-log-if-ignore-errors-e2223b8a646b4c40.yaml releasenotes/notes/option-rabbitmq-max_retries-has-been-deprecated-471f66a9e6d672a2.yaml releasenotes/notes/oslo-metrics-support-fe16343a637cc14b.yaml releasenotes/notes/pika-driver-has-been-deprecated-e2407fa53c91fe5c.yaml releasenotes/notes/rabbit-no-wait-for-ack-9e5de3e1320d7660.yaml releasenotes/notes/rabbit_queue_manager-363209285cbbe257.yaml releasenotes/notes/rabbit_quorum_typo-9c06a9fd8d767f53.yaml releasenotes/notes/rabbit_transient_quorum-fc3c3f88ead90034.yaml releasenotes/notes/removal-deprecated-options-6d4c5db90525c52d.yaml releasenotes/notes/remove-RequestContextSerializer-234c0496a7e0376b.yaml releasenotes/notes/remove-ZeroMQ-driver-e9e0bbbb7bd4f5e6.yaml releasenotes/notes/remove-pika-1bae204ced2521a3.yaml releasenotes/notes/reply_q-timeout-e3c3bae636e8bc74.yaml releasenotes/notes/retry-support-07996ef04dda9482.yaml releasenotes/notes/run-heartbeat-in-pthread-by-default-28637b41ebf500dc.yaml releasenotes/notes/stream-c3dd31ee98f6bbd7.yaml releasenotes/notes/undeprecate_heartbeat_in_pthread-48e2c1fc008cf208.yaml releasenotes/notes/use-extras-for-optional-deps-2a00e8007ef7a629.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po tools/functions.sh tools/messages_length.yaml tools/setup-scenario-env.sh tools/simulator.py tools/test-setup.sh././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686538.0 oslo.messaging-14.9.0/oslo.messaging.egg-info/dependency_links.txt0000664000175000017500000000000100000000000025250 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686538.0 
oslo.messaging-14.9.0/oslo.messaging.egg-info/entry_points.txt0000664000175000017500000000167100000000000024505 0ustar00zuulzuul00000000000000[console_scripts] oslo-messaging-send-notification = oslo_messaging.notify.notifier:_send_notification [oslo.config.opts] oslo.messaging = oslo_messaging.opts:list_opts [oslo.messaging.drivers] amqp = oslo_messaging._drivers.impl_amqp1:ProtonDriver fake = oslo_messaging._drivers.impl_fake:FakeDriver kafka = oslo_messaging._drivers.impl_kafka:KafkaDriver kombu = oslo_messaging._drivers.impl_rabbit:RabbitDriver rabbit = oslo_messaging._drivers.impl_rabbit:RabbitDriver [oslo.messaging.executors] eventlet = futurist:GreenThreadPoolExecutor threading = futurist:ThreadPoolExecutor [oslo.messaging.notify.drivers] log = oslo_messaging.notify._impl_log:LogDriver messaging = oslo_messaging.notify.messaging:MessagingDriver messagingv2 = oslo_messaging.notify.messaging:MessagingV2Driver noop = oslo_messaging.notify._impl_noop:NoOpDriver routing = oslo_messaging.notify._impl_routing:RoutingDriver test = oslo_messaging.notify._impl_test:TestDriver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686538.0 oslo.messaging-14.9.0/oslo.messaging.egg-info/not-zip-safe0000664000175000017500000000000100000000000023430 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686538.0 oslo.messaging-14.9.0/oslo.messaging.egg-info/pbr.json0000664000175000017500000000005700000000000022662 0ustar00zuulzuul00000000000000{"git_version": "d601f7aa", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686538.0 oslo.messaging-14.9.0/oslo.messaging.egg-info/requires.txt0000664000175000017500000000114000000000000023576 0ustar00zuulzuul00000000000000PyYAML>=3.13 WebOb>=1.7.1 amqp>=2.5.2 cachetools>=2.0.0 debtcollector>=1.2.0 futurist>=1.2.0 kombu>=4.6.6 oslo.config>=5.2.0 oslo.context>=5.3.0 oslo.log>=3.36.0 oslo.metrics>=0.2.1 oslo.middleware>=3.31.0 oslo.serialization>=2.18.0 oslo.service>=1.24.0 oslo.utils>=3.37.0 pbr>=2.0.0 stevedore>=1.20.0 [amqp1] pyngus>=2.2.0 [kafka] confluent-kafka>=1.3.0 [test] bandit<1.8.0,>=1.7.0 confluent-kafka>=1.3.0 coverage>=4.0 eventlet>=0.23.0 fixtures>=3.0.0 greenlet>=0.4.15 hacking<=6.2.0,>=6.1.0 oslotest>=3.2.0 pifpaf>=2.2.0 pre-commit>=2.6.0 pyngus>=2.2.0 stestr>=2.0.0 testscenarios>=0.4 testtools>=2.2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686538.0 oslo.messaging-14.9.0/oslo.messaging.egg-info/top_level.txt0000664000175000017500000000001700000000000023732 0ustar00zuulzuul00000000000000oslo_messaging ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1226723 oslo.messaging-14.9.0/oslo_messaging/0000775000175000017500000000000000000000000017571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/__init__.py0000664000175000017500000000140100000000000021676 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from .exceptions import * from .notify import * from .rpc import * from .serializer import * from .server import * from .target import * from .transport import * ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1266725 oslo.messaging-14.9.0/oslo_messaging/_drivers/0000775000175000017500000000000000000000000021406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/__init__.py0000664000175000017500000000000000000000000023505 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/amqp.py0000664000175000017500000001026600000000000022723 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 - 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utilities for drivers based on the AMQPDriverBase. This module contains utility code used by drivers based on the AMQPDriverBase class. Specifically this includes the impl_rabbit driver. """ import collections import uuid from oslo_config import cfg from oslo_messaging._drivers import common as rpc_common amqp_opts = [ cfg.BoolOpt('amqp_durable_queues', default=False, help='Use durable queues in AMQP. 
If rabbit_quorum_queue ' 'is enabled, queues will be durable and this value will ' 'be ignored.'), cfg.BoolOpt('amqp_auto_delete', default=False, deprecated_group='DEFAULT', help='Auto-delete queues in AMQP.'), ] UNIQUE_ID = '_unique_id' class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call.""" def __init__(self, **kwargs): self.msg_id = kwargs.pop('msg_id', None) self.reply_q = kwargs.pop('reply_q', None) super(RpcContext, self).__init__(**kwargs) def deepcopy(self): values = self.to_dict() values['conf'] = self.conf values['msg_id'] = self.msg_id values['reply_q'] = self.reply_q return self.__class__(**values) def unpack_context(msg): """Unpack context from msg.""" context_dict = {} for key in list(msg.keys()): key = str(key) if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value context_dict['msg_id'] = msg.pop('_msg_id', None) context_dict['reply_q'] = msg.pop('_reply_q', None) context_dict['client_timeout'] = msg.pop('_timeout', None) return RpcContext.from_dict(context_dict) def pack_context(msg, context): """Pack context into msg. Values for message keys need to be less than 255 chars, so we pull context out into a bunch of separate keys. If we want to support more arguments in rabbit messages, we may want to do the same for args at some point. """ if isinstance(context, dict): context_d = context.items() else: context_d = context.to_dict().items() msg.update(('_context_%s' % key, value) for (key, value) in context_d) class _MsgIdCache(object): """This class checks any duplicate messages.""" # NOTE: This value is considered can be a configuration item, but # it is not necessary to change its value in most cases, # so let this value as static for now. DUP_MSG_CHECK_SIZE = 16 def __init__(self, **kwargs): self.prev_msgids = collections.deque([], maxlen=self.DUP_MSG_CHECK_SIZE) def check_duplicate_message(self, message_data): """AMQP consumers may read same message twice when exceptions occur before ack is returned. This method prevents doing it. """ try: msg_id = message_data.pop(UNIQUE_ID) except KeyError: return if msg_id in self.prev_msgids: raise rpc_common.DuplicateMessageError(msg_id=msg_id) return msg_id def add(self, msg_id): if msg_id and msg_id not in self.prev_msgids: self.prev_msgids.append(msg_id) def _add_unique_id(msg): """Add unique_id for checking duplicate messages.""" unique_id = uuid.uuid4().hex msg.update({UNIQUE_ID: unique_id}) class AMQPDestinationNotFound(Exception): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1266725 oslo.messaging-14.9.0/oslo_messaging/_drivers/amqp1_driver/0000775000175000017500000000000000000000000024000 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/amqp1_driver/__init__.py0000664000175000017500000000000000000000000026077 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/amqp1_driver/addressing.py0000664000175000017500000002564600000000000026512 0ustar00zuulzuul00000000000000# Copyright 2016, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utilities that map from a Target address to a proper AMQP 1.0 address. This module defines a utility class that translates a high level oslo.messaging address (Target) into the message-level address used on the message bus. This translation may be statically configured or determined when the connection to the message bus is made. The Target members that are used to generate the address are: * exchange * topic * server flag * fanout flag In addition a 'service' tag is associated with the address. This tag determines the service associated with an address (e.g. rpc or notification) so that traffic can be partitioned based on its use. """ import abc import logging from oslo_messaging.target import Target __all__ = [ "keyify", "AddresserFactory", "SERVICE_RPC", "SERVICE_NOTIFY" ] SERVICE_RPC = 0 SERVICE_NOTIFY = 1 LOG = logging.getLogger(__name__) def keyify(address, service=SERVICE_RPC): """Create a hashable key from a Target and service that will uniquely identify the generated address. This key is used to map the abstract oslo.messaging address to its corresponding AMQP link(s). This mapping may be done before the connection is established. """ if isinstance(address, Target): # service is important because the resolved address may be # different based on whether or not this Target is used for # notifications or RPC return ("Target:{t={%s} e={%s} s={%s} f={%s} service={%s}}" % (address.topic, address.exchange, address.server, address.fanout, service)) else: # absolute address can be used without modification return "String:{%s}" % address class Addresser(object): """Base class message bus address generator. Used to convert an oslo.messaging address into an AMQP 1.0 address string used over the connection to the message bus. 
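As an illustrative summary of the dispatch rule that resolve() implements
below (a sketch of existing behavior, not an extension of it):

    resolve(Target(topic='t', fanout=True), service)  -> multicast_address()
    resolve(Target(topic='t', server='s'), service)   -> unicast_address()
    resolve(Target(topic='t'), service)               -> anycast_address()
    resolve('already.resolved.address', service)      -> returned unchanged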
""" def __init__(self, default_exchange): self._default_exchange = default_exchange def resolve(self, target, service): if not isinstance(target, Target): # an already resolved address return target # Return a link address for a given target if target.fanout: return self.multicast_address(target, service) elif target.server: return self.unicast_address(target, service) else: return self.anycast_address(target, service) @abc.abstractmethod def multicast_address(self, target, service): """Address used to broadcast to all subscribers """ @abc.abstractmethod def unicast_address(self, target, service): """Address used to target a specific subscriber (direct) """ @abc.abstractmethod def anycast_address(self, target, service): """Address used for shared subscribers (competing consumers) """ def _concat(self, sep, items): return sep.join(filter(bool, items)) class LegacyAddresser(Addresser): """Legacy addresses are in the following format: multicast: '$broadcast_prefix[.$vhost].$exchange.$topic.all' unicast: '$server_prefix[.$vhost].$exchange.$topic.$server' anycast: '$group_prefix[.$vhost].$exchange.$topic' Legacy addresses do not distinguish RPC traffic from Notification traffic """ def __init__(self, default_exchange, server_prefix, broadcast_prefix, group_prefix, vhost): super(LegacyAddresser, self).__init__(default_exchange) self._server_prefix = server_prefix self._broadcast_prefix = broadcast_prefix self._group_prefix = group_prefix self._vhost = vhost def multicast_address(self, target, service): return self._concat(".", [self._broadcast_prefix, self._vhost, target.exchange or self._default_exchange, target.topic, "all"]) def unicast_address(self, target, service=SERVICE_RPC): return self._concat(".", [self._server_prefix, self._vhost, target.exchange or self._default_exchange, target.topic, target.server]) def anycast_address(self, target, service=SERVICE_RPC): return self._concat(".", [self._group_prefix, self._vhost, target.exchange or self._default_exchange, target.topic]) # for debug: def _is_multicast(self, address): return address.startswith(self._broadcast_prefix) def _is_unicast(self, address): return address.startswith(self._server_prefix) def _is_anycast(self, address): return address.startswith(self._group_prefix) def _is_service(self, address, service): # legacy addresses are the same for RPC or Notifications return True class RoutableAddresser(Addresser): """Routable addresses have different formats based their use. It starts with a prefix that is determined by the type of traffic (RPC or Notifications). The prefix is followed by a description of messaging delivery semantics. The delivery may be one of: 'multicast', 'unicast', or 'anycast'. The delivery semantics are followed by information pulled from the Target. 
The template is: $prefix/$semantics[/$vhost]/$exchange/$topic[/$server] Examples based on the default prefix and semantic values: rpc-unicast: "openstack.org/om/rpc/unicast/my-exchange/my-topic/my-server" notify-anycast: "openstack.org/om/notify/anycast/my-vhost/exchange/topic" """ def __init__(self, default_exchange, rpc_exchange, rpc_prefix, notify_exchange, notify_prefix, unicast_tag, multicast_tag, anycast_tag, vhost): super(RoutableAddresser, self).__init__(default_exchange) if not self._default_exchange: self._default_exchange = "openstack" # templates for address generation: self._vhost = vhost _rpc = rpc_prefix + "/" self._rpc_prefix = _rpc self._rpc_unicast = _rpc + unicast_tag self._rpc_multicast = _rpc + multicast_tag self._rpc_anycast = _rpc + anycast_tag _notify = notify_prefix + "/" self._notify_prefix = _notify self._notify_unicast = _notify + unicast_tag self._notify_multicast = _notify + multicast_tag self._notify_anycast = _notify + anycast_tag self._exchange = [ # SERVICE_RPC: rpc_exchange or self._default_exchange or 'rpc', # SERVICE_NOTIFY: notify_exchange or self._default_exchange or 'notify' ] def multicast_address(self, target, service=SERVICE_RPC): if service == SERVICE_RPC: prefix = self._rpc_multicast else: prefix = self._notify_multicast return self._concat("/", [prefix, self._vhost, target.exchange or self._exchange[service], target.topic]) def unicast_address(self, target, service=SERVICE_RPC): if service == SERVICE_RPC: prefix = self._rpc_unicast else: prefix = self._notify_unicast return self._concat("/", [prefix, self._vhost, target.exchange or self._exchange[service], target.topic, target.server]) def anycast_address(self, target, service=SERVICE_RPC): if service == SERVICE_RPC: prefix = self._rpc_anycast else: prefix = self._notify_anycast return self._concat("/", [prefix, self._vhost, target.exchange or self._exchange[service], target.topic]) # for debug: def _is_multicast(self, address): return (address.startswith(self._rpc_multicast) or address.startswith(self._notify_multicast)) def _is_unicast(self, address): return (address.startswith(self._rpc_unicast) or address.startswith(self._notify_unicast)) def _is_anycast(self, address): return (address.startswith(self._rpc_anycast) or address.startswith(self._notify_anycast)) def _is_service(self, address, service): return address.startswith(self._rpc_prefix if service == SERVICE_RPC else self._notify_prefix) class AddresserFactory(object): """Generates the proper Addresser based on configuration and the type of message bus the driver is connected to. """ def __init__(self, default_exchange, mode, **kwargs): self._default_exchange = default_exchange self._mode = mode self._kwargs = kwargs def __call__(self, remote_properties, vhost=None): # for backwards compatibility use legacy if dynamic and we're connected # to qpidd or we cannot identify the message bus. This can be # overridden via the configuration. 
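# Illustrative examples of the selection below (the 'qpid-dispatch-router'
# product string is an assumption about router peers, not taken from this
# file): mode='legacy' always yields a LegacyAddresser; mode='dynamic'
# yields a LegacyAddresser when the peer reports product 'qpid-cpp' (or
# reports no product, since 'qpid-cpp' is the default used below), and a
# RoutableAddresser for any other product, e.g. 'qpid-dispatch-router'.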
product = remote_properties.get('product', 'qpid-cpp') if self._mode == 'legacy' or (self._mode == 'dynamic' and product == 'qpid-cpp'): return LegacyAddresser(self._default_exchange, self._kwargs['legacy_server_prefix'], self._kwargs['legacy_broadcast_prefix'], self._kwargs['legacy_group_prefix'], vhost) else: return RoutableAddresser(self._default_exchange, self._kwargs.get("rpc_exchange"), self._kwargs["rpc_prefix"], self._kwargs.get("notify_exchange"), self._kwargs["notify_prefix"], self._kwargs["unicast"], self._kwargs["multicast"], self._kwargs["anycast"], vhost) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/amqp1_driver/controller.py0000664000175000017500000015361600000000000026551 0ustar00zuulzuul00000000000000# Copyright 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Controller that manages the interface between the driver and the messaging service. This module defines a Controller class that is responsible for performing messaging-related operations (Tasks) requested by the driver, and for managing the connection to the messaging service. The Controller creates a background thread which performs all messaging operations and socket I/O. The Controller's messaging logic is executed in the background thread via lambda functions scheduled by the Controller. """ import abc import collections import logging import os import platform import queue import random import sys import threading import time import uuid from oslo_utils import eventletutils import proton import pyngus from oslo_messaging._drivers.amqp1_driver.addressing import AddresserFactory from oslo_messaging._drivers.amqp1_driver.addressing import keyify from oslo_messaging._drivers.amqp1_driver.addressing import SERVICE_NOTIFY from oslo_messaging._drivers.amqp1_driver.addressing import SERVICE_RPC from oslo_messaging._drivers.amqp1_driver import eventloop from oslo_messaging import exceptions from oslo_messaging.target import Target from oslo_messaging import transport LOG = logging.getLogger(__name__) class Task(object): """Run a command on the eventloop thread, wait until it completes """ @abc.abstractmethod def wait(self): """Called by the client thread to wait for the operation to complete. The implementation may optionally return a value. """ @abc.abstractmethod def _execute(self, controller): """This method will be run on the eventloop thread to perform the messaging operation. """ class SubscribeTask(Task): """A task that creates a subscription to the given target. Messages arriving from the target are given to the listener. 
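A minimal usage sketch (assuming a Controller instance named 'controller'
and a listener exposing the 'id' and 'incoming' attributes used below):

    task = SubscribeTask(target, listener, notifications=False)
    controller.add_task(task)
    task.wait()  # returns once the subscription was set up on the eventloop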
""" def __init__(self, target, listener, notifications=False): super(SubscribeTask, self).__init__() self._target = target() # mutable - need a copy self._subscriber_id = listener.id self._in_queue = listener.incoming self._service = SERVICE_NOTIFY if notifications else SERVICE_RPC self._wakeup = eventletutils.Event() def wait(self): self._wakeup.wait() def _execute(self, controller): controller.subscribe(self) self._wakeup.set() class SendTask(Task): """This is the class used by the Controller to send messages to a given destination. """ def __init__(self, name, message, target, deadline, retry, wait_for_ack, notification=False): super(SendTask, self).__init__() self.name = name # note: target can be either a Target class or a string # target is mutable - make copy self.target = target() if isinstance(target, Target) else target self.message = message self.deadline = deadline self.wait_for_ack = wait_for_ack self.service = SERVICE_NOTIFY if notification else SERVICE_RPC self.timer = None self._retry = None if retry is None or retry < 0 else retry self._wakeup = eventletutils.Event() self._error = None self._sender = None def wait(self): self._wakeup.wait() return self._error def _execute(self, controller): if self.deadline: # time out the send self.timer = controller.processor.alarm(self._on_timeout, self.deadline) controller.send(self) def _prepare(self, sender): """Called immediately before the message is handed off to the i/o system. This implies that the sender link is up. """ self._sender = sender def _on_ack(self, state, info): """If wait_for_ack is True, this is called by the eventloop thread when the ack/nack is received from the peer. If wait_for_ack is False this is called by the eventloop right after the message is written to the link. In the last case state will always be set to ACCEPTED. """ if state != pyngus.SenderLink.ACCEPTED: msg = ("{name} message send to {target} failed: remote" " disposition: {disp}, info:" "{info}".format(name=self.name, target=self.target, disp=state, info=info)) self._error = exceptions.MessageDeliveryFailure(msg) LOG.warning("%s", msg) self._cleanup() self._wakeup.set() def _on_timeout(self): """Invoked by the eventloop when our timer expires """ self.timer = None self._sender and self._sender.cancel_send(self) msg = ("{name} message sent to {target} failed: timed" " out".format(name=self.name, target=self.target)) LOG.warning("%s", msg) # Only raise a MessagingTimeout if the caller has explicitly specified # a timeout. self._error = exceptions.MessagingTimeout(msg) \ if self.message.ttl else \ exceptions.MessageDeliveryFailure(msg) self._cleanup() self._wakeup.set() def _on_error(self, description): """Invoked by the eventloop if the send operation fails for reasons other than timeout and nack. """ msg = ("{name} message sent to {target} failed:" " {reason}".format(name=self.name, target=self.target, reason=description)) LOG.warning("%s", msg) self._error = exceptions.MessageDeliveryFailure(msg) self._cleanup() self._wakeup.set() def _cleanup(self): self._sender = None if self.timer: self.timer.cancel() self.timer = None @property def _can_retry(self): # has the retry count expired? if self._retry is not None: self._retry -= 1 if self._retry < 0: return False return True class RPCCallTask(SendTask): """Performs an RPC Call. Sends the request and waits for a response from the destination. 
""" def __init__(self, target, message, deadline, retry, wait_for_ack): super(RPCCallTask, self).__init__("RPC Call", message, target, deadline, retry, wait_for_ack) self._reply_link = None self._reply_msg = None self._msg_id = None def wait(self): error = super(RPCCallTask, self).wait() return error or self._reply_msg def _prepare(self, sender): super(RPCCallTask, self)._prepare(sender) # reserve a message id for mapping the received response if self._msg_id: # already set so this is a re-transmit. To be safe cancel the old # msg_id and allocate a fresh one. self._reply_link.cancel_response(self._msg_id) self._reply_link = sender._reply_link rl = self._reply_link self._msg_id = rl.prepare_for_response(self.message, self._on_reply) def _on_reply(self, message): # called if/when the reply message arrives self._reply_msg = message self._cleanup() self._wakeup.set() def _on_ack(self, state, info): if state != pyngus.SenderLink.ACCEPTED: super(RPCCallTask, self)._on_ack(state, info) # must wait for reply if ACCEPTED def _cleanup(self): if self._msg_id: self._reply_link.cancel_response(self._msg_id) self._msg_id = None self._reply_link = None super(RPCCallTask, self)._cleanup() class RPCMonitoredCallTask(RPCCallTask): """An RPC call which expects a periodic heartbeat until the response is received. There are two timeouts: deadline - overall hard timeout, implemented in RPCCallTask call_monitor_timeout - keep alive timeout, reset when heartbeat arrives """ def __init__(self, target, message, deadline, call_monitor_timeout, retry, wait_for_ack): super(RPCMonitoredCallTask, self).__init__(target, message, deadline, retry, wait_for_ack) assert call_monitor_timeout is not None # nosec self._monitor_timeout = call_monitor_timeout self._monitor_timer = None self._set_alarm = None def _execute(self, controller): self._set_alarm = controller.processor.defer self._monitor_timer = self._set_alarm(self._call_timeout, self._monitor_timeout) super(RPCMonitoredCallTask, self)._execute(controller) def _call_timeout(self): # monitor_timeout expired self._monitor_timer = None self._sender and self._sender.cancel_send(self) msg = ("{name} message sent to {target} failed: call monitor timed" " out".format(name=self.name, target=self.target)) LOG.warning("%s", msg) self._error = exceptions.MessagingTimeout(msg) self._cleanup() self._wakeup.set() def _on_reply(self, message): # if reply is null, then this is the call monitor heartbeat if message.body is None: self._monitor_timer.cancel() self._monitor_timer = self._set_alarm(self._call_timeout, self._monitor_timeout) else: super(RPCMonitoredCallTask, self)._on_reply(message) def _cleanup(self): self._set_alarm = None if self._monitor_timer: self._monitor_timer.cancel() self._monitor_timer = None super(RPCMonitoredCallTask, self)._cleanup() class MessageDispositionTask(Task): """A task that updates the message disposition as accepted or released for a Server """ def __init__(self, disposition, released=False): super(MessageDispositionTask, self).__init__() self._disposition = disposition self._released = released def wait(self): # disposition update does not have to block the sender since there is # no result to pend for. This avoids a thread context switch with # every RPC call pass def _execute(self, controller): try: self._disposition(self._released) except Exception as e: # there's really nothing we can do about a failed disposition. 
LOG.exception("Message acknowledgment failed: %s", e) class Sender(pyngus.SenderEventHandler): """A link for sending to a particular destination on the message bus. """ def __init__(self, destination, scheduler, delay, service): super(Sender, self).__init__() self._destination = destination self._service = service self._address = None self._link = None self._scheduler = scheduler self._delay = delay # for re-connecting/re-transmitting # holds all pending SendTasks self._pending_sends = collections.deque() # holds all messages sent but not yet acked self._unacked = set() self._reply_link = None self._connection = None self._resend_timer = None @property def pending_messages(self): return len(self._pending_sends) @property def unacked_messages(self): return len(self._unacked) def attach(self, connection, reply_link, addresser): """Open the link. Called by the Controller when the AMQP connection becomes active. """ self._connection = connection self._reply_link = reply_link self._address = addresser.resolve(self._destination, self._service) LOG.debug("Sender %s attached", self._address) self._link = self._open_link() def detach(self): """Close the link. Called by the controller when shutting down or in response to a close requested by the remote. May be re-attached later (after a reset is done) """ LOG.debug("Sender %s detached", self._address) self._connection = None self._reply_link = None if self._resend_timer: self._resend_timer.cancel() self._resend_timer = None if self._link: self._link.close() def reset(self, reason="Link reset"): """Called by the controller on connection failover. Release all link resources, abort any in-flight messages, and check the retry limit on all pending send requests. """ self._address = None self._connection = None self._reply_link = None if self._link: self._link.destroy() self._link = None self._abort_unacked(reason) self._check_retry_limit(reason) def destroy(self, reason="Link destroyed"): """Destroy the sender and all pending messages. Called on driver shutdown. """ LOG.debug("Sender %s destroyed", self._address) self.reset(reason) self._abort_pending(reason) def send_message(self, send_task): """Send a message out the link. """ if not self._can_send or self._pending_sends: self._pending_sends.append(send_task) else: self._send(send_task) def cancel_send(self, send_task): """Attempts to cancel a send request. It is possible that the send has already completed, so this is best-effort. """ # may be in either list, or none self._unacked.discard(send_task) try: self._pending_sends.remove(send_task) except ValueError: pass # Pyngus callbacks: def sender_active(self, sender_link): LOG.debug("Sender %s active", self._address) self._send_pending() def credit_granted(self, sender_link): pass def sender_remote_closed(self, sender_link, pn_condition): # The remote has initiated a close. 
This could happen when the message # bus is shutting down, or it detected an error LOG.warning("Sender %(addr)s failed due to remote initiated close:" " condition=%(cond)s", {'addr': self._address, 'cond': pn_condition}) self._link.close() # sender_closed() will be called once the link completes closing def sender_closed(self, sender_link): self._handle_sender_closed() def sender_failed(self, sender_link, error): """Protocol error occurred.""" LOG.warning("Sender %(addr)s failed error=%(error)s", {'addr': self._address, 'error': error}) self._handle_sender_closed(str(error)) # end Pyngus callbacks def _handle_sender_closed(self, reason="Sender closed"): self._abort_unacked(reason) if self._connection: # still attached, so attempt to restart the link self._check_retry_limit(reason) self._scheduler.defer(self._reopen_link, self._delay) def _check_retry_limit(self, reason): # Called on recoverable connection or link failure. Remove any pending # sends that have exhausted their retry count: expired = set() for send_task in self._pending_sends: if not send_task._can_retry: expired.add(send_task) send_task._on_error("Message send failed: %s" % reason) while expired: self._pending_sends.remove(expired.pop()) def _abort_unacked(self, error): # fail all messages that have been sent to the message bus and have not # been acked yet while self._unacked: send_task = self._unacked.pop() send_task._on_error("Message send failed: %s" % error) def _abort_pending(self, error): # fail all messages that have yet to be sent to the message bus while self._pending_sends: send_task = self._pending_sends.popleft() send_task._on_error("Message send failed: %s" % error) @property def _can_send(self): return self._link and self._link.active # acknowledge status _TIMED_OUT = pyngus.SenderLink.TIMED_OUT _ACCEPTED = pyngus.SenderLink.ACCEPTED _RELEASED = pyngus.SenderLink.RELEASED _MODIFIED = pyngus.SenderLink.MODIFIED def _send(self, send_task): send_task._prepare(self) send_task.message.address = self._address if send_task.wait_for_ack: self._unacked.add(send_task) def pyngus_callback(link, handle, state, info): # invoked when the message bus (n)acks this message if state == Sender._TIMED_OUT: # ignore pyngus timeout - we maintain our own timer # which will properly deal with this case return self._unacked.discard(send_task) if state == Sender._ACCEPTED: send_task._on_ack(Sender._ACCEPTED, info) elif (state == Sender._RELEASED or (state == Sender._MODIFIED and # assuming delivery-failed means in-doubt: not info.get("delivery-failed") and not info.get("undeliverable-here"))): # These states indicate that the message was never # forwarded beyond the next hop so they can be # re-transmitted without risk of duplication self._resend(send_task) else: # some error - let task figure it out... send_task._on_ack(state, info) self._link.send(send_task.message, delivery_callback=pyngus_callback, handle=self, deadline=send_task.deadline) else: # do not wait for ack self._link.send(send_task.message, delivery_callback=None, handle=self, deadline=send_task.deadline) send_task._on_ack(pyngus.SenderLink.ACCEPTED, {}) def _resend(self, send_task): # the message bus returned the message without forwarding it. Wait a # bit for other outstanding sends to finish - most likely ending up # here since they are all going to the same destination - then resend # this message if send_task._can_retry: # note well: once there is something on the pending list no further # messages will be sent (they will all queue up behind this one). 
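# Illustrative consequence (an inference from send_message() above): if
# sends A, B and C are all released by the bus, A is re-queued here first
# and B, C are appended behind it, so _resend_pending() later flushes them
# in their original order.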
self._pending_sends.append(send_task) if self._resend_timer is None: sched = self._scheduler # this will get the pending sends going again self._resend_timer = sched.defer(self._resend_pending, self._delay) else: send_task._on_error("Send retries exhausted") def _resend_pending(self): # run from the _resend_timer, attempt to resend pending messages self._resend_timer = None self._send_pending() def _send_pending(self): # flush all pending messages out if self._can_send: while self._pending_sends: self._send(self._pending_sends.popleft()) def _open_link(self): name = "openstack.org/om/sender/[%s]/%s" % (self._address, uuid.uuid4().hex) link = self._connection.create_sender(name=name, source_address=self._address, target_address=self._address, event_handler=self) link.open() return link def _reopen_link(self): if self._connection: if self._link: self._link.destroy() self._link = self._open_link() class Replies(pyngus.ReceiverEventHandler): """This is the receiving link for all RPC reply messages. Messages are routed to the proper incoming queue using the correlation-id header in the message. """ def __init__(self, connection, on_ready, on_down, capacity): self._correlation = {} # map of correlation-id to response queue self._on_ready = on_ready self._on_down = on_down rname = ("openstack.org/om/receiver/[rpc-response]/%s" % uuid.uuid4().hex) self._receiver = connection.create_receiver("rpc-response", event_handler=self, name=rname) # capacity determines the maximum number of reply messages this link is # willing to receive. As messages are received and capacity is # consumed, this driver will 'top up' the capacity back to max # capacity. This number should be large enough to avoid needlessly # flow-controlling the replies. self._capacity = capacity self._capacity_low = (capacity + 1) / 2 self._receiver.open() def detach(self): # close the link if self._receiver: self._receiver.close() def destroy(self): self._correlation.clear() if self._receiver: self._receiver.destroy() self._receiver = None def prepare_for_response(self, request, callback): """Apply a unique message identifier to this request message. This will be used to identify messages received in reply. The identifier is placed in the 'id' field of the request message. It is expected that the identifier will appear in the 'correlation-id' field of the corresponding response message. When the caller is done receiving replies, it must call cancel_response """ request.id = uuid.uuid4().hex # reply is placed on reply_queue self._correlation[request.id] = callback request.reply_to = self._receiver.source_address return request.id def cancel_response(self, msg_id): """Abort waiting for the response message corresponding to msg_id. This can be used if the request fails and no reply is expected. """ try: del self._correlation[msg_id] except KeyError: pass @property def active(self): return self._receiver and self._receiver.active # Pyngus ReceiverLink event callbacks: def receiver_active(self, receiver_link): """This is a Pyngus callback, invoked by Pyngus when the receiver_link has transitioned to the open state and is able to receive incoming messages. """ LOG.debug("Replies link active src=%s", self._receiver.source_address) receiver_link.add_capacity(self._capacity) self._on_ready() def receiver_remote_closed(self, receiver, pn_condition): """This is a Pyngus callback, invoked by Pyngus when the peer of this receiver link has initiated closing the connection. 
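The local close issued below completes the teardown; once the close is
final, receiver_closed() fires and reports the reply link as down via
the _on_down callback supplied by the Controller.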
""" if pn_condition: LOG.error("Reply subscription closed by peer: %s", pn_condition) receiver.close() def receiver_failed(self, receiver_link, error): """Protocol error occurred.""" LOG.error("Link to reply queue failed. error=%(error)s", {"error": error}) self._on_down() def receiver_closed(self, receiver_link): self._on_down() def message_received(self, receiver, message, handle): """This is a Pyngus callback, invoked by Pyngus when a new message arrives on this receiver link from the peer. """ key = message.correlation_id try: self._correlation[key](message) receiver.message_accepted(handle) except KeyError: LOG.warning("Can't find receiver for response msg id=%s, " "dropping!", key) receiver.message_modified(handle, True, True, None) # ensure we have enough credit if receiver.capacity <= self._capacity_low: receiver.add_capacity(self._capacity - receiver.capacity) class Server(pyngus.ReceiverEventHandler): """A group of links that receive messages from a set of addresses derived from a given target. Messages arriving on the links are placed on the 'incoming' queue. """ def __init__(self, target, incoming, scheduler, delay, capacity): self._target = target self._incoming = incoming self._addresses = [] self._capacity = capacity # credit per each link self._capacity_low = (capacity + 1) / 2 self._receivers = [] self._scheduler = scheduler self._delay = delay # for link re-attach self._connection = None self._reopen_scheduled = False def attach(self, connection): """Create receiver links over the given connection for all the configured addresses. """ self._connection = connection for a in self._addresses: name = "openstack.org/om/receiver/[%s]/%s" % (a, uuid.uuid4().hex) r = self._open_link(a, name) self._receivers.append(r) def detach(self): """Attempt a clean shutdown of the links""" self._connection = None self._addresses = [] for receiver in self._receivers: receiver.close() def reset(self): # destroy the links, but keep the addresses around since we may be # failing over. Since links are destroyed, this cannot be called from # any of the following ReceiverLink callbacks. self._connection = None self._addresses = [] self._reopen_scheduled = False for r in self._receivers: r.destroy() self._receivers = [] # Pyngus ReceiverLink event callbacks. Note that all of the Server's links # share this handler def receiver_remote_closed(self, receiver, pn_condition): """This is a Pyngus callback, invoked by Pyngus when the peer of this receiver link has initiated closing the connection. """ LOG.debug("Server subscription to %s remote detach", receiver.source_address) if pn_condition: vals = { "addr": receiver.source_address or receiver.target_address, "err_msg": pn_condition } LOG.error("Server subscription %(addr)s closed " "by peer: %(err_msg)s", vals) receiver.close() def receiver_failed(self, receiver_link, error): """Protocol error occurred.""" LOG.error("Listener link queue failed. error=%(error)s", {"error": error}) self.receiver_closed(receiver_link) def receiver_closed(self, receiver_link): LOG.debug("Server subscription to %s closed", receiver_link.source_address) # If still attached, attempt to re-start link if self._connection and not self._reopen_scheduled: LOG.debug("Server subscription reopen scheduled") self._reopen_scheduled = True self._scheduler.defer(self._reopen_links, self._delay) def message_received(self, receiver, message, handle): """This is a Pyngus callback, invoked by Pyngus when a new message arrives on this receiver link from the peer. 
""" def message_disposition(released=False): if receiver in self._receivers and not receiver.closed: if released: receiver.message_released(handle) else: receiver.message_accepted(handle) if receiver.capacity <= self._capacity_low: receiver.add_capacity(self._capacity - receiver.capacity) else: LOG.debug("Can't find receiver for settlement") qentry = {"message": message, "disposition": message_disposition} self._incoming.put(qentry) def _open_link(self, address, name): props = {"snd-settle-mode": "mixed"} r = self._connection.create_receiver(source_address=address, target_address=address, event_handler=self, name=name, properties=props) r.add_capacity(self._capacity) r.open() return r def _reopen_links(self): # attempt to re-establish any closed links LOG.debug("Server subscription reopening") self._reopen_scheduled = False if self._connection: for i in range(len(self._receivers)): link = self._receivers[i] if link.closed: addr = link.target_address name = link.name link.destroy() self._receivers[i] = self._open_link(addr, name) class RPCServer(Server): """Subscribes to RPC addresses""" def __init__(self, target, incoming, scheduler, delay, capacity): super(RPCServer, self).__init__(target, incoming, scheduler, delay, capacity) def attach(self, connection, addresser): # Generate the AMQP 1.0 addresses for the base class self._addresses = [ addresser.unicast_address(self._target, SERVICE_RPC), addresser.multicast_address(self._target, SERVICE_RPC), addresser.anycast_address(self._target, SERVICE_RPC) ] # now invoke the base class with the generated addresses super(RPCServer, self).attach(connection) class NotificationServer(Server): """Subscribes to Notification addresses""" def __init__(self, target, incoming, scheduler, delay, capacity): super(NotificationServer, self).__init__(target, incoming, scheduler, delay, capacity) def attach(self, connection, addresser): # Generate the AMQP 1.0 addresses for the base class self._addresses = [ addresser.anycast_address(self._target, SERVICE_NOTIFY) ] # now invoke the base class with the generated addresses super(NotificationServer, self).attach(connection) class Hosts(object): """An order list of TransportHost addresses. Connection failover progresses from one host to the next. The default realm comes from the configuration and is only used if no realm is present in the URL. """ def __init__(self, url, default_realm=None): self.virtual_host = url.virtual_host if url.hosts: self._entries = url.hosts[:] else: self._entries = [transport.TransportHost(hostname="localhost", port=5672)] for entry in self._entries: entry.port = entry.port or 5672 entry.username = entry.username entry.password = entry.password if default_realm and entry.username and '@' not in entry.username: entry.username = entry.username + '@' + default_realm self._current = random.randint(0, len(self._entries) - 1) # nosec @property def current(self): return self._entries[self._current] def next(self): if len(self._entries) > 1: self._current = (self._current + 1) % len(self._entries) return self.current def __repr__(self): return '' def __str__(self): r = ', vhost=%s' % self.virtual_host if self.virtual_host else '' return ", ".join(["%r" % th for th in self._entries]) + r class Controller(pyngus.ConnectionEventHandler): """Controls the connection to the AMQP messaging service. This object is the 'brains' of the driver. It maintains the logic for addressing, sending and receiving messages, and managing the connection. 
All messaging and I/O work is done on the Eventloop thread, allowing the driver to run asynchronously from the messaging clients. """ def __init__(self, url, default_exchange, config): self.processor = None self._socket_connection = None self._node = platform.node() or "" self._command = os.path.basename(sys.argv[0]) self._pid = os.getpid() # queue of drivertask objects to execute on the eventloop thread self._tasks = queue.Queue(maxsize=500) # limit the number of Task()'s to execute per call to _process_tasks(). # This allows the eventloop main thread to return to servicing socket # I/O in a timely manner self._max_task_batch = 50 # cache of all Sender links indexed by address: self._all_senders = {} # active Sender links indexed by address: self._active_senders = set() # closing Sender links indexed by address: self._purged_senders = [] # Servers indexed by target. Each entry is a map indexed by the # specific ProtonListener's identifier: self._servers = {} self._container_name = config.oslo_messaging_amqp.container_name self.idle_timeout = config.oslo_messaging_amqp.idle_timeout self.trace_protocol = config.oslo_messaging_amqp.trace self.ssl = config.oslo_messaging_amqp.ssl self.ssl_ca_file = config.oslo_messaging_amqp.ssl_ca_file self.ssl_cert_file = config.oslo_messaging_amqp.ssl_cert_file self.ssl_key_file = config.oslo_messaging_amqp.ssl_key_file self.ssl_key_password = config.oslo_messaging_amqp.ssl_key_password self.ssl_verify_vhost = config.oslo_messaging_amqp.ssl_verify_vhost self.pseudo_vhost = config.oslo_messaging_amqp.pseudo_vhost self.sasl_mechanisms = config.oslo_messaging_amqp.sasl_mechanisms self.sasl_config_dir = config.oslo_messaging_amqp.sasl_config_dir self.sasl_config_name = config.oslo_messaging_amqp.sasl_config_name self.hosts = Hosts(url, config.oslo_messaging_amqp.sasl_default_realm) self.conn_retry_interval = \ config.oslo_messaging_amqp.connection_retry_interval self.conn_retry_backoff = \ config.oslo_messaging_amqp.connection_retry_backoff self.conn_retry_interval_max = \ config.oslo_messaging_amqp.connection_retry_interval_max self.link_retry_delay = config.oslo_messaging_amqp.link_retry_delay _opts = config.oslo_messaging_amqp factory_args = {"legacy_server_prefix": _opts.server_request_prefix, "legacy_broadcast_prefix": _opts.broadcast_prefix, "legacy_group_prefix": _opts.group_request_prefix, "rpc_prefix": _opts.rpc_address_prefix, "notify_prefix": _opts.notify_address_prefix, "multicast": _opts.multicast_address, "unicast": _opts.unicast_address, "anycast": _opts.anycast_address, "notify_exchange": _opts.default_notification_exchange, "rpc_exchange": _opts.default_rpc_exchange} self.addresser_factory = AddresserFactory(default_exchange, _opts.addressing_mode, **factory_args) self.addresser = None # cannot send an RPC request until the replies link is active, as we # need the peer assigned address, so need to delay sending any RPC # requests until this link is active: self.reply_link = None # Set True when the driver is shutting down self._closing = False # only schedule one outstanding reconnect attempt at a time self._reconnecting = False self._delay = self.conn_retry_interval # seconds between retries # prevent queuing up multiple requests to run _process_tasks() self._process_tasks_scheduled = False self._process_tasks_lock = threading.Lock() # credit levels for incoming links self._reply_credit = _opts.reply_link_credit self._rpc_credit = _opts.rpc_server_credit self._notify_credit = _opts.notify_server_credit # sender link maintenance timer and 
interval self._link_maint_timer = None self._link_maint_timeout = _opts.default_sender_link_timeout def connect(self): """Connect to the messaging service.""" self.processor = eventloop.Thread(self._container_name, self._node, self._command, self._pid) self.processor.wakeup(lambda: self._do_connect()) def add_task(self, task): """Add a Task for execution on processor thread.""" self._tasks.put(task) self._schedule_task_processing() def shutdown(self, timeout=30): """Shutdown the messaging service.""" LOG.info("Shutting down the AMQP 1.0 connection") if self.processor: self.processor.wakeup(self._start_shutdown) LOG.debug("Waiting for eventloop to exit") self.processor.join(timeout) self._hard_reset("Shutting down") for sender in self._all_senders.values(): sender.destroy() self._all_senders.clear() self._servers.clear() self.processor.destroy() self.processor = None LOG.debug("Eventloop exited, driver shut down") # The remaining methods are reserved to run from the eventloop thread only! # They must not be invoked directly! # methods executed by Tasks created by the driver: def send(self, send_task): if send_task.deadline and send_task.deadline <= time.monotonic(): send_task._on_timeout() return key = keyify(send_task.target, send_task.service) sender = self._all_senders.get(key) if not sender: sender = Sender(send_task.target, self.processor, self.link_retry_delay, send_task.service) self._all_senders[key] = sender if self.reply_link and self.reply_link.active: sender.attach(self._socket_connection.pyngus_conn, self.reply_link, self.addresser) self._active_senders.add(key) sender.send_message(send_task) def subscribe(self, subscribe_task): """Subscribe to a given target""" if subscribe_task._service == SERVICE_NOTIFY: t = "notification" server = NotificationServer(subscribe_task._target, subscribe_task._in_queue, self.processor, self.link_retry_delay, self._notify_credit) else: t = "RPC" server = RPCServer(subscribe_task._target, subscribe_task._in_queue, self.processor, self.link_retry_delay, self._rpc_credit) LOG.debug("Subscribing to %(type)s target %(target)s", {'type': t, 'target': subscribe_task._target}) key = keyify(subscribe_task._target, subscribe_task._service) servers = self._servers.get(key) if servers is None: servers = {} self._servers[key] = servers servers[subscribe_task._subscriber_id] = server if self._active: server.attach(self._socket_connection.pyngus_conn, self.addresser) # commands executed on the processor (eventloop) via 'wakeup()': def _do_connect(self): """Establish connection and reply subscription on processor thread.""" host = self.hosts.current conn_props = {'properties': {'process': self._command, 'pid': self._pid, 'node': self._node}} # only set hostname in the AMQP 1.0 Open performative if the message # bus can interpret it as the virtual host. We leave it unspecified # since apparently noone can agree on how it should be used otherwise! 
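# Illustrative example of the setting below (the URL is hypothetical):
# a transport URL of amqp://host:5672/my_vhost with pseudo_vhost disabled
# results in conn_props['hostname'] = 'my_vhost' being sent in the Open
# performative.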
if self.hosts.virtual_host and not self.pseudo_vhost: conn_props['hostname'] = self.hosts.virtual_host if self.idle_timeout: conn_props["idle-time-out"] = float(self.idle_timeout) if self.trace_protocol: conn_props["x-trace-protocol"] = self.trace_protocol # SSL configuration ssl_enabled = False if self.ssl: ssl_enabled = True conn_props["x-ssl"] = self.ssl if self.ssl_ca_file: conn_props["x-ssl-ca-file"] = self.ssl_ca_file ssl_enabled = True if self.ssl_cert_file: ssl_enabled = True conn_props["x-ssl-identity"] = (self.ssl_cert_file, self.ssl_key_file, self.ssl_key_password) if ssl_enabled: # Set the identity of the remote server for SSL to use when # verifying the received certificate. Typically this is the DNS # name used to set up the TCP connections. However some servers # may provide a certificate for the virtual host instead. If that # is the case we need to use the virtual hostname instead. # Refer to SSL Server Name Indication (SNI) for the entire story: # https://tools.ietf.org/html/rfc6066 if self.ssl_verify_vhost: if self.hosts.virtual_host: conn_props['x-ssl-peer-name'] = self.hosts.virtual_host else: conn_props['x-ssl-peer-name'] = host.hostname # SASL configuration: if self.sasl_mechanisms: conn_props["x-sasl-mechs"] = self.sasl_mechanisms if self.sasl_config_dir: conn_props["x-sasl-config-dir"] = self.sasl_config_dir if self.sasl_config_name: conn_props["x-sasl-config-name"] = self.sasl_config_name self._socket_connection = self.processor.connect(host, handler=self, properties=conn_props) LOG.debug("Connection initiated") def _process_tasks(self): """Execute Task objects in the context of the processor thread.""" with self._process_tasks_lock: self._process_tasks_scheduled = False count = 0 while (not self._tasks.empty() and count < self._max_task_batch): try: self._tasks.get(False)._execute(self) except Exception as e: LOG.exception("Error processing task: %s", e) count += 1 # if we hit _max_task_batch, resume task processing later: if not self._tasks.empty(): self._schedule_task_processing() def _schedule_task_processing(self): """_process_tasks() helper: prevent queuing up multiple requests for task processing. This method is called both by the application thread and the processing thread. """ if self.processor: with self._process_tasks_lock: already_scheduled = self._process_tasks_scheduled self._process_tasks_scheduled = True if not already_scheduled: self.processor.wakeup(lambda: self._process_tasks()) def _start_shutdown(self): """Called when the application is closing the transport. Attempt to cleanly flush/close all links. """ self._closing = True if self._active: # try a clean shutdown self._detach_senders() self._detach_servers() self.reply_link.detach() self._socket_connection.pyngus_conn.close() else: # don't wait for a close from the remote, may never happen self.processor.shutdown() # reply link callbacks: def _reply_link_ready(self): """Invoked when the Replies reply link has become active. At this point, we are ready to receive messages, so start all pending RPC requests. """ LOG.info("Messaging is active (%(hostname)s:%(port)s%(vhost)s)", {'hostname': self.hosts.current.hostname, 'port': self.hosts.current.port, 'vhost': ("/" + self.hosts.virtual_host if self.hosts.virtual_host else "")}) for sender in self._all_senders.values(): sender.attach(self._socket_connection.pyngus_conn, self.reply_link, self.addresser) def _reply_link_down(self): # Treat it as a recoverable failure because the RPC reply address is # now invalid for all in-flight RPC requests. 
if not self._closing: self._detach_senders() self._detach_servers() self._socket_connection.pyngus_conn.close() # once closed, _handle_connection_loss() will initiate reconnect # callback from eventloop on socket error def socket_error(self, error): """Called by eventloop when a socket error occurs.""" LOG.error("Socket failure: %s", error) self._handle_connection_loss(str(error)) # Pyngus connection event callbacks (and their helpers), all invoked from # the eventloop thread: def connection_failed(self, connection, error): """This is a Pyngus callback, invoked by Pyngus when a non-recoverable error occurs on the connection. """ if connection is not self._socket_connection.pyngus_conn: # pyngus bug: ignore failure callback on destroyed connections return LOG.debug("AMQP Connection failure: %s", error) self._handle_connection_loss(str(error)) def connection_active(self, connection): """This is a Pyngus callback, invoked by Pyngus when the connection to the peer is up. At this point, the driver will activate all subscriber links (server) and the reply link. """ LOG.debug("Connection active (%(hostname)s:%(port)s), subscribing...", {'hostname': self.hosts.current.hostname, 'port': self.hosts.current.port}) # allocate an addresser based on the advertised properties of the # message bus props = connection.remote_properties or {} self.addresser = self.addresser_factory(props, self.hosts.virtual_host if self.pseudo_vhost else None) for servers in self._servers.values(): for server in servers.values(): server.attach(self._socket_connection.pyngus_conn, self.addresser) self.reply_link = Replies(self._socket_connection.pyngus_conn, self._reply_link_ready, self._reply_link_down, self._reply_credit) self._delay = self.conn_retry_interval # reset # schedule periodic maintenance of sender links self._link_maint_timer = self.processor.defer(self._purge_sender_links, self._link_maint_timeout) def connection_closed(self, connection): """This is a Pyngus callback, invoked by Pyngus when the connection has cleanly closed. This occurs after the driver closes the connection locally, and the peer has acknowledged the close. At this point, the shutdown of the driver's connection is complete. """ LOG.debug("AMQP connection closed.") # if the driver isn't being shutdown, failover and reconnect self._handle_connection_loss("AMQP connection closed.") def connection_remote_closed(self, connection, reason): """This is a Pyngus callback, invoked by Pyngus when the peer has requested that the connection be closed. """ # The messaging service/broker is trying to shut down the # connection. Acknowledge the close, and try to reconnect/failover # later once the connection has closed (connection_closed is called). if reason: LOG.info("Connection closed by peer: %s", reason) self._detach_senders() self._detach_servers() self.reply_link.detach() self._socket_connection.pyngus_conn.close() def sasl_done(self, connection, pn_sasl, outcome): """This is a Pyngus callback invoked when the SASL handshake has completed. The outcome of the handshake is passed in the outcome argument. """ if outcome == proton.SASL.OK: return LOG.error("AUTHENTICATION FAILURE: Cannot connect to " "%(hostname)s:%(port)s as user %(username)s", {'hostname': self.hosts.current.hostname, 'port': self.hosts.current.port, 'username': self.hosts.current.username}) # pyngus will invoke connection_failed() eventually def _handle_connection_loss(self, reason): """The connection to the messaging service has been lost. 
        Try to reestablish the connection/failover if not shutting down the
        driver.
        """
        self.addresser = None
        self._socket_connection.close()
        if self._closing:
            # we're in the middle of shutting down the driver anyway,
            # just consider it done:
            self.processor.shutdown()
        else:
            # for some reason, we've lost the connection to the messaging
            # service. Try to re-establish the connection:
            if not self._reconnecting:
                self._reconnecting = True
                self.processor.wakeup(lambda: self._hard_reset(reason))
                LOG.info("Delaying reconnect attempt for %d seconds",
                         self._delay)
                self.processor.defer(lambda: self._do_reconnect(reason),
                                     self._delay)
                self._delay = min(self._delay * self.conn_retry_backoff,
                                  self.conn_retry_interval_max)
            if self._link_maint_timer:
                self._link_maint_timer.cancel()
                self._link_maint_timer = None

    def _do_reconnect(self, reason):
        """Invoked on connection/socket failure, failover and re-connect to
        the messaging service.
        """
        self._reconnecting = False
        if not self._closing:
            host = self.hosts.next()
            LOG.info("Reconnecting to: %(hostname)s:%(port)s",
                     {'hostname': host.hostname, 'port': host.port})
            self.processor.wakeup(lambda: self._do_connect())

    def _hard_reset(self, reason):
        """Reset the controller to its pre-connection state"""
        # note well: since this method destroys the connection, it cannot
        # be invoked directly from a pyngus callback. Use processor.defer()
        # to run this method on the main loop instead.
        for sender in self._purged_senders:
            sender.destroy(reason)
        del self._purged_senders[:]
        self._active_senders.clear()
        unused = []
        for key, sender in self._all_senders.items():
            # clean up any sender links that no longer have messages to send
            if sender.pending_messages == 0:
                unused.append(key)
            else:
                sender.reset(reason)
                self._active_senders.add(key)
        for key in unused:
            self._all_senders[key].destroy(reason)
            del self._all_senders[key]
        for servers in self._servers.values():
            for server in servers.values():
                server.reset()
        if self.reply_link:
            self.reply_link.destroy()
            self.reply_link = None
        if self._socket_connection:
            self._socket_connection.reset()

    def _detach_senders(self):
        """Close all sender links"""
        for sender in self._all_senders.values():
            sender.detach()

    def _detach_servers(self):
        """Close all listener links"""
        for servers in self._servers.values():
            for server in servers.values():
                server.detach()

    def _purge_sender_links(self):
        """Purge inactive sender links"""
        if not self._closing:
            # destroy links that have already been closed
            for sender in self._purged_senders:
                sender.destroy("Idle link purged")
            del self._purged_senders[:]
            # determine next set to purge
            purge = set(self._all_senders.keys()) - self._active_senders
            for key in purge:
                sender = self._all_senders[key]
                if not sender.pending_messages and not sender.unacked_messages:
                    sender.detach()
                    self._purged_senders.append(self._all_senders.pop(key))
            self._active_senders.clear()
            self._link_maint_timer = \
                self.processor.defer(self._purge_sender_links,
                                     self._link_maint_timeout)

    @property
    def _active(self):
        # Is the connection up
        return (self._socket_connection and
                self._socket_connection.pyngus_conn and
                self._socket_connection.pyngus_conn.active)

oslo.messaging-14.9.0/oslo_messaging/_drivers/amqp1_driver/eventloop.py

# Copyright 2014, Red Hat, Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A thread that performs all messaging I/O and protocol event handling. This module provides a background thread that handles messaging operations scheduled via the Controller, and performs blocking socket I/O and timer processing. This thread is designed to be as simple as possible - all the protocol specific intelligence is provided by the Controller and executed on the background thread via callables. """ import collections import errno import heapq import logging import math import os import pyngus import select import socket import threading import time import uuid LOG = logging.getLogger(__name__) def compute_timeout(offset): # minimize the timer granularity to one second so we don't have to track # too many timers return math.ceil(time.monotonic() + offset) class _SocketConnection(object): """Associates a pyngus Connection with a python network socket, and handles all connection-related I/O and timer events. """ def __init__(self, name, container, properties, handler): self.name = name self.socket = None self.pyngus_conn = None self._properties = properties # The handler is a pyngus ConnectionEventHandler, which is invoked by # pyngus on connection-related events (active, closed, error, etc). # Currently it is the Controller object. self._handler = handler self._container = container def fileno(self): """Allows use of a _SocketConnection in a select() call. 
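
        Both _SocketConnection and the Requests wakeup pipe expose
        fileno(), so the eventloop's main loop can pass them directly to
        select.select() as readable objects.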
""" return self.socket.fileno() def read_socket(self): """Called to read from the socket.""" if self.socket: try: pyngus.read_socket_input(self.pyngus_conn, self.socket) self.pyngus_conn.process(time.monotonic()) except (socket.timeout, socket.error) as e: # pyngus handles EAGAIN/EWOULDBLOCK and EINTER self.pyngus_conn.close_input() self.pyngus_conn.close_output() self._handler.socket_error(str(e)) def write_socket(self): """Called to write to the socket.""" if self.socket: try: pyngus.write_socket_output(self.pyngus_conn, self.socket) self.pyngus_conn.process(time.monotonic()) except (socket.timeout, socket.error) as e: # pyngus handles EAGAIN/EWOULDBLOCK and EINTER self.pyngus_conn.close_output() self.pyngus_conn.close_input() self._handler.socket_error(str(e)) def connect(self, host): """Connect to host and start the AMQP protocol.""" addr = socket.getaddrinfo(host.hostname, host.port, socket.AF_UNSPEC, socket.SOCK_STREAM) if not addr: key = "%s:%i" % (host.hostname, host.port) error = "Invalid peer address '%s'" % key LOG.error("Invalid peer address '%s'", key) self._handler.socket_error(error) return my_socket = socket.socket(addr[0][0], addr[0][1], addr[0][2]) my_socket.setblocking(0) # 0=non-blocking my_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) try: my_socket.connect(addr[0][4]) except socket.error as e: if e.errno != errno.EINPROGRESS: error = "Socket connect failure '%s'" % str(e) LOG.error("Socket connect failure '%s'", str(e)) self._handler.socket_error(error) return self.socket = my_socket props = self._properties.copy() if pyngus.VERSION >= (2, 0, 0): # configure client authentication # props['x-server'] = False if host.username: props['x-username'] = host.username props['x-password'] = host.password or "" self.pyngus_conn = self._container.create_connection(self.name, self._handler, props) self.pyngus_conn.user_context = self if pyngus.VERSION < (2, 0, 0): # older versions of pyngus requires manual SASL configuration: # determine the proper SASL mechanism: PLAIN if a username/password # is present, else ANONYMOUS pn_sasl = self.pyngus_conn.pn_sasl if host.username: password = host.password if host.password else "" pn_sasl.plain(host.username, password) else: pn_sasl.mechanisms("ANONYMOUS") pn_sasl.client() self.pyngus_conn.open() def reset(self, name=None): """Clean up the current state, expect 'connect()' to be recalled later. """ # note well: since destroy() is called on the connection, do not invoke # this method from a pyngus callback! if self.pyngus_conn: self.pyngus_conn.destroy() self.pyngus_conn = None self.close() if name: self.name = name def close(self): if self.socket: self.socket.close() self.socket = None class Scheduler(object): """Schedule callables to be run in the future. 
""" class Event(object): # simply hold a reference to a callback that can be set to None if the # alarm is canceled def __init__(self, callback): self.callback = callback def cancel(self): # quicker than rebalancing the tree self.callback = None def __init__(self): self._callbacks = {} self._deadlines = [] def alarm(self, request, deadline): """Request a callable be executed at a specific time """ try: callbacks = self._callbacks[deadline] except KeyError: callbacks = list() self._callbacks[deadline] = callbacks heapq.heappush(self._deadlines, deadline) entry = Scheduler.Event(request) callbacks.append(entry) return entry def defer(self, request, delay): """Request a callable be executed after delay seconds """ return self.alarm(request, compute_timeout(delay)) @property def _next_deadline(self): """The timestamp of the next expiring event or None """ return self._deadlines[0] if self._deadlines else None def _get_delay(self, max_delay=None): """Get the delay in milliseconds until the next callable needs to be run, or 'max_delay' if no outstanding callables or the delay to the next callable is > 'max_delay'. """ due = self._deadlines[0] if self._deadlines else None if due is None: return max_delay _now = time.monotonic() if due <= _now: return 0 else: return min(due - _now, max_delay) if max_delay else due - _now def _process(self): """Invoke all expired callables.""" if self._deadlines: _now = time.monotonic() try: while self._deadlines[0] <= _now: deadline = heapq.heappop(self._deadlines) callbacks = self._callbacks[deadline] del self._callbacks[deadline] for cb in callbacks: cb.callback and cb.callback() except IndexError: pass class Requests(object): """A queue of callables to execute from the eventloop thread's main loop. """ def __init__(self): self._requests = collections.deque() self._wakeup_pipe = os.pipe() self._pipe_ready = False # prevents blocking on an empty pipe self._pipe_lock = threading.Lock() def wakeup(self, request=None): """Enqueue a callable to be executed by the eventloop, and force the eventloop thread to wake up from select(). """ with self._pipe_lock: if request: self._requests.append(request) if not self._pipe_ready: self._pipe_ready = True os.write(self._wakeup_pipe[1], b'!') def fileno(self): """Allows this request queue to be used by select().""" return self._wakeup_pipe[0] def process_requests(self): """Invoked by the eventloop thread, execute each queued callable.""" with self._pipe_lock: if not self._pipe_ready: return self._pipe_ready = False os.read(self._wakeup_pipe[0], 512) requests = self._requests self._requests = collections.deque() for r in requests: r() class Thread(threading.Thread): """Manages socket I/O and executes callables queued up by external threads. """ def __init__(self, container_name, node, command, pid): super(Thread, self).__init__() # callables from other threads: self._requests = Requests() # delayed callables (only used on this thread for now): self._scheduler = Scheduler() self._connection = None # Configure a container if container_name is None: container_name = ("openstack.org/om/container/%s/%s/%s/%s" % (node, command, pid, uuid.uuid4().hex)) self._container = pyngus.Container(container_name) self.name = "Thread for Proton container: %s" % self._container.name self._shutdown = False self.daemon = True self.start() def wakeup(self, request=None): """Wake up the eventloop thread, Optionally providing a callable to run when the eventloop wakes up. Thread safe. 
""" self._requests.wakeup(request) def shutdown(self): """Shutdown the eventloop thread. Thread safe. """ LOG.debug("eventloop shutdown requested") self._shutdown = True self.wakeup() def destroy(self): # release the container. This can only be called after the eventloop # thread exited self._container.destroy() self._container = None # the following methods are not thread safe - they must be run from the # eventloop thread def defer(self, request, delay): """Invoke request after delay seconds.""" return self._scheduler.defer(request, delay) def alarm(self, request, deadline): """Invoke request at a particular time""" return self._scheduler.alarm(request, deadline) def connect(self, host, handler, properties): """Get a _SocketConnection to a peer represented by url.""" key = "openstack.org/om/connection/%s:%s/" % (host.hostname, host.port) # return pre-existing conn = self._container.get_connection(key) if conn: return conn.user_context # create a new connection - this will be stored in the # container, using the specified name as the lookup key, or if # no name was provided, the host:port combination sc = _SocketConnection(key, self._container, properties, handler=handler) sc.connect(host) self._connection = sc return sc def run(self): """Run the proton event/timer loop.""" LOG.debug("Starting Proton thread, container=%s", self._container.name) try: self._main_loop() except Exception: # unknown error - fatal LOG.exception("Fatal unhandled event loop error!") raise def _main_loop(self): # Main event loop while not self._shutdown: readfds = [self._requests] writefds = [] deadline = self._scheduler._next_deadline pyngus_conn = self._connection and self._connection.pyngus_conn if pyngus_conn and self._connection.socket: if pyngus_conn.needs_input: readfds.append(self._connection) if pyngus_conn.has_output: writefds.append(self._connection) if pyngus_conn.deadline: deadline = (pyngus_conn.deadline if not deadline else min(deadline, pyngus_conn.deadline)) # force select to return in time to service the next expiring timer if deadline: _now = time.monotonic() timeout = 0 if deadline <= _now else (deadline - _now) else: timeout = None # and now we wait... try: select.select(readfds, writefds, [], timeout) except select.error as serror: if serror[0] == errno.EINTR: LOG.warning("ignoring interrupt from select(): %s", str(serror)) continue raise # assuming fatal... # Ignore the select return value - simply poll the socket for I/O. # Testing shows that polling improves latency over checking the # lists returned by select() self._requests.process_requests() self._connection.read_socket() if pyngus_conn and pyngus_conn.deadline: _now = time.monotonic() if pyngus_conn.deadline <= _now: pyngus_conn.process(_now) self._connection.write_socket() self._scheduler._process() # run any deferred requests LOG.info("eventloop thread exiting, container=%s", self._container.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/amqp1_driver/opts.py0000664000175000017500000002513700000000000025347 0ustar00zuulzuul00000000000000# Copyright 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg


amqp1_opts = [
    cfg.StrOpt('container_name',
               deprecated_group='amqp1',
               help='Name for the AMQP container. Must be globally unique.'
                    ' Defaults to a generated UUID'),

    cfg.IntOpt('idle_timeout',
               default=0,  # disabled
               deprecated_group='amqp1',
               help='Timeout for inactive connections (in seconds)'),

    cfg.BoolOpt('trace',
                default=False,
                deprecated_group='amqp1',
                help='Debug: dump AMQP frames to stdout'),

    cfg.BoolOpt('ssl',
                default=False,
                help=("Attempt to connect via SSL. If no other ssl-related"
                      " parameters are given, it will use the system's"
                      " CA-bundle to verify the server's certificate.")),

    cfg.StrOpt('ssl_ca_file',
               default='',
               deprecated_group='amqp1',
               help="CA certificate PEM file used to verify the server's"
                    ' certificate'),

    cfg.StrOpt('ssl_cert_file',
               default='',
               deprecated_group='amqp1',
               help='Self-identifying certificate PEM file'
                    ' for client authentication'),

    cfg.StrOpt('ssl_key_file',
               default='',
               deprecated_group='amqp1',
               help='Private key PEM file used to sign ssl_cert_file'
                    ' certificate (optional)'),

    cfg.StrOpt('ssl_key_password',
               deprecated_group='amqp1',
               secret=True,
               help='Password for decrypting ssl_key_file (if encrypted)'),

    cfg.BoolOpt('ssl_verify_vhost',
                default=False,
                help="By default SSL checks that the name in the server's"
                     " certificate matches the hostname in the"
                     " transport_url. In some configurations it may be"
                     " preferable to use the virtual hostname instead, for"
                     " example if the server uses the Server Name Indication"
                     " TLS extension (rfc6066) to provide a"
                     " certificate per virtual host.
Set ssl_verify_vhost to True" " if the server's SSL certificate uses the virtual host name" " instead of the DNS name."), cfg.StrOpt('sasl_mechanisms', default='', deprecated_group='amqp1', help='Space separated list of acceptable SASL mechanisms'), cfg.StrOpt('sasl_config_dir', default='', deprecated_group='amqp1', help='Path to directory that contains the SASL configuration'), cfg.StrOpt('sasl_config_name', default='', deprecated_group='amqp1', help='Name of configuration file (without .conf suffix)'), cfg.StrOpt('sasl_default_realm', default='', help='SASL realm to use if no realm present in username'), # Network connection failure retry options cfg.IntOpt('connection_retry_interval', default=1, min=1, help='Seconds to pause before attempting to re-connect.'), cfg.IntOpt('connection_retry_backoff', default=2, min=0, help='Increase the connection_retry_interval by this many' ' seconds after each unsuccessful failover attempt.'), cfg.IntOpt('connection_retry_interval_max', default=30, min=1, help='Maximum limit for connection_retry_interval' ' + connection_retry_backoff'), # Message send retry and timeout options cfg.IntOpt('link_retry_delay', default=10, min=1, help='Time to pause between re-connecting an AMQP 1.0 link that' ' failed due to a recoverable error.'), cfg.IntOpt('default_reply_retry', default=0, min=-1, help='The maximum number of attempts to re-send a reply message' ' which failed due to a recoverable error.'), cfg.IntOpt('default_reply_timeout', default=30, min=5, help='The deadline for an rpc reply message delivery.'), cfg.IntOpt('default_send_timeout', default=30, min=5, help='The deadline for an rpc cast or call message delivery.' ' Only used when caller does not provide a timeout expiry.'), cfg.IntOpt('default_notify_timeout', default=30, min=5, help='The deadline for a sent notification message delivery.' ' Only used when caller does not provide a timeout expiry.'), # Sender link cache maintenance: cfg.IntOpt('default_sender_link_timeout', default=600, min=1, help='The duration to schedule a purge of idle sender links.' ' Detach link after expiry.'), # Addressing: cfg.StrOpt('addressing_mode', default='dynamic', help="Indicates the addressing mode used by the driver.\n" "Permitted values:\n" "'legacy' - use legacy non-routable addressing\n" "'routable' - use routable addresses\n" "'dynamic' - use legacy addresses if the message bus does not" " support routing otherwise use routable addressing"), cfg.BoolOpt('pseudo_vhost', default=True, help="Enable virtual host support for those message buses" " that do not natively support virtual hosting (such as" " qpidd). When set to true the virtual host name will be" " added to all message bus addresses, effectively creating" " a private 'subnet' per virtual host. 
Set to False if the" " message bus supports virtual hosting using the 'hostname'" " field in the AMQP 1.0 Open performative as the name of the" " virtual host."), # Legacy addressing customization: cfg.StrOpt('server_request_prefix', default='exclusive', deprecated_group='amqp1', help="address prefix used when sending to a specific server"), cfg.StrOpt('broadcast_prefix', default='broadcast', deprecated_group='amqp1', help="address prefix used when broadcasting to all servers"), cfg.StrOpt('group_request_prefix', default='unicast', deprecated_group='amqp1', help="address prefix when sending to any server in group"), # Routable addressing customization: # # Addresses a composed of the following string values using a template in # the form of: # $(address_prefix)/$(*cast)/$(exchange)/$(topic)[/$(server-name)] # where *cast is one of the multicast/unicast/anycast values used to # identify the delivery pattern used for the addressed message cfg.StrOpt('rpc_address_prefix', default='openstack.org/om/rpc', help="Address prefix for all generated RPC addresses"), cfg.StrOpt('notify_address_prefix', default='openstack.org/om/notify', help="Address prefix for all generated Notification addresses"), cfg.StrOpt('multicast_address', default='multicast', help="Appended to the address prefix when sending a fanout" " message. Used by the message bus to identify fanout" " messages."), cfg.StrOpt('unicast_address', default='unicast', help="Appended to the address prefix when sending to a" " particular RPC/Notification server. Used by the message bus" " to identify messages sent to a single destination."), cfg.StrOpt('anycast_address', default='anycast', help="Appended to the address prefix when sending to a group of" " consumers. Used by the message bus to identify messages that" " should be delivered in a round-robin fashion across" " consumers."), cfg.StrOpt('default_notification_exchange', help="Exchange name used in notification addresses.\n" "Exchange name resolution precedence:\n" "Target.exchange if set\n" "else default_notification_exchange if set\n" "else control_exchange if set\n" "else 'notify'"), cfg.StrOpt('default_rpc_exchange', help="Exchange name used in RPC addresses.\n" "Exchange name resolution precedence:\n" "Target.exchange if set\n" "else default_rpc_exchange if set\n" "else control_exchange if set\n" "else 'rpc'"), # Message Credit Levels cfg.IntOpt('reply_link_credit', default=200, min=1, help='Window size for incoming RPC Reply messages.'), cfg.IntOpt('rpc_server_credit', default=100, min=1, help='Window size for incoming RPC Request messages'), cfg.IntOpt('notify_server_credit', default=100, min=1, help='Window size for incoming Notification messages'), # Settlement control cfg.MultiStrOpt('pre_settled', default=['rpc-cast', 'rpc-reply'], help="Send messages of this type pre-settled.\n" "Pre-settled messages will not receive acknowledgement\n" "from the peer. 
Note well: pre-settled messages may be\n"
                         "silently discarded if the delivery fails.\n"
                         "Permitted values:\n"
                         "'rpc-call' - send RPC Calls pre-settled\n"
                         "'rpc-reply' - send RPC Replies pre-settled\n"
                         "'rpc-cast' - send RPC Casts pre-settled\n"
                         "'notify' - send Notifications pre-settled\n")
]

oslo.messaging-14.9.0/oslo_messaging/_drivers/amqp1_driver/oslo_messaging_amqp_driver_overview.rst

##############################
Oslo.messaging AMQP 1.0 Driver
##############################

:Date: $Date: 2016-08-02 $
:Revision: $Revision: 0.04 $

Introduction
============

This document describes the architecture and implementation of the
oslo.messaging AMQP 1.0 driver. The AMQP 1.0 driver provides an
implementation of the oslo.messaging base driver service interfaces that
maps client application RPC and Notify methods "onto" the operation of an
AMQP 1.0 protocol messaging bus. The blueprint for the original driver can
be found here [1]_ and the original implementation is described in [2]_.
The feature specification for the updates to the AMQP 1.0 driver for the
OpenStack Newton release can be found here [3]_.

The driver effectively hides the details of the AMQP 1.0 protocol transport
and message processing from the client applications. The Pyngus messaging
framework [4]_, built on the QPID Proton engine [5]_, provides a
callback-based API for message passing. The driver implementation consists
of the callback "handlers" that drive the messaging APIs to connect to the
message bus, subscribe servers, and send and receive messages.

::

   +------------+  +------------+  +-------------+  +-------------+
   |            |  |            |  |             |  |             |  OpenStack
   | RPC Client |  | RPC Server |  |   Notify    |  |   Notify    |  Application
   |            |  |            |  |   Client    |  |   Server    |
   +------------+  +------------+  +-------------+  +-------------+
  XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
   +--------------------------------------------------------------+
   |           Oslo.Messaging "Base Driver Interface"              |  Oslo Messaging
   +--------------------------------------------------------------+  Driver
   |              Oslo.Messaging AMQP 1.0 Driver                   |
   +--------------------------------------------------------------+
  XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
   +--------------------------------------------------------------+
   |                 Pyngus Messaging Framework                    |
   +--------------------------------------------------------------+
   |                    QPID Proton Library                        |  AMQP 1.0
   +--------------------------------------------------------------+  Protocol
   |                     AMQP 1.0 Protocol                         |  Exchange
   +--------------------------------------------------------------+
   |                   TCP/IP Network Layer                        |
   +--------------------------------------------------------------+

Development View
================

Code Base
---------

The AMQP 1.0 driver source code is maintained in the OpenStack
oslo.messaging repository [7]_. The driver implementation, tests and user
guide are located in the sub-directories of the repository.
:: ├── doc │ └── source │ ├── AMQP1.0.rst ├── oslo_messaging ├── _drivers │ ├── amqp1_driver │ │ ├── addressing.py │ │ ├── controller.py │ │ ├── eventloop.py │ │ ├── opts.py │ ├── impl_amqp1.py ├── tests ├── drivers ├── test_amqp_driver.py +-----------------+----------------------------------------------------+ |File | Content | +=================+====================================================+ |doc/ |The AMQP 1.0 driver user guide details | |source/ |prerequisite, configuration and platform deployment | |AMQP1.0.rst |considerations. | | | | +-----------------+----------------------------------------------------+ |_drivers/ |This file provides the oslo.messaging driver entry | |impl_amqp1.py |points for the AMQP 1.0 driver. The file provides | | |implementations for the base.RpcIncomingMessage, | | |base.PollStyleListener and base.BaseDriver oslo | | |messaging entities. | +-----------------+----------------------------------------------------+ |_drivers/ |This file provides a set of utilities that translate| |amqp1_driver/ |a target address to a well-formed AMQP 1.0 address. | |addressing.py | | | | | +-----------------+----------------------------------------------------+ |_drivers/ |The controller manages the interface between the | |amqp1_driver/ |driver and the messaging service protocol exchange. | |controller.py | | | | | +-----------------+----------------------------------------------------+ |_drivers/ |This module provides a background thread that | |amqp1_driver/ |handles scheduled messaging operations. All | |eventloop.py |protocol specific exchanges are executed on this | | |background thread. | +-----------------+----------------------------------------------------+ |_drivers/ |This file manages the AMQP 1.0 driver configuration | |amqp1_driver/ |options (oslo_messaging_amqp). | |opts.py | | | | | +-----------------+----------------------------------------------------+ |tests/ |This file contains a set of functional tests that | |drivers/ |target the capabilities of the driver. A message | |test_amqp_driver |intermediary is included to emulate the full | | |messaging protocol exchanges. | +-----------------+----------------------------------------------------+ Deployment ========== The Oslo Messaging AMQP 1.0 driver is deployed on each node of the OpenStack infrastructure where one or more OpenStack services will be deployed. 
:: Node Node +--------------------------------+ +-----------------------------------+ | +-------------+ | | +--------------+ +--------------+ | | | | | | | | | | | | | OpenStack | | | | OpenStack | | OpenStack | | | | Service | | | | Service | | Service | | | | | | | | | | | | | +-------------+ | | +--------------+ +--------------+ | | | Oslo | | | | Oslo | | Oslo | | | | Messaging | | | | Messaging | | Messaging | | | +------------+ +-------------+ | | +--------------+ +--------------+ | | | AMQP 1.0 | | AMQP 1.0 | | | | AMQP 1.0 | | AMQP 1.0 | | | |Intermediary| | Driver | | | | Driver | | Driver | | | +------------+ +-------------+ | | +--------------+ +--------------+ | | +----------------------------+ | | +-------------------------------+ | | | TCP/IP | | | | TCP/IP | | | | Stack | | | | Stack | | | +----------------------------+ | | +-------------------------------+ | +--------------------------------+ +-----------------------------------+ ^ ^ ^ ^ | | | | | | Public Network | | +----------------------v-----------------------------------------v------------+ v Internal Network v +-----------------------------------------------------------------------------+ The configuration of each OpenStack service must provide the transport information that indicates to the oslo messaging layer that the AMQP 1.0 driver is to be instantiated for the back-end. During instantiation of the driver, a connection is established from the driver to an AMQP 1.0 intermediary that provides the messaging bus capabilities. The intermediary can be co-located on nodes that are running OpenStack services or can be located on separate stand-alone nodes in the control plane. The driver architecture is intended to support any messaging intermediary (e.g. broker or router) that implements version 1.0 of the AMQP protocol. Support for additional classes of intermediaries might require changes to driver configuration parameters and addressing syntax but should not otherwise require changes to the driver architecture. Driver Structure ================ The functionality of the AMQP 1.0 driver is implemented across a number of components that encapsulate the mapping of the driver activities onto the AMQP protocol exchange. The *Controller* implements the primary functional logic for the driver and serves as the interface between the driver entry points ( *Proton Driver* ) and the I/O operations associated with sending and receiving messages on links attached to the message bus. Each sending or receiving link is associated with a specific driver activity such as sending an RPC Call/Cast or Notify message, receiving an RPC reply message, or receiving an RPC or Notify server request. 
:: _______________________ / / / Application / / (OpenStack) / /______________________/ | XXXXXXXXXXXXXXXXXXXXXXX|XXXXXXXXXXXXXXXXXXXXXXXXXXXXX | +----------+ +-----------| Proton | V | Driver | +-------+ +----------+ | Tasks | | +-------+ +------------+ +--------->| Controller | ----| |---- / +------------+ \ / | \ / | \ +---------+ +---------+ +---------+ | Sender |<--| Replies | | Server | | | | | | | +---------+ +---------+ +---------+ | | | | +---------+ +---------+ | | Proton | | Proton | | |Listener | |Listener | | +---------+ +---------+ | | | XXXXXXXXX|XXXXXXXXXXXXX|XXXXXXXXXXXXXXX|XXXXXXXXXXXXX | | | +--------+ +--------+ +--------+ | Send | | Receive| | Receive| | Link | | Link | | Link | +--------+ +--------+ +--------+ Task Orchestration ------------------ The AMQP 1.0 driver maintains a thread for processing protocol events and timers. Therefore, the driver must orchestrate and synchronize requests from the client applications with this internal thread. The *Proton Driver* will act as a proxy for each client request and constructs a task request object on the caller's thread via the *Controller*. The task request object contains the necessary information to execute the desired method on the driver invocation thread of control. This method is executed synchronously - the client thread pends until the driver thread completes processing the task. The unique task objects provided for driver thread invocation include: * Subscribe Task * Send Task (for RPC Cast or Notify) * RPC Call Task * RPC Reply Task * Message Disposition Task :: +------------------------+ +-------------------------------+ | Client Thread | | Driver Thread | | +--------+ +---------+ | | +------+ +--------+ +-------+ | | |Proton | |Control | | | |Event | |Control | |Pyngus | | | |Driver | |(-ler) | | | |Loop | |(-ler) | |Frmwrk | | | +---+----+ +----+----+ | | +---+--+ +---+----+ +---+---+ | | |create | | | | | | | | |task() | | | | | | | | |---------->| | | | | | | | |add | | | | | | | | |task() | | Request | | | | | | |---------->| | Queue | | | | | | | | enq | +------+ | deq | | | | | | |------|---> |||||+--|---->| exec() | | | | | | | +------+ | |------->| | | | | | | | | |----------|-+ | | | wait() | | | | | Protocol | | | | #-----------|------|------+ | | | Exchange | | | | # | | V | | | | | | | # | | +-----+ | | set() |<---------|-+ | | # | | |Event|<--------|--------| | | | # | | | | | | | | | | # | | +-----+ | | | | | | # | | | | | | | | | #<----------|------|------+ | | | | | | | | | | | | | | | + + | | + + + | | | | | | | | | +------------------------+ +-------------------------------+ Scheduling - Execution ^^^^^^^^^^^^^^^^^^^^^^ Following the method task construction, the task is added to the *Controller* queue of requests for execution. Following the placement of the task on this queue, the caller will wait for the execution to complete (or possibly timeout or raise an exception). The eventloop running in its own thread will dequeue the task request and invoke the corresponding method on the *Controller* servant using the information stored in the task request object retrieved. The calls executed on this eventloop thread via the *Controller* perform all the protocol specific intelligence required for the pyngus framework. In addition to the target method invocation, the eventloop may call on the request object for message communication state changes or other indications from the peer. 
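
A minimal sketch of this hand-off pattern follows (the class and attribute
names are illustrative, not the driver's exact internals)::

    import threading

    class EchoTask(object):
        """Executed on the eventloop thread; the caller blocks on wait()."""
        def __init__(self, payload):
            self.payload = payload
            self.result = None
            self._done = threading.Event()

        def _execute(self, controller):
            # invoked by the Controller's task processing loop on the
            # eventloop thread
            self.result = self.payload
            self._done.set()

        def wait(self, timeout=None):
            # invoked on the client thread after controller.add_task()
            return self._done.wait(timeout)

    # client thread:
    #   task = EchoTask('ping')
    #   controller.add_task(task)  # queued; the eventloop is woken up
    #   task.wait(timeout=30)      # pend until _execute() completes

The state diagram below shows the same exchange from the perspective of the
client and driver threads.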
:: Request +--------------------------------------------+ +----------+ Tasks |Client Thread /\ | | | | * * * * * * / v | | + V + listen() | * * * * * * | | |---| -------->| * Init *-->* Schedule *-->* Wait * | | |---| | * * * * * * | | |---| | * * * * * * | | +_|_+ | * * * *\ * * | | V | +------------------|-->| +--------------+ +--------------------------------------------+ | | Eventloop | | | * * | +--------------------------------------------+ | | * * | |Client Thread /\ | | | * Execute * | | * * * * * * / v | | | * * | call() | * * * * * * | | | * * | -------->| * Init *-->* Schedule *-->* Wait * | | | ^ * * \ | | * * * * * * | | | / \ | | * * * * * * | | | / / | | * * * *\ * * | | | \ / | | +------------------|-->| | \ * *v | +--------------------------------------------+ | | * * | o | | * Protocol * | o | | * Exchange * | o | | * * | +--------------------------------------------+ | | * * | |Client Thread /\ | | +--------------+ | * * * * * * / v | | cast() | * * * * * * | | -------->| * Init *-->* Schedule *-->* Wait * | | | * * * * * * | | | * * * * * * | | | * * * *\ * * | | | +------------------|--> +--------------------------------------------+ Completion ^^^^^^^^^^ After carrying out the messaging protocol exchange for the requested task or upon a timeout/exception condition, the eventloop thread will wake-up the callers thread to indicate the task completion. Use Scenarios ============= The primary use scenarios for the AMQP 1.0 Driver correspond to the activities supported by the oslo messaging base driver interface. These activities include the ability to subscribe RPC and Notify servers (referred to as "Servers" in the graphics) as well the ability to send RPC (cast and call) messages and Notification messages into the control plane infrastructure. Following RPC and Notify server processing (e.g. dispatch to the application) the ability to indicate the final disposition of the message is supported and mapped onto the message delivery and settlement capabilities of the AMQP messaging bus. The composition of the AMQP driver and its dynamic behaviors is defined by the support of these primary activities. Load Driver ----------- The operational life-cycle of the AMQP 1.0 driver begins when the oslo messaging loads and instantiates the driver instance for use by an application. To complete this activity, the driver will retrieve the oslo_messaging_amqp configuration options in order to define the driver's run time behaviors. The transport URL specifier provided will be used by the driver to create a connection to the AMQP 1.0 messaging bus. The transport URL is of the form amqp://user:pass@host1:port[,hostN:portN] Where the transport scheme specifies **amqp** as the back-end. It should be noted that oslo.messaging is deprecating the discrete host, port and auth configuration options [6]_. The driver provides the capability to transform the "Target" provided by an application to an addressing format that can be associated to the sender and receive links that take part in the AMQP protocol exchange. 
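
For example, a failover URL naming two hosts can be inspected with the
public TransportURL helper (the hosts and credentials shown are
illustrative only)::

    from oslo_config import cfg
    import oslo_messaging

    url = oslo_messaging.TransportURL.parse(
        cfg.CONF, 'amqp://me:secret@host1:5672,host2:5672/my_vhost')

    # url.transport is 'amqp', which selects this driver; url.hosts is
    # the list the driver cycles through on connection failure; and
    # url.virtual_host is 'my_vhost'.

The flow below traces the driver load and connection establishment steps.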
:: load()---+ \ ----------- \ +--- Transport > * * | ----------- * *<---+ * Prepare * * Driver * * * * * ---------- | Cfg Opts | ----------\ | \ v v * * * * * Retrieve * * Config * * * * * | | v * * * Start * * Protocol * * Thread * * * * * | | v * * +--------------+ * Connect* | AMQP | * to *<----------->| Protocol | * Message * | Exchange | * Bus * +--------------+ * * \ | \ | \ ------------ v +-----> Connection --+ * * ------------ | * * | * Address *<--------------------+ * Factory * * * * * When the AMQP 1.0 driver connects to the messaging bus, it will identify the intermediary that it is connected to (e.g. broker or router). Based on the intermediary type, the driver will dynamically select an addressing syntax that is optimal for operation in a router mesh or a syntax that is appropriate for broker backed queues or topics. Subscribe Server ---------------- The AMQP 1.0 driver maintains a set of (RPC or Notification) servers that are created via the subscribe server activity. For each server, the driver will create and attach a set of addresses for the target that corresponds to the server endpoint for an AMQP protocol exchange. A unique *ProtonListener* (e.g. AMQP 1.0 Receiver Link) is instantiated for each server subscription and the driver will attach event handlers to perform message transport performatives for the link. The driver maintains a single incoming queue that messages from all attached links will be placed upon. :: listen() + \ \ * * \ * * +> * Create * * Listener* * * * * \ ---------- -------- | +-------> Incoming Target -+ | / ---------- -------- \ | +----+ \ v / v * * v * * * Create * * Server * * *\ * * \ ---------- | \ ----------- Connection | +------> Addresses ----------\ | /----------- \ v / v * * / * *<------+ * Attach * * Links * * * * * | | v +--------------+ | AMQP | | Protocol | | Exchange | +--------------+ Send Message ------------ The AMQP 1.0 driver provides the ability to send messages (e.g. RPC Call/Cast or Notify) to a target specified by a client application. The driver maintains a cache of senders corresponding to each unique target that is referenced across the driver life-cycle. The driver maintains a single receiver link that will be the incoming link for all RPC reply messages received by the driver. Prior to sending an RPC call message that expects a reply, the driver will allocate a unique correlation identifier for inclusion in the call message. The driver will also set the message's reply-to field to the address of the RPC reply link. This correlation identifier will appear in the RPC reply message and is used to deliver the reply to the proper client. Prior to sending the message, the AMQP 1.0 driver will determine if the sender link is active and has enough credits for the transfer to proceed. If there are not enough credits to send the message, the driver will retain the pending message until it can be sent or times out. If there are credits to send a message, the driver will first check if there are any messages from a previous request pending to be sent. The driver will service these pending requests in FIFO order and may defer sending the current message request if credits to send run out. The AMQP 1.0 driver tracks the settlement status of all request messages sent to the messaging bus. For each message sent, the driver will maintain a count of the number of retry attempts made on the message. The driver will re-send a message that is not acknowledged up until the retry limit is reached or a send timeout deadline is reached. 
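
From the application's point of view, the subscribe and send scenarios
described above reduce to the public oslo.messaging API. A sketch (the
topic, server and endpoint names are illustrative only)::

    import oslo_messaging
    from oslo_config import cfg

    transport = oslo_messaging.get_rpc_transport(
        cfg.CONF, url='amqp://me:secret@msghost:5672/my_vhost')

    class EchoEndpoint(object):
        def echo(self, ctxt, data):
            return data  # becomes the RPC reply message

    # Subscribe Server: the driver attaches receiver links for the
    # addresses generated from this Target
    target = oslo_messaging.Target(topic='test-topic', server='server-1')
    server = oslo_messaging.get_rpc_server(transport, target,
                                           [EchoEndpoint()])
    server.start()

    # Send Message (RPC call): the correlation identifier and reply-to
    # address are allocated by the driver, invisibly to the caller
    client = oslo_messaging.get_rpc_client(
        transport, oslo_messaging.Target(topic='test-topic'))
    reply = client.call({}, 'echo', data='hello')

The send() flow below shows what such a call triggers inside the driver.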
:: send() + -------- \ +--- Target \ * * | -------- \ * *<---+ +> * Prepare * * Request *---+ ------------- /* * +----> Request Msg <-----+ / * * ------------- | ------- <-+ | | Sender | | ------- | | v | * * ------------ | * *---------> Correlation | * Prepare * ------------ | * Response * | * * | * * | | | | | v --------- | * * +---------> Pending | * */ --------- | * Send * | * Message *\ --------- | * * +-----> Unacked <---+ | * * --------- | | | | | | | + v | / +--------------+ * * v | AMQP | * * | Protocol |-----------> * Settle * | Exchange | * Message * +--------------+ * * * * Server Receive -------------- The AMQP 1.0 driver (via subscribe) maintains a groups of links that receive messages from a set of addresses derived from the Targets associated with a Server instantiation. Messages arriving from these links are placed on the Listener's incoming queue via the Server's incoming message handler. The Listener's poll method will return the message to the application for subsequent application service dispatching. :: +--------------+ | AMQP | | Protocol | | Exchange | +--------------+ | ^ -------- V | --------- Receiver-+ * * +------- Address -------- \ * * --------- v* Message * * Received* * * * * \ \ ----------------- +------> Incoming Message --+ * * ----------------- | * * | * Poll *<--+ | * * | | * * | | * * +-------------------------+ RPC Reply Receive ----------------- The AMQP 1.0 driver instantiates a single receiving link for the reception of all RPC reply messages. Messages received on this receiving link are routed to the originating caller using the correlation-id embedded in the header of the message itself. To ensure the responsiveness and throughput on the shared RPC receiving link, the AMQP 1.0 driver will immediately update the link transfer credits and will acknowledge the successful receipt of the RPC reply. :: +--------------+ | AMQP | | Protocol | | Exchange | +--------------+ | ----------------- V + ------ Incoming Message * * / ----------------- * *v * Message * * Received*<---+ * * | * * \ | ------------- | \ +---- Correlation V \ ------------- * * \ * * \ --------------- * Update * +------> Reply Message * Credit * --------------- * * * * | V * * * * * Accept * * Message * * * * * | V +--------------+ | AMQP | | Protocol | | Exchange | +--------------+ Disposition ----------- For each incoming message provided by the AMQP 1.0 driver to a server application (e.g. RPC or Notify), the delivery disposition of the incoming message can be indicated to the driver. The disposition can either be to acknowledge the message indicating the message was accepted by the application or to requeue the message indicating that application processing could not successfully take place. The driver will initiate the appropriate settlement of the message through an AMQP protocol exchange over the message bus. :: acknowledge()--------+ requeue() --------+ | | v v * * * * * * * * * Ack * * Requeue * * Message *\ ----* Message * * * \ / * * * * \ / * * | v ------------- v | | Incoming Msg | | / ------------- | | / | v v | +--------------+ | | AMQP |<----------------------------+ | Protocol | | Exchange | +--------------+ Driver Components ================= This section describes the components of the AMQP 1.0 driver implementation. For each component, its primary responsibilities and the relationships to other components are included. These relationships are derived from service requests placed upon the other components. 
Architectural or system-level constraints on the component (e.g. multiplicity, concurrency, parameterization) that change the depiction of the architecture are included. Additionally, any list of issues waiting resolution are described. Controller ---------- +-----------------+----------------------------------------------------+ |Component | *Controller* | +=================+====================================================+ |Responsibilities | Responsible for performing messaging-related | | | operations requested by the driver (tasks) | | | and for managing the connection to the messaging | | | service provided by the AMQP 1.0 intermediaries. | | | | | | This component provides the logic for addressing, | | | sending and receiving messages as well as managing | | | the messaging bus connection life-cycle. | +-----------------+----------------------------------------------------+ |Collaborators | | | | Sender (pyngus.SenderEventHandler) | | | Server (pyngus.ReceiverEventHandler) | | | Replies (pyngus.ReceiverEventHandler) | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created whenever the driver is instantiated | | | in a client application process. The component | | | will terminate the driver operation when the client| | | initiates a shutdown of the driver. | | | | | | All AMQP 1.0 protocol exchanges (e.g. messaging | | | and I/O work) are done on the Eventloop driver | | | thread. This allows the driver to run | | | asynchronously from the messaging clients. | | | | | | The component supports addressing modes defined | | | by the driver configuration and through dynamic | | | inspection of the connection to the messaging | | | intermediary. | +-----------------+----------------------------------------------------+ |Issues | A cache of sender links indexed by address is | | | maintained. Currently, removal from the cache is | | | is not implemented. | +-----------------+----------------------------------------------------+ Sender ------ +-----------------+----------------------------------------------------+ |Component | *Sender* (pyngus.SenderEventHander) | +=================+====================================================+ |Responsibilities | Responsible for managing a sender link life-cycle | | | and queueing/tracking the message delivery. | | | (implementation of Pyngus.SenderEventHandle) | | | | | | Provides the capabilities for sending to a | | | particular address on the message bus. | | | | | | Provides the capability to queue (pending) | | | *SendTask* when link not active or insufficient | | | link credit capacity. | | | | | | Provides the capability to retry send following a | | | recoverable connection or link failure. | +-----------------+----------------------------------------------------+ |Collaborators | | | | Addresser | | | Connection | | | Pyngus.SenderLink | | | SendTask | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created by the *Controller* on a client | | | caller thread and retained in a *Sender* cache. 
| +-----------------+----------------------------------------------------+ |Issues | Sender cache aging (see above) | +-----------------+----------------------------------------------------+ Server ------ +-----------------+----------------------------------------------------+ |Component | *Server* (pyngus.ReceiverEventHander) | +=================+====================================================+ |Responsibilities | Responsible for operations for the lifecycle of an | | | incoming queue that is used for messages received | | | from a set of target addresses. | | | | +-----------------+----------------------------------------------------+ |Collaborators | Connection | | | Pyngus.ReceiverLink | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created whenever a client application | | | subscribes a RPC or Notification server to the | | | messaging bus. When the client application closes | | | the transport, this component and its associated | | | links will be detached/closed. | | | | | | Individual receiver links are created over the | | | message bus connection for all the addresses | | | generated for the server target. | | | | | | All the receiver links share a single event | | | callback handler. | +-----------------+----------------------------------------------------+ |Issues | The credit per link is presently hard-coded. A | | | mechanism to monitor for a back-up of inbound | | | messages to back-pressure the sender is proposed. | +-----------------+----------------------------------------------------+ Replies ------- +-----------------+----------------------------------------------------+ |Component | *Replies* (pyngus.ReceiverEventHander) | +=================+====================================================+ |Responsibilities | Responsible for the operations and managing | | | the life-cycle of the receiver link for all RPC | | | reply messages. A single instance of an RPC reply | | | link is maintained for the driver. | +-----------------+----------------------------------------------------+ |Collaborators | Connection | | | Pyngus.ReceiverLink | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | The reply link is created when the connection to | | | the messaging bus is activated. | | | | | | The origination of RPC calls is inhibited until | | | the replies link is active. | | | | | | Message are routed to the originator's incoming | | | queue using the correlation-id header that is | | | contained in the response message. | +-----------------+----------------------------------------------------+ |Issues | | +-----------------+----------------------------------------------------+ ProtonDriver ------------ +-----------------+----------------------------------------------------+ |Component | *ProtonDriver* | +=================+====================================================+ |Responsibilities | Responsible for providing the oslo.Messaging | | | BaseDriver implementation. | | | | | | Provides the capabilities to send RPC and | | | Notification messages and create subscriptions for | | | the application. | | | | | | Each operation generates a task that is scheduled | | | for execution on the *Controller* eventloop | | | thread. | | | | | | The calling thread blocks until execution completes| | | or timeout. 
| +-----------------+----------------------------------------------------+ |Collaborators | | | | Controller | | | RPCCallTask | | | SendTask | | | SubscribeTask | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created whenever the oslo.messaging AMQP 1.0 | | | driver is loaded by an application (process). | | | | | | The component manages the life-cycle of the | | | *Controller* component. Tasks may be created but | | | will not be processed until the Controller | | | connection to the messaging service completes. | | | | | | There are separate timeout values for RPC Send, | | | Notify Send, and RPC Call Reply. | +-----------------+----------------------------------------------------+ |Issues | | | | The unmarshalling of an RPC response could cause | | | an exception/failure and should be optimally | | | communicated back up to the caller. | +-----------------+----------------------------------------------------+ ProtonIncomingMessage --------------------- +-----------------+----------------------------------------------------+ |Component | *ProtonIncomingMessage* | +=================+====================================================+ |Responsibilities | Responsible for managing the life-cycle of an | | | incoming message received on a RPC or notification | | | Server link. | | | | | | Provides the capability to set the disposition of | | | the incoming message as acknowledge (e.g. settled) | | | or requeue. | | | | | | Provides the capability to marshal and send the | | | reply to an RPC Call message. | | | | +-----------------+----------------------------------------------------+ |Collaborators | Controller | | | ProtonListener | | | MessageDispositionTask | | | SendTask | | | | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | A ProtonListener returns this component from the | | | poll of the incoming queue. | | | | | | The message reply_to and id fields of the incoming | | | message are used to generate the target for the | | | RPC reply message. | | | | | | The RPC reply and message disposition operations | | | are scheduled for execution on the Controller | | | eventoloop thread. The caller on the component is | | | blocked until task completion (or timeout). | +-----------------+----------------------------------------------------+ |Issues | The ProtonIncomingMessage is used for both RPC | | | and Notification Server instances. Conceptually, | | | a Notification Server should not schedule a reply | | | and a RPC Server should not schedule a message | | | requeue. Subclassing base.IncomingMessage for | | | Notifications and base.RpcIncomingMessage for RPC | | | could be a consideration. | +-----------------+----------------------------------------------------+ ProtonListener -------------- +-----------------+----------------------------------------------------+ |Component | *ProtonListener* | +=================+====================================================+ |Responsibilities | Responsible for providing the oslo.Messaging | | | base.PollStyleListener implementation. 
ProtonListener
--------------

+-----------------+----------------------------------------------------+
|Component        | *ProtonListener*                                   |
+=================+====================================================+
|Responsibilities | Responsible for providing the oslo.messaging       |
|                 | base.PollStyleListener implementation.             |
|                 |                                                    |
|                 | Provides the capabilities to manage the queue of   |
|                 | incoming messages received from the messaging      |
|                 | links.                                             |
|                 |                                                    |
|                 | Returns instances of ProtonIncomingMessage         |
|                 | to Servers.                                        |
+-----------------+----------------------------------------------------+
|Collaborators    | Queue                                              |
+-----------------+----------------------------------------------------+
|Notes            | The component is dynamically created and destroyed.|
|                 | An instance is created for each subscription       |
|                 | request (e.g. RPC or Notification Server).         |
|                 |                                                    |
|                 | The Controller maintains a map of Servers indexed  |
|                 | by each specific ProtonListener identifier         |
|                 | (target).                                          |
+-----------------+----------------------------------------------------+
|Issues           |                                                    |
+-----------------+----------------------------------------------------+

SubscribeTask
-------------

+-----------------+----------------------------------------------------+
|Component        | *SubscribeTask*                                    |
+=================+====================================================+
|Responsibilities | Responsible for orchestrating a subscription to a  |
|                 | given target.                                      |
|                 |                                                    |
|                 | Provides the capability to prepare and schedule    |
|                 | the subscription call on the Controller eventloop  |
|                 | thread.                                            |
+-----------------+----------------------------------------------------+
|Collaborators    | Controller                                         |
+-----------------+----------------------------------------------------+
|Notes            | The component is dynamically created and destroyed.|
|                 | It is created for each ProtonDriver subscription   |
|                 | request (e.g. listen or listen_for_notifications). |
|                 |                                                    |
|                 | The task is prepared and scheduled on the caller's |
|                 | thread. The subscribe operation is executed on the |
|                 | Controller's eventloop thread. The task completes  |
|                 | once the subscription has been established on the  |
|                 | message bus.                                       |
+-----------------+----------------------------------------------------+
|Issues           |                                                    |
+-----------------+----------------------------------------------------+
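
The task pattern shared by SubscribeTask and the send tasks below can be
sketched in a few lines; the names are illustrative and are not the
driver's actual classes::

    import queue
    import threading

    class Task(object):
        def __init__(self, operation):
            self._operation = operation  # runs on the eventloop thread
            self._done = threading.Event()
            self.result = None

        def execute(self):
            # Executed only by the eventloop thread.
            self.result = self._operation()
            self._done.set()

        def wait(self, timeout=None):
            # Blocks the caller's thread until the eventloop finishes.
            if not self._done.wait(timeout):
                raise RuntimeError('task did not complete in time')
            return self.result

    tasks = queue.Queue()

    def eventloop():
        while True:
            tasks.get().execute()

    threading.Thread(target=eventloop, daemon=True).start()

    task = Task(lambda: 'subscription established')  # stand-in operation
    tasks.put(task)              # prepared/scheduled on the caller's thread
    print(task.wait(timeout=5))  # caller blocks until completion
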
SendTask
--------

+-----------------+----------------------------------------------------+
|Component        | *SendTask*                                         |
+=================+====================================================+
|Responsibilities | Responsible for sending a message to a given       |
|                 | target.                                            |
|                 |                                                    |
|                 | Provides the capability to prepare and schedule    |
|                 | the send call on the Controller eventloop thread.  |
|                 |                                                    |
|                 | Provides the ability to be called by the Controller|
|                 | eventloop thread to indicate the settlement of the |
|                 | message (e.g. acknowledge or nack).                |
|                 |                                                    |
|                 | Provides the ability to be called by the Controller|
|                 | eventloop thread upon expiry of the send timeout   |
|                 | duration or general message delivery failure.      |
+-----------------+----------------------------------------------------+
|Collaborators    | Controller                                         |
+-----------------+----------------------------------------------------+
|Notes            | The component is dynamically created and destroyed.|
|                 | It is created for each ProtonDriver "RPC Cast" or  |
|                 | "Notify" send request. The component is destroyed  |
|                 | when the message transfer has reached a terminal   |
|                 | state (e.g. settled).                              |
|                 |                                                    |
|                 | The task is prepared and scheduled on the caller's |
|                 | thread. The send operation is executed on the      |
|                 | Controller's eventloop thread.                     |
|                 |                                                    |
|                 | All retry, timeout and acknowledge operations are  |
|                 | performed on the Controller eventloop thread and   |
|                 | indicated back to the caller thread.               |
+-----------------+----------------------------------------------------+
|Issues           |                                                    |
+-----------------+----------------------------------------------------+

RPCCallTask
-----------

+-----------------+----------------------------------------------------+
|Component        | *RPCCallTask*                                      |
+=================+====================================================+
|Responsibilities | Responsible for sending an RPC Call message to a   |
|                 | given target.                                      |
|                 |                                                    |
|                 | Provides all the capabilities derived from the     |
|                 | parent SendTask component.                         |
|                 |                                                    |
|                 | Provides the additional capability to prepare for  |
|                 | the RPC Call response message that will be returned|
|                 | on the sender's reply link.                        |
+-----------------+----------------------------------------------------+
|Collaborators    | Controller                                         |
|                 | Sender                                             |
+-----------------+----------------------------------------------------+
|Notes            | The component is dynamically created and destroyed.|
|                 | It is created for each ProtonDriver "RPC Call"     |
|                 | send request. It is destroyed once the RPC         |
|                 | exchange has reached its terminal state.           |
|                 |                                                    |
|                 | The task is prepared and scheduled on the caller's |
|                 | thread. The send operation is executed on the      |
|                 | Controller's eventloop thread.                     |
|                 |                                                    |
|                 | The Controller manages a single receiving link for |
|                 | all RPC reply messages. Messages are routed        |
|                 | using the correlation-id header in the response    |
|                 | message.                                           |
+-----------------+----------------------------------------------------+
|Issues           |                                                    |
+-----------------+----------------------------------------------------+

MessageDispositionTask
----------------------

+-----------------+----------------------------------------------------+
|Component        | *MessageDispositionTask*                           |
+=================+====================================================+
|Responsibilities | Responsible for updating the message disposition   |
|                 | for a ProtonIncomingMessage.                       |
|                 |                                                    |
|                 | Provides the ability to acknowledge or requeue the |
|                 | message according to application determination.    |
+-----------------+----------------------------------------------------+
|Collaborators    | Controller                                         |
|                 | ProtonIncomingMessage                              |
|                 | Server                                             |
+-----------------+----------------------------------------------------+
|Notes            | The component is dynamically created and destroyed.|
|                 | It is created by ProtonIncomingMessage settlement  |
|                 | calls (acknowledge or requeue). It is destroyed    |
|                 | once the disposition is updated in the Proton      |
|                 | protocol engine.                                   |
|                 |                                                    |
|                 | The task is prepared and scheduled on the caller's |
|                 | thread. The disposition operation is a function    |
|                 | closure over the target server, receiver link and  |
|                 | delivery handle for the message received in the    |
|                 | Server receiver callback. The closure is executed  |
|                 | on the Controller's eventloop thread.              |
|                 |                                                    |
|                 | The settlement of RPC responses is automatic and   |
|                 | not under application control.                     |
+-----------------+----------------------------------------------------+
|Issues           |                                                    |
+-----------------+----------------------------------------------------+
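
The function closure used by MessageDispositionTask can be illustrated as
follows; ``FakeReceiverLink`` merely stands in for a pyngus receiver link,
and every name here is invented for the example::

    import queue
    import threading

    class FakeReceiverLink(object):
        # Stand-in for a pyngus ReceiverLink; only the eventloop thread
        # may touch the real protocol engine.
        def message_accepted(self, handle):
            print('accepted delivery %s' % handle)

        def message_released(self, handle):
            print('released delivery %s' % handle)

    def make_disposition(link, handle, ack):
        # Built on the caller's thread: the closure captures the link
        # and the delivery handle...
        def _disposition():
            if ack:
                link.message_accepted(handle)
            else:
                link.message_released(handle)
        return _disposition

    eventloop_tasks = queue.Queue()
    threading.Thread(target=lambda: eventloop_tasks.get()()).start()

    # ...and is executed later on the eventloop thread.
    eventloop_tasks.put(make_disposition(FakeReceiverLink(), 1, ack=True))
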
Service and Operational Qualities
=================================

This section describes the primary service and operational qualities that
are relevant to the driver architecture and implementation. These
non-functional factors define the behavior of the driver implementation
(e.g. limits and capacities). These behaviors can generally be categorized
as being due to either a design-time (e.g. limit enforced by the
implementation) or a run-time (e.g. limit due to environment, resources,
etc.) constraint. The full details and measures for these qualities are
outside the scope of this document but should be included in any
performance and scalability analysis of the driver implementation.

+-------------+--------------------------------------------+------------+
| Quality     | Description                                | Limit      |
+-------------+--------------------------------------------+------------+
| Servers     | The number of RPC or Notify servers that   | Environment|
|             | the driver will concurrently subscribe to  |            |
|             | the messaging bus (e.g. Listeners)         |            |
+-------------+--------------------------------------------+------------+
| Subscription| The maximum rate at which servers can be   | Environment|
| Rate        | subscribed and attached to the message bus |            |
+-------------+--------------------------------------------+------------+
| Senders     | The number of unique Targets that can      | Environment|
|             | be concurrently defined for the destination|            |
|             | of RPC or Notify message transfer          |            |
+-------------+--------------------------------------------+------------+
| Pending     | The number of messages that the driver     | Environment|
| Sends       | will queue while waiting for link          |            |
|             | availability or flow credit                |            |
+-------------+--------------------------------------------+------------+
| Sends       | The number of concurrent unacked messages  | Environment|
| Outstanding | the driver will send                       |            |
+-------------+--------------------------------------------+------------+
| Server Link | The number of message credits an RPC or    | Design     |
| Credits     | Notification server will issue             |            |
+-------------+--------------------------------------------+------------+
| RPC Reply   | The number of RPC reply message credits    | Design     |
| Link Credits| the driver will issue                      |            |
+-------------+--------------------------------------------+------------+
| Message     | The rate at which the driver will transfer | Environment|
| Transfer    | requests to the message bus                |            |
| Rate        |                                            |            |
+-------------+--------------------------------------------+------------+
| Message     | The rate of transfer for the message       | Environment|
| Data        | body "payload"                             |            |
| Throughput  |                                            |            |
+-------------+--------------------------------------------+------------+
| Tasks       | The number of concurrent client requests   | Design     |
| Outstanding | that can be queued for driver thread       |            |
|             | processing                                 |            |
+-------------+--------------------------------------------+------------+
| Message     | The number of attempts the driver will     | Design     |
| Retries     | make to send a message                     |            |
+-------------+--------------------------------------------+------------+
| Transport   | The number of Transport Hosts that can     | Environment|
| Hosts       | be specified for connection management     |            |
|             | (e.g. selection and failover)              |            |
+-------------+--------------------------------------------+------------+
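
Several of the *Environment* limits above, in particular *Transport
Hosts*, are controlled through the transport URL rather than by the
driver itself. As an illustration (host names and credentials are made
up), multiple hosts may be listed in a single URL for selection and
failover::

    import oslo_messaging
    from oslo_config import cfg

    # Two messaging hosts in one transport URL; the driver selects among
    # them and fails over when the active connection is lost.
    transport = oslo_messaging.get_rpc_transport(
        cfg.CONF,
        url='amqp://user:pw@msg-host-1:5672,user:pw@msg-host-2:5672//')
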
References
==========

.. [1] https://blueprints.launchpad.net/oslo.messaging/+spec/amqp10-driver-implementation
.. [2] https://opendev.org/openstack/oslo-specs/src/branch/master/specs/juno/amqp10-driver-implementation.rst
.. [3] https://review.opendev.org/#/c/314603/
.. [4] https://github.com/kgiusti/pyngus
.. [5] https://github.com/apache/qpid-proton
.. [6] https://review.opendev.org/#/c/317285/
.. [7] https://opendev.org/openstack/oslo.messaging
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/amqpdriver.py0000664000175000017500000010105600000000000024135 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os import queue import threading import time import uuid import cachetools from oslo_concurrency import lockutils from oslo_utils import eventletutils from oslo_utils import timeutils import oslo_messaging from oslo_messaging._drivers import amqp as rpc_amqp from oslo_messaging._drivers import base from oslo_messaging._drivers import common as rpc_common from oslo_messaging import MessageDeliveryFailure __all__ = ['AMQPDriverBase'] LOG = logging.getLogger(__name__) # Minimum/Maximum sleep between a poll and ack/requeue # Maximum should be small enough to not get rejected ack, # minimum should be big enough to not burn the CPU. ACK_REQUEUE_EVERY_SECONDS_MIN = 0.001 ACK_REQUEUE_EVERY_SECONDS_MAX = 5.0 class QManager(object): """Queue Manager to build queue name for reply (and fanout) type. This class is used only when use_queue_manager is set to True in the config file. It relies on shared memory across processes (reading/writing data to /dev/shm/xyz) and on oslo_concurrency.lockutils to avoid assigning the same queue name twice (or more) to different processes. The original idea of this queue manager was to avoid random queue names, so on service restart, the previously created queues can be reused, avoiding deletion/creation of queues on the rabbitmq side (which costs a lot at scale). """ def __init__(self, hostname, processname): # We will use hostname and processname in queue names to help identify # them easily. # This is also ensuring consistency between service restarts. self.hostname = hostname self.processname = processname # This is where the counter is kept self.file_name = '/dev/shm/%s_%s_qmanager' % (self.hostname, # nosec self.processname) # We use the process group to restart the counter on service restart self.pg = os.getpgrp() def get(self): lock_name = 'oslo_read_shm_%s_%s' % (self.hostname, self.processname) @lockutils.synchronized(lock_name, external=True) def read_from_shm(): # Grab the counter from shm # This function is thread and process safe thanks to lockutils try: with open(self.file_name, 'r') as f: pg, c = f.readline().split(':') pg = int(pg) c = int(c) except (FileNotFoundError, ValueError): pg = self.pg c = 0 # Increment the counter if pg == self.pg: c += 1 else: # The process group changed, maybe the service restarted? 
# Start over the counter c = 1 # Write the new counter with open(self.file_name, 'w') as f: f.write(str(self.pg) + ':' + str(c)) return c c = read_from_shm() return self.hostname + ":" + self.processname + ":" + str(c) class MessageOperationsHandler(object): """Queue used by message operations to ensure that all tasks are serialized and run in the same thread, since underlying drivers like kombu are not thread safe. """ def __init__(self, name): self.name = "%s (%s)" % (name, hex(id(self))) self._tasks = queue.Queue() self._shutdown = eventletutils.Event() self._shutdown_thread = threading.Thread( target=self._process_in_background) self._shutdown_thread.daemon = True def stop(self): self._shutdown.set() def process_in_background(self): """Run all pending tasks queued by do() in a thread during the shutdown process. """ self._shutdown_thread.start() def _process_in_background(self): while not self._shutdown.is_set(): self.process() time.sleep(ACK_REQUEUE_EVERY_SECONDS_MIN) def process(self): "Run all pending tasks queued by do()." while True: try: task = self._tasks.get(block=False) except queue.Empty: break task() def do(self, task): "Put the task in the queue." self._tasks.put(task) class AMQPIncomingMessage(base.RpcIncomingMessage): def __init__(self, listener, ctxt, message, unique_id, msg_id, reply_q, client_timeout, obsolete_reply_queues, message_operations_handler): super(AMQPIncomingMessage, self).__init__(ctxt, message, msg_id) self.orig_msg_id = msg_id self.listener = listener self.unique_id = unique_id self.reply_q = reply_q self.client_timeout = client_timeout self._obsolete_reply_queues = obsolete_reply_queues self._message_operations_handler = message_operations_handler self.stopwatch = timeutils.StopWatch() self.stopwatch.start() def _send_reply(self, conn, reply=None, failure=None, ending=True): if not self._obsolete_reply_queues.reply_q_valid(self.reply_q, self.msg_id): return if failure: failure = rpc_common.serialize_remote_exception(failure) # NOTE(sileht): ending can be removed in N*, see Listener.wait() # for more detail. msg = {'result': reply, 'failure': failure, 'ending': ending, '_msg_id': self.msg_id} rpc_amqp._add_unique_id(msg) unique_id = msg[rpc_amqp.UNIQUE_ID] LOG.debug("sending reply msg_id: %(msg_id)s " "reply queue: %(reply_q)s " "time elapsed: %(elapsed)ss", { 'msg_id': self.msg_id, 'unique_id': unique_id, 'reply_q': self.reply_q, 'elapsed': self.stopwatch.elapsed()}) conn.direct_send(self.reply_q, rpc_common.serialize_msg(msg)) def reply(self, reply=None, failure=None): if not self.orig_msg_id: # NOTE(Alexei_987) not sending a reply if msg_id is empty # because no reply is expected by the caller side return # NOTE(sileht): return without holding a connection if possible if not self._obsolete_reply_queues.reply_q_valid(self.reply_q, self.msg_id): return # NOTE(sileht): we read the configuration value from the driver # to be able to backport this change to previous versions that # still have the qpid driver duration = self.listener.driver.missing_destination_retry_timeout timer = rpc_common.DecayingTimer(duration=duration) timer.start() while True: try: with self.listener.driver._get_connection( rpc_common.PURPOSE_SEND, ) as conn: self._send_reply(conn, reply, failure) return except oslo_messaging.MessageUndeliverable: # queue not found if timer.check_return() <= 0: self._obsolete_reply_queues.add(self.reply_q, self.msg_id) LOG.error( 'The reply %(msg_id)s failed to send after ' '%(duration)d seconds due to a missing queue ' '(%(reply_q)s). 
Abandoning...', { 'msg_id': self.msg_id, 'duration': duration, 'reply_q': self.reply_q}) return LOG.debug( 'The reply %(msg_id)s could not be sent due to a missing ' 'queue (%(reply_q)s). Retrying...', { 'msg_id': self.msg_id, 'reply_q': self.reply_q}) time.sleep(0.25) except rpc_amqp.AMQPDestinationNotFound as exc: # exchange not found/down if timer.check_return() <= 0: self._obsolete_reply_queues.add(self.reply_q, self.msg_id) LOG.error( 'The reply %(msg_id)s failed to send after ' '%(duration)d seconds due to a broker issue ' '(%(exc)s). Abandoning...', { 'msg_id': self.msg_id, 'duration': duration, 'exc': exc}) return LOG.debug( 'The reply %(msg_id)s could not be sent due to a broker ' 'issue (%(exc)s). Retrying...', { 'msg_id': self.msg_id, 'exc': exc}) time.sleep(0.25) def heartbeat(self): # generate a keep alive for RPC call monitoring with self.listener.driver._get_connection( rpc_common.PURPOSE_SEND, ) as conn: try: self._send_reply(conn, None, None, ending=False) except oslo_messaging.MessageUndeliverable: # internal exception that indicates queue gone - # broker unreachable. raise MessageDeliveryFailure( "Heartbeat send failed. Missing queue") except rpc_amqp.AMQPDestinationNotFound: # internal exception that indicates exchange gone - # broker unreachable. raise MessageDeliveryFailure( "Heartbeat send failed. Missing exchange") # NOTE(sileht): These have already been acked in the RpcListener IO # thread. We keep them as noops until all drivers do the same. def acknowledge(self): pass def requeue(self): pass class NotificationAMQPIncomingMessage(AMQPIncomingMessage): def acknowledge(self): def _do_ack(): try: self.message.acknowledge() except Exception as exc: # NOTE(kgiusti): this failure is likely due to a loss of the # connection to the broker. Not much we can do in this case, # especially considering the Notification has already been # dispatched. This *could* result in message duplication # (unacked msg is returned to the queue by the broker), but the # driver tries to catch that using the msg_id_cache. LOG.warning("Failed to acknowledge received message: %s", exc) self._message_operations_handler.do(_do_ack) self.listener.msg_id_cache.add(self.unique_id) def requeue(self): # NOTE(sileht): If the connection is lost between receiving the # message and requeueing it, this requeue call fails, but because # the message is not acknowledged and not added to the # msg_id_cache, the message will be reconsumed; the only difference # is that the message stays at the beginning of the queue instead # of moving to the end. def _do_requeue(): try: self.message.requeue() except Exception as exc: LOG.warning("Failed to requeue received message: %s", exc) self._message_operations_handler.do(_do_requeue) class ObsoleteReplyQueuesCache(object): """Cache of reply queue ids that no longer exist. NOTE(sileht): In case of a broker restart/failover a reply queue can be unreachable for a short period; IncomingMessage.send_reply will block for 60 seconds in this case, or until rabbit recovers. But if the reply queue is unreachable because the rpc client is really gone, we can have a ton of replies to send, each waiting 60 seconds. This leads to a starvation of connections in the pool: the rpc server takes too much time to send replies, and other rpc clients raise TimeoutError because they don't receive their replies in time. This object caches already known gone clients so we don't wait 60 seconds and hold a connection from the pool. 
Keeping the last 200 gone rpc clients for 1 minute is enough and doesn't hold too much memory. """ SIZE = 200 TTL = 60 def __init__(self): self._lock = threading.RLock() self._cache = cachetools.TTLCache(self.SIZE, self.TTL) def reply_q_valid(self, reply_q, msg_id): if reply_q in self._cache: self._no_reply_log(reply_q, msg_id) return False return True def add(self, reply_q, msg_id): with self._lock: self._cache.update({reply_q: msg_id}) self._no_reply_log(reply_q, msg_id) def _no_reply_log(self, reply_q, msg_id): LOG.warning("%(reply_queue)s doesn't exist, drop reply to " "%(msg_id)s", {'reply_queue': reply_q, "msg_id": msg_id}) class AMQPListener(base.PollStyleListener): use_cache = False def __init__(self, driver, conn): super(AMQPListener, self).__init__(driver.prefetch_size) self.driver = driver self.conn = conn self.msg_id_cache = rpc_amqp._MsgIdCache() self.incoming = [] self._shutdown = eventletutils.Event() self._shutoff = eventletutils.Event() self._obsolete_reply_queues = ObsoleteReplyQueuesCache() self._message_operations_handler = MessageOperationsHandler( "AMQPListener") self._current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN def __call__(self, message): ctxt = rpc_amqp.unpack_context(message) try: unique_id = self.msg_id_cache.check_duplicate_message(message) except rpc_common.DuplicateMessageError: LOG.exception("ignoring duplicate message %s", ctxt.msg_id) return if self.use_cache: self.msg_id_cache.add(unique_id) if ctxt.msg_id: LOG.debug("received message msg_id: %(msg_id)s reply to " "%(queue)s", {'queue': ctxt.reply_q, 'msg_id': ctxt.msg_id}) else: LOG.debug("received message with unique_id: %s", unique_id) self.incoming.append(self.message_cls( self, ctxt.to_dict(), message, unique_id, ctxt.msg_id, ctxt.reply_q, ctxt.client_timeout, self._obsolete_reply_queues, self._message_operations_handler)) @base.batch_poll_helper def poll(self, timeout=None): stopwatch = timeutils.StopWatch(duration=timeout).start() while not self._shutdown.is_set(): self._message_operations_handler.process() LOG.debug("Listener is running") if self.incoming: LOG.debug("Poll the incoming message with unique_id: %s", self.incoming[0].unique_id) return self.incoming.pop(0) left = stopwatch.leftover(return_none=True) if left is None: left = self._current_timeout if left <= 0: return None try: LOG.debug("AMQPListener connection consume") self.conn.consume(timeout=min(self._current_timeout, left)) except rpc_common.Timeout: LOG.debug("AMQPListener connection timeout") self._current_timeout = min(self._current_timeout * 2, ACK_REQUEUE_EVERY_SECONDS_MAX) else: self._current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN # NOTE(sileht): the listener is stopped, just process the remaining # messages and operations LOG.debug("Listener is stopped") self._message_operations_handler.process() if self.incoming: LOG.debug("Poll the incoming message with unique_id: %s", self.incoming[0].unique_id) return self.incoming.pop(0) self._shutoff.set() def stop(self): self._shutdown.set() self.conn.stop_consuming() self._shutoff.wait() # NOTE(sileht): Here, the listener is stopped, but some incoming # messages may still live on the server side, because the callback is # still running and messages are not yet acked/requeued. It's safe to # do the ack in another thread, since the polling thread is now # terminated. 
self._message_operations_handler.process_in_background() def cleanup(self): # NOTE(sileht): server executor is now stopped, we are sure that no # more incoming messages in live, we can acknowledge # remaining messages and stop the thread self._message_operations_handler.stop() # Closes listener connection self.conn.close() class RpcAMQPListener(AMQPListener): message_cls = AMQPIncomingMessage use_cache = True def __call__(self, message): # NOTE(kgiusti): In the original RPC implementation the RPC server # would acknowledge the request THEN process it. The goal of this was # to prevent duplication if the ack failed. Should the ack fail the # request would be discarded since the broker would not remove the # request from the queue since no ack was received. That would lead to # the request being redelivered at some point. However this approach # meant that the ack was issued from the dispatch thread, not the # consumer thread, which is bad since kombu is not thread safe. So a # change was made to schedule the ack to be sent on the consumer thread # - breaking the ability to catch ack errors before dispatching the # request. To fix this we do the actual ack here in the consumer # callback and avoid the upcall if the ack fails. See # https://bugs.launchpad.net/oslo.messaging/+bug/1695746 # for all the gory details... try: message.acknowledge() except Exception as exc: LOG.warning("Discarding RPC request due to failed acknowledge: %s", exc) else: # NOTE(kgiusti): be aware that even if the acknowledge call # succeeds there is no guarantee the broker actually gets the ACK # since acknowledge() simply writes the ACK to the socket (there is # no ACK confirmation coming back from the broker) super(RpcAMQPListener, self).__call__(message) class NotificationAMQPListener(AMQPListener): message_cls = NotificationAMQPIncomingMessage class ReplyWaiters(object): def __init__(self): self._queues = {} self._wrn_threshold = 10 def get(self, msg_id, timeout): watch = timeutils.StopWatch(duration=timeout) watch.start() while not watch.expired(): try: # NOTE(amorin) we can't use block=True # See lp-2035113 return self._queues[msg_id].get(block=False) except queue.Empty: time.sleep(0.5) raise oslo_messaging.MessagingTimeout( 'Timed out waiting for a reply ' 'to message ID %s' % msg_id) def put(self, msg_id, message_data): LOG.debug('Received RPC response for msg %s', msg_id) queue = self._queues.get(msg_id) if not queue: LOG.info('No calling threads waiting for msg_id : %s', msg_id) LOG.debug(' queues: %(queues)s, message: %(message)s', {'queues': len(self._queues), 'message': message_data}) else: queue.put(message_data) def add(self, msg_id): self._queues[msg_id] = queue.Queue() queues_length = len(self._queues) if queues_length > self._wrn_threshold: LOG.warning('Number of call queues is %(queues_length)s, ' 'greater than warning threshold: %(old_threshold)s. ' 'There could be a leak. 
Increasing threshold to: ' '%(threshold)s', {'queues_length': queues_length, 'old_threshold': self._wrn_threshold, 'threshold': self._wrn_threshold * 2}) self._wrn_threshold *= 2 def remove(self, msg_id): del self._queues[msg_id] class ReplyWaiter(object): def __init__(self, reply_q, conn, allowed_remote_exmods): self.conn = conn self.allowed_remote_exmods = allowed_remote_exmods self.msg_id_cache = rpc_amqp._MsgIdCache() self.waiters = ReplyWaiters() self.conn.declare_direct_consumer(reply_q, self) self._thread_exit_event = eventletutils.Event() self._thread = threading.Thread(target=self.poll) self._thread.daemon = True self._thread.start() def stop(self): if self._thread: self._thread_exit_event.set() self.conn.stop_consuming() self._thread.join() self._thread = None def poll(self): current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN while not self._thread_exit_event.is_set(): try: # ack every ACK_REQUEUE_EVERY_SECONDS_MAX seconds self.conn.consume(timeout=current_timeout) except rpc_common.Timeout: current_timeout = min(current_timeout * 2, ACK_REQUEUE_EVERY_SECONDS_MAX) except Exception: LOG.exception("Failed to process incoming message, retrying...") else: current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN def __call__(self, message): # NOTE(sileht): __call__ is running within the polling thread, # (conn.consume -> conn.conn.drain_events() -> __call__ callback) # it's threadsafe to acknowledge the message here, no need to wait # for the next polling message.acknowledge() incoming_msg_id = message.pop('_msg_id', None) if message.get('ending'): LOG.debug("received reply msg_id: %s", incoming_msg_id) self.waiters.put(incoming_msg_id, message) def listen(self, msg_id): self.waiters.add(msg_id) def unlisten(self, msg_id): self.waiters.remove(msg_id) @staticmethod def _raise_timeout_exception(msg_id, reply_q): raise oslo_messaging.MessagingTimeout( 'Timed out waiting for a reply %(reply_q)s ' 'to message ID %(msg_id)s.' % {'msg_id': msg_id, 'reply_q': reply_q}) def _process_reply(self, data): self.msg_id_cache.check_duplicate_message(data) if data['failure']: failure = data['failure'] result = rpc_common.deserialize_remote_exception( failure, self.allowed_remote_exmods) else: result = data.get('result', None) ending = data.get('ending', False) return result, ending def wait(self, msg_id, timeout, call_monitor_timeout, reply_q): # NOTE(sileht): for each msg_id we receive two amqp messages: the # first one with the payload, and a second one to ensure the other # side has finished sending the payload # NOTE(viktors): We are going to remove this behavior in the N # release, but we need to keep backward compatibility, so we should # support both cases for now. 
timer = rpc_common.DecayingTimer(duration=timeout) timer.start() if call_monitor_timeout: call_monitor_timer = rpc_common.DecayingTimer( duration=call_monitor_timeout) call_monitor_timer.start() else: call_monitor_timer = None final_reply = None ending = False while not ending: timeout = timer.check_return( self._raise_timeout_exception, msg_id, reply_q ) if call_monitor_timer and timeout > 0: cm_timeout = call_monitor_timer.check_return( self._raise_timeout_exception, msg_id, reply_q ) if cm_timeout < timeout: timeout = cm_timeout try: message = self.waiters.get(msg_id, timeout=timeout) except queue.Empty: self._raise_timeout_exception( msg_id, reply_q ) reply, ending = self._process_reply(message) if reply is not None: # NOTE(viktors): This can be either the first _send_reply() # with an empty `result` field or a second _send_reply() with # ending=True and no `result` field. final_reply = reply elif ending is False: LOG.debug('Call monitor heartbeat received; ' 'renewing timeout timer') call_monitor_timer.restart() return final_reply class AMQPDriverBase(base.BaseDriver): missing_destination_retry_timeout = 0 def __init__(self, conf, url, connection_pool, default_exchange=None, allowed_remote_exmods=None): super(AMQPDriverBase, self).__init__(conf, url, default_exchange, allowed_remote_exmods) self._default_exchange = default_exchange self._connection_pool = connection_pool self._reply_q_lock = threading.Lock() self._reply_q = None self._reply_q_conn = None self._waiter = None if conf.oslo_messaging_rabbit.use_queue_manager: self._q_manager = QManager( hostname=conf.oslo_messaging_rabbit.hostname, processname=conf.oslo_messaging_rabbit.processname) else: self._q_manager = None def _get_exchange(self, target): return target.exchange or self._default_exchange def _get_connection(self, purpose=rpc_common.PURPOSE_SEND, retry=None): return rpc_common.ConnectionContext(self._connection_pool, purpose=purpose, retry=retry) def _get_reply_q(self): with self._reply_q_lock: # NOTE(amorin) Re-use the reply_q when it already exists. # This avoids creating too many queues on the AMQP server (rabbit) if self._reply_q is not None: return self._reply_q if self._q_manager: reply_q = 'reply_' + self._q_manager.get() else: reply_q = 'reply_' + uuid.uuid4().hex LOG.debug('Creating reply queue: %s', reply_q) conn = self._get_connection(rpc_common.PURPOSE_LISTEN) self._waiter = ReplyWaiter(reply_q, conn, self._allowed_remote_exmods) self._reply_q = reply_q self._reply_q_conn = conn return self._reply_q def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None, call_monitor_timeout=None, envelope=True, notify=False, retry=None, transport_options=None): msg = message reply_q = None if 'method' in msg: LOG.debug('Calling RPC method %s on target %s', msg.get('method'), target.topic) else: LOG.debug('Sending message to topic %s', target.topic) if wait_for_reply: reply_q = self._get_reply_q() msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) msg.update({'_reply_q': reply_q}) msg.update({'_timeout': call_monitor_timeout}) LOG.debug('Expecting reply to msg %s in queue %s', msg_id, reply_q) rpc_amqp._add_unique_id(msg) unique_id = msg[rpc_amqp.UNIQUE_ID] rpc_amqp.pack_context(msg, ctxt) if envelope: msg = rpc_common.serialize_msg(msg) if wait_for_reply: self._waiter.listen(msg_id) log_msg = "CALL msg_id: %s " % msg_id else: log_msg = "CAST unique_id: %s " % unique_id try: with self._get_connection(rpc_common.PURPOSE_SEND, retry) as conn: if notify: exchange = self._get_exchange(target) LOG.debug(log_msg + 
"NOTIFY exchange '%(exchange)s'" " topic '%(topic)s'", {'exchange': exchange, 'topic': target.topic}) conn.notify_send(exchange, target.topic, msg, retry=retry) elif target.fanout: log_msg += "FANOUT topic '%(topic)s'" % { 'topic': target.topic} LOG.debug(log_msg) conn.fanout_send(target.topic, msg, retry=retry) else: topic = target.topic exchange = self._get_exchange(target) if target.server: topic = '%s.%s' % (target.topic, target.server) LOG.debug(log_msg + "exchange '%(exchange)s'" " topic '%(topic)s'", {'exchange': exchange, 'topic': topic}) conn.topic_send(exchange_name=exchange, topic=topic, msg=msg, timeout=timeout, retry=retry, transport_options=transport_options) if wait_for_reply: result = self._waiter.wait(msg_id, timeout, call_monitor_timeout, reply_q) if isinstance(result, Exception): raise result return result finally: if wait_for_reply: self._waiter.unlisten(msg_id) def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, call_monitor_timeout=None, retry=None, transport_options=None): return self._send(target, ctxt, message, wait_for_reply, timeout, call_monitor_timeout, retry=retry, transport_options=transport_options) def send_notification(self, target, ctxt, message, version, retry=None): return self._send(target, ctxt, message, envelope=(version == 2.0), notify=True, retry=retry) def listen(self, target, batch_size, batch_timeout): conn = self._get_connection(rpc_common.PURPOSE_LISTEN) listener = RpcAMQPListener(self, conn) conn.declare_topic_consumer(exchange_name=self._get_exchange(target), topic=target.topic, callback=listener) conn.declare_topic_consumer(exchange_name=self._get_exchange(target), topic='%s.%s' % (target.topic, target.server), callback=listener) conn.declare_fanout_consumer(target.topic, listener) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): conn = self._get_connection(rpc_common.PURPOSE_LISTEN) listener = NotificationAMQPListener(self, conn) for target, priority in targets_and_priorities: conn.declare_topic_consumer( exchange_name=self._get_exchange(target), topic='%s.%s' % (target.topic, priority), callback=listener, queue_name=pool) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def cleanup(self): if self._connection_pool: self._connection_pool.empty() self._connection_pool = None with self._reply_q_lock: if self._reply_q is not None: self._waiter.stop() self._reply_q_conn.close() self._reply_q_conn = None self._reply_q = None self._waiter = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/base.py0000664000175000017500000006243100000000000022700 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import threading import uuid from oslo_config import cfg from oslo_utils import excutils from oslo_utils import timeutils from oslo_messaging import exceptions base_opts = [ cfg.IntOpt('rpc_conn_pool_size', default=30, deprecated_group='DEFAULT', help='Size of RPC connection pool.', min=1), cfg.IntOpt('conn_pool_min_size', default=2, help='The pool size limit for connections expiration policy'), cfg.IntOpt('conn_pool_ttl', default=1200, help='The time-to-live in sec of idle connections in the pool') ] def batch_poll_helper(func): """Decorator to poll messages in batch This decorator is used to add message batching support to a :py:meth:`PollStyleListener.poll` implementation that only polls for a single message per call. """ def wrapper(in_self, timeout=None, batch_size=1, batch_timeout=None): incomings = [] driver_prefetch = in_self.prefetch_size if driver_prefetch > 0: batch_size = min(batch_size, driver_prefetch) timeout = batch_timeout or timeout with timeutils.StopWatch(timeout) as watch: while True: message = func(in_self, timeout=watch.leftover(True)) if message is not None: incomings.append(message) if len(incomings) == batch_size or message is None: break return incomings return wrapper class TransportDriverError(exceptions.MessagingException): """Base class for transport driver specific exceptions.""" class IncomingMessage(object, metaclass=abc.ABCMeta): """The IncomingMessage class represents a single message received from the messaging backend. Instances of this class are passed up to a server's message processing logic. The backend driver must provide a concrete derivation of this class which provides the backend specific logic for its public methods. :param ctxt: Context metadata provided by sending application. :type ctxt: dict :param message: The message as provided by the sending application. :type message: dict """ def __init__(self, ctxt, message, msg_id=None): self.ctxt = ctxt self.message = message self.client_timeout = None if msg_id is None: self.msg_id = str(uuid.uuid4()) else: self.msg_id = msg_id def acknowledge(self): """Called by the server to acknowledge receipt of the message. When this is called the driver must notify the backend of the acknowledgment. This call should block at least until the driver has processed the acknowledgment request locally. It may unblock before the acknowledgment state has been acted upon by the backend. If the acknowledge operation fails this method must issue a log message describing the reason for the failure. :raises: Does not raise an exception """ @abc.abstractmethod def requeue(self): """Called by the server to return the message to the backend so it may be made available for consumption by another server. This call should block at least until the driver has processed the requeue request locally. It may unblock before the backend makes the requeued message available for consumption. If the requeue operation fails this method must issue a log message describing the reason for the failure. Support for this method is _optional_. The :py:meth:`BaseDriver.require_features` method should indicate whether or not support for requeue is available. :raises: Does not raise an exception """ class RpcIncomingMessage(IncomingMessage, metaclass=abc.ABCMeta): """The RpcIncomingMessage represents an RPC request message received from the backend. This class must be used for RPC calls that return a value to the caller. 
""" @abc.abstractmethod def reply(self, reply=None, failure=None): """Called by the server to send an RPC reply message or an exception back to the calling client. If an exception is passed via *failure* the driver must convert it to a form that can be sent as a message and properly converted back to the exception at the remote. The driver must provide a way to determine the destination address for the reply. For example the driver may use the *reply-to* field from the corresponding incoming message. Often a driver will also need to set a correlation identifier in the reply to help the remote route the reply to the correct RPCClient. The driver should provide an *at-most-once* delivery guarantee for reply messages. This call should block at least until the reply message has been handed off to the backend - there is no need to confirm that the reply has been delivered. If the reply operation fails this method must issue a log message describing the reason for the failure. See :py:meth:`BaseDriver.send` for details regarding how the received reply is processed. :param reply: reply message body :type reply: dict :param failure: an exception thrown by the RPC call :type failure: Exception :raises: Does not raise an exception """ @abc.abstractmethod def heartbeat(self): """Called by the server to send an RPC heartbeat message back to the calling client. If the client (is new enough to have) passed its timeout value during the RPC call, this method will be called periodically by the server to update the client's timeout timer while a long-running call is executing. :raises: Does not raise an exception """ class PollStyleListener(object, metaclass=abc.ABCMeta): """A PollStyleListener is used to transfer received messages to a server for processing. A polling pattern is used to retrieve messages. A PollStyleListener uses a separate thread to run the polling loop. A :py:class:`PollStyleListenerAdapter` can be used to create a :py:class:`Listener` from a PollStyleListener. :param prefetch_size: The number of messages that should be pulled from the backend per receive transaction. May not be honored by all backend implementations. :type prefetch_size: int """ def __init__(self, prefetch_size=-1): self.prefetch_size = prefetch_size @abc.abstractmethod def poll(self, timeout=None, batch_size=1, batch_timeout=None): """poll is called by the server to retrieve incoming messages. It blocks until 'batch_size' incoming messages are available, a timeout occurs, or the poll is interrupted by a call to the :py:meth:`stop` method. If 'batch_size' is > 1 poll must block until 'batch_size' messages are available or at least one message is available and batch_timeout expires :param timeout: Block up to 'timeout' seconds waiting for a message :type timeout: float :param batch_size: Block until this number of messages are received. :type batch_size: int :param batch_timeout: Time to wait in seconds for a full batch to arrive. A timer is started when the first message in a batch is received. If a full batch's worth of messages is not received when the timer expires then :py:meth:`poll` returns all messages received thus far. :type batch_timeout: float :raises: Does not raise an exception. :return: A list of up to batch_size IncomingMessage objects. """ def stop(self): """Stop the listener from polling for messages. This method must cause the :py:meth:`poll` call to unblock and return whatever messages are currently available. This method is called from a different thread than the poller so it must be thread-safe. 
""" pass def cleanup(self): """Cleanup all resources held by the listener. This method should block until the cleanup is completed. """ pass class Listener(object, metaclass=abc.ABCMeta): """A Listener is used to transfer incoming messages from the driver to a server for processing. A callback is used by the driver to transfer the messages. :param batch_size: desired number of messages passed to single on_incoming_callback notification :type batch_size: int :param batch_timeout: defines how long should we wait in seconds for batch_size messages if we already have some messages waiting for processing :type batch_timeout: float :param prefetch_size: defines how many messages we want to prefetch from the messaging backend in a single request. May not be honored by all backend implementations. :type prefetch_size: int """ def __init__(self, batch_size, batch_timeout, prefetch_size=-1): self.on_incoming_callback = None self.batch_timeout = batch_timeout self.prefetch_size = prefetch_size if prefetch_size > 0: batch_size = min(batch_size, prefetch_size) self.batch_size = batch_size def start(self, on_incoming_callback): """Start receiving messages. This should cause the driver to start receiving messages from the backend. When message(s) arrive the driver must invoke 'on_incoming_callback' passing it the received messages as a list of IncomingMessages. :param on_incoming_callback: callback function to be executed when listener receives messages. :type on_incoming_callback: func """ self.on_incoming_callback = on_incoming_callback def stop(self): """Stop receiving messages. The driver must no longer invoke the callback. """ self.on_incoming_callback = None @abc.abstractmethod def cleanup(self): """Cleanup all resources held by the listener. This method should block until the cleanup is completed. """ class PollStyleListenerAdapter(Listener): """A Listener that uses a PollStyleListener for message transfer. A dedicated thread is created to do message polling. """ def __init__(self, poll_style_listener, batch_size, batch_timeout): super(PollStyleListenerAdapter, self).__init__( batch_size, batch_timeout, poll_style_listener.prefetch_size ) self._poll_style_listener = poll_style_listener self._listen_thread = threading.Thread(target=self._runner) self._listen_thread.daemon = True self._started = False def start(self, on_incoming_callback): super(PollStyleListenerAdapter, self).start(on_incoming_callback) self._started = True self._listen_thread.start() @excutils.forever_retry_uncaught_exceptions def _runner(self): while self._started: incoming = self._poll_style_listener.poll( batch_size=self.batch_size, batch_timeout=self.batch_timeout) if incoming: self.on_incoming_callback(incoming) # listener is stopped but we need to process all already consumed # messages while True: incoming = self._poll_style_listener.poll( batch_size=self.batch_size, batch_timeout=self.batch_timeout) if not incoming: return self.on_incoming_callback(incoming) def stop(self): self._started = False self._poll_style_listener.stop() self._listen_thread.join() super(PollStyleListenerAdapter, self).stop() def cleanup(self): self._poll_style_listener.cleanup() class BaseDriver(object, metaclass=abc.ABCMeta): """Defines the backend driver interface. Each backend driver implementation must provide a concrete derivation of this class implementing the backend specific logic for its public methods. :param conf: The configuration settings provided by the user. 
:type conf: ConfigOpts :param url: The network address of the messaging backend(s). :type url: TransportURL :param default_exchange: The exchange to use if no exchange is specified in a Target. :type default_exchange: str :param allowed_remote_exmods: whitelist of those exception modules which are permitted to be re-raised if an exception is returned in response to an RPC call. :type allowed_remote_exmods: list """ prefetch_size = 0 def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): self.conf = conf self._url = url self._default_exchange = default_exchange self._allowed_remote_exmods = allowed_remote_exmods or [] def require_features(self, requeue=False): """The driver must raise a 'NotImplementedError' if any of the feature flags passed as True are not supported. """ if requeue: raise NotImplementedError('Message requeueing not supported by ' 'this transport driver') @abc.abstractmethod def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, call_monitor_timeout=None, retry=None, transport_options=None): """Send a message to the given target and optionally wait for a reply. This method is used by the RPC client when sending RPC requests to a server. The driver must use the *topic*, *exchange*, and *server* (if present) attributes of the *target* to construct the backend-native message address. The message address must match the format used by subscription(s) created by the :py:meth:`BaseDriver.listen` method. If the *target's* *fanout* attribute is set, a copy of the message must be sent to all subscriptions using the *exchange* and *topic* values. If *fanout* is not set, then only one subscriber should receive the message. In the case of multiple subscribers to the same address, only one copy of the message is delivered. In this case the driver should implement a delivery pattern that distributes messages in a balanced fashion across the multiple subscribers. This method must block the caller until one of the following events occur: * the send operation completes successfully * *timeout* seconds elapse (if specified) * *retry* count is reached (if specified) The *wait_for_reply* parameter determines whether or not the caller expects a response to the RPC request. If True, this method must block until a response message is received. This method then returns the response message to the caller. The driver must implement a mechanism for routing incoming responses back to their corresponding send request. How this is done may vary based on the type of messaging backend, but typically it involves having the driver create an internal subscription for reply messages and setting the request message's *reply-to* header to the subscription address. The driver may also need to supply a correlation identifier for mapping the response back to the sender. See :py:meth:`RpcIncomingMessage.reply` If *wait_for_reply* is False this method will block until the message has been handed off to the backend - there is no need to confirm that the message has been delivered. Once the handoff completes this method returns. The driver may attempt to retry sending the message should a recoverable error occur that prevents the message from being passed to the backend. The *retry* parameter specifies how many attempts to re-send the message the driver may make before raising a :py:exc:`MessageDeliveryFailure` exception. A value of None or -1 means unlimited retries. 0 means no retry is attempted. N means attempt at most N retries before failing. 
**Note well:** the driver MUST guarantee that the message is not duplicated by the retry process. :param target: The message's destination address :type target: Target :param ctxt: Context metadata provided by sending application which is transferred along with the message. :type ctxt: dict :param message: message provided by the caller :type message: dict :param wait_for_reply: If True block until a reply message is received. :type wait_for_reply: bool :param timeout: Maximum time in seconds to block waiting for the send operation to complete. Should this expire the :py:meth:`send` must raise a :py:exc:`MessagingTimeout` exception :type timeout: float :param call_monitor_timeout: Maximum time the client will wait for the call to complete or receive a message heartbeat indicating the remote side is still executing. :type call_monitor_timeout: float :param retry: maximum message send attempts permitted :type retry: int :param transport_options: additional parameters to configure the driver for example to send parameters as "mandatory" flag in RabbitMQ :type transport_options: dictionary :returns: A reply message or None if no reply expected :raises: :py:exc:`MessagingException`, any exception thrown by the remote server when executing the RPC call. """ @abc.abstractmethod def send_notification(self, target, ctxt, message, version, retry): """Send a notification message to the given target. This method is used by the Notifier to send notification messages to a Listener. Notifications use a *store and forward* delivery pattern. The driver must allow for delivery in the case where the intended recipient is not present at the time the notification is published. Typically this requires a messaging backend that has the ability to store messages until a consumer is present. Therefore this method must block at least until the backend accepts ownership of the message. This method does not guarantee that the message has or will be processed by the intended recipient. The driver must use the *topic* and *exchange* attributes of the *target* to construct the backend-native message address. The message address must match the format used by subscription(s) created by the :py:meth:`BaseDriver.listen_for_notifications` method. Only one copy of the message is delivered in the case of multiple subscribers to the same address. In this case the driver should implement a delivery pattern that distributes messages in a balanced fashion across the multiple subscribers. There is an exception to the single delivery semantics described above: the *pool* parameter to the :py:meth:`BaseDriver.listen_for_notifications` method may be used to set up shared subscriptions. See :py:meth:`BaseDriver.listen_for_notifications` for details. This method must also honor the *retry* parameter. See :py:meth:`BaseDriver.send` for details regarding implementing the *retry* process. *version* indicates whether or not the message should be encapsulated in an envelope. A value < 2.0 should not envelope the message. See :py:func:`common.serialize_msg` for more detail. :param target: The message's destination address :type target: Target :param ctxt: Context metadata provided by sending application which is transferred along with the message. 
:type ctxt: dict :param message: message provided by the caller :type message: dict :param version: determines the envelope for the message :type version: float :param retry: maximum message send attempts permitted :type retry: int :returns: None :raises: :py:exc:`MessagingException` """ @abc.abstractmethod def listen(self, target, batch_size, batch_timeout): """Construct a listener for the given target. The listener may be either a :py:class:`Listener` or :py:class:`PollStyleListener` depending on the driver's preference. This method is used by the RPC server. The driver must create subscriptions to the address provided in *target*. These subscriptions must then be associated with a :py:class:`Listener` or :py:class:`PollStyleListener` which is returned by this method. See :py:meth:`BaseDriver.send` for more detail regarding message addressing. The driver must support receiving messages sent to the following addresses derived from the values in *target*: * all messages sent to the exchange and topic given in the target. This includes messages sent using a fanout pattern. * if the server attribute of the target is set then the driver must also subscribe to messages sent to the exchange, topic, and server For example, given a target with exchange 'my-exchange', topic 'my-topic', and server 'my-server', the driver would create subscriptions for: * all messages sent to my-exchange and my-topic (including fanout) * all messages sent to my-exchange, my-topic, and my-server The driver must pass messages arriving from these subscriptions to the listener. For :py:class:`PollStyleListener` the driver should trigger the :py:meth:`PollStyleListener.poll` method to unblock and return the incoming messages. For :py:class:`Listener` the driver should invoke the callback with the incoming messages. This method only blocks long enough to establish the subscription(s) and construct the listener. In the case of failover, the driver must restore the subscription(s). Subscriptions should remain active until the listener is stopped. :param target: The address(es) to subscribe to. :type target: Target :param batch_size: passed to the listener :type batch_size: int :param batch_timeout: passed to the listener :type batch_timeout: float :returns: None :raises: :py:exc:`MessagingException` """ @abc.abstractmethod def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): """Construct a notification listener for the given list of tuples of (target, priority) addresses. The driver must create a subscription for each (*target*, *priority*) pair. The topic for the subscription is created for each pair using the format `"%s.%s" % (target.topic, priority)`. This format is used by the caller of the :py:meth:`BaseDriver.send_notification` when setting the topic member of the target parameter. Only the *exchange* and *topic* must be considered when creating subscriptions. *server* and *fanout* must be ignored. The *pool* parameter, if specified, should cause the driver to create a subscription that is shared with other subscribers using the same pool identifier. Each pool gets a single copy of the message. For example if there is a subscriber pool with identifier **foo** and another pool **bar**, then one **foo** subscriber and one **bar** subscriber will each receive a copy of the message. The driver should implement a delivery pattern that distributes messages in a balanced fashion across the subscribers in a pool. 
The driver must raise a :py:exc:`NotImplementedError` if pooling is not supported and a pool identifier is passed in. Refer to the description of :py:meth:`BaseDriver.send_notification` for further details regarding implementation. :param targets_and_priorities: List of (target, priority) pairs :type targets_and_priorities: list :param pool: pool identifier :type pool: str :param batch_size: passed to the listener :type batch_size: int :param batch_timeout: passed to the listener :type batch_timeout: float :returns: the constructed :py:class:`Listener` or :py:class:`PollStyleListener` :raises: :py:exc:`MessagingException`, :py:exc:`NotImplementedError` """ @abc.abstractmethod def cleanup(self): """Release all resources used by the driver. This method must block until the cleanup is complete. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/common.py0000664000175000017500000004635500000000000023255 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections.abc import Mapping import copy import logging import sys import traceback from oslo_serialization import jsonutils from oslo_utils import timeutils import oslo_messaging from oslo_messaging import _utils as utils LOG = logging.getLogger(__name__) _EXCEPTIONS_MODULE = 'builtins' _EXCEPTIONS_MODULES = ['exceptions', 'builtins'] '''RPC Envelope Version. This version number applies to the top level structure of messages sent out. It does *not* apply to the message payload, which must be versioned independently. For example, when using rpc APIs, a version number is applied for changes to the API being exposed over rpc. This version number is handled in the rpc proxy and dispatcher modules. This version number applies to the message envelope that is used in the serialization done inside the rpc layer. See serialize_msg() and deserialize_msg(). The current message format (version 2.0) is very simple. It is: { 'oslo.version': <RPC envelope version as a string>, 'oslo.message': <JSON encoded application message payload> } Message format version '1.0' is just considered to be the messages we sent without a message envelope. So, the current message envelope just includes the envelope version. It may eventually contain additional information, such as a signature for the message payload. We will JSON encode the application message payload. The message envelope, which includes the JSON encoded application message body, will be passed down to the messaging libraries as a dict. ''' _RPC_ENVELOPE_VERSION = '2.0' _VERSION_KEY = 'oslo.version' _MESSAGE_KEY = 'oslo.message' _REMOTE_POSTFIX = '_Remote' class RPCException(Exception): msg_fmt = "An unknown RPC related exception occurred."
def __init__(self, message=None, **kwargs): self.kwargs = kwargs if not message: try: message = self.msg_fmt % kwargs except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception('Exception in string format operation, ' 'kwargs are:') for name, value in kwargs.items(): LOG.error("%s: %s", name, value) # at least get the core message out if something happened message = self.msg_fmt super(RPCException, self).__init__(message) class Timeout(RPCException): """Signifies that a timeout has occurred. This exception is raised if the rpc_response_timeout is reached while waiting for a response from the remote side. """ msg_fmt = ('Timeout while waiting on RPC response - ' 'topic: "%(topic)s", RPC method: "%(method)s" ' 'info: "%(info)s"') def __init__(self, info=None, topic=None, method=None): """Initializes a Timeout object. :param info: Extra info to convey to the user :param topic: The topic that the rpc call was sent to :param method: The name of the rpc method being called """ self.info = info self.topic = topic self.method = method super(Timeout, self).__init__( None, info=info or '', topic=topic or '', method=method or '') class DuplicateMessageError(RPCException): msg_fmt = "Found duplicate message(%(msg_id)s). Skipping it." class InvalidRPCConnectionReuse(RPCException): msg_fmt = "Invalid reuse of an RPC connection." class UnsupportedRpcVersion(RPCException): msg_fmt = ("Specified RPC version, %(version)s, not supported by " "this endpoint.") class UnsupportedRpcEnvelopeVersion(RPCException): msg_fmt = ("Specified RPC envelope version, %(version)s, " "not supported by this endpoint.") class RpcVersionCapError(RPCException): msg_fmt = "Specified RPC version cap, %(version_cap)s, is too low" class Connection(object): """A connection, returned by rpc.create_connection(). This class represents a connection to the message bus used for rpc. An instance of this class should never be created by users of the rpc API. Use rpc.create_connection() instead. """ def close(self): """Close the connection. This method must be called when the connection will no longer be used. It will ensure that any resources associated with the connection, such as a network connection, are cleaned up. """ raise NotImplementedError() def serialize_remote_exception(failure_info): """Prepares exception data to be sent over rpc. Failure_info should be a sys.exc_info() tuple. """ tb = traceback.format_exception(*failure_info) failure = failure_info[1] kwargs = {} if hasattr(failure, 'kwargs'): kwargs = failure.kwargs # NOTE(matiu): With cells, it's possible to re-raise remote, remote # exceptions. Let's turn it back into the original exception type. cls_name = str(failure.__class__.__name__) mod_name = str(failure.__class__.__module__) if (cls_name.endswith(_REMOTE_POSTFIX) and mod_name.endswith(_REMOTE_POSTFIX)): cls_name = cls_name[:-len(_REMOTE_POSTFIX)] mod_name = mod_name[:-len(_REMOTE_POSTFIX)] data = { 'class': cls_name, 'module': mod_name, 'message': str(failure), 'tb': tb, 'args': failure.args, 'kwargs': kwargs } json_data = jsonutils.dumps(data) return json_data def deserialize_remote_exception(data, allowed_remote_exmods): failure = jsonutils.loads(str(data)) trace = failure.get('tb', []) message = failure.get('message', "") + "\n" + "\n".join(trace) name = failure.get('class') module = failure.get('module') # the remote service which raised the given exception might have a # different python version than the caller.
For example, the caller might # run python 2.7, while the remote service might run python 3.4. Thus, # the exception module will be "builtins" instead of "exceptions". if module in _EXCEPTIONS_MODULES: module = _EXCEPTIONS_MODULE # NOTE(ameade): We DO NOT want to allow just any module to be imported, in # order to prevent arbitrary code execution. if module != _EXCEPTIONS_MODULE and module not in allowed_remote_exmods: return oslo_messaging.RemoteError(name, failure.get('message'), trace) try: __import__(module) mod = sys.modules[module] klass = getattr(mod, name) if not issubclass(klass, Exception): raise TypeError("Can only deserialize Exceptions") failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) except (AttributeError, TypeError, ImportError) as error: LOG.warning("Failed to rebuild remote exception due to error: %s", str(error)) return oslo_messaging.RemoteError(name, failure.get('message'), trace) ex_type = type(failure) str_override = lambda self: message new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,), {'__str__': str_override, '__unicode__': str_override}) new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX) try: # NOTE(ameade): Dynamically create a new exception type and swap it in # as the new type for the exception. This only works on user defined # Exceptions and not core Python exceptions. This is important because # we cannot necessarily change an exception message so we must override # the __str__ method. failure.__class__ = new_ex_type except TypeError: # NOTE(ameade): If a core exception then just add the traceback to the # first exception argument. failure.args = (message,) + failure.args[1:] return failure class CommonRpcContext(object): def __init__(self, **kwargs): self.values = kwargs def __getattr__(self, key): try: return self.values[key] except KeyError: raise AttributeError(key) def to_dict(self): return copy.deepcopy(self.values) @classmethod def from_dict(cls, values): return cls(**values) def deepcopy(self): return self.from_dict(self.to_dict()) def update_store(self): # local.store.context = self pass class ClientException(Exception): """Encapsulates actual exception expected to be hit by a RPC proxy object. Merely instantiating it records the current exception information, which will be passed back to the RPC client without exceptional logging. """ def __init__(self): self._exc_info = sys.exc_info() def serialize_msg(raw_msg): # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more # information about this format. msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, _MESSAGE_KEY: jsonutils.dumps(raw_msg)} return msg def deserialize_msg(msg): # NOTE(russellb): Hang on to your hats, this road is about to # get a little bumpy. # # Robustness Principle: # "Be strict in what you send, liberal in what you accept." # # At this point we have to do a bit of guessing about what it # is we just received. Here is the set of possibilities: # # 1) We received a dict. This could be 2 things: # # a) Inspect it to see if it looks like a standard message envelope. # If so, great! # # b) If it doesn't look like a standard message envelope, it could either # be a notification, or a message from before we added a message # envelope (referred to as version 1.0). # Just return the message as-is. # # 2) It's any other non-dict type. Just return it and hope for the best. # This case covers return values from rpc.call() from before message # envelopes were used. 
(messages to call a method were always a dict) if not isinstance(msg, dict): # See #2 above. return msg base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY) if not all(map(lambda key: key in msg, base_envelope_keys)): # See #1.b above. return msg # At this point we think we have the message envelope # format we were expecting. (#1.a above) if not utils.version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]): raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY]) raw_msg = jsonutils.loads(msg[_MESSAGE_KEY]) return raw_msg class DecayingTimer(object): def __init__(self, duration=None): self._watch = timeutils.StopWatch(duration=duration) def start(self): self._watch.start() def restart(self): self._watch.restart() def check_return(self, timeout_callback=None, *args, **kwargs): maximum = kwargs.pop('maximum', None) left = self._watch.leftover(return_none=True) if left is None: return maximum if left <= 0 and timeout_callback is not None: timeout_callback(*args, **kwargs) return left if maximum is None else min(left, maximum) # NOTE(sileht): Even if rabbit has only one Connection class, # this connection can be used for two purposes: # * wait for and receive amqp messages (only does reads on the socket) # * send messages to the broker (only does writes on the socket) # The code inside a connection class is not concurrency safe. # Using one Connection class instance for both will result # in eventlet complaining about multiple greenthreads that read/write # the same fd concurrently... because 'send' and 'listen' run in # different greenthreads. # So, a connection cannot be shared between threads/greenthreads, and # these two variables define the purpose of the connection, # allowing drivers to add special handling if needed (like heartbeat). # amqp drivers create 3 kinds of connections: # * driver.listen*(): each call creates a new 'PURPOSE_LISTEN' connection # * driver.send*(): a pool of 'PURPOSE_SEND' connections is used # * the driver internally has another 'PURPOSE_LISTEN' connection # dedicated to waiting for replies to rpc calls PURPOSE_LISTEN = 'listen' PURPOSE_SEND = 'send' class ConnectionContext(Connection): """The class that is actually returned to the create_connection() caller. This is essentially a wrapper around Connection that supports 'with'. It can also return a new Connection, or one from a pool. The class will also catch when an instance of this class is to be deleted. With that we can return Connections to the pool on exceptions and so forth without making the caller be responsible for catching them. If possible it makes sure to return the connection to the pool. """ def __init__(self, connection_pool, purpose, retry): """Create a new connection, or get one from the pool.""" self.connection = None self.connection_pool = connection_pool # NOTE(sileht): Even if rabbit only has one Connection class this # connection can be used for only one of two purposes: # # * receiving messages from the broker (read actions on the socket) # * sending messages to the broker (write actions on the socket) # # Using one Connection class instance for both purposes will result in # eventlet complaining about multiple greenthreads that read/write the # same fd concurrently. This is because 'send' and 'listen' run in # different greenthreads and the code inside a connection class is not # concurrency safe. The 'purpose' parameter ensures that no connection # is used for both sending and receiving messages.
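#
# A minimal usage sketch (the pool object and the send call are
# illustrative only, not a fixed API): a sender borrows a pooled
# connection and never reads from its socket:
#
#     with ConnectionContext(pool, PURPOSE_SEND, retry=None) as conn:
#         ...  # write-only use of the underlying socket
#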
# # The rabbitmq driver allocates connections in the following manner: # * driver.listen*(): each call creates a new dedicated # 'PURPOSE_LISTEN' connection for the listener # * driver.send*(): senders are assigned a connection from a pool of # 'PURPOSE_SEND' connections maintained by the driver. # * One 'PURPOSE_LISTEN' connection is dedicated to waiting for replies # to rpc calls. pooled = purpose == PURPOSE_SEND if pooled: self.connection = connection_pool.get(retry=retry) else: self.connection = connection_pool.create(purpose) self.pooled = pooled self.connection.pooled = pooled def __enter__(self): """When with ConnectionContext() is used, return self.""" return self def _done(self): """If the connection came from a pool, clean it up and put it back. If it did not come from a pool, close it. """ if self.connection: if self.pooled: # Reset the connection so it's ready for the next caller # to grab from the pool try: self.connection.reset() except Exception: LOG.exception("Fail to reset the connection, drop it") try: self.connection.close() except Exception as exc: LOG.debug("pooled conn close failure (ignored): %s", str(exc)) self.connection = self.connection_pool.create() finally: self.connection_pool.put(self.connection) else: try: self.connection.close() except Exception as exc: LOG.debug("pooled conn close failure (ignored): %s", str(exc)) self.connection = None def __exit__(self, exc_type, exc_value, tb): """End of 'with' statement. We're done here.""" self._done() def __del__(self): """Caller is done with this connection. Make sure we cleaned up.""" self._done() def close(self): """Caller is done with this connection.""" self._done() def __getattr__(self, key): """Proxy all other calls to the Connection instance.""" if self.connection: return getattr(self.connection, key) else: raise InvalidRPCConnectionReuse() class ConfigOptsProxy(Mapping): """Proxy for oslo_config.cfg.ConfigOpts. Values from the query part of the transport url (if they are both present and valid) override corresponding values from the configuration. """ def __init__(self, conf, url, group): self._conf = conf self._url = url self._group = group self._validate_query() def _validate_query(self): for name in self._url.query: self.GroupAttrProxy(self._conf, self._group, self._conf[self._group], self._url)[name] def __getattr__(self, name): value = getattr(self._conf, name) if isinstance(value, self._conf.GroupAttr) and name == self._group: return self.GroupAttrProxy(self._conf, name, value, self._url) return value def __getitem__(self, name): return self.__getattr__(name) def __contains__(self, name): return name in self._conf def __iter__(self): return iter(self._conf) def __len__(self): return len(self._conf) class GroupAttrProxy(Mapping): """Internal helper proxy for oslo_config.cfg.ConfigOpts.GroupAttr.""" _VOID_MARKER = object() def __init__(self, conf, group_name, group, url): self._conf = conf self._group_name = group_name self._group = group self._url = url def __getattr__(self, opt_name): # Make sure that the group has this specific option opt_value_conf = getattr(self._group, opt_name) # If the option is also present in the url and has a valid # (i.e. 
convertible) value type, then try to override it opt_value_url = self._url.query.get(opt_name, self._VOID_MARKER) if opt_value_url is self._VOID_MARKER: return opt_value_conf opt_info = self._conf._get_opt_info(opt_name, self._group_name) return opt_info['opt'].type(opt_value_url) def __getitem__(self, opt_name): return self.__getattr__(opt_name) def __contains__(self, opt_name): return opt_name in self._group def __iter__(self): return iter(self._group) def __len__(self): return len(self._group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/impl_amqp1.py0000664000175000017500000004430500000000000024026 0ustar00zuulzuul00000000000000# Copyright 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for the 'amqp' transport. This module provides a transport driver that speaks version 1.0 of the AMQP messaging protocol. The driver sends messages and creates subscriptions via 'tasks' that are performed on its behalf via the controller module. """ import collections import logging import os import threading import uuid import warnings from debtcollector import removals from oslo_config import cfg from oslo_messaging.target import Target from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_messaging._drivers.amqp1_driver.eventloop import compute_timeout from oslo_messaging._drivers.amqp1_driver import opts from oslo_messaging._drivers import base from oslo_messaging._drivers import common warnings.simplefilter('always') proton = importutils.try_import('proton') controller = importutils.try_import( 'oslo_messaging._drivers.amqp1_driver.controller' ) LOG = logging.getLogger(__name__) # Build/Decode RPC Response messages # Body Format - json string containing a map with keys: # 'failure' - (optional) serialized exception from remote # 'response' - (if no failure provided) data returned by call def marshal_response(reply, failure): # TODO(grs): do replies have a context? # NOTE(flaper87): Set inferred to True since rabbitmq-amqp-1.0 doesn't # have support for vbin8. msg = proton.Message(inferred=True) if failure: failure = common.serialize_remote_exception(failure) data = {"failure": failure} else: data = {"response": reply} msg.body = jsonutils.dumps(data) return msg def unmarshal_response(message, allowed): # TODO(kgiusti) This may fail to unpack and raise an exception. Need to # communicate this to the caller! 
data = jsonutils.loads(message.body) failure = data.get('failure') if failure is not None: raise common.deserialize_remote_exception(failure, allowed) return data.get("response") # Build/Decode RPC Request and Notification messages # Body Format: json string containing a map with keys: # 'request' - possibly serialized application data # 'context' - context provided by the application # 'call_monitor_timeout' - optional time in seconds for RPC call monitoring def marshal_request(request, context, envelope=False, call_monitor_timeout=None): # NOTE(flaper87): Set inferred to True since rabbitmq-amqp-1.0 doesn't # have support for vbin8. msg = proton.Message(inferred=True) if envelope: request = common.serialize_msg(request) data = { "request": request, "context": context } if call_monitor_timeout is not None: data["call_monitor_timeout"] = call_monitor_timeout msg.body = jsonutils.dumps(data) return msg def unmarshal_request(message): data = jsonutils.loads(message.body) msg = common.deserialize_msg(data.get("request")) return (msg, data.get("context"), data.get("call_monitor_timeout")) @removals.removed_class("ProtonIncomingMessage") class ProtonIncomingMessage(base.RpcIncomingMessage): def __init__(self, listener, message, disposition): request, ctxt, client_timeout = unmarshal_request(message) super(ProtonIncomingMessage, self).__init__(ctxt, request) self.listener = listener self.client_timeout = client_timeout self._reply_to = message.reply_to self._correlation_id = message.id self._disposition = disposition def heartbeat(self): # heartbeats are sent "best effort": non-blocking, no retries, # pre-settled (no blocking for acks). We don't want the server thread # being blocked because it is unable to send a heartbeat. if not self._reply_to: LOG.warning("Cannot send RPC heartbeat: no reply-to provided") return # send a null msg (no body). This will cause the client to simply reset # its timeout (the null message is dropped). Use time-to-live to # prevent stale heartbeats from building up on the message bus msg = proton.Message() msg.correlation_id = self._correlation_id msg.ttl = self.client_timeout task = controller.SendTask("RPC KeepAlive", msg, self._reply_to, deadline=None, retry=0, wait_for_ack=False) self.listener.driver._ctrl.add_task(task) task.wait() def reply(self, reply=None, failure=None): """Schedule an RPCReplyTask to send the reply.""" if self._reply_to: response = marshal_response(reply, failure) response.correlation_id = self._correlation_id driver = self.listener.driver deadline = compute_timeout(driver._default_reply_timeout) ack = not driver._pre_settle_reply task = controller.SendTask("RPC Reply", response, self._reply_to, # analogous to kombu missing dest t/o: deadline, retry=driver._default_reply_retry, wait_for_ack=ack) driver._ctrl.add_task(task) rc = task.wait() if rc: # something failed.
Not much we can do at this point but log LOG.debug("RPC Reply failed to send: %s", str(rc)) else: LOG.debug("Ignoring reply as no reply address available") def acknowledge(self): """Schedule a MessageDispositionTask to send the settlement.""" task = controller.MessageDispositionTask(self._disposition, released=False) self.listener.driver._ctrl.add_task(task) def requeue(self): """Schedule a MessageDispositionTask to release the message""" task = controller.MessageDispositionTask(self._disposition, released=True) self.listener.driver._ctrl.add_task(task) @removals.removed_class("Queue") class Queue(object): def __init__(self): self._queue = collections.deque() self._lock = threading.Lock() self._pop_wake_condition = threading.Condition(self._lock) self._started = True def put(self, item): with self._lock: self._queue.appendleft(item) self._pop_wake_condition.notify() def pop(self, timeout): with timeutils.StopWatch(timeout) as stop_watcher: with self._lock: while len(self._queue) == 0: if stop_watcher.expired() or not self._started: return None self._pop_wake_condition.wait( stop_watcher.leftover(return_none=True) ) return self._queue.pop() def stop(self): with self._lock: self._started = False self._pop_wake_condition.notify_all() @removals.removed_class("ProtonListener") class ProtonListener(base.PollStyleListener): def __init__(self, driver): super(ProtonListener, self).__init__(driver.prefetch_size) self.driver = driver self.incoming = Queue() self.id = uuid.uuid4().hex def stop(self): self.incoming.stop() @base.batch_poll_helper def poll(self, timeout=None): qentry = self.incoming.pop(timeout) if qentry is None: return None return ProtonIncomingMessage(self, qentry['message'], qentry['disposition']) @removals.removed_class("ProtonDriver") class ProtonDriver(base.BaseDriver): """AMQP 1.0 Driver See :doc:`AMQP1.0` for details. """ def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=[]): if proton is None or controller is None: raise NotImplementedError("Proton AMQP C libraries not installed") super(ProtonDriver, self).__init__(conf, url, default_exchange, allowed_remote_exmods) opt_group = cfg.OptGroup(name='oslo_messaging_amqp', title='AMQP 1.0 driver options') conf.register_group(opt_group) conf.register_opts(opts.amqp1_opts, group=opt_group) conf = common.ConfigOptsProxy(conf, url, opt_group.name) self._conf = conf self._default_exchange = default_exchange # lazy connection setup - don't create the controller until # after the first messaging request: self._ctrl = None self._pid = None self._lock = threading.Lock() # timeout for message acknowledgement opt_name = conf.oslo_messaging_amqp self._default_reply_timeout = opt_name.default_reply_timeout self._default_send_timeout = opt_name.default_send_timeout self._default_notify_timeout = opt_name.default_notify_timeout self._default_reply_retry = opt_name.default_reply_retry # which message types should be sent pre-settled? ps = [s.lower() for s in opt_name.pre_settled] self._pre_settle_call = 'rpc-call' in ps self._pre_settle_reply = 'rpc-reply' in ps self._pre_settle_cast = 'rpc-cast' in ps self._pre_settle_notify = 'notify' in ps bad_opts = set(ps).difference(['rpc-call', 'rpc-reply', 'rpc-cast', 'notify']) if bad_opts: LOG.warning("Ignoring unrecognized pre_settle value(s): %s", " ".join(bad_opts)) def _ensure_connect_called(func): """Causes a new controller to be created when the messaging service is first used by the current process. 
It is safe to push tasks to it whether connected or not, but those tasks won't be processed until connection completes. """ def wrap(self, *args, **kws): with self._lock: # check to see if a fork was done after the Controller and its # I/O thread was spawned. old_pid will be None the first time # this is called which will cause the Controller to be created. old_pid = self._pid self._pid = os.getpid() if old_pid != self._pid: if self._ctrl is not None: # fork was called after the Controller was created, and # we are now executing as the child process. Do not # touch the existing Controller - it is owned by the # parent. Best we can do here is simply drop it and # hope we get lucky. LOG.warning("Process forked after connection " "established!") self._ctrl = None # Create a Controller that connects to the messaging # service: self._ctrl = controller.Controller(self._url, self._default_exchange, self._conf) self._ctrl.connect() return func(self, *args, **kws) return wrap @_ensure_connect_called def send(self, target, ctxt, message, wait_for_reply=False, timeout=None, call_monitor_timeout=None, retry=None, transport_options=None): """Send a message to the given target. :param target: destination for message :type target: oslo_messaging.Target :param ctxt: message context :type ctxt: dict :param message: message payload :type message: dict :param wait_for_reply: expects a reply message, wait for it :type wait_for_reply: bool :param timeout: raise exception if send does not complete within timeout seconds. None == no timeout. :type timeout: float :param call_monitor_timeout: Maximum time the client will wait for the call to complete or receive a message heartbeat indicating the remote side is still executing. :type call_monitor_timeout: float :param retry: (optional) maximum re-send attempts on recoverable error None or -1 means to retry forever 0 means no retry N means N retries :type retry: int :param transport_options: transport-specific options to apply to the sending of the message (TBD) :type transport_options: dictionary """ request = marshal_request(message, ctxt, None, call_monitor_timeout) if timeout: expire = compute_timeout(timeout) request.ttl = timeout request.expiry_time = compute_timeout(timeout) else: # no timeout provided by application. If the backend is queueless # this could lead to a hang - provide a default to prevent this # TODO(kgiusti) only do this if brokerless backend expire = compute_timeout(self._default_send_timeout) if wait_for_reply: ack = not self._pre_settle_call if call_monitor_timeout is None: task = controller.RPCCallTask(target, request, expire, retry, wait_for_ack=ack) else: task = controller.RPCMonitoredCallTask(target, request, expire, call_monitor_timeout, retry, wait_for_ack=ack) else: ack = not self._pre_settle_cast task = controller.SendTask("RPC Cast", request, target, expire, retry, wait_for_ack=ack) self._ctrl.add_task(task) reply = task.wait() if isinstance(reply, Exception): raise reply if reply: # TODO(kgiusti) how to handle failure to un-marshal? # Must log, and determine best way to communicate this failure # back up to the caller reply = unmarshal_response(reply, self._allowed_remote_exmods) return reply @_ensure_connect_called def send_notification(self, target, ctxt, message, version, retry=None): """Send a notification message to the given target. 
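For illustration only (the target and payload are hypothetical): passing ``version=2.0`` wraps the payload in the oslo message envelope via :py:func:`common.serialize_msg`, while any other version sends it bare::

    driver.send_notification(target, {}, {'event_type': 'x'}, 2.0)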
:param target: destination for message :type target: oslo_messaging.Target :param ctxt: message context :type ctxt: dict :param message: message payload :type message: dict :param version: message envelope version :type version: float :param retry: (optional) maximum re-send attempts on recoverable error None or -1 means to retry forever 0 means no retry N means N retries :type retry: int """ request = marshal_request(message, ctxt, envelope=(version == 2.0)) # no timeout is applied to notifications, however if the backend is # queueless this could lead to a hang - provide a default to prevent # this # TODO(kgiusti) should raise NotImplemented if not broker backend deadline = compute_timeout(self._default_notify_timeout) ack = not self._pre_settle_notify task = controller.SendTask("Notify", request, target, deadline, retry, wait_for_ack=ack, notification=True) self._ctrl.add_task(task) rc = task.wait() if isinstance(rc, Exception): raise rc @_ensure_connect_called def listen(self, target, batch_size, batch_timeout): """Construct a Listener for the given target.""" LOG.debug("Listen to %s", target) listener = ProtonListener(self) task = controller.SubscribeTask(target, listener) self._ctrl.add_task(task) task.wait() return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) @_ensure_connect_called def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): """Construct a Listener for notifications on the given target and priority. """ # TODO(kgiusti) should raise NotImplemented if not broker backend LOG.debug("Listen for notifications %s", targets_and_priorities) if pool: raise NotImplementedError('"pool" not implemented by ' 'this transport driver') listener = ProtonListener(self) # this is how the destination target is created by the notifier, # see MessagingDriver.notify in oslo_messaging/notify/messaging.py for target, priority in targets_and_priorities: topic = '%s.%s' % (target.topic, priority) # Sooo... the exchange is simply discarded? (see above comment) task = controller.SubscribeTask(Target(topic=topic), listener, notifications=True) self._ctrl.add_task(task) task.wait() return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def cleanup(self): """Release all resources.""" if self._ctrl: self._ctrl.shutdown() self._ctrl = None LOG.info("AMQP 1.0 messaging driver shutdown") def require_features(self, requeue=True): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/impl_fake.py0000664000175000017500000002241600000000000023714 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
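# A minimal usage sketch (not part of this module; names are
# illustrative): the fake driver is normally reached through the public
# API with a 'fake://' transport URL, e.g. in unit tests:
#
#     from oslo_config import cfg
#     import oslo_messaging
#
#     transport = oslo_messaging.get_rpc_transport(cfg.CONF, url='fake://')
#     target = oslo_messaging.Target(topic='testtopic', server='server1')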
import copy import queue import threading import time from oslo_serialization import jsonutils from oslo_utils import eventletutils import oslo_messaging from oslo_messaging._drivers import base class FakeIncomingMessage(base.RpcIncomingMessage): def __init__(self, ctxt, message, reply_q, requeue): super(FakeIncomingMessage, self).__init__(ctxt, message) self.requeue_callback = requeue self._reply_q = reply_q def reply(self, reply=None, failure=None): if self._reply_q: failure = failure[1] if failure else None self._reply_q.put((reply, failure)) def requeue(self): self.requeue_callback() def heartbeat(self): """Heartbeat is not supported.""" class FakeListener(base.PollStyleListener): def __init__(self, exchange_manager, targets, pool=None): super(FakeListener, self).__init__() self._exchange_manager = exchange_manager self._targets = targets self._pool = pool self._stopped = eventletutils.Event() # NOTE(sileht): Ensure that all needed queues exist even if the # listener has not been polled yet for target in self._targets: exchange = self._exchange_manager.get_exchange(target.exchange) exchange.ensure_queue(target, pool) @base.batch_poll_helper def poll(self, timeout=None): if timeout is not None: deadline = time.time() + timeout else: deadline = None while not self._stopped.is_set(): for target in self._targets: exchange = self._exchange_manager.get_exchange(target.exchange) (ctxt, message, reply_q, requeue) = exchange.poll(target, self._pool) if message is not None: message = FakeIncomingMessage(ctxt, message, reply_q, requeue) return message if deadline is not None: pause = deadline - time.time() if pause < 0: break pause = min(pause, 0.050) else: pause = 0.050 time.sleep(pause) return None def stop(self): self._stopped.set() class FakeExchange(object): def __init__(self, name): self.name = name self._queues_lock = threading.RLock() self._topic_queues = {} self._server_queues = {} def ensure_queue(self, target, pool): with self._queues_lock: if target.server: self._get_server_queue(target.topic, target.server) else: self._get_topic_queue(target.topic, pool) def _get_topic_queue(self, topic, pool=None): if pool and (topic, pool) not in self._topic_queues: # NOTE(sileht): if the pool name is set, we need to # copy all the already delivered messages from the # default queue to this queue self._topic_queues[(topic, pool)] = copy.deepcopy( self._get_topic_queue(topic)) return self._topic_queues.setdefault((topic, pool), []) def _get_server_queue(self, topic, server): return self._server_queues.setdefault((topic, server), []) def deliver_message(self, topic, ctxt, message, server=None, fanout=False, reply_q=None): with self._queues_lock: if fanout: queues = [q for t, q in self._server_queues.items() if t[0] == topic] elif server is not None: queues = [self._get_server_queue(topic, server)] else: # NOTE(sileht): ensure at least the queue without # pool name exists self._get_topic_queue(topic) queues = [q for t, q in self._topic_queues.items() if t[0] == topic] def requeue(): self.deliver_message(topic, ctxt, message, server=server, fanout=fanout, reply_q=reply_q) for q in queues: q.append((ctxt, message, reply_q, requeue)) def poll(self, target, pool): with self._queues_lock: if target.server: queue = self._get_server_queue(target.topic, target.server) else: queue = self._get_topic_queue(target.topic, pool) return queue.pop(0) if queue else (None, None, None, None) class FakeExchangeManager(object): _exchanges_lock = threading.Lock() _exchanges = {} def __init__(self, default_exchange):
self._default_exchange = default_exchange def get_exchange(self, name): if name is None: name = self._default_exchange with self._exchanges_lock: return self._exchanges.setdefault(name, FakeExchange(name)) @classmethod def cleanup(cls): cls._exchanges.clear() class FakeDriver(base.BaseDriver): """Fake driver used for testing. This driver passes messages in memory, and should only be used for unit tests. """ def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): super(FakeDriver, self).__init__(conf, url, default_exchange, allowed_remote_exmods) self._exchange_manager = FakeExchangeManager(default_exchange) def require_features(self, requeue=True): pass @staticmethod def _check_serialize(message): """Make sure a message intended for rpc can be serialized. All the in tree drivers implementing RPC send uses jsonutils.dumps on the message. So in the test we ensure here that all the messages are serializable with this call. """ jsonutils.dumps(message) def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None, transport_options=None): self._check_serialize(message) exchange = self._exchange_manager.get_exchange(target.exchange) reply_q = None if wait_for_reply: reply_q = queue.Queue() exchange.deliver_message(target.topic, ctxt, message, server=target.server, fanout=target.fanout, reply_q=reply_q) if wait_for_reply: try: reply, failure = reply_q.get(timeout=timeout) if failure: raise failure else: return reply except queue.Empty: raise oslo_messaging.MessagingTimeout( 'No reply on topic %s' % target.topic) return None def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, call_monitor_timeout=None, retry=None, transport_options=None): # NOTE(sileht): retry doesn't need to be implemented, the fake # transport always works return self._send(target, ctxt, message, wait_for_reply, timeout, transport_options) def send_notification(self, target, ctxt, message, version, retry=None): # NOTE(sileht): retry doesn't need to be implemented, the fake # transport always works self._send(target, ctxt, message) def listen(self, target, batch_size, batch_timeout): exchange = target.exchange or self._default_exchange listener = FakeListener(self._exchange_manager, [oslo_messaging.Target( topic=target.topic, server=target.server, exchange=exchange), oslo_messaging.Target( topic=target.topic, exchange=exchange)]) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): targets = [ oslo_messaging.Target( topic='%s.%s' % (target.topic, priority), exchange=target.exchange) for target, priority in targets_and_priorities] listener = FakeListener(self._exchange_manager, targets, pool) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def cleanup(self): self._exchange_manager.cleanup() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/impl_kafka.py0000664000175000017500000004162200000000000024063 0ustar00zuulzuul00000000000000# Copyright (C) 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import threading import confluent_kafka from confluent_kafka import KafkaException from oslo_serialization import jsonutils from oslo_utils import eventletutils from oslo_utils import importutils from oslo_messaging._drivers import base from oslo_messaging._drivers import common as driver_common from oslo_messaging._drivers.kafka_driver import kafka_options if eventletutils.EVENTLET_AVAILABLE: tpool = importutils.try_import('eventlet.tpool') LOG = logging.getLogger(__name__) def unpack_message(msg): """Unpack context and msg.""" context = {} message = None msg = jsonutils.loads(msg) message = driver_common.deserialize_msg(msg) context = message['_context'] del message['_context'] return context, message def pack_message(ctxt, msg): """Pack context into msg.""" if isinstance(ctxt, dict): context_d = ctxt else: context_d = ctxt.to_dict() msg['_context'] = context_d msg = driver_common.serialize_msg(msg) return msg def concat(sep, items): return sep.join(filter(bool, items)) def target_to_topic(target, priority=None, vhost=None): """Convert target into a topic string. :param target: Message destination target :type target: oslo_messaging.Target :param priority: Notification priority :type priority: string :param vhost: Notification vhost :type vhost: string """ return concat(".", [target.topic, priority, vhost]) class ConsumerTimeout(KafkaException): pass class AssignedPartition(object): """This class is used by the ConsumerConnection to track the assigned partitions. """ def __init__(self, topic, partition): super(AssignedPartition, self).__init__() self.topic = topic self.partition = partition self.skey = '%s %d' % (self.topic, self.partition) def to_dict(self): return {'topic': self.topic, 'partition': self.partition} class Connection(object): """This is the base class for consumer and producer connections for transport attributes. """ def __init__(self, conf, url): self.driver_conf = conf.oslo_messaging_kafka self.security_protocol = self.driver_conf.security_protocol self.sasl_mechanism = self.driver_conf.sasl_mechanism self.ssl_cafile = self.driver_conf.ssl_cafile self.ssl_client_cert_file = self.driver_conf.ssl_client_cert_file self.ssl_client_key_file = self.driver_conf.ssl_client_key_file self.ssl_client_key_password = self.driver_conf.ssl_client_key_password self.url = url self.virtual_host = url.virtual_host self._parse_url() def _parse_url(self): self.hostaddrs = [] self.username = None self.password = None for host in self.url.hosts: # NOTE(ansmith): connections and failover are transparently # managed by the client library.
Credentials will be # selected from the first host encountered in transport_url if self.username is None: self.username = host.username self.password = host.password else: if self.username != host.username: LOG.warning("Different transport usernames detected") if host.hostname: if ':' in host.hostname: hostaddr = "[%s]:%s" % (host.hostname, host.port) else: hostaddr = "%s:%s" % (host.hostname, host.port) self.hostaddrs.append(hostaddr) def reset(self): """Reset a connection so it can be used again.""" pass class ConsumerConnection(Connection): """This is the class for the kafka topic/assigned partition consumer. """ def __init__(self, conf, url): super(ConsumerConnection, self).__init__(conf, url) self.consumer = None self.consumer_timeout = self.driver_conf.kafka_consumer_timeout self.max_fetch_bytes = self.driver_conf.kafka_max_fetch_bytes self.group_id = self.driver_conf.consumer_group self.use_auto_commit = self.driver_conf.enable_auto_commit self.max_poll_records = self.driver_conf.max_poll_records self._consume_loop_stopped = False self.assignment_dict = dict() def find_assignment(self, topic, partition): """Find and return existing assignment based on topic and partition""" skey = '%s %d' % (topic, partition) return self.assignment_dict.get(skey) def on_assign(self, consumer, topic_partitions): """Rebalance on_assign callback""" assignment = [AssignedPartition(p.topic, p.partition) for p in topic_partitions] self.assignment_dict = {a.skey: a for a in assignment} for t in topic_partitions: LOG.debug("Topic %s assigned to partition %d", t.topic, t.partition) def on_revoke(self, consumer, topic_partitions): """Rebalance on_revoke callback""" self.assignment_dict = dict() for t in topic_partitions: LOG.debug("Topic %s revoked from partition %d", t.topic, t.partition) def _poll_messages(self, timeout): """Consume messages, run callbacks, and return a list of messages""" msglist = self.consumer.consume(self.max_poll_records, timeout) if ((len(self.assignment_dict) == 0) or (len(msglist) == 0)): raise ConsumerTimeout() messages = [] for message in msglist: if message is None: break a = self.find_assignment(message.topic(), message.partition()) if a is None: LOG.warning(("Message for %s received on unassigned " "partition %d"), message.topic(), message.partition()) else: messages.append(message.value()) if not self.use_auto_commit: self.consumer.commit(asynchronous=False) return messages def consume(self, timeout=None): """Receive messages.
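A minimal polling sketch (assumes :py:meth:`declare_topic_consumer` was already called; the timeout value and handler are illustrative)::

    for raw in conn.consume(timeout=5) or []:
        handle(raw)  # raw is the JSON-encoded message payload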
:param timeout: poll timeout in seconds """ def _raise_timeout(exc): raise driver_common.Timeout(str(exc)) timer = driver_common.DecayingTimer(duration=timeout) timer.start() poll_timeout = (self.consumer_timeout if timeout is None else min(timeout, self.consumer_timeout)) while True: if self._consume_loop_stopped: return try: if eventletutils.is_monkey_patched('thread'): return tpool.execute(self._poll_messages, poll_timeout) return self._poll_messages(poll_timeout) except ConsumerTimeout as exc: poll_timeout = timer.check_return( _raise_timeout, exc, maximum=self.consumer_timeout) except Exception: LOG.exception("Failed to consume messages") return def stop_consuming(self): self._consume_loop_stopped = True def close(self): if self.consumer: self.consumer.close() self.consumer = None def declare_topic_consumer(self, topics, group=None): conf = { 'bootstrap.servers': ",".join(self.hostaddrs), 'group.id': (group or self.group_id), 'enable.auto.commit': self.use_auto_commit, 'max.partition.fetch.bytes': self.max_fetch_bytes, 'security.protocol': self.security_protocol, 'sasl.mechanism': self.sasl_mechanism, 'sasl.username': self.username, 'sasl.password': self.password, 'ssl.ca.location': self.ssl_cafile, 'ssl.certificate.location': self.ssl_client_cert_file, 'ssl.key.location': self.ssl_client_key_file, 'ssl.key.password': self.ssl_client_key_password, 'enable.partition.eof': False, 'default.topic.config': {'auto.offset.reset': 'latest'} } LOG.debug("Subscribing to %s as %s", topics, (group or self.group_id)) self.consumer = confluent_kafka.Consumer(conf) self.consumer.subscribe(topics, on_assign=self.on_assign, on_revoke=self.on_revoke) class ProducerConnection(Connection): def __init__(self, conf, url): super(ProducerConnection, self).__init__(conf, url) self.batch_size = self.driver_conf.producer_batch_size self.linger_ms = self.driver_conf.producer_batch_timeout * 1000 self.compression_codec = self.driver_conf.compression_codec self.producer = None self.producer_lock = threading.Lock() def _produce_message(self, topic, message, poll): if poll: self.producer.poll(poll) try: self.producer.produce(topic, message) except KafkaException as e: self.producer.poll(0) raise e except BufferError as e: # We'll have to poll next time raise e self.producer.poll(0) def notify_send(self, topic, ctxt, msg, retry): """Send messages to Kafka broker. 
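A minimal usage sketch (the topic string and payload are illustrative)::

    pconn.notify_send('notifications.info', {}, {'event_type': 'x'}, 3)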
:param topic: String of the topic :param ctxt: context for the messages :param msg: messages for publishing :param retry: the number of retries """ retry = retry if retry >= 0 else None message = pack_message(ctxt, msg) message = jsonutils.dumps(message).encode('utf-8') try: self._ensure_producer() poll = 0 while True: try: if eventletutils.is_monkey_patched('thread'): return tpool.execute(self._produce_message, topic, message, poll) return self._produce_message(topic, message, poll) except KafkaException as e: LOG.error("Produce message failed: %s" % str(e)) break except BufferError: LOG.debug("Produce message queue full, " "waiting for deliveries") # We'll retry with .5s polling poll = 0.5 except Exception: # NOTE(sileht): if something goes wrong close the producer # connection self._close_producer() raise def close(self): self._close_producer() def _close_producer(self): with self.producer_lock: if self.producer: try: self.producer.flush() except KafkaException: LOG.error("Flush error during producer close") self.producer = None def _ensure_producer(self): if self.producer: return with self.producer_lock: if self.producer: return conf = { 'bootstrap.servers': ",".join(self.hostaddrs), 'linger.ms': self.linger_ms, 'batch.num.messages': self.batch_size, 'compression.codec': self.compression_codec, 'security.protocol': self.security_protocol, 'sasl.mechanism': self.sasl_mechanism, 'sasl.username': self.username, 'sasl.password': self.password, 'ssl.ca.location': self.ssl_cafile, 'ssl.certificate.location': self.ssl_client_cert_file, 'ssl.key.location': self.ssl_client_key_file, 'ssl.key.password': self.ssl_client_key_password } self.producer = confluent_kafka.Producer(conf) class OsloKafkaMessage(base.RpcIncomingMessage): def __init__(self, ctxt, message): super(OsloKafkaMessage, self).__init__(ctxt, message) def requeue(self): LOG.warning("requeue is not supported") def reply(self, reply=None, failure=None): LOG.warning("reply is not supported") def heartbeat(self): LOG.warning("heartbeat is not supported") class KafkaListener(base.PollStyleListener): def __init__(self, conn): super(KafkaListener, self).__init__() self._stopped = eventletutils.Event() self.conn = conn self.incoming_queue = [] # FIXME(sileht): We do a first poll to ensure the topics are created # This is a workaround mainly for functional tests, in real life # this is fine if topics are not created synchronously self.poll(5) @base.batch_poll_helper def poll(self, timeout=None): while not self._stopped.is_set(): if self.incoming_queue: return self.incoming_queue.pop(0) try: messages = self.conn.consume(timeout=timeout) or [] for message in messages: msg = OsloKafkaMessage(*unpack_message(message)) self.incoming_queue.append(msg) except driver_common.Timeout: return None def stop(self): self._stopped.set() self.conn.stop_consuming() def cleanup(self): self.conn.close() class KafkaDriver(base.BaseDriver): """Kafka Driver See :doc:`kafka` for details.
""" def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): conf = kafka_options.register_opts(conf, url) super(KafkaDriver, self).__init__( conf, url, default_exchange, allowed_remote_exmods) self.listeners = [] self.virtual_host = url.virtual_host self.pconn = ProducerConnection(conf, url) def cleanup(self): self.pconn.close() for c in self.listeners: c.close() self.listeners = [] LOG.info("Kafka messaging driver shutdown") def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, call_monitor_timeout=None, retry=None, transport_options=None): raise NotImplementedError( 'The RPC implementation for Kafka is not implemented') def send_notification(self, target, ctxt, message, version, retry=None): """Send notification to Kafka brokers :param target: Message destination target :type target: oslo_messaging.Target :param ctxt: Message context :type ctxt: dict :param message: Message payload to pass :type message: dict :param version: Messaging API version (currently not used) :type version: str :param call_monitor_timeout: Maximum time the client will wait for the call to complete before or receive a message heartbeat indicating the remote side is still executing. :type call_monitor_timeout: float :param retry: an optional default kafka consumer retries configuration None means to retry forever 0 means no retry N means N retries :type retry: int """ self.pconn.notify_send(target_to_topic(target, vhost=self.virtual_host), ctxt, message, retry) def listen(self, target, batch_size, batch_timeout): raise NotImplementedError( 'The RPC implementation for Kafka is not implemented') def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): """Listen to a specified list of targets on Kafka brokers :param targets_and_priorities: List of pairs (target, priority) priority is not used for kafka driver target.exchange_target.topic is used as a kafka topic :type targets_and_priorities: list :param pool: consumer group of Kafka consumers :type pool: string """ conn = ConsumerConnection(self.conf, self._url) topics = [] for target, priority in targets_and_priorities: topics.append(target_to_topic(target, priority)) conn.declare_topic_consumer(topics, pool) listener = KafkaListener(conn) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/impl_rabbit.py0000664000175000017500000023435400000000000024257 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import contextlib import errno import functools import itertools import math import os import random import socket import ssl import sys import threading import time from urllib import parse import uuid from amqp import exceptions as amqp_ex import kombu import kombu.connection import kombu.entity import kombu.messaging from oslo_config import cfg from oslo_log import log as logging from oslo_utils import eventletutils import oslo_messaging from oslo_messaging._drivers import amqp as rpc_amqp from oslo_messaging._drivers import amqpdriver from oslo_messaging._drivers import base from oslo_messaging._drivers import common as rpc_common from oslo_messaging._drivers import pool from oslo_messaging import _utils from oslo_messaging import exceptions # The QuorumMemConfig will hold the quorum queue memory configurations QuorumMemConfig = collections.namedtuple('QuorumMemConfig', 'delivery_limit' ' max_memory_length' ' max_memory_bytes') # NOTE(sileht): don't exist in py2 socket module TCP_USER_TIMEOUT = 18 rabbit_opts = [ cfg.BoolOpt('ssl', default=False, deprecated_name='rabbit_use_ssl', help='Connect over SSL.'), cfg.StrOpt('ssl_version', default='', deprecated_name='kombu_ssl_version', help='SSL version to use (valid only if SSL enabled). ' 'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, ' 'TLSv1_1, and TLSv1_2 may be available on some ' 'distributions.' ), cfg.StrOpt('ssl_key_file', default='', deprecated_name='kombu_ssl_keyfile', help='SSL key file (valid only if SSL enabled).'), cfg.StrOpt('ssl_cert_file', default='', deprecated_name='kombu_ssl_certfile', help='SSL cert file (valid only if SSL enabled).'), cfg.StrOpt('ssl_ca_file', default='', deprecated_name='kombu_ssl_ca_certs', help='SSL certification authority file ' '(valid only if SSL enabled).'), cfg.BoolOpt('ssl_enforce_fips_mode', default=False, help='Global toggle for enforcing the OpenSSL FIPS mode. ' 'This feature requires Python support. ' 'This is available in Python 3.9 in all ' 'environments and may have been backported to older ' 'Python versions on select environments. If the Python ' 'executable used does not support OpenSSL FIPS mode, ' 'an exception will be raised.'), cfg.BoolOpt('heartbeat_in_pthread', default=False, deprecated_for_removal=True, deprecated_reason='The option is related to Eventlet which ' 'will be removed. In addition this has ' 'never worked as expected with services ' 'using eventlet for core service framework.', help="(DEPRECATED) It is recommend not to use this option " "anymore. Run the health check heartbeat thread " "through a native python thread by default. If this " "option is equal to False then the health check " "heartbeat will inherit the execution model " "from the parent process. For " "example if the parent process has monkey patched the " "stdlib by using eventlet/greenlet then the heartbeat " "will be run through a green thread. " "This option should be set to True only for the " "wsgi services.", ), cfg.FloatOpt('kombu_reconnect_delay', default=1.0, min=0.0, max=amqpdriver.ACK_REQUEUE_EVERY_SECONDS_MAX * 0.9, deprecated_group='DEFAULT', help='How long to wait (in seconds) before reconnecting in ' 'response to an AMQP consumer cancel notification.'), cfg.StrOpt('kombu_compression', help="EXPERIMENTAL: Possible values are: gzip, bz2. If not " "set compression will not be used. 
This option may not " "be available in future versions."), cfg.IntOpt('kombu_missing_consumer_retry_timeout', deprecated_name="kombu_reconnect_timeout", default=60, help='How long to wait for a missing client before abandoning ' 'the attempt to send it its replies. This value should ' 'not be longer than rpc_response_timeout.'), cfg.StrOpt('kombu_failover_strategy', choices=('round-robin', 'shuffle'), default='round-robin', help='Determines how the next RabbitMQ node is chosen in case ' 'the one we are currently connected to becomes ' 'unavailable. Takes effect only if more than one ' 'RabbitMQ node is provided in config.'), cfg.StrOpt('rabbit_login_method', choices=('PLAIN', 'AMQPLAIN', 'EXTERNAL', 'RABBIT-CR-DEMO'), default='AMQPLAIN', deprecated_group='DEFAULT', help='The RabbitMQ login method.'), cfg.IntOpt('rabbit_retry_interval', default=1, help='How frequently to retry connecting with RabbitMQ.'), cfg.IntOpt('rabbit_retry_backoff', default=2, deprecated_group='DEFAULT', help='How long to backoff for between retries when connecting ' 'to RabbitMQ.'), cfg.IntOpt('rabbit_interval_max', default=30, help='Maximum interval of RabbitMQ connection retries. ' 'Default is 30 seconds.'), cfg.BoolOpt('rabbit_ha_queues', default=False, deprecated_group='DEFAULT', help='Try to use HA queues in RabbitMQ (x-ha-policy: all). ' 'If you change this option, you must wipe the RabbitMQ ' 'database. In RabbitMQ 3.0, queue mirroring is no longer ' 'controlled by the x-ha-policy argument when declaring a ' 'queue. If you just want to make sure that all queues (except ' 'those with auto-generated names) are mirrored across all ' 'nodes, run: ' """\"rabbitmqctl set_policy HA '^(?!amq\\.).*' """ """'{"ha-mode": "all"}' \""""), cfg.BoolOpt('rabbit_quorum_queue', default=False, help='Use quorum queues in RabbitMQ (x-queue-type: quorum). ' 'The quorum queue is a modern queue type for RabbitMQ ' 'implementing a durable, replicated FIFO queue based on the ' 'Raft consensus algorithm. It is available as of ' 'RabbitMQ 3.8.0. If set, this option will conflict with ' 'the HA queues (``rabbit_ha_queues``), aka mirrored queues; ' 'in other words, the HA queues should be disabled. ' 'Quorum queues are also durable by default, so the ' 'amqp_durable_queues option is ignored when this option is ' 'enabled.'), cfg.BoolOpt('rabbit_transient_quorum_queue', default=False, help='Use quorum queues for transient queues in RabbitMQ. ' 'Enabling this option will then make sure those queues are ' 'also using the quorum kind of rabbit queues, which are HA ' 'by default.'), cfg.IntOpt('rabbit_quorum_delivery_limit', default=0, help='Each time a message is redelivered to a consumer, ' 'a counter is incremented. Once the redelivery count ' 'exceeds the delivery limit, the message gets dropped ' 'or dead-lettered (if a DLX exchange has been configured). ' 'Used only when rabbit_quorum_queue is enabled. ' 'The default of 0 means no limit is set.'), cfg.IntOpt('rabbit_quorum_max_memory_length', deprecated_name='rabbit_quroum_max_memory_length', default=0, help='By default all messages are maintained in memory; ' 'if a quorum queue grows in length, it can put memory ' 'pressure on a cluster. This option can limit the number ' 'of messages in the quorum queue. 
' 'Used only when rabbit_quorum_queue is enabled. ' 'The default of 0 means no limit is set.'), cfg.IntOpt('rabbit_quorum_max_memory_bytes', deprecated_name='rabbit_quroum_max_memory_bytes', default=0, help='By default all messages are maintained in memory; ' 'if a quorum queue grows in length, it can put memory ' 'pressure on a cluster. This option can limit the number ' 'of memory bytes used by the quorum queue. ' 'Used only when rabbit_quorum_queue is enabled. ' 'The default of 0 means no limit is set.'), cfg.IntOpt('rabbit_transient_queues_ttl', min=0, default=1800, help='Positive integer representing duration in seconds for ' 'queue TTL (x-expires). Queues which are unused for the ' 'duration of the TTL are automatically deleted. The ' 'parameter affects only reply and fanout queues. Setting ' '0 as value will disable the x-expires. If doing so, ' 'make sure you have a rabbitmq policy to delete the ' 'queues or your deployment will create an unbounded ' 'number of queues over time.'), cfg.IntOpt('rabbit_qos_prefetch_count', default=0, help='Specifies the number of messages to prefetch. Setting to ' 'zero allows unlimited messages.'), cfg.IntOpt('heartbeat_timeout_threshold', default=60, help="Number of seconds after which the Rabbit broker is " "considered down if heartbeat's keep-alive fails " "(0 disables heartbeat)."), cfg.IntOpt('heartbeat_rate', default=3, help='How many times during the heartbeat_timeout_threshold ' 'we check the heartbeat.'), cfg.BoolOpt('direct_mandatory_flag', default=True, deprecated_for_removal=True, deprecated_reason='Mandatory flag no longer deactivable.', help='(DEPRECATED) Enable/Disable the RabbitMQ mandatory ' 'flag for direct send. The direct send is used as reply, ' 'so the MessageUndeliverable exception is raised ' 'in case the client queue does not exist. ' 'The MessageUndeliverable exception will be used to loop ' 'for a timeout to give the sender a chance to recover. ' 'This flag is deprecated and it will not be possible to ' 'deactivate this functionality anymore.'), cfg.BoolOpt('enable_cancel_on_failover', default=False, help="Enable the x-cancel-on-ha-failover flag so that " "the rabbitmq server will cancel and notify consumers " "when a queue goes down."), cfg.BoolOpt('use_queue_manager', default=False, help='Should we use consistent queue names or random ones.'), cfg.StrOpt('hostname', sample_default='node1.example.com', default=socket.gethostname(), help='Hostname used by queue manager. Defaults to the value ' 'returned by socket.gethostname().'), cfg.StrOpt('processname', default=os.path.basename(sys.argv[0]), help='Process name used by queue manager.'), cfg.BoolOpt('rabbit_stream_fanout', default=False, help='Use stream queues in RabbitMQ (x-queue-type: stream). ' 'Streams are a new persistent and replicated data structure ' '("queue type") in RabbitMQ which models an append-only log ' 'with non-destructive consumer semantics. It is available ' 'as of RabbitMQ 3.9.0. If set, this option will replace all ' 'fanout queues with only one stream queue.'), ] LOG = logging.getLogger(__name__) def _get_queue_arguments(rabbit_ha_queues, rabbit_queue_ttl, rabbit_quorum_queue, rabbit_quorum_queue_config, rabbit_stream_fanout): """Construct the arguments for declaring a queue. If the rabbit_ha_queues option is set, we try to declare a mirrored queue as described here: http://www.rabbitmq.com/ha.html Setting x-ha-policy to all means that the queue will be mirrored to all nodes in the cluster.
In RabbitMQ 3.0, queue mirroring is no longer controlled by the x-ha-policy argument when declaring a queue. If you just want to make sure that all queues (except those with auto-generated names) are mirrored across all nodes, run: rabbitmqctl set_policy HA '^(?!amq\\.).*' '{"ha-mode": "all"}' If the rabbit_queue_ttl option is > 0, then the queue is declared with the "Queue TTL" value as described here: https://www.rabbitmq.com/ttl.html Setting a queue TTL causes the queue to be automatically deleted if it is unused for the TTL duration. This is a helpful safeguard to prevent queues with zero consumers from growing without bound. If the rabbit_quorum_queue option is set, we try to declare a quorum queue as described here: https://www.rabbitmq.com/quorum-queues.html Setting x-queue-type to quorum means that a replicated FIFO queue based on the Raft consensus algorithm will be used. It is available as of RabbitMQ 3.8.0. If set, this option will conflict with the HA queues (``rabbit_ha_queues``), aka mirrored queues; in other words, HA queues should be disabled. rabbit_quorum_queue_config: Quorum queues provide three options to handle message poisoning and to limit the resources the quorum queue can use: x-delivery-limit is the number of times the queue will try to deliver a message before deciding to discard it; x-max-in-memory-length and x-max-in-memory-bytes control the amount of memory used by the quorum queue. If the rabbit_stream_fanout option is set, fanout queues are going to use streams instead of quorum queues. See here: https://www.rabbitmq.com/streams.html """ args = {} if rabbit_quorum_queue and rabbit_ha_queues: raise RuntimeError('Configuration Error: rabbit_quorum_queue ' 'and rabbit_ha_queues both enabled, queue ' 'type is quorum or HA (mirrored) not both') if rabbit_ha_queues: args['x-ha-policy'] = 'all' if rabbit_quorum_queue: args['x-queue-type'] = 'quorum' if rabbit_quorum_queue_config.delivery_limit: args['x-delivery-limit'] = \ rabbit_quorum_queue_config.delivery_limit if rabbit_quorum_queue_config.max_memory_length: args['x-max-in-memory-length'] = \ rabbit_quorum_queue_config.max_memory_length if rabbit_quorum_queue_config.max_memory_bytes: args['x-max-in-memory-bytes'] = \ rabbit_quorum_queue_config.max_memory_bytes if rabbit_queue_ttl > 0: args['x-expires'] = rabbit_queue_ttl * 1000 if rabbit_stream_fanout: args = {'x-queue-type': 'stream'} if rabbit_queue_ttl > 0: # max-age is a string args['x-max-age'] = f"{rabbit_queue_ttl}s" return args class RabbitMessage(dict): def __init__(self, raw_message): super(RabbitMessage, self).__init__( rpc_common.deserialize_msg(raw_message.payload)) LOG.trace('RabbitMessage.Init: message %s', self) self._raw_message = raw_message def acknowledge(self): LOG.trace('RabbitMessage.acknowledge: message %s', self) self._raw_message.ack() def requeue(self): LOG.trace('RabbitMessage.requeue: message %s', self) self._raw_message.requeue() class Consumer(object): """Consumer class.""" def __init__(self, exchange_name, queue_name, routing_key, type, durable, exchange_auto_delete, queue_auto_delete, callback, nowait=False, rabbit_ha_queues=None, rabbit_queue_ttl=0, enable_cancel_on_failover=False, rabbit_quorum_queue=False, rabbit_quorum_queue_config=QuorumMemConfig(0, 0, 0), rabbit_stream_fanout=False): """Init the Consumer class with the exchange_name, routing_key, type, durable and auto_delete """ self.queue_name = queue_name self.exchange_name = exchange_name self.routing_key = routing_key self.exchange_auto_delete = exchange_auto_delete self.queue_auto_delete = 
queue_auto_delete self.durable = durable self.callback = callback self.type = type self.nowait = nowait rabbit_quorum_queue_config = rabbit_quorum_queue_config or {} self.queue_arguments = _get_queue_arguments( rabbit_ha_queues, rabbit_queue_ttl, rabbit_quorum_queue, rabbit_quorum_queue_config, rabbit_stream_fanout) self.queue = None self._declared_on = None self.exchange = kombu.entity.Exchange( name=exchange_name, type=self.type, durable=self.durable, auto_delete=self.exchange_auto_delete) self.enable_cancel_on_failover = enable_cancel_on_failover self.rabbit_stream_fanout = rabbit_stream_fanout self.next_stream_offset = "last" def _declare_fallback(self, err, conn, consumer_arguments): """Fallback by declaring a non-durable queue. When a control exchange is shared between services, it is possible that one service first creates a non-durable control exchange and that another service then tries to create the same control exchange, but as a durable one. In this case RabbitMQ will raise an exception (PreconditionFailed), which would stop our execution and make our service fail entirely. In this case we want to fall back by creating a non-durable queue to match the default config. """ if "PRECONDITION_FAILED - inequivalent arg 'durable'" in str(err): LOG.info( "[%s] Retrying to declare the exchange (%s) as " "non durable", conn.connection_id, self.exchange_name) self.exchange = kombu.entity.Exchange( name=self.exchange_name, type=self.type, durable=False, auto_delete=self.queue_auto_delete) self.queue = kombu.entity.Queue( name=self.queue_name, channel=conn.channel, exchange=self.exchange, durable=False, auto_delete=self.queue_auto_delete, routing_key=self.routing_key, queue_arguments=self.queue_arguments, consumer_arguments=consumer_arguments ) self.queue.declare() def reset_stream_offset(self): if not self.rabbit_stream_fanout: return LOG.warning("Reset consumer for queue %s; next offset was at %s.", self.queue_name, self.next_stream_offset) self.next_stream_offset = "last" def declare(self, conn): """Re-declare the queue after a rabbit (re)connect.""" consumer_arguments = None if self.enable_cancel_on_failover: consumer_arguments = { "x-cancel-on-ha-failover": True} if self.rabbit_stream_fanout: consumer_arguments = { "x-stream-offset": self.next_stream_offset} self.queue = kombu.entity.Queue( name=self.queue_name, channel=conn.channel, exchange=self.exchange, durable=self.durable, auto_delete=self.queue_auto_delete, routing_key=self.routing_key, queue_arguments=self.queue_arguments, consumer_arguments=consumer_arguments ) try: if self.rabbit_stream_fanout: LOG.info('[%s] Stream Queue.declare: %s after offset %s', conn.connection_id, self.queue_name, self.next_stream_offset) else: LOG.debug('[%s] Queue.declare: %s', conn.connection_id, self.queue_name) try: self.queue.declare() except amqp_ex.PreconditionFailed as err: # NOTE(hberaud): This kind of exception may be triggered # when a control exchange is shared between services and # when services try to create it with configs that differ # from each other. RabbitMQ will reject the services # that try to create it with a configuration that differs # from the one used first.
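# For example (an illustrative pair of configs, not taken from this
# repo), two services sharing one control exchange can trigger the
# fallback below when their durability settings disagree:
#
#     # service A: [oslo_messaging_rabbit] amqp_durable_queues = True
#     # service B: [oslo_messaging_rabbit] amqp_durable_queues = False
#
# Whichever service declares the exchange second receives
# "PRECONDITION_FAILED - inequivalent arg 'durable'", which is then
# handled by _declare_fallback().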
LOG.warning('[%s] Queue %s could not be declared probably ' 'because of conflicting configurations: %s', conn.connection_id, self.queue_name, err) self._declare_fallback(err, conn, consumer_arguments) except amqp_ex.NotFound as ex: # NOTE(viktor.krivak): This exception is raised when a # non-durable and non-ha queue is hosted on a node that # is currently unresponsive or down. Thus, it is # not possible to redeclare the queue since its status # is unknown, and it could disappear in # a few moments. However, the queue could be explicitly # deleted and the redeclaration will then succeed. # The queue will then be scheduled on any of the # running/responsive nodes. # This fixes bug: # https://bugs.launchpad.net/oslo.messaging/+bug/2068630 LOG.warning("Queue %s is stuck on an unresponsive node. " "Trying to delete the queue and redeclare it " "again. Error info: %s", self.queue_name, ex) try: self.queue.delete() except Exception as in_ex: LOG.warning("During cleanup of stuck queue deletion " "another exception occurred: %s. Ignoring...", in_ex) self.queue.declare() except conn.connection.channel_errors as exc: # NOTE(jrosenboom): This exception may be triggered by a race # condition. Simply retrying will solve the error most of the time # and should work well enough as a workaround until the race # condition itself can be fixed. # See https://bugs.launchpad.net/neutron/+bug/1318721 for details. if exc.code == 404: self.queue.declare() else: raise except kombu.exceptions.ConnectionError as exc: # NOTE(gsantomaggio): This exception happens when the # connection is established, but it fails to create the queue. # Add some delay to avoid too many requests to the server. # See: https://bugs.launchpad.net/oslo.messaging/+bug/1822778 # for details. if exc.code == 541: interval = 2 info = {'sleep_time': interval, 'queue': self.queue_name, 'err_str': exc } LOG.error('Internal amqp error (541) ' 'during queue declare, ' 'retrying in %(sleep_time)s seconds. ' 'Queue: [%(queue)s], ' 'error message: [%(err_str)s]', info) time.sleep(interval) if self.queue_arguments.get('x-queue-type') == 'quorum': # Before re-declaring the queue, try to delete it. # This helps with issue #2028384 # NOTE(amorin) we need to make sure the connection is # established again, because when an error occurs, the # connection is closed. conn.ensure_connection() self.queue.delete() self.queue.declare() else: raise self._declared_on = conn.channel def consume(self, conn, tag): """Actually declare the consumer on the amqp channel. This will start the flow of messages from the queue. Using the Connection.consume() will process the messages, calling the appropriate callback. """ # Ensure we are on the correct channel before consuming if conn.channel != self._declared_on: self.declare(conn) try: self.queue.consume(callback=self._callback, consumer_tag=str(tag), nowait=self.nowait) except conn.connection.channel_errors as exc: # We retry once because of some races that we can # recover from before informing the deployer # https://bugs.launchpad.net/oslo.messaging/+bug/1581148 # https://bugs.launchpad.net/oslo.messaging/+bug/1609766 # https://bugs.launchpad.net/neutron/+bug/1318721 # The 406 error code relates to messages that are double ack'd # At any channel error, RabbitMQ closes # the channel, but the amqp-lib quietly re-opens # it. So, we must reset all tags and declare # all consumers again.
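# An illustrative scenario for the 406 branch below: two workers
# consuming the same queue both ack one redelivered message; the broker
# answers the second ack with PRECONDITION_FAILED (406) on Basic.ack
# and closes the channel, and re-declaring plus re-consuming recovers.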
conn._new_tags = set(conn._consumers.values()) if exc.code == 404 or (exc.code == 406 and exc.method_name == 'Basic.ack'): self.declare(conn) self.queue.consume(callback=self._callback, consumer_tag=str(tag), nowait=self.nowait) else: raise except amqp_ex.InternalError as exc: if self.queue_arguments.get('x-queue-type') == 'quorum': # Before re-consuming the queue, try to delete it. # This helps with issue #2028384 if exc.code == 541: LOG.warning('Queue %s seems broken, will try to delete it ' 'before starting over.', self.queue.name) # NOTE(amorin) we need to make sure the connection is # established again, because when an error occurs, the # connection is closed. conn.ensure_connection() self.queue.delete() self.declare(conn) self.queue.consume(callback=self._callback, consumer_tag=str(tag), nowait=self.nowait) else: raise def cancel(self, tag): LOG.trace('ConsumerBase.cancel: canceling %s', tag) self.queue.cancel(str(tag)) def _callback(self, message): """Call the callback with the deserialized message. Messages are ack'ed once processed; messages that fail to process are rejected and skipped. """ offset = message.headers.get("x-stream-offset") if offset is not None: LOG.debug("Stream for %s current offset: %s", self.queue_name, offset) self.next_stream_offset = offset + 1 m2p = getattr(self.queue.channel, 'message_to_python', None) if m2p: message = m2p(message) try: self.callback(RabbitMessage(message)) except Exception: LOG.exception("Failed to process message ... skipping it.") message.reject() class DummyConnectionLock(_utils.DummyLock): def heartbeat_acquire(self): pass class ConnectionLock(DummyConnectionLock): """Lock object to protect access to the kombu connection This is a lock object to protect access to the kombu connection object between the heartbeat thread and the driver thread. There are two ways to acquire this lock: * lock.acquire() * lock.heartbeat_acquire() In both cases lock.release() releases the lock. The goal is that the heartbeat thread always has priority for acquiring the lock. This ensures we have no heartbeat starvation when the driver sends a lot of messages. So when lock.heartbeat_acquire() is called, the next time the lock is released the caller unconditionally acquires the lock, even if someone else asked for the lock before it.
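
    A minimal usage sketch (the thread bodies are illustrative):

        lock = ConnectionLock()

        # driver thread
        lock.acquire()
        try:
            pass  # publish messages over the kombu connection
        finally:
            lock.release()

        # heartbeat thread: acquires ahead of any waiting driver threads
        with lock.for_heartbeat():
            pass  # run the heartbeat check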
""" def __init__(self): self._workers_waiting = 0 self._heartbeat_waiting = False self._lock_acquired = None self._monitor = threading.Lock() self._workers_locks = threading.Condition(self._monitor) self._heartbeat_lock = threading.Condition(self._monitor) self._get_thread_id = eventletutils.fetch_current_thread_functor() def acquire(self): with self._monitor: while self._lock_acquired: self._workers_waiting += 1 self._workers_locks.wait() self._workers_waiting -= 1 self._lock_acquired = self._get_thread_id() def heartbeat_acquire(self): # NOTE(sileht): must be called only one time with self._monitor: while self._lock_acquired is not None: self._heartbeat_waiting = True self._heartbeat_lock.wait() self._heartbeat_waiting = False self._lock_acquired = self._get_thread_id() def release(self): with self._monitor: if self._lock_acquired is None: raise RuntimeError("We can't release a not acquired lock") thread_id = self._get_thread_id() if self._lock_acquired != thread_id: raise RuntimeError("We can't release lock acquired by another " "thread/greenthread; %s vs %s" % (self._lock_acquired, thread_id)) self._lock_acquired = None if self._heartbeat_waiting: self._heartbeat_lock.notify() elif self._workers_waiting > 0: self._workers_locks.notify() @contextlib.contextmanager def for_heartbeat(self): self.heartbeat_acquire() try: yield finally: self.release() class Connection(object): """Connection object.""" def __init__(self, conf, url, purpose, retry=None): # NOTE(viktors): Parse config options driver_conf = conf.oslo_messaging_rabbit self.interval_start = driver_conf.rabbit_retry_interval self.interval_stepping = driver_conf.rabbit_retry_backoff self.interval_max = driver_conf.rabbit_interval_max self.max_retries = retry self.login_method = driver_conf.rabbit_login_method self.rabbit_ha_queues = driver_conf.rabbit_ha_queues self.rabbit_quorum_queue = driver_conf.rabbit_quorum_queue self.rabbit_quorum_queue_config = self._get_quorum_configurations( driver_conf) self.rabbit_transient_quorum_queue = \ driver_conf.rabbit_transient_quorum_queue self.rabbit_stream_fanout = driver_conf.rabbit_stream_fanout self.rabbit_transient_queues_ttl = \ driver_conf.rabbit_transient_queues_ttl self.rabbit_qos_prefetch_count = driver_conf.rabbit_qos_prefetch_count self.heartbeat_timeout_threshold = \ driver_conf.heartbeat_timeout_threshold self.heartbeat_rate = driver_conf.heartbeat_rate self.kombu_reconnect_delay = driver_conf.kombu_reconnect_delay self.amqp_durable_queues = driver_conf.amqp_durable_queues self.amqp_auto_delete = driver_conf.amqp_auto_delete self.ssl = driver_conf.ssl self.kombu_missing_consumer_retry_timeout = \ driver_conf.kombu_missing_consumer_retry_timeout self.kombu_failover_strategy = driver_conf.kombu_failover_strategy self.kombu_compression = driver_conf.kombu_compression self.heartbeat_in_pthread = driver_conf.heartbeat_in_pthread self.ssl_enforce_fips_mode = driver_conf.ssl_enforce_fips_mode self.enable_cancel_on_failover = driver_conf.enable_cancel_on_failover self.use_queue_manager = driver_conf.use_queue_manager if self.rabbit_stream_fanout and self.rabbit_qos_prefetch_count <= 0: raise RuntimeError('Configuration Error: rabbit_stream_fanout ' 'need rabbit_qos_prefetch_count to be set to ' 'a value greater than 0.') if (self.rabbit_stream_fanout and not self.rabbit_transient_quorum_queue): raise RuntimeError('Configuration Error: rabbit_stream_fanout ' 'need rabbit_transient_quorum_queue to be set ' 'to true.') if self.heartbeat_in_pthread: # NOTE(hberaud): Experimental: threading 
module is in use to run # the rabbitmq health check heartbeat. in some situation like # with nova-api, nova need green threads to run the cells # mechanismes in an async mode, so they used eventlet and # greenlet to monkey patch the python stdlib and get green threads. # The issue here is that nova-api run under the apache MPM prefork # module and mod_wsgi. The apache prefork module doesn't support # epoll and recent kernel features, and evenlet is built over epoll # and libevent, so when we run the rabbitmq heartbeat we inherit # from the execution model of the parent process (nova-api), and # in this case we will run the heartbeat through a green thread. # We want to allow users to choose between pthread and # green threads if needed in some specific situations. # This experimental feature allow user to use pthread in an env # that doesn't support eventlet without forcing the parent process # to stop to use eventlet if they need monkey patching for some # specific reasons. # If users want to use pthread we need to make sure that we # will use the *native* threading module for # initialize the heartbeat thread. # Here we override globaly the previously imported # threading module with the native python threading module # if it was already monkey patched by eventlet/greenlet. global threading threading = _utils.stdlib_threading amqpdriver.threading = _utils.stdlib_threading amqpdriver.queue = _utils.stdlib_queue self.direct_mandatory_flag = driver_conf.direct_mandatory_flag if self.ssl: self.ssl_version = driver_conf.ssl_version self.ssl_key_file = driver_conf.ssl_key_file self.ssl_cert_file = driver_conf.ssl_cert_file self.ssl_ca_file = driver_conf.ssl_ca_file if self.ssl_enforce_fips_mode: if hasattr(ssl, 'FIPS_mode'): LOG.info("Enforcing the use of the OpenSSL FIPS mode") ssl.FIPS_mode_set(1) else: raise exceptions.ConfigurationError( "OpenSSL FIPS mode is not supported by your Python " "version. You must either change the Python " "executable used to a version with FIPS mode " "support or disable FIPS mode by setting the " "'[oslo_messaging_rabbit] ssl_enforce_fips_mode' " "configuration option to 'False'.") self._url = '' if url.hosts: if url.transport.startswith('kombu+'): LOG.warning('Selecting the kombu transport through the ' 'transport url (%s) is a experimental feature ' 'and this is not yet supported.', url.transport) if len(url.hosts) > 1: random.shuffle(url.hosts) transformed_urls = [ self._transform_transport_url(url, host) for host in url.hosts] self._url = ';'.join(transformed_urls) elif url.transport.startswith('kombu+'): # NOTE(sileht): url have a + but no hosts # (like kombu+memory:///), pass it to kombu as-is transport = url.transport.replace('kombu+', '') self._url = "%s://" % transport if url.virtual_host: self._url += url.virtual_host elif not url.hosts: host = oslo_messaging.transport.TransportHost('') # NOTE(moguimar): default_password in this function's context is # a fallback option, not a hardcoded password. # username and password are read from host. self._url = self._transform_transport_url( # nosec url, host, default_username='guest', default_password='guest', default_hostname='localhost') self._initial_pid = os.getpid() self._consumers = {} self._producer = None self._new_tags = set() self._active_tags = {} self._tags = itertools.count(1) # Set of exchanges and queues declared on the channel to avoid # unnecessary redeclaration. 
This set is reset each time # the connection is reset in Connection._set_current_channel self._declared_exchanges = set() self._declared_queues = set() self._consume_loop_stopped = False self.channel = None self.purpose = purpose # NOTE(sileht): if purpose is PURPOSE_LISTEN # we don't need the lock because we don't # have a heartbeat thread if purpose == rpc_common.PURPOSE_SEND: self._connection_lock = ConnectionLock() else: self._connection_lock = DummyConnectionLock() self.connection_id = str(uuid.uuid4()) self.name = "%s:%d:%s" % (os.path.basename(sys.argv[0]), os.getpid(), self.connection_id) self.connection = kombu.connection.Connection( self._url, ssl=self._fetch_ssl_params(), login_method=self.login_method, heartbeat=self.heartbeat_timeout_threshold, failover_strategy=self.kombu_failover_strategy, transport_options={ 'confirm_publish': True, 'client_properties': { 'capabilities': { 'authentication_failure_close': True, 'connection.blocked': True, 'consumer_cancel_notify': True }, 'connection_name': self.name}, 'on_blocked': self._on_connection_blocked, 'on_unblocked': self._on_connection_unblocked, }, ) LOG.debug('[%(connection_id)s] Connecting to AMQP server on' ' %(hostname)s:%(port)s', self._get_connection_info()) # NOTE(sileht): kombu recommends running heartbeat_check every # second, but we hold a lock around the kombu connection, and to # avoid holding it most of the time just to wait for events to # drain, we run heartbeat_check and retrieve the server heartbeat # packets only twice as often as the minimum required for # heartbeats to work # (heartbeat_timeout/heartbeat_rate/2.0; the default kombu # heartbeat_rate is 2) self._heartbeat_wait_timeout = ( float(self.heartbeat_timeout_threshold) / float(self.heartbeat_rate) / 2.0) self._heartbeat_support_log_emitted = False # NOTE(sileht): just ensure the connection is set up at startup with self._connection_lock: self.ensure_connection() # NOTE(sileht): if purpose is PURPOSE_LISTEN # the consume code does the heartbeat stuff # we don't need a thread self._heartbeat_thread = None if purpose == rpc_common.PURPOSE_SEND: self._heartbeat_start() LOG.debug('[%(connection_id)s] Connected to AMQP server on ' '%(hostname)s:%(port)s via [%(transport)s] client with' ' port %(client_port)s.', self._get_connection_info()) # NOTE(sileht): value chosen according to the best practice from kombu # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop # For heartbeat, we can set a bigger timeout, and check we receive the # heartbeat packets regularly if self._heartbeat_supported_and_enabled(): self._poll_timeout = self._heartbeat_wait_timeout else: self._poll_timeout = 1 if self._url.startswith('memory://'): # Kludge to speed up tests.
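# (The in-memory kombu transport is selected with a URL such as
# 'kombu+memory:///', handled earlier in __init__; it needs no broker,
# so the zero polling interval and fake host/port below keep unit tests
# fast and their connection logs readable.)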
self.connection.transport.polling_interval = 0.0 # Fixup logging self.connection.hostname = "memory_driver" self.connection.port = 1234 self._poll_timeout = 0.05 if self.use_queue_manager: self._q_manager = amqpdriver.QManager( hostname=driver_conf.hostname, processname=driver_conf.processname) else: self._q_manager = None # FIXME(markmc): use oslo sslutils when it is available as a library _SSL_PROTOCOLS = { "tlsv1": ssl.PROTOCOL_TLSv1, "sslv23": ssl.PROTOCOL_SSLv23 } _OPTIONAL_PROTOCOLS = { 'sslv2': 'PROTOCOL_SSLv2', 'sslv3': 'PROTOCOL_SSLv3', 'tlsv1_1': 'PROTOCOL_TLSv1_1', 'tlsv1_2': 'PROTOCOL_TLSv1_2', } for protocol in _OPTIONAL_PROTOCOLS: try: _SSL_PROTOCOLS[protocol] = getattr(ssl, _OPTIONAL_PROTOCOLS[protocol]) except AttributeError: pass @property def durable(self): # Quorum queues are durable by default, durable option should # be enabled by default with quorum queues return self.amqp_durable_queues or self.rabbit_quorum_queue @classmethod def validate_ssl_version(cls, version): key = version.lower() try: return cls._SSL_PROTOCOLS[key] except KeyError: raise RuntimeError("Invalid SSL version : %s" % version) def _get_quorum_configurations(self, driver_conf): """Get the quorum queue configurations""" delivery_limit = driver_conf.rabbit_quorum_delivery_limit max_memory_length = driver_conf.rabbit_quorum_max_memory_length max_memory_bytes = driver_conf.rabbit_quorum_max_memory_bytes return QuorumMemConfig(delivery_limit, max_memory_length, max_memory_bytes) # NOTE(moguimar): default_password in this function's context is just # a fallback option, not a hardcoded password. def _transform_transport_url(self, url, host, default_username='', # nosec default_password='', default_hostname=''): transport = url.transport.replace('kombu+', '') transport = transport.replace('rabbit', 'amqp') return '%s://%s:%s@%s:%s/%s' % ( transport, parse.quote(host.username or default_username), parse.quote(host.password or default_password), self._parse_url_hostname(host.hostname) or default_hostname, str(host.port or 5672), url.virtual_host or '') def _parse_url_hostname(self, hostname): """Handles hostname returned from urlparse and checks whether it's ipaddress. If it's ipaddress it ensures that it has brackets for IPv6. """ return '[%s]' % hostname if ':' in hostname else hostname def _fetch_ssl_params(self): """Handles fetching what ssl params should be used for the connection (if any). """ if self.ssl: ssl_params = dict() # http://docs.python.org/library/ssl.html - ssl.wrap_socket if self.ssl_version: ssl_params['ssl_version'] = self.validate_ssl_version( self.ssl_version) if self.ssl_key_file: ssl_params['keyfile'] = self.ssl_key_file if self.ssl_cert_file: ssl_params['certfile'] = self.ssl_cert_file if self.ssl_ca_file: ssl_params['ca_certs'] = self.ssl_ca_file # We might want to allow variations in the # future with this? 
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED return ssl_params or True return False @staticmethod def _on_connection_blocked(reason): LOG.error("The broker has blocked the connection: %s", reason) @staticmethod def _on_connection_unblocked(): LOG.info("The broker has unblocked the connection") def ensure_connection(self): # NOTE(sileht): we reset the channel and ensure # the kombu underlying connection works def on_error(exc, interval): LOG.error("Connection failed: %s (retrying in %s seconds)", str(exc), interval) self._set_current_channel(None) self.connection.ensure_connection( errback=on_error, max_retries=self.max_retries, interval_start=self.interval_start or 1, interval_step=self.interval_stepping, interval_max=self.interval_max, ) self._set_current_channel(self.connection.channel()) self.set_transport_socket_timeout() def ensure(self, method, retry=None, recoverable_error_callback=None, error_callback=None, timeout_is_error=True): """Will retry up to retry number of times. retry = None or -1 means to retry forever retry = 0 means no retry retry = N means N retries NOTE(sileht): Must be called within the connection lock """ current_pid = os.getpid() if self._initial_pid != current_pid: LOG.warning("Process forked after connection established! " "This can result in unpredictable behavior. " "See: https://docs.openstack.org/oslo.messaging/" "latest/reference/transport.html") self._initial_pid = current_pid if retry is None or retry < 0: retry = float('inf') def on_error(exc, interval): LOG.debug("[%s] Received recoverable error from kombu:" % self.connection_id, exc_info=True) recoverable_error_callback and recoverable_error_callback(exc) interval = (self.kombu_reconnect_delay + interval if self.kombu_reconnect_delay > 0 else interval) info = {'err_str': exc, 'sleep_time': interval} info.update(self._get_connection_info(conn_error=True)) if 'Basic.cancel' in str(exc): # This branch allows for consumer offset reset # in the unlikely case consumers are cancelled. This may # happen, for example, when we delete the stream queue. # We need to start consuming from "last" because the stream # offset maybe reset. LOG.warn('[%s] Basic.cancel received. ' 'Resetting consumers offsets to last.', self.connection_id) for consumer in self._consumers: consumer.reset_stream_offset() if 'Socket closed' in str(exc): LOG.error('[%(connection_id)s] AMQP server' ' %(hostname)s:%(port)s closed' ' the connection. Check login credentials:' ' %(err_str)s', info) else: LOG.error('[%(connection_id)s] AMQP server on ' '%(hostname)s:%(port)s is unreachable: ' '%(err_str)s. Trying again in ' '%(sleep_time)d seconds.', info) # XXX(nic): when reconnecting to a RabbitMQ cluster # with mirrored queues in use, the attempt to release the # connection can hang "indefinitely" somewhere deep down # in Kombu. Blocking the thread for a bit prior to # release seems to kludge around the problem where it is # otherwise reproduceable. # TODO(sileht): Check if this is useful since we # use kombu for HA connection, the interval_step # should sufficient, because the underlying kombu transport # connection object freed. if self.kombu_reconnect_delay > 0: LOG.trace('Delaying reconnect for %1.1f seconds ...', self.kombu_reconnect_delay) time.sleep(self.kombu_reconnect_delay) def on_reconnection(new_channel): """Callback invoked when the kombu reconnects and creates a new channel, we use it the reconfigure our consumers. 
""" self._set_current_channel(new_channel) self.set_transport_socket_timeout() LOG.info('[%(connection_id)s] Reconnected to AMQP server on ' '%(hostname)s:%(port)s via [%(transport)s] client ' 'with port %(client_port)s.', self._get_connection_info()) def execute_method(channel): self._set_current_channel(channel) method() try: autoretry_method = self.connection.autoretry( execute_method, channel=self.channel, max_retries=retry, errback=on_error, interval_start=self.interval_start or 1, interval_step=self.interval_stepping, interval_max=self.interval_max, on_revive=on_reconnection) ret, channel = autoretry_method() self._set_current_channel(channel) return ret except rpc_amqp.AMQPDestinationNotFound: # NOTE(sileht): we must reraise this without # trigger error_callback raise except exceptions.MessageUndeliverable: # NOTE(gsantomaggio): we must reraise this without # trigger error_callback raise except Exception as exc: error_callback and error_callback(exc) self._set_current_channel(None) # NOTE(sileht): number of retry exceeded and the connection # is still broken info = {'err_str': exc, 'retry': retry} info.update(self.connection.info()) msg = ('Unable to connect to AMQP server on ' '%(hostname)s:%(port)s after %(retry)s ' 'tries: %(err_str)s' % info) LOG.error(msg) raise exceptions.MessageDeliveryFailure(msg) @staticmethod def on_return(exception, exchange, routing_key, message): raise exceptions.MessageUndeliverable(exception, exchange, routing_key, message) def _set_current_channel(self, new_channel): """Change the channel to use. NOTE(sileht): Must be called within the connection lock """ if new_channel == self.channel: return if self.channel is not None: self._declared_queues.clear() self._declared_exchanges.clear() self.connection.maybe_close_channel(self.channel) self.channel = new_channel if new_channel is not None: if self.purpose == rpc_common.PURPOSE_LISTEN: self._set_qos(new_channel) self._producer = kombu.messaging.Producer(new_channel, on_return=self.on_return) for consumer in self._consumers: consumer.declare(self) def _set_qos(self, channel): """Set QoS prefetch count on the channel""" if self.rabbit_qos_prefetch_count > 0: channel.basic_qos(0, self.rabbit_qos_prefetch_count, False) def close(self): """Close/release this connection.""" self._heartbeat_stop() if self.connection: # NOTE(jcosmao) Delete queue should be called only when queue name # is randomized. When using streams, queue is shared between # all consumers, thus deleting fanout queue will force all other # consumers to disconnect/reconnect by throwing # amqp.exceptions.ConsumerCancelled. # When using QManager, queue name is consistent accross agent # restart, so we don't need to delete it either. Deletion must be # handled by expiration policy. 
if not self.rabbit_stream_fanout and not self.use_queue_manager: for consumer in filter(lambda c: c.type == 'fanout', self._consumers): LOG.debug('[connection close] Deleting fanout ' 'queue: %s ' % consumer.queue.name) consumer.queue.delete() self._set_current_channel(None) self.connection.release() self.connection = None def reset(self): """Reset a connection so it can be used again.""" with self._connection_lock: try: for consumer, tag in self._consumers.items(): consumer.cancel(tag=tag) except kombu.exceptions.OperationalError: self.ensure_connection() self._consumers.clear() self._active_tags.clear() self._new_tags.clear() self._tags = itertools.count(1) def _heartbeat_supported_and_enabled(self): if self.heartbeat_timeout_threshold <= 0: return False if self.connection.supports_heartbeats: return True elif not self._heartbeat_support_log_emitted: LOG.warning("Heartbeat support requested but it is not " "supported by the kombu driver or the broker") self._heartbeat_support_log_emitted = True return False def set_transport_socket_timeout(self, timeout=None): # NOTE(sileht): they are some case where the heartbeat check # or the producer.send return only when the system socket # timeout if reach. kombu doesn't allow use to customise this # timeout so for py-amqp we tweak ourself # NOTE(dmitryme): Current approach works with amqp==1.4.9 and # kombu==3.0.33. Once the commit below is released, we should # try to set the socket timeout in the constructor: # https://github.com/celery/py-amqp/pull/64 heartbeat_timeout = self.heartbeat_timeout_threshold if self._heartbeat_supported_and_enabled(): # NOTE(sileht): we are supposed to send heartbeat every # heartbeat_timeout, no need to wait more otherwise will # disconnect us, so raise timeout earlier ourself if timeout is None: timeout = heartbeat_timeout else: timeout = min(heartbeat_timeout, timeout) try: sock = self.channel.connection.sock except AttributeError as e: # Level is set to debug because otherwise we would spam the logs LOG.debug('[%s] Failed to get socket attribute: %s' % (self.connection_id, str(e))) else: sock.settimeout(timeout) # TCP_USER_TIMEOUT is not defined on Windows and Mac OS X if sys.platform != 'win32' and sys.platform != 'darwin': try: timeout = timeout * 1000 if timeout is not None else 0 # NOTE(gdavoian): only integers and strings are allowed # as socket options' values, and TCP_USER_TIMEOUT option # can take only integer values, so we round-up the timeout # to the nearest integer in order to ensure that the # connection is not broken before the expected timeout sock.setsockopt(socket.IPPROTO_TCP, TCP_USER_TIMEOUT, int(math.ceil(timeout))) except socket.error as error: code = error[0] # TCP_USER_TIMEOUT not defined on kernels <2.6.37 if code != errno.ENOPROTOOPT: raise @contextlib.contextmanager def _transport_socket_timeout(self, timeout): self.set_transport_socket_timeout(timeout) yield self.set_transport_socket_timeout() def _heartbeat_check(self): # NOTE(sileht): we are supposed to send at least one heartbeat # every heartbeat_timeout_threshold, so no need to way more self.connection.heartbeat_check(rate=self.heartbeat_rate) def _heartbeat_start(self): if self._heartbeat_supported_and_enabled(): self._heartbeat_exit_event = threading.Event() self._heartbeat_thread = threading.Thread( target=self._heartbeat_thread_job, name="Rabbit-heartbeat") self._heartbeat_thread.daemon = True self._heartbeat_thread.start() else: self._heartbeat_thread = None def _heartbeat_stop(self): if self._heartbeat_thread is not None: 
self._heartbeat_exit_event.set() self._heartbeat_thread.join() self._heartbeat_thread = None def _heartbeat_thread_job(self): """Thread that maintains inactive connections """ while not self._heartbeat_exit_event.is_set(): with self._connection_lock.for_heartbeat(): try: try: self._heartbeat_check() # NOTE(sileht): We need to drain event to receive # heartbeat from the broker but don't hold the # connection too much times. In amqpdriver a connection # is used exclusively for read or for write, so we have # to do this for connection used for write drain_events # already do that for other connection try: self.connection.drain_events(timeout=0.001) except socket.timeout: pass # NOTE(hberaud): In a clustered rabbitmq when # a node disappears, we get a ConnectionRefusedError # because the socket get disconnected. # The socket access yields a OSError because the heartbeat # tries to reach an unreachable host (No route to host). # Catch these exceptions to ensure that we call # ensure_connection for switching the # connection destination. except (socket.timeout, ConnectionRefusedError, OSError, kombu.exceptions.OperationalError, amqp_ex.ConnectionForced) as exc: LOG.info("A recoverable connection/channel error " "occurred, trying to reconnect: %s", exc) self.ensure_connection() except Exception: LOG.warning("Unexpected error during heartbeat " "thread processing, retrying...") LOG.debug('Exception', exc_info=True) self._heartbeat_exit_event.wait( timeout=self._heartbeat_wait_timeout) self._heartbeat_exit_event.clear() def declare_consumer(self, consumer): """Create a Consumer using the class that was passed in and add it to our list of consumers """ def _connect_error(exc): log_info = {'topic': consumer.routing_key, 'err_str': exc} LOG.error("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s", log_info) def _declare_consumer(): consumer.declare(self) tag = self._active_tags.get(consumer.queue_name) if tag is None: tag = next(self._tags) self._active_tags[consumer.queue_name] = tag self._new_tags.add(tag) self._consumers[consumer] = tag return consumer with self._connection_lock: return self.ensure(_declare_consumer, error_callback=_connect_error) def consume(self, timeout=None): """Consume from all queues/consumers.""" timer = rpc_common.DecayingTimer(duration=timeout) timer.start() def _raise_timeout(): raise rpc_common.Timeout() def _recoverable_error_callback(exc): if not isinstance(exc, rpc_common.Timeout): self._new_tags = set(self._consumers.values()) timer.check_return(_raise_timeout) def _error_callback(exc): _recoverable_error_callback(exc) LOG.error('Failed to consume message from queue: %s', exc) def _consume(): # NOTE(sileht): in case the acknowledgment or requeue of a # message fail, the kombu transport can be disconnected # In this case, we must redeclare our consumers, so raise # a recoverable error to trigger the reconnection code. 
if not self.connection.connected: raise self.connection.recoverable_connection_errors[0] while self._new_tags: for consumer, tag in self._consumers.items(): if tag in self._new_tags: consumer.consume(self, tag=tag) self._new_tags.remove(tag) poll_timeout = (self._poll_timeout if timeout is None else min(timeout, self._poll_timeout)) while True: if self._consume_loop_stopped: return if self._heartbeat_supported_and_enabled(): self._heartbeat_check() try: self.connection.drain_events(timeout=poll_timeout) return except socket.timeout: poll_timeout = timer.check_return( _raise_timeout, maximum=self._poll_timeout) except self.connection.channel_errors as exc: if exc.code == 406 and exc.method_name == 'Basic.ack': # NOTE(gordc): occasionally multiple workers will grab # same message and acknowledge it. if it happens, meh. raise self.connection.recoverable_channel_errors[0] raise with self._connection_lock: self.ensure(_consume, recoverable_error_callback=_recoverable_error_callback, error_callback=_error_callback) def stop_consuming(self): self._consume_loop_stopped = True def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. In nova's use, this is generally a msg_id queue used for responses for call/multicall """ consumer = Consumer( exchange_name='', # using default exchange queue_name=topic, routing_key='', type='direct', durable=self.rabbit_transient_quorum_queue, exchange_auto_delete=False, queue_auto_delete=False, callback=callback, rabbit_ha_queues=self.rabbit_ha_queues, rabbit_queue_ttl=self.rabbit_transient_queues_ttl, enable_cancel_on_failover=self.enable_cancel_on_failover, rabbit_quorum_queue=self.rabbit_transient_quorum_queue, rabbit_quorum_queue_config=self.rabbit_quorum_queue_config) self.declare_consumer(consumer) def declare_topic_consumer(self, exchange_name, topic, callback=None, queue_name=None): """Create a 'topic' consumer.""" consumer = Consumer( exchange_name=exchange_name, queue_name=queue_name or topic, routing_key=topic, type='topic', durable=self.durable, exchange_auto_delete=self.amqp_auto_delete, queue_auto_delete=self.amqp_auto_delete, callback=callback, rabbit_ha_queues=self.rabbit_ha_queues, enable_cancel_on_failover=self.enable_cancel_on_failover, rabbit_quorum_queue=self.rabbit_quorum_queue, rabbit_quorum_queue_config=self.rabbit_quorum_queue_config) self.declare_consumer(consumer) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer.""" exchange_name = '%s_fanout' % topic if self.rabbit_stream_fanout: queue_name = '%s_fanout' % topic else: if self._q_manager: unique = self._q_manager.get() else: unique = uuid.uuid4().hex queue_name = '%s_fanout_%s' % (topic, unique) LOG.debug('Creating fanout queue: %s', queue_name) is_durable = (self.rabbit_transient_quorum_queue or self.rabbit_stream_fanout) consumer = Consumer( exchange_name=exchange_name, queue_name=queue_name, routing_key=topic, type='fanout', durable=is_durable, exchange_auto_delete=True, queue_auto_delete=False, callback=callback, rabbit_ha_queues=self.rabbit_ha_queues, rabbit_queue_ttl=self.rabbit_transient_queues_ttl, enable_cancel_on_failover=self.enable_cancel_on_failover, rabbit_quorum_queue=self.rabbit_transient_quorum_queue, rabbit_quorum_queue_config=self.rabbit_quorum_queue_config, rabbit_stream_fanout=self.rabbit_stream_fanout) self.declare_consumer(consumer) def _ensure_publishing(self, method, exchange, msg, routing_key=None, timeout=None, retry=None, transport_options=None): """Send to a publisher based on the publisher class.""" def 
_error_callback(exc): log_info = {'topic': exchange.name, 'err_str': exc} LOG.error("Failed to publish message to topic " "'%(topic)s': %(err_str)s", log_info) LOG.debug('Exception', exc_info=exc) method = functools.partial(method, exchange, msg, routing_key, timeout, transport_options) with self._connection_lock: self.ensure(method, retry=retry, error_callback=_error_callback) def _get_connection_info(self, conn_error=False): # Bug #1745166: set 'conn_error' true if this is being called when the # connection is in a known error state. Otherwise attempting to access # the connection's socket while it is in an error state will cause # py-amqp to attempt reconnecting. ci = self.connection.info() info = dict([(k, ci.get(k)) for k in ['hostname', 'port', 'transport']]) client_port = None if (not conn_error and self.channel and hasattr(self.channel.connection, 'sock') and self.channel.connection.sock): client_port = self.channel.connection.sock.getsockname()[1] info.update({'client_port': client_port, 'connection_id': self.connection_id}) return info def _publish(self, exchange, msg, routing_key=None, timeout=None, transport_options=None): """Publish a message.""" if not (exchange.passive or exchange.name in self._declared_exchanges): try: exchange(self.channel).declare() except amqp_ex.PreconditionFailed as err: # NOTE(hberaud): This kind of exception may be triggered # when a control exchange is shared between services and # when services try to create it with configs that differ # from each other. RabbitMQ will reject the services # that try to create it with a configuration that differs # from the one used first. if "PRECONDITION_FAILED - inequivalent arg 'durable'" \ in str(err): LOG.warning("Force creating a non durable exchange.") exchange.durable = False exchange(self.channel).declare() self._declared_exchanges.add(exchange.name) log_info = {'msg': msg, 'who': exchange or 'default', 'key': routing_key, 'transport_options': str(transport_options)} LOG.trace('Connection._publish: sending message %(msg)s to' ' %(who)s with routing key %(key)s', log_info) # NOTE(sileht): no need to wait more, the caller expects # an answer before the timeout is reached with self._transport_socket_timeout(timeout): self._producer.publish( msg, mandatory=transport_options.at_least_once if transport_options else False, exchange=exchange, routing_key=routing_key, expiration=timeout, compression=self.kombu_compression) def _publish_and_creates_default_queue(self, exchange, msg, routing_key=None, timeout=None, transport_options=None): """Publisher that declares a default queue. When the exchange is missing, instead of silently creating an exchange that is not bound to any queue, this publisher creates a default queue named after the routing_key. This is mainly used to avoid missing notifications when nobody consumes them yet. If a future consumer binds the default queue, it can retrieve the missed messages. _set_current_channel is responsible for cleaning up the cache.
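
    For example (the exchange and routing key are illustrative): a
    notification published to the 'nova' topic exchange with routing key
    'notifications.info' also declares a queue literally named
    'notifications.info', bound to that exchange with the same routing
    key, so messages emitted before any listener starts are parked in
    the queue instead of being dropped.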
""" queue_identifier = (exchange.name, routing_key) # NOTE(sileht): We only do it once per reconnection # the Connection._set_current_channel() is responsible to clear # this cache if queue_identifier not in self._declared_queues: queue = kombu.entity.Queue( channel=self.channel, exchange=exchange, durable=exchange.durable, auto_delete=exchange.auto_delete, name=routing_key, routing_key=routing_key, queue_arguments=_get_queue_arguments( self.rabbit_ha_queues, 0, self.rabbit_quorum_queue, self.rabbit_quorum_queue_config, False)) log_info = {'key': routing_key, 'exchange': exchange} LOG.trace( 'Connection._publish_and_creates_default_queue: ' 'declare queue %(key)s on %(exchange)s exchange', log_info) queue.declare() self._declared_queues.add(queue_identifier) self._publish(exchange, msg, routing_key=routing_key, timeout=timeout) def _publish_and_raises_on_missing_exchange(self, exchange, msg, routing_key=None, timeout=None, transport_options=None): """Publisher that raises exception if exchange is missing.""" if not exchange.passive: raise RuntimeError("_publish_and_retry_on_missing_exchange() must " "be called with an passive exchange.") try: self._publish(exchange, msg, routing_key=routing_key, timeout=timeout, transport_options=transport_options) return except self.connection.channel_errors as exc: if exc.code == 404: # NOTE(noelbk/sileht): # If rabbit dies, the consumer can be disconnected before the # publisher sends, and if the consumer hasn't declared the # queue, the publisher's will send a message to an exchange # that's not bound to a queue, and the message wll be lost. # So we set passive=True to the publisher exchange and catch # the 404 kombu ChannelError and retry until the exchange # appears raise rpc_amqp.AMQPDestinationNotFound( "exchange %s doesn't exist" % exchange.name) raise def direct_send(self, msg_id, msg): """Send a 'direct' message.""" exchange = kombu.entity.Exchange( name='', # using default exchange type='direct', durable=self.rabbit_transient_quorum_queue, auto_delete=True, passive=True) options = oslo_messaging.TransportOptions( at_least_once=self.direct_mandatory_flag) LOG.debug('Sending direct to %s', msg_id) self._ensure_publishing(self._publish_and_raises_on_missing_exchange, exchange, msg, routing_key=msg_id, transport_options=options) def topic_send(self, exchange_name, topic, msg, timeout=None, retry=None, transport_options=None): """Send a 'topic' message.""" exchange = kombu.entity.Exchange( name=exchange_name, type='topic', durable=self.durable, auto_delete=self.amqp_auto_delete) LOG.debug('Sending topic to %s with routing_key %s', exchange_name, topic) self._ensure_publishing(self._publish, exchange, msg, routing_key=topic, timeout=timeout, retry=retry, transport_options=transport_options) def fanout_send(self, topic, msg, retry=None): """Send a 'fanout' message.""" exchange = kombu.entity.Exchange( name='%s_fanout' % topic, type='fanout', durable=self.rabbit_transient_quorum_queue, auto_delete=True) LOG.debug('Sending fanout to %s_fanout', topic) self._ensure_publishing(self._publish, exchange, msg, retry=retry) def notify_send(self, exchange_name, topic, msg, retry=None, **kwargs): """Send a notify message on a topic.""" exchange = kombu.entity.Exchange( name=exchange_name, type='topic', durable=self.durable, auto_delete=self.amqp_auto_delete) self._ensure_publishing(self._publish_and_creates_default_queue, exchange, msg, routing_key=topic, retry=retry) class RabbitDriver(amqpdriver.AMQPDriverBase): """RabbitMQ Driver The ``rabbit`` driver is the 
default driver used in OpenStack's integration tests. The driver is aliased as ``kombu`` to support upgrading existing installations with older settings. """ def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): opt_group = cfg.OptGroup(name='oslo_messaging_rabbit', title='RabbitMQ driver options') conf.register_group(opt_group) conf.register_opts(rabbit_opts, group=opt_group) conf.register_opts(rpc_amqp.amqp_opts, group=opt_group) conf.register_opts(base.base_opts, group=opt_group) conf = rpc_common.ConfigOptsProxy(conf, url, opt_group.name) self.missing_destination_retry_timeout = ( conf.oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout) self.prefetch_size = ( conf.oslo_messaging_rabbit.rabbit_qos_prefetch_count) # the pool configuration properties max_size = conf.oslo_messaging_rabbit.rpc_conn_pool_size min_size = conf.oslo_messaging_rabbit.conn_pool_min_size if max_size < min_size: raise RuntimeError( f"rpc_conn_pool_size: {max_size} must be greater than " f"or equal to conn_pool_min_size: {min_size}") ttl = conf.oslo_messaging_rabbit.conn_pool_ttl connection_pool = pool.ConnectionPool( conf, max_size, min_size, ttl, url, Connection) super(RabbitDriver, self).__init__( conf, url, connection_pool, default_exchange, allowed_remote_exmods ) def require_features(self, requeue=True): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1266725 oslo.messaging-14.9.0/oslo_messaging/_drivers/kafka_driver/0000775000175000017500000000000000000000000024036 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/kafka_driver/__init__.py0000664000175000017500000000000000000000000026135 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/kafka_driver/kafka_options.py0000664000175000017500000000753400000000000027251 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_messaging._drivers import common KAFKA_OPTS = [ cfg.IntOpt('kafka_max_fetch_bytes', default=1024 * 1024, help='Max fetch bytes of Kafka consumer'), cfg.FloatOpt('kafka_consumer_timeout', default=1.0, help='Default timeout(s) for Kafka consumers'), cfg.IntOpt('pool_size', default=10, deprecated_for_removal=True, deprecated_reason='Driver no longer uses connection pool. ', help='Pool Size for Kafka Consumers'), cfg.IntOpt('conn_pool_min_size', default=2, deprecated_for_removal=True, deprecated_reason='Driver no longer uses connection pool. ', help='The pool size limit for connections expiration policy'), cfg.IntOpt('conn_pool_ttl', default=1200, deprecated_for_removal=True, deprecated_reason='Driver no longer uses connection pool. 
', help='The time-to-live in sec of idle connections in the pool'), cfg.StrOpt('consumer_group', default="oslo_messaging_consumer", help='Group id for Kafka consumer. Consumers in one group ' 'will coordinate message consumption'), cfg.FloatOpt('producer_batch_timeout', default=0., help="Upper bound on the delay for KafkaProducer batching " "in seconds"), cfg.IntOpt('producer_batch_size', default=16384, help='Size of batch for the producer async send'), cfg.StrOpt('compression_codec', default='none', choices=['none', 'gzip', 'snappy', 'lz4', 'zstd'], help='The compression codec for all data generated by the ' 'producer. If not set, compression will not be used. ' 'Note that the allowed values of this depend on the kafka ' 'version'), cfg.BoolOpt('enable_auto_commit', default=False, help='Enable asynchronous consumer commits'), cfg.IntOpt('max_poll_records', default=500, help='The maximum number of records returned in a poll call'), cfg.StrOpt('security_protocol', default='PLAINTEXT', choices=('PLAINTEXT', 'SASL_PLAINTEXT', 'SSL', 'SASL_SSL'), help='Protocol used to communicate with brokers'), cfg.StrOpt('sasl_mechanism', default='PLAIN', help='Mechanism when security protocol is SASL'), cfg.StrOpt('ssl_cafile', default='', help='CA certificate PEM file used to verify the server' ' certificate'), cfg.StrOpt('ssl_client_cert_file', default='', help='Client certificate PEM file used for authentication.'), cfg.StrOpt('ssl_client_key_file', default='', help='Client key PEM file used for authentication.'), cfg.StrOpt('ssl_client_key_password', default='', help='Client key password file used for authentication.') ] def register_opts(conf, url): opt_group = cfg.OptGroup(name='oslo_messaging_kafka', title='Kafka driver options') conf.register_group(opt_group) conf.register_opts(KAFKA_OPTS, group=opt_group) return common.ConfigOptsProxy(conf, url, opt_group.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_drivers/pool.py0000664000175000017500000001071600000000000022736 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import threading from oslo_log import log as logging from oslo_utils import timeutils from oslo_messaging._drivers import common LOG = logging.getLogger(__name__) class Pool(object, metaclass=abc.ABCMeta): """A thread-safe object pool. Modelled after the eventlet.pools.Pool interface, but designed to be safe when using native threads without the GIL. Resizing is not supported. """ def __init__(self, max_size=4, min_size=2, ttl=1200, on_expire=None): super(Pool, self).__init__() self._min_size = min_size self._max_size = max_size self._item_ttl = ttl self._current_size = 0 self._cond = threading.Condition() self._items = collections.deque() self._on_expire = on_expire def expire(self): """Remove expired items from left (the oldest item) to right (the newest item). 
""" with self._cond: while len(self._items) > self._min_size: try: ttl_watch, item = self._items.popleft() if ttl_watch.expired(): self._on_expire and self._on_expire(item) self._current_size -= 1 else: self._items.appendleft((ttl_watch, item)) return except IndexError: break def put(self, item): """Return an item to the pool.""" with self._cond: ttl_watch = timeutils.StopWatch(duration=self._item_ttl) ttl_watch.start() self._items.append((ttl_watch, item)) self._cond.notify() def get(self, retry=None): """Return an item from the pool, when one is available. This may cause the calling thread to block. """ with self._cond: while True: try: ttl_watch, item = self._items.pop() self.expire() return item except IndexError: pass if self._current_size < self._max_size: self._current_size += 1 break LOG.warning("Connection pool limit exceeded: " "current size %s surpasses max " "configured rpc_conn_pool_size %s", self._current_size, self._max_size) self._cond.wait() # We've grabbed a slot and dropped the lock, now do the creation try: return self.create(retry=retry) except Exception: with self._cond: self._current_size -= 1 raise def iter_free(self): """Iterate over free items.""" while True: try: _, item = self._items.pop() yield item except IndexError: return @abc.abstractmethod def create(self, retry=None): """Construct a new item.""" class ConnectionPool(Pool): """Class that implements a Pool of Connections.""" def __init__(self, conf, max_size, min_size, ttl, url, connection_cls): self.connection_cls = connection_cls self.conf = conf self.url = url super(ConnectionPool, self).__init__(max_size, min_size, ttl, self._on_expire) def _on_expire(self, connection): connection.close() LOG.debug("Idle connection has expired and been closed." " Pool size: %d" % len(self._items)) def create(self, purpose=common.PURPOSE_SEND, retry=None): LOG.debug('Pool creating new connection') return self.connection_cls(self.conf, self.url, purpose, retry=retry) def empty(self): for item in self.iter_free(): item.close() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1266725 oslo.messaging-14.9.0/oslo_messaging/_metrics/0000775000175000017500000000000000000000000021376 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_metrics/__init__.py0000664000175000017500000000126200000000000023510 0ustar00zuulzuul00000000000000# Copyright 2020 LINE Corp. # # Licensed under the Apache License, Version 2.0 (the 'License'); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'MetricsCollectorClient', 'get_collector', ] from .client import * ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_metrics/client.py0000664000175000017500000002343500000000000023235 0ustar00zuulzuul00000000000000 # Copyright 2020 LINE Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import queue import socket import threading import time from oslo_config import cfg from oslo_log import log as logging from oslo_metrics import message_type from oslo_utils import eventletutils from oslo_utils import importutils LOG = logging.getLogger(__name__) eventlet = importutils.try_import('eventlet') if eventlet and eventletutils.is_monkey_patched("thread"): # Initialize the module with the native python threading module # when eventlet/greenlet has already monkey patched it. stdlib_threading = eventlet.patcher.original('threading') else: # Handle the case where we run this driver in a non patched environment # while the user has still configured the driver to run its heartbeat # through a python thread; without this, starting the heartbeat would # try to override the threading module. stdlib_threading = threading oslo_messaging_metrics = [ cfg.BoolOpt('metrics_enabled', default=False, help='Whether to send rpc metrics to oslo.metrics.'), cfg.IntOpt('metrics_buffer_size', default=1000, help='Size of the buffer used to store metrics' ' in oslo.messaging.'), cfg.StrOpt('metrics_socket_file', default='/var/tmp/metrics_collector.sock', # nosec help='Unix domain socket file to be used' ' to send rpc related metrics'), cfg.StrOpt('metrics_process_name', default='', help='Process name which is used to identify which process' ' produces metrics'), cfg.IntOpt('metrics_thread_stop_timeout', default=10, help='The sending thread stops metrics_thread_stop_timeout' ' seconds after the last successful metrics send, so that' ' this thread does not block the process while it is' ' shutting down. If the process is still running, the' ' sending thread will be restarted at the next metrics' ' queueing time') ] cfg.CONF.register_opts(oslo_messaging_metrics, group='oslo_messaging_metrics') class MetricsCollectorClient(object): def __init__(self, conf, metrics_type, **kwargs): self.conf = conf.oslo_messaging_metrics self.unix_socket = self.conf.metrics_socket_file buffer_size = self.conf.metrics_buffer_size self.tx_queue = queue.Queue(buffer_size) self.next_send_metric = None self.metrics_type = metrics_type self.args = kwargs self.send_thread = threading.Thread(target=self.send_loop) self.send_thread.start() def __enter__(self): if not self.conf.metrics_enabled: return None self.start_time = time.time() send_method = getattr(self, self.metrics_type + "_invocation_start_total") send_method(**self.args) return self def __exit__(self, exc_type, exc_value, traceback): if self.conf.metrics_enabled: duration = time.time() - self.start_time send_method = getattr( self, self.metrics_type + "_processing_seconds") send_method(duration=duration, **self.args) send_method = getattr( self, self.metrics_type + "_invocation_end_total") send_method(**self.args) def put_into_txqueue(self, metrics_name, action, **labels): labels['process'] = \ self.conf.metrics_process_name m = message_type.Metric("oslo_messaging", metrics_name, action, **labels) try: self.tx_queue.put_nowait(m) except queue.Full: LOG.warning("tx queue is already full (%s/%s). Failed to " "send the metrics (%s)" % (self.tx_queue.qsize(), self.tx_queue.maxsize, m)) if not self.send_thread.is_alive(): self.send_thread = threading.Thread(target=self.send_loop) self.send_thread.start() def send_loop(self): timeout = self.conf.metrics_thread_stop_timeout stoptime = time.time() + timeout while stoptime > time.time(): if self.next_send_metric is None: try: self.next_send_metric = self.tx_queue.get(timeout=timeout) except queue.Empty: continue try: self.send_metric(self.next_send_metric) self.next_send_metric = None stoptime = time.time() + timeout except Exception as e: LOG.error("Failed to send metrics: %s. " "Waiting 1 second before the next try."
% e) time.sleep(1) def send_metric(self, metric): s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) s.connect(self.unix_socket) s.send(metric.to_json().encode()) s.close() def put_rpc_client_metrics_to_txqueue(self, metric_name, action, target, method, call_type, timeout, exception=None): kwargs = { 'call_type': call_type, 'exchange': target.exchange, 'topic': target.topic, 'namespace': target.namespace, 'version': target.version, 'server': target.server, 'fanout': target.fanout, 'method': method, 'timeout': timeout, } if exception: kwargs['exception'] = exception self.put_into_txqueue(metric_name, action, **kwargs) def rpc_client_invocation_start_total(self, target, method, call_type, timeout=None): self.put_rpc_client_metrics_to_txqueue( "rpc_client_invocation_start_total", message_type.MetricAction("inc", None), target, method, call_type, timeout ) def rpc_client_invocation_end_total(self, target, method, call_type, timeout=None): self.put_rpc_client_metrics_to_txqueue( "rpc_client_invocation_end_total", message_type.MetricAction("inc", None), target, method, call_type, timeout ) def rpc_client_processing_seconds(self, target, method, call_type, duration, timeout=None): self.put_rpc_client_metrics_to_txqueue( "rpc_client_processing_seconds", message_type.MetricAction("observe", duration), target, method, call_type, timeout ) def rpc_client_exception_total(self, target, method, call_type, exception, timeout=None): self.put_rpc_client_metrics_to_txqueue( "rpc_client_exception_total", message_type.MetricAction("inc", None), target, method, call_type, timeout, exception ) def put_rpc_server_metrics_to_txqueue(self, metric_name, action, target, endpoint, ns, ver, method, exception=None): kwargs = { 'endpoint': endpoint, 'namespace': ns, 'version': ver, 'method': method, 'exchange': None, 'topic': None, 'server': None } if target: kwargs['exchange'] = target.exchange kwargs['topic'] = target.topic kwargs['server'] = target.server if exception: kwargs['exception'] = exception self.put_into_txqueue(metric_name, action, **kwargs) def rpc_server_invocation_start_total(self, target, endpoint, ns, ver, method): self.put_rpc_server_metrics_to_txqueue( "rpc_server_invocation_start_total", message_type.MetricAction("inc", None), target, endpoint, ns, ver, method ) def rpc_server_invocation_end_total(self, target, endpoint, ns, ver, method): self.put_rpc_server_metrics_to_txqueue( "rpc_server_invocation_end_total", message_type.MetricAction("inc", None), target, endpoint, ns, ver, method ) def rpc_server_processing_seconds(self, target, endpoint, ns, ver, method, duration): self.put_rpc_server_metrics_to_txqueue( "rpc_server_processing_seconds", message_type.MetricAction("observe", duration), target, endpoint, ns, ver, method ) def rpc_server_exception_total(self, target, endpoint, ns, ver, method, exception): self.put_rpc_server_metrics_to_txqueue( "rpc_server_exception_total", message_type.MetricAction("inc", None), target, endpoint, ns, ver, method, exception=exception ) METRICS_COLLECTOR = None def get_collector(conf, metrics_type, **kwargs): global threading threading = stdlib_threading global METRICS_COLLECTOR if METRICS_COLLECTOR is None: METRICS_COLLECTOR = MetricsCollectorClient( conf, metrics_type, **kwargs) return METRICS_COLLECTOR ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/_utils.py0000664000175000017500000000544100000000000021446 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, 
Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import queue import threading from oslo_utils import eventletutils from oslo_utils import importutils LOG = logging.getLogger(__name__) eventlet = importutils.try_import('eventlet') if eventlet and eventletutils.is_monkey_patched("thread"): # Initialize the module with the native python threading module # when eventlet/greenlet has already monkey patched it. stdlib_threading = eventlet.patcher.original('threading') stdlib_queue = eventlet.patcher.original('queue') else: # Handle the case where we run this driver in a non patched environment # while the user has still configured the driver to run its heartbeat # through a python thread; without this, starting the heartbeat would # try to override the threading module. stdlib_threading = threading stdlib_queue = queue def version_is_compatible(imp_version, version): """Determine whether versions are compatible. :param imp_version: The version implemented :param version: The version requested by an incoming message. """ if imp_version is None: return True if version is None: return False version_parts = version.split('.') imp_version_parts = imp_version.split('.') try: rev = version_parts[2] except IndexError: rev = 0 try: imp_rev = imp_version_parts[2] except IndexError: imp_rev = 0 if int(version_parts[0]) != int(imp_version_parts[0]): # Major return False if int(version_parts[1]) > int(imp_version_parts[1]): # Minor return False if (int(version_parts[1]) == int(imp_version_parts[1]) and int(rev) > int(imp_rev)): # Revision return False return True class DummyLock(object): def acquire(self): pass def release(self): pass def __enter__(self): self.acquire() def __exit__(self, type, value, traceback): self.release() def get_executor_with_context(): if eventletutils.is_monkey_patched('thread'): LOG.debug("Threading is patched, using an eventlet executor") return 'eventlet' LOG.debug("Using a threading executor") return 'threading' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/conffixture.py0000664000175000017500000001304400000000000022501 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
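A quick, hedged illustration of the version negotiation helper above (the expected results follow directly from the major/minor/revision comparison in version_is_compatible())::

    from oslo_messaging._utils import version_is_compatible

    # Same major version, requested minor at or below the implemented one.
    assert version_is_compatible('1.3', '1.1')
    # Requested minor above the implemented one is rejected.
    assert not version_is_compatible('1.1', '1.3')
    # Major versions must match exactly.
    assert not version_is_compatible('2.0', '1.9')
    # A None implemented version accepts anything; a None requested
    # version against a concrete implemented version does not.
    assert version_is_compatible(None, '1.0')
    assert not version_is_compatible('1.0', None)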
import sys import fixtures from functools import wraps __all__ = ['ConfFixture'] def _import_opts(conf, module, opts, group=None): __import__(module) conf.register_opts(getattr(sys.modules[module], opts), group=group) class ConfFixture(fixtures.Fixture): """Tweak configuration options for unit testing. oslo.messaging registers a number of configuration options, but rather than directly referencing those options, users of the API should use this interface for querying and overriding certain configuration options. An example usage:: self.messaging_conf = self.useFixture(messaging.ConfFixture(cfg.CONF)) self.messaging_conf.transport_url = 'fake:/' :param conf: a ConfigOpts instance :type conf: oslo.config.cfg.ConfigOpts :param transport_url: override default transport_url value :type transport_url: str """ def __init__(self, conf, transport_url=None): self.conf = conf _import_opts(self.conf, 'oslo_messaging._drivers.impl_rabbit', 'rabbit_opts', 'oslo_messaging_rabbit') _import_opts(self.conf, 'oslo_messaging._drivers.base', 'base_opts', 'oslo_messaging_rabbit') _import_opts(self.conf, 'oslo_messaging._drivers.amqp', 'amqp_opts', 'oslo_messaging_rabbit') _import_opts(self.conf, 'oslo_messaging._drivers.amqp1_driver.opts', 'amqp1_opts', 'oslo_messaging_amqp') _import_opts(self.conf, 'oslo_messaging.rpc.client', '_client_opts') _import_opts(self.conf, 'oslo_messaging.transport', '_transport_opts') _import_opts(self.conf, 'oslo_messaging.rpc.dispatcher', '_dispatcher_opts') _import_opts(self.conf, 'oslo_messaging.notify.notifier', '_notifier_opts', 'oslo_messaging_notifications') _import_opts(self.conf, 'oslo_messaging._metrics.client', 'oslo_messaging_metrics', 'oslo_messaging_metrics') if transport_url is not None: self.transport_url = transport_url def _setup_decorator(self): # Support older test cases that still use the set_override # with the old config key names def decorator_for_set_override(wrapped_function): @wraps(wrapped_function) def _wrapper(*args, **kwargs): group = 'oslo_messaging_notifications' if args[0] == 'notification_driver': args = ('driver', args[1], group) elif args[0] == 'notification_transport_url': args = ('transport_url', args[1], group) elif args[0] == 'notification_topics': args = ('topics', args[1], group) return wrapped_function(*args, **kwargs) _wrapper.wrapped = wrapped_function return _wrapper def decorator_for_clear_override(wrapped_function): @wraps(wrapped_function) def _wrapper(*args, **kwargs): group = 'oslo_messaging_notifications' if args[0] == 'notification_driver': args = ('driver', group) elif args[0] == 'notification_transport_url': args = ('transport_url', group) elif args[0] == 'notification_topics': args = ('topics', group) return wrapped_function(*args, **kwargs) _wrapper.wrapped = wrapped_function return _wrapper if not hasattr(self.conf.set_override, 'wrapped'): self.conf.set_override = decorator_for_set_override( self.conf.set_override) if not hasattr(self.conf.clear_override, 'wrapped'): self.conf.clear_override = decorator_for_clear_override( self.conf.clear_override) def _teardown_decorator(self): if hasattr(self.conf.set_override, 'wrapped'): self.conf.set_override = self.conf.set_override.wrapped if hasattr(self.conf.clear_override, 'wrapped'): self.conf.clear_override = self.conf.clear_override.wrapped def setUp(self): super(ConfFixture, self).setUp() self._setup_decorator() self.addCleanup(self._teardown_decorator) self.addCleanup(self.conf.reset) @property def transport_url(self): """The transport url""" return self.conf.transport_url 
@transport_url.setter def transport_url(self, value): self.conf.set_override('transport_url', value) @property def response_timeout(self): """Default number of seconds to wait for a response from a call.""" return self.conf.rpc_response_timeout @response_timeout.setter def response_timeout(self, value): self.conf.set_override('rpc_response_timeout', value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/dispatcher.py0000664000175000017500000000173000000000000022272 0ustar00zuulzuul00000000000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc __all__ = [ "DispatcherBase" ] class DispatcherBase(object, metaclass=abc.ABCMeta): "Base class for dispatcher" @abc.abstractmethod def dispatch(self, incoming): """Dispatch incoming messages to the endpoints and return result :param incoming: incoming object for dispatching to the endpoint :type incoming: object, depends on endpoint type """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/exceptions.py0000664000175000017500000000334200000000000022326 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
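A minimal, hedged sketch of the ConfFixture above in a unit test (assumes a testtools-based test case; 'fake:/' selects the in-memory fake transport used by the test suite)::

    from oslo_config import cfg
    import testtools

    from oslo_messaging import conffixture

    class TestWithFakeTransport(testtools.TestCase):
        def setUp(self):
            super().setUp()
            # Route messaging through the fake driver and shorten the
            # RPC timeout so call failures surface quickly in tests.
            self.messaging_conf = self.useFixture(
                conffixture.ConfFixture(cfg.CONF))
            self.messaging_conf.transport_url = 'fake:/'
            self.messaging_conf.response_timeout = 5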
__all__ = ['MessagingException', 'MessagingTimeout', 'MessageDeliveryFailure', 'InvalidTarget', 'MessageUndeliverable'] class MessagingException(Exception): """Base class for exceptions.""" class MessagingTimeout(MessagingException): """Raised if message sending times out.""" class MessageDeliveryFailure(MessagingException): """Raised if message sending failed after the asked retry.""" class InvalidTarget(MessagingException, ValueError): """Raised if a target does not meet certain pre-conditions.""" def __init__(self, msg, target): msg = msg + ":" + str(target) super(InvalidTarget, self).__init__(msg) self.target = target class MessageUndeliverable(Exception): """Raised if message is not routed with mandatory flag""" def __init__(self, exception, exchange, routing_key, message): super(MessageUndeliverable, self).__init__() self.exception = exception self.exchange = exchange self.routing_key = routing_key self.message = message class ConfigurationError(Exception): """Raised when messaging isn't configured correctly.""" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1266725 oslo.messaging-14.9.0/oslo_messaging/hacking/0000775000175000017500000000000000000000000021175 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/hacking/__init__.py0000664000175000017500000000000000000000000023274 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/hacking/checks.py0000664000175000017500000002606300000000000023016 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import ast from hacking import core oslo_namespace_imports_dot = re.compile(r"import[\s]+oslo[.][^\s]+") oslo_namespace_imports_from_dot = re.compile(r"from[\s]+oslo[.]") oslo_namespace_imports_from_root = re.compile(r"from[\s]+oslo[\s]+import[\s]+") mock_imports_directly = re.compile(r"import[\s]+mock") mock_imports_direclty_from = re.compile(r"from[\s]+mock[\s]+import[\s]+") @core.flake8ext def check_oslo_namespace_imports(logical_line): if re.match(oslo_namespace_imports_from_dot, logical_line): msg = ("O321: '%s' must be used instead of '%s'.") % ( logical_line.replace('oslo.', 'oslo_'), logical_line) yield (0, msg) elif re.match(oslo_namespace_imports_from_root, logical_line): msg = ("O321: '%s' must be used instead of '%s'.") % ( logical_line.replace('from oslo import ', 'import oslo_'), logical_line) yield (0, msg) elif re.match(oslo_namespace_imports_dot, logical_line): msg = ("O321: '%s' must be used instead of '%s'.") % ( logical_line.replace('import', 'from').replace('.', ' import '), logical_line) yield (0, msg) class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. 
Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ def __init__(self, tree, filename): """This object is created automatically by pep8. :param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" message = message or self.CHECK_DESC error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) class CheckForLoggingIssues(BaseASTChecker): DEBUG_CHECK_DESC = 'O324 Using translated string in debug logging' NONDEBUG_CHECK_DESC = 'O325 Not using translating helper for logging' EXCESS_HELPER_CHECK_DESC = 'O326 Using hints when _ is necessary' LOG_MODULES = ('logging') name = 'check_for_logging_issues' version = '1.0' def __init__(self, tree, filename): super(CheckForLoggingIssues, self).__init__(tree, filename) self.logger_names = [] self.logger_module_names = [] # NOTE(dstanek): this kinda accounts for scopes when talking # about only leaf node in the graph self.assignments = {} def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" for field, value in ast.iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, ast.AST): item._parent = node self.visit(item) elif isinstance(value, ast.AST): value._parent = node self.visit(value) def _filter_imports(self, module_name, alias): """Keeps lists of logging.""" if module_name in self.LOG_MODULES: self.logger_module_names.append(alias.asname or alias.name) def visit_Import(self, node): for alias in node.names: self._filter_imports(alias.name, alias) return super(CheckForLoggingIssues, self).generic_visit(node) def visit_ImportFrom(self, node): for alias in node.names: full_name = '%s.%s' % (node.module, alias.name) self._filter_imports(full_name, alias) return super(CheckForLoggingIssues, self).generic_visit(node) def _find_name(self, node): """Return the fully qualified name or a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif (isinstance(node, ast.Attribute) and isinstance(node.value, (ast.Name, ast.Attribute))): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' + method_name elif isinstance(node, str): return node else: # could be Subscript, Call or many more return None def visit_Assign(self, node): """Look for 'LOG = logging.getLogger' This handles the simple case: name = [logging_module].getLogger(...) """ attr_node_types = (ast.Name, ast.Attribute) if (len(node.targets) != 1 or not isinstance(node.targets[0], attr_node_types)): # say no to: "x, y = ..." 
return super(CheckForLoggingIssues, self).generic_visit(node) target_name = self._find_name(node.targets[0]) if (isinstance(node.value, ast.BinOp) and isinstance(node.value.op, ast.Mod)): if (isinstance(node.value.left, ast.Call) and isinstance(node.value.left.func, ast.Name)): # NOTE(dstanek): this is done to match cases like: # `msg = _('something %s') % x` node = ast.Assign(value=node.value.left) if not isinstance(node.value, ast.Call): # node.value must be a call to getLogger self.assignments.pop(target_name, None) return super(CheckForLoggingIssues, self).generic_visit(node) if isinstance(node.value.func, ast.Name): self.assignments[target_name] = node.value.func.id return super(CheckForLoggingIssues, self).generic_visit(node) if (not isinstance(node.value.func, ast.Attribute) or not isinstance(node.value.func.value, attr_node_types)): # function must be an attribute on an object like # logging.getLogger return super(CheckForLoggingIssues, self).generic_visit(node) object_name = self._find_name(node.value.func.value) func_name = node.value.func.attr if (object_name in self.logger_module_names and func_name == 'getLogger'): self.logger_names.append(target_name) return super(CheckForLoggingIssues, self).generic_visit(node) def visit_Call(self, node): """Look for the 'LOG.*' calls.""" # obj.method if isinstance(node.func, ast.Attribute): obj_name = self._find_name(node.func.value) if isinstance(node.func.value, ast.Name): method_name = node.func.attr elif isinstance(node.func.value, ast.Attribute): obj_name = self._find_name(node.func.value) method_name = node.func.attr else: # could be Subscript, Call or many more return super(CheckForLoggingIssues, self).generic_visit(node) # if dealing with a logger the method can't be "warn" if obj_name in self.logger_names and method_name == 'warn': msg = node.args[0] # first arg to a logging method is the msg self.add_error(msg, message=self.USING_DEPRECATED_WARN) # must be a logger instance and one of the support logging methods if obj_name not in self.logger_names: return super(CheckForLoggingIssues, self).generic_visit(node) # the call must have arguments if not node.args: return super(CheckForLoggingIssues, self).generic_visit(node) if method_name == 'debug': self._process_debug(node) return super(CheckForLoggingIssues, self).generic_visit(node) def _process_debug(self, node): msg = node.args[0] # first arg to a logging method is the msg if (isinstance(msg, ast.Call) and isinstance(msg.func, ast.Name)): self.add_error(msg, message=self.DEBUG_CHECK_DESC) elif (isinstance(msg, ast.Name) and msg.id in self.assignments and not self._is_raised_later(node, msg.id)): self.add_error(msg, message=self.DEBUG_CHECK_DESC) def _process_non_debug(self, node, method_name): msg = node.args[0] # first arg to a logging method is the msg if isinstance(msg, ast.Call): self.add_error(msg, message=self.NONDEBUG_CHECK_DESC) elif isinstance(msg, ast.Name): # FIXME(dstanek): to make sure more robust we should be checking # all names passed into a logging method. we can't right now # because: # 1. We have code like this that we'll fix when dealing with the %: # msg = _('....') % {} # LOG.warning(msg) # 2. We also do LOG.exception(e) in several places. I'm not sure # exactly what we should be doing about that. 
if msg.id not in self.assignments: return if self._is_raised_later(node, msg.id): self.add_error(msg, message=self.NONDEBUG_CHECK_DESC) elif self._is_raised_later(node, msg.id): self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC) def _is_raised_later(self, node, name): def find_peers(node): node_for_line = node._parent for _field, value in ast.iter_fields(node._parent._parent): if isinstance(value, list) and node_for_line in value: return value[value.index(node_for_line) + 1:] continue return [] peers = find_peers(node) for peer in peers: if isinstance(peer, ast.Raise): exc = peer.exc if (isinstance(exc, ast.Call) and len(exc.args) > 0 and isinstance(exc.args[0], ast.Name) and name in (a.id for a in exc.args)): return True else: return False elif isinstance(peer, ast.Assign): if name in (t.id for t in peer.targets if hasattr(t, 'id')): return False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1306727 oslo.messaging-14.9.0/oslo_messaging/notify/0000775000175000017500000000000000000000000021101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/__init__.py0000664000175000017500000000214500000000000023214 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['Notifier', 'LoggingNotificationHandler', 'get_notification_transport', 'get_notification_listener', 'get_batch_notification_listener', 'NotificationResult', 'NotificationFilter', 'PublishErrorsHandler', 'LoggingErrorNotificationHandler'] from .filter import NotificationFilter from .notifier import * from .listener import * from .log_handler import * from .logger import * from .dispatcher import NotificationResult ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/_impl_log.py0000664000175000017500000000336500000000000023423 0ustar00zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import warnings from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_messaging.notify import notifier class LogDriver(notifier.Driver): "Publish notifications via Python logging infrastructure." 
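# A hedged configuration sketch: this driver is normally enabled through
# the notification driver option; the entry-point name 'log' is assumed
# here to map to this class:
#
#     [oslo_messaging_notifications]
#     driver = log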
# NOTE(dhellmann): For backwards-compatibility with configurations # that may have modified the settings for this logger using a # configuration file, we keep the name # 'oslo.messaging.notification' even though the package is now # 'oslo_messaging'. LOGGER_BASE = 'oslo.messaging.notification' def notify(self, ctxt, message, priority, retry): logger = logging.getLogger('%s.%s' % (self.LOGGER_BASE, message['event_type'])) method = getattr(logger, priority.lower(), None) if method: method(jsonutils.dumps(strutils.mask_dict_password(message))) else: warnings.warn('Unable to log message as notify cannot find a ' 'logger with the priority specified ' '%s' % priority.lower()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/_impl_noop.py0000664000175000017500000000146100000000000023610 0ustar00zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging.notify import notifier class NoOpDriver(notifier.Driver): def notify(self, ctxt, message, priority, retry): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/_impl_routing.py0000664000175000017500000001212200000000000024320 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fnmatch import logging from oslo_config import cfg from stevedore import dispatch import yaml from oslo_messaging.notify import notifier LOG = logging.getLogger(__name__) router_config = cfg.StrOpt('routing_config', default='', deprecated_group='DEFAULT', deprecated_name='routing_notifier_config', help='RoutingNotifier configuration file location.') CONF = cfg.CONF CONF.register_opt(router_config, group='oslo_messaging_notifications') class RoutingDriver(notifier.Driver): NOTIFIER_PLUGIN_NAMESPACE = 'oslo.messaging.notify.drivers' plugin_manager = None routing_groups = None # The routing groups from the config file. used_drivers = None # Used driver names, extracted from config file. def _should_load_plugin(self, ext, *args, **kwargs): # Hack to keep stevedore from circular importing since these # endpoints are used for different purposes. 
if ext.name == 'routing': return False return ext.name in self.used_drivers def _get_notifier_config_file(self, filename): """Broken out for testing.""" return open(filename, 'r') def _load_notifiers(self): """One-time load of notifier config file.""" self.routing_groups = {} self.used_drivers = set() filename = CONF.oslo_messaging_notifications.routing_config if not filename: return # Infer which drivers are used from the config file. self.routing_groups = yaml.safe_load( self._get_notifier_config_file(filename)) if not self.routing_groups: self.routing_groups = {} # In case we got None from load() return for group in self.routing_groups.values(): self.used_drivers.update(group.keys()) LOG.debug('loading notifiers from %s', self.NOTIFIER_PLUGIN_NAMESPACE) self.plugin_manager = dispatch.DispatchExtensionManager( namespace=self.NOTIFIER_PLUGIN_NAMESPACE, check_func=self._should_load_plugin, invoke_on_load=True, invoke_args=None) if not list(self.plugin_manager): LOG.warning("Failed to load any notifiers for %s", self.NOTIFIER_PLUGIN_NAMESPACE) def _get_drivers_for_message(self, group, event_type, priority): """Which drivers should be called for this event_type or priority. """ accepted_drivers = set() for driver, rules in group.items(): checks = [] for key, patterns in rules.items(): if key == 'accepted_events': c = [fnmatch.fnmatch(event_type, p) for p in patterns] checks.append(any(c)) if key == 'accepted_priorities': c = [fnmatch.fnmatch(priority, p.lower()) for p in patterns] checks.append(any(c)) if all(checks): accepted_drivers.add(driver) return list(accepted_drivers) def _filter_func(self, ext, context, message, priority, retry, accepted_drivers): """True/False if the driver should be called for this message. """ # context is unused here, but passed in by map() return ext.name in accepted_drivers def _call_notify(self, ext, context, message, priority, retry, accepted_drivers): """Emit the notification. """ # accepted_drivers is passed in as a result of the map() function LOG.info("Routing '%(event)s' notification to '%(driver)s' " "driver", {'event': message.get('event_type'), 'driver': ext.name}) ext.obj.notify(context, message, priority, retry) def notify(self, context, message, priority, retry): if not self.plugin_manager: self._load_notifiers() # Fail if these aren't present ... event_type = message['event_type'] accepted_drivers = set() for group in self.routing_groups.values(): accepted_drivers.update( self._get_drivers_for_message(group, event_type, priority.lower())) self.plugin_manager.map(self._filter_func, self._call_notify, context, message, priority, retry, list(accepted_drivers)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/_impl_test.py0000664000175000017500000000204600000000000023614 0ustar00zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
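For the RoutingDriver above, a hedged sketch of a routing_config YAML file, matching the group -> driver -> rules layout that _load_notifiers() and _get_drivers_for_message() expect (the group names are illustrative; 'messaging', 'log' and 'noop' are existing notifier entry points)::

    compute_events:
        messaging:
            accepted_events:
                - compute.instance.*
        log:
            accepted_priorities:
                - error
                - critical
    everything_else:
        noop:
            accepted_events:
                - '*'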
from oslo_messaging.notify import notifier NOTIFICATIONS = [] def reset(): "Clear out the list of recorded notifications." global NOTIFICATIONS NOTIFICATIONS = [] class TestDriver(notifier.Driver): "Store notifications in memory for test verification." def notify(self, ctxt, message, priority, retry): NOTIFICATIONS.append((ctxt, message, priority, retry)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/dispatcher.py0000664000175000017500000001404300000000000023603 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import logging import operator from oslo_messaging import dispatcher from oslo_messaging import serializer as msg_serializer LOG = logging.getLogger(__name__) PRIORITIES = ['audit', 'debug', 'info', 'warn', 'error', 'critical', 'sample'] class NotificationResult(object): HANDLED = 'handled' REQUEUE = 'requeue' class NotificationDispatcher(dispatcher.DispatcherBase): def __init__(self, endpoints, serializer): self.endpoints = endpoints self.serializer = serializer or msg_serializer.NoOpSerializer() self._callbacks_by_priority = {} for endpoint, prio in itertools.product(endpoints, PRIORITIES): if hasattr(endpoint, prio): method = getattr(endpoint, prio) screen = getattr(endpoint, 'filter_rule', None) self._callbacks_by_priority.setdefault(prio, []).append( (screen, method)) @property def supported_priorities(self): return self._callbacks_by_priority.keys() def dispatch(self, incoming): """Dispatch notification messages to the appropriate endpoint method. 
""" priority, raw_message, message = self._extract_user_message(incoming) if priority not in PRIORITIES: LOG.warning('Unknown priority "%s"', priority) return for screen, callback in self._callbacks_by_priority.get(priority, []): if screen and not screen.match(message["ctxt"], message["publisher_id"], message["event_type"], message["metadata"], message["payload"]): continue ret = self._exec_callback(callback, message) if ret == NotificationResult.REQUEUE: return ret return NotificationResult.HANDLED def _exec_callback(self, callback, message): try: return callback(message["ctxt"], message["publisher_id"], message["event_type"], message["payload"], message["metadata"]) except Exception: LOG.exception("Callback raised an exception.") return NotificationResult.REQUEUE def _extract_user_message(self, incoming): ctxt = self.serializer.deserialize_context(incoming.ctxt) message = incoming.message publisher_id = message.get('publisher_id') event_type = message.get('event_type') metadata = { 'message_id': message.get('message_id'), 'timestamp': message.get('timestamp') } priority = message.get('priority', '').lower() payload = self.serializer.deserialize_entity(ctxt, message.get('payload')) return priority, incoming, dict(ctxt=ctxt, publisher_id=publisher_id, event_type=event_type, payload=payload, metadata=metadata) class BatchNotificationDispatcher(NotificationDispatcher): """A message dispatcher which understands Notification messages. A MessageHandlingServer is constructed by passing a callable dispatcher which is invoked with a list of message dictionaries each time 'batch_size' messages are received or 'batch_timeout' seconds is reached. """ def dispatch(self, incoming): """Dispatch notification messages to the appropriate endpoint method. """ messages_grouped = itertools.groupby(sorted( (self._extract_user_message(m) for m in incoming), key=operator.itemgetter(0)), operator.itemgetter(0)) requeues = set() for priority, messages in messages_grouped: __, raw_messages, messages = zip(*messages) if priority not in PRIORITIES: LOG.warning('Unknown priority "%s"', priority) continue for screen, callback in self._callbacks_by_priority.get(priority, []): if screen: filtered_messages = [message for message in messages if screen.match( message["ctxt"], message["publisher_id"], message["event_type"], message["metadata"], message["payload"])] else: filtered_messages = list(messages) if not filtered_messages: continue ret = self._exec_callback(callback, filtered_messages) if ret == NotificationResult.REQUEUE: requeues.update(raw_messages) break return requeues def _exec_callback(self, callback, messages): try: return callback(messages) except Exception: LOG.exception("Callback raised an exception.") return NotificationResult.REQUEUE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/filter.py0000664000175000017500000000621600000000000022745 0ustar00zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import re class NotificationFilter(object): r"""Filter notification messages The NotificationFilter class is used to filter notifications that an endpoint will receive. The notification can be filtered on different fields: context, publisher_id, event_type, metadata and payload. The filter is done via a regular expression filter_rule = NotificationFilter( publisher_id='^compute.*', context={'tenant_id': '^5f643cfc-664b-4c69-8000-ce2ed7b08216$', 'roles': 'private'}, event_type='^compute\.instance\..*', metadata={'timestamp': 'Aug'}, payload={'state': '^active$'}) """ def __init__(self, context=None, publisher_id=None, event_type=None, metadata=None, payload=None): self._regex_publisher_id = None self._regex_event_type = None if publisher_id is not None: self._regex_publisher_id = re.compile(publisher_id) if event_type is not None: self._regex_event_type = re.compile(event_type) self._regexs_context = self._build_regex_dict(context) self._regexs_metadata = self._build_regex_dict(metadata) self._regexs_payload = self._build_regex_dict(payload) @staticmethod def _build_regex_dict(regex_list): if regex_list is None: return {} return dict((k, re.compile(regex_list[k])) for k in regex_list) @staticmethod def _check_for_single_mismatch(data, regex): if regex is None: return False if not isinstance(data, str): return True if not regex.match(data): return True return False @classmethod def _check_for_mismatch(cls, data, regex): if isinstance(regex, dict): for k in regex: if k not in data: return True if cls._check_for_single_mismatch(data[k], regex[k]): return True return False else: return cls._check_for_single_mismatch(data, regex) def match(self, context, publisher_id, event_type, metadata, payload): if (self._check_for_mismatch(publisher_id, self._regex_publisher_id) or self._check_for_mismatch(event_type, self._regex_event_type) or self._check_for_mismatch(context, self._regexs_context) or self._check_for_mismatch(metadata, self._regexs_metadata) or self._check_for_mismatch(payload, self._regexs_payload)): return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/listener.py0000664000175000017500000002725300000000000023305 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. r"""A notification listener is used to process notification messages sent by a notifier that uses the ``messaging`` driver. A notification listener subscribes to the topic - and optionally exchange - in the supplied target. Notification messages sent by notifier clients to the target's topic/exchange are received by the listener. A notification listener exposes a number of endpoints, each of which contain a set of methods. Each method's name corresponds to a notification's priority. When a notification is received it is dispatched to the method named like the notification's priority - e.g. ``info`` notifications are dispatched to the info() method, etc. Optionally a notification endpoint can define a NotificationFilter. Notification messages that do not match the filter's rules will *not* be passed to the endpoint's methods. Parameters to endpoint methods are: the request context supplied by the client, the publisher_id of the notification message, the event_type, the payload and metadata. The metadata parameter is a mapping containing a unique message_id and a timestamp. An endpoint method can explicitly return oslo_messaging.NotificationResult.HANDLED to acknowledge a message or oslo_messaging.NotificationResult.REQUEUE to requeue the message. Note that not all transport drivers implement support for requeueing. In order to use this feature, applications should assert that the feature is available by passing allow_requeue=True to get_notification_listener(). If the driver does not support requeueing, it will raise NotImplementedError at this point. The message is acknowledged only if all endpoints either return oslo_messaging.NotificationResult.HANDLED or None. *NOTE*: If multiple listeners subscribe to the same target, the notification will be received by only *one* of the listeners. The receiving listener is selected from the group using a best-effort round-robin algorithm. This delivery pattern can be altered somewhat by specifying a pool name for the listener. Listeners with the same pool name behave like a subgroup within the group of listeners subscribed to the same topic/exchange. Each subgroup of listeners will receive a copy of the notification to be consumed by one member of the subgroup. Therefore, multiple copies of the notification will be delivered - one to the group of listeners that have no pool name (if they exist), and one to each subgroup of listeners that share the same pool name. **NOTE WELL:** This means that the Notifier always publishes notifications to a non-pooled Listener as well as the pooled Listeners. Therefore any application that uses listener pools **must have at least one listener that consumes from the non-pooled queue** (i.e. one or more listeners that do not set the *pool* parameter). Note that not all transport drivers have implemented support for listener pools. Those drivers that do not support pools will raise a NotImplementedError if a pool name is specified to get_notification_listener(). Each notification listener is associated with an executor which controls how incoming notification messages will be received and dispatched. Refer to the Executor documentation for descriptions of the available executors. *Note:* If the "eventlet" executor is used, the threading and time library need to be monkeypatched. A notification listener has start(), stop() and wait() methods to begin handling requests, stop handling requests, and wait for all in-process requests to complete after the listener has been stopped. To create a notification listener, you supply a transport, list of targets and a list of endpoints. A transport can be obtained simply by calling the get_notification_transport() method:: transport = messaging.get_notification_transport(conf) which will load the appropriate transport driver according to the user's messaging configuration. See get_notification_transport() for more details.
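As a hedged sketch (the helper names are placeholders), an endpoint that defers messages it cannot process yet might requeue them explicitly::

    class RetryingEndpoint(object):
        def info(self, ctxt, publisher_id, event_type, payload, metadata):
            if not backend_is_ready():  # placeholder readiness check
                return oslo_messaging.NotificationResult.REQUEUE
            store_payload(payload)  # placeholder handler

Remember that this only takes effect when the listener was created with allow_requeue=True and the underlying transport driver supports requeueing.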
A simple example of a notification listener with multiple endpoints might be:: from oslo_config import cfg import oslo_messaging class NotificationEndpoint(object): filter_rule = oslo_messaging.NotificationFilter( publisher_id='^compute.*') def warn(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) class ErrorEndpoint(object): filter_rule = oslo_messaging.NotificationFilter( event_type='^instance\..*\.start$', context={'ctxt_key': 'regexp'}) def error(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) transport = oslo_messaging.get_notification_transport(cfg.CONF) targets = [ oslo_messaging.Target(topic='notifications'), oslo_messaging.Target(topic='notifications_bis') ] endpoints = [ NotificationEndpoint(), ErrorEndpoint(), ] pool = "listener-workers" server = oslo_messaging.get_notification_listener(transport, targets, endpoints, pool=pool) server.start() server.wait() By supplying a serializer object, a listener can deserialize a request context and arguments from primitive types. """ import itertools import logging from oslo_messaging.notify import dispatcher as notify_dispatcher from oslo_messaging import server as msg_server from oslo_messaging import transport as msg_transport LOG = logging.getLogger(__name__) class NotificationServerBase(msg_server.MessageHandlingServer): def __init__(self, transport, targets, dispatcher, executor=None, allow_requeue=True, pool=None, batch_size=1, batch_timeout=None): super(NotificationServerBase, self).__init__(transport, dispatcher, executor) self._allow_requeue = allow_requeue self._pool = pool self.targets = targets self._targets_priorities = set( itertools.product(self.targets, self.dispatcher.supported_priorities) ) self._batch_size = batch_size self._batch_timeout = batch_timeout def _create_listener(self): return self.transport._listen_for_notifications( self._targets_priorities, self._pool, self._batch_size, self._batch_timeout ) class NotificationServer(NotificationServerBase): def __init__(self, transport, targets, dispatcher, executor=None, allow_requeue=True, pool=None): if not isinstance(transport, msg_transport.NotificationTransport): LOG.warning("Using RPC transport for notifications. 
Please use " "get_notification_transport to obtain a " "notification transport instance.") super(NotificationServer, self).__init__( transport, targets, dispatcher, executor, allow_requeue, pool, 1, None ) def _process_incoming(self, incoming): message = incoming[0] try: res = self.dispatcher.dispatch(message) except Exception: LOG.exception('Exception during message handling.') res = notify_dispatcher.NotificationResult.REQUEUE try: if (res == notify_dispatcher.NotificationResult.REQUEUE and self._allow_requeue): message.requeue() else: message.acknowledge() except Exception: LOG.exception("Failed to ack/requeue message.") class BatchNotificationServer(NotificationServerBase): def _process_incoming(self, incoming): try: not_processed_messages = self.dispatcher.dispatch(incoming) except Exception: not_processed_messages = set(incoming) LOG.exception('Exception during batch message handling.') for m in incoming: try: if m in not_processed_messages and self._allow_requeue: m.requeue() else: m.acknowledge() except Exception: LOG.exception("Failed to ack/requeue message.") def get_notification_listener(transport, targets, endpoints, executor=None, serializer=None, allow_requeue=False, pool=None): """Construct a notification listener The executor parameter controls how incoming messages will be received and dispatched. If the eventlet executor is used, the threading and time libraries need to be monkeypatched. :param transport: the messaging transport :type transport: Transport :param targets: the exchanges and topics to listen on :type targets: list of Target :param endpoints: a list of endpoint objects :type endpoints: list :param executor: name of message executor - available values are 'eventlet' and 'threading' :type executor: str :param serializer: an optional entity serializer :type serializer: Serializer :param allow_requeue: whether NotificationResult.REQUEUE support is needed :type allow_requeue: bool :param pool: the pool name :type pool: str :raises: NotImplementedError """ dispatcher = notify_dispatcher.NotificationDispatcher(endpoints, serializer) return NotificationServer(transport, targets, dispatcher, executor, allow_requeue, pool) def get_batch_notification_listener(transport, targets, endpoints, executor=None, serializer=None, allow_requeue=False, pool=None, batch_size=None, batch_timeout=None): """Construct a batch notification listener The executor parameter controls how incoming messages will be received and dispatched. If the eventlet executor is used, the threading and time libraries need to be monkeypatched.
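For example, a listener that dispatches notifications in batches of at most five messages, or whatever has accumulated after two seconds, might be constructed like this (a sketch; the transport, targets and endpoints are assumed to already exist)::

    listener = get_batch_notification_listener(
        transport, targets, endpoints,
        batch_size=5, batch_timeout=2)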
:param transport: the messaging transport :type transport: Transport :param targets: the exchanges and topics to listen on :type targets: list of Target :param endpoints: a list of endpoint objects :type endpoints: list :param executor: name of message executor - available values are 'eventlet' and 'threading' :type executor: str :param serializer: an optional entity serializer :type serializer: Serializer :param allow_requeue: whether NotificationResult.REQUEUE support is needed :type allow_requeue: bool :param pool: the pool name :type pool: str :param batch_size: number of messages to wait before calling endpoint callbacks :type batch_size: int :param batch_timeout: number of seconds to wait before calling endpoint callbacks :type batch_timeout: int :raises: NotImplementedError """ dispatcher = notify_dispatcher.BatchNotificationDispatcher( endpoints, serializer) return BatchNotificationServer( transport, targets, dispatcher, executor, allow_requeue, pool, batch_size, batch_timeout ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/log_handler.py0000664000175000017500000000342300000000000023733 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_config import cfg class LoggingErrorNotificationHandler(logging.Handler): def __init__(self, *args, **kwargs): # NOTE(dhellmann): Avoid a cyclical import by doing this one # at runtime. import oslo_messaging logging.Handler.__init__(self, *args, **kwargs) self._transport = oslo_messaging.get_notification_transport(cfg.CONF) self._notifier = oslo_messaging.Notifier( self._transport, publisher_id='error.publisher') def emit(self, record): conf = self._transport.conf # NOTE(bnemec): Notifier registers this opt with the transport. if ('log' in conf.oslo_messaging_notifications.driver): # NOTE(lbragstad): If we detect that log is one of the # notification drivers, then return. This protects from infinite # recursion where something bad happens, it gets logged, the log # handler sends a notification, and the log_notifier sees the # notification and logs it. return self._notifier.error({}, 'error_notification', dict(error=record.msg)) PublishErrorsHandler = LoggingErrorNotificationHandler ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/logger.py0000664000175000017500000000533600000000000022741 0ustar00zuulzuul00000000000000# Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """ Driver for the Python logging package that sends log records as a notification. """ import logging from oslo_config import cfg from oslo_messaging.notify import notifier class LoggingNotificationHandler(logging.Handler): """Handler for logging to the messaging notification system. Each time the application logs a message using the :py:mod:`logging` module, it will be sent as a notification. The severity used for the notification will be the same as the one used for the log record. This can be used in a Python logging configuration as follows:: [handler_notifier] class=oslo_messaging.LoggingNotificationHandler level=ERROR args=('rabbit:///',) """ CONF = cfg.CONF """Default configuration object used, subclass this class if you want to use another one. """ def __init__(self, url, publisher_id=None, driver=None, topic=None, serializer=None): self.notifier = notifier.Notifier( notifier.get_notification_transport(self.CONF, url), publisher_id, driver, serializer() if serializer else None, topics=(topic if isinstance(topic, list) or topic is None else [topic])) logging.Handler.__init__(self) def emit(self, record): """Emit the log record to the messaging notification system. :param record: A log record to emit. """ method = getattr(self.notifier, record.levelname.lower(), None) if not method: return method( None, 'logrecord', { 'name': record.name, 'levelno': record.levelno, 'levelname': record.levelname, 'exc_info': record.exc_info, 'pathname': record.pathname, 'lineno': record.lineno, 'msg': record.getMessage(), 'funcName': record.funcName, 'thread': record.thread, 'processName': record.processName, 'process': record.process, 'extra': getattr(record, 'extra', None), } ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/messaging.py0000664000175000017500000000677200000000000023442 0ustar00zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Notification drivers for sending notifications via messaging. The messaging drivers publish notification messages to notification listeners. In the case of the rabbit backend the driver will block the notifier's thread until the notification message has been passed to the messaging transport. There is no guarantee that the notification message will be consumed by a notification listener. In the case of the kafka backend the driver will not block the notifier's thread but return immediately. The driver will try to deliver the message in the background. Notification messages are sent 'at-most-once' - ensuring that they are not duplicated. If the connection to the messaging service is not active when a notification is sent the rabbit backend will block waiting for the connection to complete.
If the connection fails to complete, the driver will try to re-establish that connection. By default this will continue indefinitely until the connection completes. However, the retry parameter can be used to have the notification send fail after the given number of attempts. In this case an error is logged and the notifier's thread is resumed without an exception being raised. If the connection to the messaging service is not active when a notification is sent the kafka backend will return immediately and the backend tries to establish the connection and deliver the messages in the background. """ import logging import oslo_messaging from oslo_messaging.notify import notifier LOG = logging.getLogger(__name__) class MessagingDriver(notifier.Driver): """Send notifications using the 1.0 message format. This driver sends notifications over the configured messaging transport, but without any message envelope (also known as message format 1.0). This driver should only be used in cases where there are existing consumers deployed which do not support the 2.0 message format. """ def __init__(self, conf, topics, transport, version=1.0): super(MessagingDriver, self).__init__(conf, topics, transport) self.version = version def notify(self, ctxt, message, priority, retry): priority = priority.lower() for topic in self.topics: target = oslo_messaging.Target(topic='%s.%s' % (topic, priority)) try: self.transport._send_notification(target, ctxt, message, version=self.version, retry=retry) except Exception: LOG.exception("Could not send notification to %(topic)s. " "Payload=%(message)s", {'topic': topic, 'message': message}) class MessagingV2Driver(MessagingDriver): "Send notifications using the 2.0 message format." def __init__(self, conf, **kwargs): super(MessagingV2Driver, self).__init__(conf, version=2.0, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/middleware.py0000664000175000017500000000757100000000000023602 0ustar00zuulzuul00000000000000# Copyright (c) 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
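# A paste.deploy configuration for this middleware might look like the
# following (a sketch: the section name and option values are illustrative
# assumptions; only the option names are defined by RequestNotifier below):
#
#   [filter:request_notifier]
#   paste.filter_factory = oslo_messaging.notify.middleware:RequestNotifier.factory
#   publisher_id = api.host1
#   service_name = api
#   ignore_req_list = GET,HEAD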
""" Send notifications on request """ import logging import os.path import sys import traceback as tb from oslo_config import cfg from oslo_middleware import base import webob.dec import oslo_messaging from oslo_messaging import notify LOG = logging.getLogger(__name__) def log_and_ignore_error(fn): def wrapped(*args, **kwargs): try: return fn(*args, **kwargs) except Exception as e: LOG.exception('An exception occurred processing ' 'the API call: %s ', e) return wrapped class RequestNotifier(base.Middleware): """Send notification on request.""" @classmethod def factory(cls, global_conf, **local_conf): """Factory method for paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def _factory(app): return cls(app, **conf) return _factory def __init__(self, app, **conf): self.notifier = notify.Notifier( oslo_messaging.get_notification_transport(cfg.CONF, conf.get('url')), publisher_id=conf.get('publisher_id', os.path.basename(sys.argv[0]))) self.service_name = conf.get('service_name') self.ignore_req_list = [x.upper().strip() for x in conf.get('ignore_req_list', '').split(',')] super(RequestNotifier, self).__init__(app) @staticmethod def environ_to_dict(environ): """Following PEP 333, server variables are lower case, so don't include them. """ return dict((k, v) for k, v in environ.items() if k.isupper() and k != 'HTTP_X_AUTH_TOKEN') @log_and_ignore_error def process_request(self, request): request.environ['HTTP_X_SERVICE_NAME'] = \ self.service_name or request.host payload = { 'request': self.environ_to_dict(request.environ), } self.notifier.info({}, 'http.request', payload) @log_and_ignore_error def process_response(self, request, response, exception=None, traceback=None): payload = { 'request': self.environ_to_dict(request.environ), } if response: payload['response'] = { 'status': response.status, 'headers': response.headers, } if exception: payload['exception'] = { 'value': repr(exception), 'traceback': tb.format_tb(traceback) } self.notifier.info({}, 'http.response', payload) @webob.dec.wsgify def __call__(self, req): if req.method in self.ignore_req_list: return req.get_response(self.application) else: self.process_request(req) try: response = req.get_response(self.application) except Exception: exc_type, value, traceback = sys.exc_info() self.process_response(req, None, value, traceback) raise else: self.process_response(req, response) return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/notify/notifier.py0000664000175000017500000004371200000000000023301 0ustar00zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import argparse import logging import uuid from oslo_config import cfg from oslo_utils import timeutils from stevedore import extension from stevedore import named from oslo_messaging import serializer as msg_serializer from oslo_messaging import transport as msg_transport _notifier_opts = [ cfg.MultiStrOpt('driver', default=[], deprecated_name='notification_driver', deprecated_group='DEFAULT', help='The driver(s) to handle sending notifications. ' 'Possible values are messaging, messagingv2, ' 'routing, log, test, noop'), cfg.StrOpt('transport_url', deprecated_name='notification_transport_url', deprecated_group='DEFAULT', secret=True, help='A URL representing the messaging driver to use for ' 'notifications. If not set, we fall back to the same ' 'configuration used for RPC.'), cfg.ListOpt('topics', default=['notifications', ], deprecated_opts=[ cfg.DeprecatedOpt('topics', group='rpc_notifier2'), cfg.DeprecatedOpt('notification_topics', group='DEFAULT') ], help='AMQP topic used for OpenStack notifications.'), cfg.IntOpt('retry', default=-1, help='The maximum number of attempts to re-send a notification ' 'message which failed to be delivered due to a ' 'recoverable error. 0 - No retry, -1 - indefinite'), ] _LOG = logging.getLogger(__name__) def _send_notification(): """Command line tool to send notifications manually.""" parser = argparse.ArgumentParser( description='Oslo.messaging notification sending', ) parser.add_argument('--config-file', help='Path to configuration file') parser.add_argument('--transport-url', help='Transport URL') parser.add_argument('--publisher-id', help='Publisher ID') parser.add_argument('--event-type', default="test", help="Event type") parser.add_argument('--topic', nargs='*', help="Topic to send to") parser.add_argument('--priority', default="info", choices=("info", "audit", "warn", "error", "critical", "sample"), help='Event priority') parser.add_argument('--driver', default="messagingv2", choices=extension.ExtensionManager( 'oslo.messaging.notify.drivers' ).names(), help='Notification driver') parser.add_argument('payload', help="the notification payload (dict)") args = parser.parse_args() conf = cfg.ConfigOpts() conf([], default_config_files=[args.config_file] if args.config_file else None) transport = get_notification_transport(conf, url=args.transport_url) notifier = Notifier(transport, args.publisher_id, topics=args.topic, driver=args.driver) notifier._notify({}, args.event_type, args.payload, args.priority) class Driver(object, metaclass=abc.ABCMeta): """Base driver for Notifications""" def __init__(self, conf, topics, transport): """base driver initialization :param conf: configuration options :param topics: list of topics :param transport: transport driver to use """ self.conf = conf self.topics = topics self.transport = transport @abc.abstractmethod def notify(self, ctxt, msg, priority, retry): """send a single notification with a specific priority :param ctxt: current request context :param msg: message to be sent :type msg: dict :param priority: priority of the message :type priority: str :param retry: connection retries configuration (used by the messaging driver): None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int """ pass def get_notification_transport(conf, url=None, allowed_remote_exmods=None): """A factory method for Transport objects for notifications.
This method should be used for notifications, in case notifications are being sent over a different message bus than normal messaging functionality; for example, using a different driver, or with different access permissions. If no transport URL is provided, the URL in the notifications section of the config file will be used. If that URL is also absent, the same transport as specified in the messaging section will be used. If a transport URL is provided, then this function works exactly the same as get_transport. :param conf: the user configuration :type conf: cfg.ConfigOpts :param url: a transport URL, see :py:class:`transport.TransportURL` :type url: str or TransportURL :param allowed_remote_exmods: a list of modules which a client using this transport will deserialize remote exceptions from :type allowed_remote_exmods: list """ conf.register_opts(_notifier_opts, group='oslo_messaging_notifications') if url is None: url = conf.oslo_messaging_notifications.transport_url return msg_transport._get_transport( conf, url, allowed_remote_exmods, transport_cls=msg_transport.NotificationTransport) def _sanitize_context(ctxt): if ctxt is None or type(ctxt) is dict: # NOTE(JayF): Logging drivers, unit tests, and some code calls # notifier with an empty dict or None instead of an # actual context. In these cases, discard the passed # value. return {} try: return ctxt.redacted_copy() except AttributeError: # NOTE(JayF): We'd rather send a notification without any context # than not send the notification at all. _LOG.warning("Unable to properly redact context for " "notification, omitting context from notification.") return {} class Notifier(object): """Send notification messages. The Notifier class is used for sending notification messages over a messaging transport or other means. Notification messages follow the following format:: {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', 'timestamp': timeutils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} A Notifier object can be instantiated with a transport object and a publisher ID:: notifier = messaging.Notifier(get_notification_transport(CONF), 'compute') and notifications are sent via drivers chosen with the driver config option and on the topics chosen with the topics config option in the [oslo_messaging_notifications] section. Alternatively, a Notifier object can be instantiated with a specific driver or topic:: transport = notifier.get_notification_transport(CONF) notifier = notifier.Notifier(transport, 'compute.host', driver='messaging', topics=['notifications']) Notifier objects are relatively expensive to instantiate (mostly the cost of loading notification drivers), so it is possible to specialize a given Notifier object with a different publisher id using the prepare() method:: notifier = notifier.prepare(publisher_id='compute') notifier.info(ctxt, event_type, payload) """ def __init__(self, transport, publisher_id=None, driver=None, serializer=None, retry=None, topics=None): """Construct a Notifier object.
:param transport: the transport to use for sending messages :type transport: oslo_messaging.Transport :param publisher_id: field in notifications sent, for example 'compute.host1' :type publisher_id: str :param driver: a driver to lookup from oslo_messaging.notify.drivers :type driver: str :param serializer: an optional entity serializer :type serializer: Serializer :param retry: connection retries configuration (used by the messaging driver): None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int :param topics: the topics which to send messages on :type topics: list of strings """ conf = transport.conf conf.register_opts(_notifier_opts, group='oslo_messaging_notifications') if not isinstance(transport, msg_transport.NotificationTransport): _LOG.warning("Using RPC transport for notifications. Please use " "get_notification_transport to obtain a " "notification transport instance.") self.transport = transport self.publisher_id = publisher_id if retry is not None: self.retry = retry else: self.retry = conf.oslo_messaging_notifications.retry self._driver_names = ([driver] if driver is not None else conf.oslo_messaging_notifications.driver) if topics is not None: self._topics = topics else: self._topics = conf.oslo_messaging_notifications.topics self._serializer = serializer or msg_serializer.NoOpSerializer() self._driver_mgr = named.NamedExtensionManager( 'oslo.messaging.notify.drivers', names=self._driver_names, invoke_on_load=True, invoke_args=[conf], invoke_kwds={ 'topics': self._topics, 'transport': self.transport, } ) _marker = object() def prepare(self, publisher_id=_marker, retry=_marker): """Return a specialized Notifier instance. Returns a new Notifier instance with the supplied publisher_id. Allows sending notifications from multiple publisher_ids without the overhead of notification driver loading. :param publisher_id: field in notifications sent, for example 'compute.host1' :type publisher_id: str :param retry: connection retries configuration (used by the messaging driver): None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int """ return _SubNotifier._prepare(self, publisher_id, retry=retry) def _notify(self, ctxt, event_type, payload, priority, publisher_id=None, retry=None): payload = self._serializer.serialize_entity(ctxt, payload) # NOTE(JayF): We must remove secure information from notification # payloads, otherwise we risk sending sensitive creds # to a notification bus. safe_ctxt = _sanitize_context(ctxt) ctxt = self._serializer.serialize_context(safe_ctxt) msg = dict(message_id=str(uuid.uuid4()), publisher_id=publisher_id or self.publisher_id, event_type=event_type, priority=priority, payload=payload, timestamp=str(timeutils.utcnow())) def do_notify(ext): try: ext.obj.notify(ctxt, msg, priority, retry or self.retry) except Exception as e: _LOG.exception("Problem '%(e)s' attempting to send to " "notification system. Payload=%(payload)s", {'e': e, 'payload': payload}) if self._driver_mgr.extensions: self._driver_mgr.map(do_notify) def audit(self, ctxt, event_type, payload): """Send a notification at audit level. 
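For example (the event type and payload shown are illustrative, not a defined contract)::

    notifier.audit(ctxt, 'compute.instance.exists',
                   {'instance_id': instance_id})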
:param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'AUDIT') def debug(self, ctxt, event_type, payload): """Send a notification at debug level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'DEBUG') def info(self, ctxt, event_type, payload): """Send a notification at info level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'INFO') def warn(self, ctxt, event_type, payload): """Send a notification at warning level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'WARN') warning = warn def error(self, ctxt, event_type, payload): """Send a notification at error level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'ERROR') def critical(self, ctxt, event_type, payload): """Send a notification at critical level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'CRITICAL') def sample(self, ctxt, event_type, payload): """Send a notification at sample level. Sample notifications are for high-frequency events that typically contain small payloads. eg: "CPU = 70%" Not all drivers support the sample level (log, for example) so these could be dropped. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'SAMPLE') def is_enabled(self): """Check if the notifier will emit notifications anywhere. 
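This allows an application to avoid building an expensive payload when nothing would be emitted, e.g. (``build_payload`` is a hypothetical helper)::

    if notifier.is_enabled():
        notifier.info(ctxt, 'app.event', build_payload())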
:return: false if the driver of the notifier is set only to noop, true otherwise """ return self._driver_mgr.names() != ['noop'] class _SubNotifier(Notifier): _marker = Notifier._marker def __init__(self, base, publisher_id, retry): self._base = base self.transport = base.transport self.publisher_id = publisher_id self.retry = retry self._serializer = self._base._serializer self._driver_mgr = self._base._driver_mgr def _notify(self, ctxt, event_type, payload, priority): super(_SubNotifier, self)._notify(ctxt, event_type, payload, priority) @classmethod def _prepare(cls, base, publisher_id=_marker, retry=_marker): if publisher_id is cls._marker: publisher_id = base.publisher_id if retry is cls._marker: retry = base.retry return cls(base, publisher_id, retry=retry) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/opts.py0000664000175000017500000000614500000000000021136 0ustar00zuulzuul00000000000000 # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import itertools from oslo_messaging._drivers import amqp from oslo_messaging._drivers.amqp1_driver import opts as amqp_opts from oslo_messaging._drivers import base as drivers_base from oslo_messaging._drivers import impl_rabbit from oslo_messaging._drivers.kafka_driver import kafka_options from oslo_messaging.notify import notifier from oslo_messaging.rpc import client from oslo_messaging.rpc import dispatcher from oslo_messaging import server from oslo_messaging import transport __all__ = [ 'list_opts' ] _global_opt_lists = [ drivers_base.base_opts, server._pool_opts, client._client_opts, transport._transport_opts, dispatcher._dispatcher_opts, ] _opts = [ (None, list(itertools.chain(*_global_opt_lists))), ('oslo_messaging_amqp', amqp_opts.amqp1_opts), ('oslo_messaging_notifications', notifier._notifier_opts), ('oslo_messaging_rabbit', list( itertools.chain(amqp.amqp_opts, impl_rabbit.rabbit_opts))), ('oslo_messaging_kafka', kafka_options.KAFKA_OPTS), ] def list_opts(): """Return a list of oslo.config options available in the library. The returned list includes all oslo.config options which may be registered at runtime by the library. Each element of the list is a tuple. The first element is the name of the group under which the list of elements in the second element will be registered. A group name of None corresponds to the [DEFAULT] group in config files. This function is also discoverable via the 'oslo_messaging' entry point under the 'oslo.config.opts' namespace. The purpose of this is to allow tools like the Oslo sample config file generator to discover the options exposed to users by this library. :returns: a list of (group_name, opts) tuples """ return [(g, copy.deepcopy(o)) for g, o in _opts] def set_defaults(conf, executor_thread_pool_size=None): """Set defaults for configuration variables. Overrides default options values. :param conf: Config instance specified to set default options in it. 
Using instances instead of the global config object prevents conflicts between option declarations. :type conf: oslo.config.cfg.ConfigOpts instance. :keyword executor_thread_pool_size: Size of executor thread pool. :type executor_thread_pool_size: int :default executor_thread_pool_size: None """ if executor_thread_pool_size is not None: conf.set_default('executor_thread_pool_size', executor_thread_pool_size) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1306727 oslo.messaging-14.9.0/oslo_messaging/rpc/0000775000175000017500000000000000000000000020355 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/rpc/__init__.py0000664000175000017500000000217200000000000022470 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'ClientSendError', 'ExpectedException', 'NoSuchMethod', 'RPCClient', 'RPCAccessPolicyBase', 'LegacyRPCAccessPolicy', 'DefaultRPCAccessPolicy', 'ExplicitRPCAccessPolicy', 'RPCDispatcher', 'RPCDispatcherError', 'RPCVersionCapError', 'RemoteError', 'UnsupportedVersion', 'expected_exceptions', 'get_rpc_transport', 'get_rpc_server', 'get_rpc_client', 'expose' ] from .client import * from .dispatcher import * from .transport import * from .server import * ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/rpc/client.py0000664000175000017500000006022300000000000022210 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import abc import logging from oslo_config import cfg from oslo_messaging._drivers import base as driver_base from oslo_messaging import _metrics as metrics from oslo_messaging import _utils as utils from oslo_messaging import exceptions from oslo_messaging import serializer as msg_serializer from oslo_messaging import transport as msg_transport __all__ = [ 'ClientSendError', 'RPCClient', 'RPCVersionCapError', 'RemoteError', 'get_rpc_client', ] LOG = logging.getLogger(__name__) _client_opts = [ cfg.IntOpt('rpc_response_timeout', default=60, help='Seconds to wait for a response from a call.'), ] class RemoteError(exceptions.MessagingException): """Signifies that a remote endpoint method has raised an exception. Contains a string representation of the type of the original exception, the value of the original exception, and the traceback. These are sent to the parent as a joined string so printing the exception contains all of the relevant info. """ def __init__(self, exc_type=None, value=None, traceback=None): self.exc_type = exc_type self.value = value self.traceback = traceback msg = ("Remote error: %(exc_type)s %(value)s\n%(traceback)s." % dict(exc_type=self.exc_type, value=self.value, traceback=self.traceback)) super(RemoteError, self).__init__(msg) class RPCVersionCapError(exceptions.MessagingException): def __init__(self, version, version_cap): self.version = version self.version_cap = version_cap msg = ("Requested message version %(version)s is incompatible. It " "needs to be equal in major version and less than or equal " "in minor version to the specified version cap " "%(version_cap)s." % dict(version=self.version, version_cap=self.version_cap)) super(RPCVersionCapError, self).__init__(msg) class ClientSendError(exceptions.MessagingException): """Raised if we failed to send a message to a target.""" def __init__(self, target, ex): msg = 'Failed to send to target "%s": %s' % (target, ex) super(ClientSendError, self).__init__(msg) self.target = target self.ex = ex class _BaseCallContext(object, metaclass=abc.ABCMeta): _marker = object() def __init__(self, transport, target, serializer, timeout=None, version_cap=None, retry=None, call_monitor_timeout=None, transport_options=None): self.conf = transport.conf self.transport = transport self.target = target self.serializer = serializer self.timeout = timeout self.call_monitor_timeout = call_monitor_timeout self.retry = retry self.version_cap = version_cap self.transport_options = transport_options super(_BaseCallContext, self).__init__() def _make_message(self, ctxt, method, args): msg = dict(method=method) msg['args'] = dict() for argname, arg in args.items(): msg['args'][argname] = self.serializer.serialize_entity(ctxt, arg) if self.target.namespace is not None: msg['namespace'] = self.target.namespace if self.target.version is not None: msg['version'] = self.target.version return msg def _check_version_cap(self, version): if not utils.version_is_compatible(self.version_cap, version): raise RPCVersionCapError(version=version, version_cap=self.version_cap) def can_send_version(self, version=_marker): """Check to see if a version is compatible with the version cap.""" version = self.target.version if version is self._marker else version return utils.version_is_compatible(self.version_cap, version) @classmethod def _check_version(cls, version): if version is not cls._marker: # quick sanity check to make sure parsable version numbers are used try: utils.version_is_compatible(version, version) except (IndexError, ValueError): raise
exceptions.MessagingException( "Version must contain a major and minor integer. Got %s" % version) def cast(self, ctxt, method, **kwargs): """Invoke a method and return immediately. See RPCClient.cast().""" msg = self._make_message(ctxt, method, kwargs) msg_ctxt = self.serializer.serialize_context(ctxt) self._check_version_cap(msg.get('version')) with metrics.get_collector(self.conf, "rpc_client", target=self.target, method=method, call_type="cast") as metrics_collector: try: self.transport._send(self.target, msg_ctxt, msg, retry=self.retry, transport_options=self.transport_options) except driver_base.TransportDriverError as ex: if self.conf.oslo_messaging_metrics.metrics_enabled: metrics_collector.rpc_client_exception_total( self.target, method, "cast", ex.__class__.__name__) raise ClientSendError(self.target, ex) except Exception as ex: if self.conf.oslo_messaging_metrics.metrics_enabled: metrics_collector.rpc_client_exception_total( self.target, method, "cast", ex.__class__.__name__) raise def call(self, ctxt, method, **kwargs): """Invoke a method and wait for a reply. See RPCClient.call().""" if self.target.fanout: raise exceptions.InvalidTarget('A call cannot be used with fanout', self.target) msg = self._make_message(ctxt, method, kwargs) msg_ctxt = self.serializer.serialize_context(ctxt) timeout = self.timeout if self.timeout is None: timeout = self.conf.rpc_response_timeout cm_timeout = self.call_monitor_timeout self._check_version_cap(msg.get('version')) with metrics.get_collector(self.conf, "rpc_client", target=self.target, method=method, call_type="call") as metrics_collector: try: result = self.transport._send( self.target, msg_ctxt, msg, wait_for_reply=True, timeout=timeout, call_monitor_timeout=cm_timeout, retry=self.retry, transport_options=self.transport_options) except driver_base.TransportDriverError as ex: if self.conf.oslo_messaging_metrics.metrics_enabled: metrics_collector.rpc_client_exception_total( self.target, method, "call", ex.__class__.__name__) raise ClientSendError(self.target, ex) except Exception as ex: if self.conf.oslo_messaging_metrics.metrics_enabled: metrics_collector.rpc_client_exception_total( self.target, method, "call", ex.__class__.__name__) raise return self.serializer.deserialize_entity(ctxt, result) @abc.abstractmethod def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker, retry=_marker, call_monitor_timeout=_marker): """Prepare a method invocation context.
See RPCClient.prepare().""" class _CallContext(_BaseCallContext): _marker = _BaseCallContext._marker @classmethod def _prepare(cls, call_context, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker, retry=_marker, call_monitor_timeout=_marker, transport_options=_marker): cls._check_version(version) kwargs = dict( exchange=exchange, topic=topic, namespace=namespace, version=version, server=server, fanout=fanout) kwargs = dict([(k, v) for k, v in kwargs.items() if v is not cls._marker]) target = call_context.target(**kwargs) if timeout is cls._marker: timeout = call_context.timeout if version_cap is cls._marker: version_cap = call_context.version_cap if retry is cls._marker: retry = call_context.retry if call_monitor_timeout is cls._marker: call_monitor_timeout = call_context.call_monitor_timeout if transport_options is cls._marker: transport_options = call_context.transport_options return _CallContext(call_context.transport, target, call_context.serializer, timeout, version_cap, retry, call_monitor_timeout, transport_options) def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker, retry=_marker, call_monitor_timeout=_marker): return _CallContext._prepare(self, exchange, topic, namespace, version, server, fanout, timeout, version_cap, retry, call_monitor_timeout) class RPCClient(_BaseCallContext): """A class for invoking methods on remote RPC servers. The RPCClient class is responsible for sending method invocations to and receiving return values from remote RPC servers via a messaging transport. The class should always be instantiated by using the get_rpc_client function and not constructing the class directly. Two RPC patterns are supported: RPC calls and RPC casts. An RPC cast is used when an RPC method does *not* return a value to the caller. An RPC call is used when a return value is expected from the method. For further information see the cast() and call() methods. The default target used for all subsequent calls and casts is supplied to the RPCClient constructor. The client uses the target to control how the RPC request is delivered to a server. If only the target's topic (and optionally exchange) are set, then the RPC can be serviced by any server that is listening to that topic (and exchange). If multiple servers are listening on that topic/exchange, then one server is picked using a best-effort round-robin algorithm. Alternatively, the client can set the Target's ``server`` attribute to the name of a specific server to send the RPC request to one particular server. In the case of RPC cast, the RPC request can be broadcast to all servers listening to the Target's topic/exchange by setting the Target's ``fanout`` property to ``True``. While the default target is set on construction, target attributes can be overridden for individual method invocations using the prepare() method. A method invocation consists of a request context dictionary, a method name and a dictionary of arguments. 
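For example, a single invocation might look like this (the method name and argument are illustrative)::

    result = client.call(ctxt, 'get_host_uptime', host_name=host)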
This class is intended to be used by wrapping it in another class which provides methods on the subclass to perform the remote invocation using call() or cast():: class TestClient(object): def __init__(self, transport): target = messaging.Target(topic='test', version='2.0') self._client = messaging.get_rpc_client(transport, target) def test(self, ctxt, arg): return self._client.call(ctxt, 'test', arg=arg) An example of using the prepare() method to override some attributes of the default target:: def test(self, ctxt, arg): cctxt = self._client.prepare(version='2.5') return cctxt.call(ctxt, 'test', arg=arg) RPCClient has a number of other properties - for example, timeout and version_cap - which may make sense to override for some method invocations, so they too can be passed to prepare():: def test(self, ctxt, arg): cctxt = self._client.prepare(timeout=10) return cctxt.call(ctxt, 'test', arg=arg) However, this class can be used directly without wrapping it in another class. For example:: transport = messaging.get_rpc_transport(cfg.CONF) target = messaging.Target(topic='test', version='2.0') client = messaging.get_rpc_client(transport, target) client.call(ctxt, 'test', arg=arg) but this is probably only useful in limited circumstances as a wrapper class will usually help to make the code much more obvious. If the connection to the messaging service is not active when an RPC request is made the client will block waiting for the connection to complete. If the connection fails to complete, the client will try to re-establish that connection. By default this will continue indefinitely until the connection completes. However, the retry parameter can be used to have the RPC request fail with a MessageDeliveryFailure after the given number of retries. For example:: client = messaging.get_rpc_client(transport, target, retry=None) client.call(ctxt, 'sync') try: client.prepare(retry=0).cast(ctxt, 'ping') except messaging.MessageDeliveryFailure: LOG.error("Failed to send ping message") """ _marker = _BaseCallContext._marker def __init__(self, transport, target, timeout=None, version_cap=None, serializer=None, retry=None, call_monitor_timeout=None, transport_options=None, _manual_load=True): """Construct an RPC client. This should not be called directly, use the get_rpc_client function to instantiate this class. :param transport: a messaging transport handle :type transport: Transport :param target: the default target for invocations :type target: Target :param timeout: an optional default timeout (in seconds) for call()s :type timeout: int or float :param version_cap: raise a RPCVersionCapError if the requested message version exceeds this cap :type version_cap: str :param serializer: an optional entity serializer :type serializer: Serializer :param retry: an optional default connection retries configuration: None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int :param call_monitor_timeout: an optional timeout (in seconds) for active call heartbeating. If specified, requires the server to heartbeat long-running calls at this interval (less than the overall timeout parameter). :type call_monitor_timeout: int :param transport_options: Transport options passed to client. :type transport_options: TransportOptions :param _manual_load: Internal use only to check if class was manually instantiated or not. :type _manual_load: bool """ if _manual_load: LOG.warning("Using RPCClient manually to instantiate client.
" "Please use get_rpc_client to obtain an RPC client " "instance.") if serializer is None: serializer = msg_serializer.NoOpSerializer() if not isinstance(transport, msg_transport.RPCTransport): LOG.warning("Using notification transport for RPC. Please use " "get_rpc_transport to obtain an RPC transport " "instance.") super(RPCClient, self).__init__( transport, target, serializer, timeout, version_cap, retry, call_monitor_timeout, transport_options ) self.conf.register_opts(_client_opts) def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker, retry=_marker, call_monitor_timeout=_marker, transport_options=_marker): """Prepare a method invocation context. Use this method to override client properties for an individual method invocation. For example:: def test(self, ctxt, arg): cctxt = self.prepare(version='2.5') return cctxt.call(ctxt, 'test', arg=arg) :param exchange: see Target.exchange :type exchange: str :param topic: see Target.topic :type topic: str :param namespace: see Target.namespace :type namespace: str :param version: requirement the server must support, see Target.version :type version: str :param server: send to a specific server, see Target.server :type server: str :param fanout: send to all servers on topic, see Target.fanout :type fanout: bool :param timeout: an optional default timeout (in seconds) for call()s :type timeout: int or float :param version_cap: raise a RPCVersionCapError version exceeds this cap :type version_cap: str :param retry: an optional connection retries configuration: None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int :param transport_options: additional parameters to configure the driver for example to send parameters as "mandatory" flag in RabbitMQ :type transport_options: dictionary :param call_monitor_timeout: an optional timeout (in seconds) for active call heartbeating. If specified, requires the server to heartbeat long-running calls at this interval (less than the overall timeout parameter). :type call_monitor_timeout: int """ return _CallContext._prepare(self, exchange, topic, namespace, version, server, fanout, timeout, version_cap, retry, call_monitor_timeout, transport_options) def cast(self, ctxt, method, **kwargs): """Invoke a method without blocking for a return value. The cast() method is used to invoke an RPC method that does not return a value. cast() RPC requests may be broadcast to all Servers listening on a given topic by setting the fanout Target property to ``True``. The cast() operation is best-effort: cast() will block the calling thread until the RPC request method is accepted by the messaging transport, but cast() does *not* verify that the RPC method has been invoked by the server. cast() does guarantee that the method will be not executed twice on a destination (e.g. 'at-most-once' execution). There are no ordering guarantees across successive casts, even among casts to the same destination. Therefore methods may be executed in an order different from the order in which they are cast. Method arguments must either be primitive types or types supported by the client's serializer (if any). Similarly, the request context must be a dict unless the client's serializer supports serializing another type. 
:param ctxt: a request context dict :type ctxt: dict :param method: the method name :type method: str :param kwargs: a dict of method arguments :type kwargs: dict :raises: MessageDeliveryFailure if the messaging transport fails to accept the request. """ self.prepare().cast(ctxt, method, **kwargs) def call(self, ctxt, method, **kwargs): """Invoke a method and wait for a reply. The call() method is used to invoke RPC methods that return a value. Since only a single return value is permitted it is not possible to call() to a fanout target. call() will block the calling thread until the messaging transport provides the return value, a timeout occurs, or a non-recoverable error occurs. call() guarantees that the RPC request is done 'at-most-once' which ensures that the call will never be duplicated. However if the call should fail or time out before the return value arrives then there are no guarantees whether or not the method was invoked. Since call() blocks until completion of the RPC method, call()s from the same thread are guaranteed to be processed in-order. Method arguments must either be primitive types or types supported by the client's serializer (if any). Similarly, the request context must be a dict unless the client's serializer supports serializing another type. The semantics of how any errors raised by the remote RPC endpoint method are handled are quite subtle. Firstly, if the remote exception is contained in one of the modules listed in the allow_remote_exmods messaging.get_rpc_transport() parameter, then this exception will be re-raised by call(). However, such locally re-raised remote exceptions are distinguishable from the same exception type raised locally because re-raised remote exceptions are modified such that their class name ends with the '_Remote' suffix so you may do:: if ex.__class__.__name__.endswith('_Remote'): # Some special case for locally re-raised remote exceptions Secondly, if a remote exception is not from a module listed in the allowed_remote_exmods list, then a messaging.RemoteError exception is raised with all details of the remote exception. :param ctxt: a request context dict :type ctxt: dict :param method: the method name :type method: str :param kwargs: a dict of method arguments :type kwargs: dict :raises: MessagingTimeout, RemoteError, MessageDeliveryFailure """ return self.prepare().call(ctxt, method, **kwargs) def can_send_version(self, version=_marker): """Check to see if a version is compatible with the version cap.""" return self.prepare(version=version).can_send_version() def get_rpc_client(transport, target, client_cls=RPCClient, **kwargs): """Construct an RPC client. :param transport: the messaging transport :type transport: Transport :param target: the default target for invocations :type target: Target :param client_cls: The client class to instantiate :type client_cls: class :param **kwargs: The kwargs will be passed down to the client_cls constructor """ return client_cls(transport, target, _manual_load=False, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/rpc/dispatcher.py0000664000175000017500000002765500000000000023074 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc.
# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from abc import ABCMeta from abc import abstractmethod import logging import sys import threading from oslo_config import cfg from oslo_utils import eventletutils from oslo_messaging import _utils as utils from oslo_messaging import dispatcher from oslo_messaging import serializer as msg_serializer from oslo_messaging import server as msg_server from oslo_messaging import target as msg_target _dispatcher_opts = [ cfg.BoolOpt('rpc_ping_enabled', default=False, help='Add an endpoint to answer to ping calls. ' 'Endpoint is named oslo_rpc_server_ping'), ] __all__ = [ 'NoSuchMethod', 'RPCAccessPolicyBase', 'LegacyRPCAccessPolicy', 'DefaultRPCAccessPolicy', 'ExplicitRPCAccessPolicy', 'RPCDispatcher', 'RPCDispatcherError', 'UnsupportedVersion', 'ExpectedException', ] LOG = logging.getLogger(__name__) class PingEndpoint(object): def oslo_rpc_server_ping(self, ctxt, **kwargs): return 'pong' class ExpectedException(Exception): """Encapsulates an expected exception raised by an RPC endpoint Merely instantiating this exception records the current exception information, which will be passed back to the RPC client without exceptional logging. """ def __init__(self): self.exc_info = sys.exc_info() class RPCDispatcherError(msg_server.MessagingServerError): "A base class for all RPC dispatcher exceptions." class NoSuchMethod(RPCDispatcherError, AttributeError): "Raised if there is no endpoint which exposes the requested method." def __init__(self, method): msg = "Endpoint does not support RPC method %s" % method super(NoSuchMethod, self).__init__(msg) self.method = method class UnsupportedVersion(RPCDispatcherError): "Raised if there is no endpoint which supports the requested version." def __init__(self, version, method=None): msg = "Endpoint does not support RPC version %s" % version if method: msg = "%s. Attempted method: %s" % (msg, method) super(UnsupportedVersion, self).__init__(msg) self.version = version self.method = method class RPCAccessPolicyBase(object, metaclass=ABCMeta): """Determines which endpoint methods may be invoked via RPC""" @abstractmethod def is_allowed(self, endpoint, method): """Applies an access policy to the rpc method :param endpoint: the instance of a rpc endpoint :param method: the method of the endpoint :return: True if the method may be invoked via RPC, else False. """ class LegacyRPCAccessPolicy(RPCAccessPolicyBase): """The legacy access policy allows RPC access to all callable endpoint methods including private methods (methods prefixed by '_') """ def is_allowed(self, endpoint, method): return True class DefaultRPCAccessPolicy(RPCAccessPolicyBase): """The default access policy prevents RPC calls to private methods (methods prefixed by '_') .. note:: LegacyRPCAccessPolicy currently needs to be the default while we have projects that rely on exposing private methods.
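To select this policy explicitly, the class may be passed to the dispatcher (a sketch; ``endpoints`` and ``serializer`` are assumed to exist)::

    dispatcher = RPCDispatcher(endpoints, serializer,
                               access_policy=DefaultRPCAccessPolicy)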
""" def is_allowed(self, endpoint, method): return not method.startswith('_') class ExplicitRPCAccessPolicy(RPCAccessPolicyBase): """Policy which requires decorated endpoint methods to allow dispatch""" def is_allowed(self, endpoint, method): if hasattr(endpoint, method): return hasattr(getattr(endpoint, method), 'exposed') return False class RPCDispatcher(dispatcher.DispatcherBase): """A message dispatcher which understands RPC messages. A MessageHandlingServer is constructed by passing a callable dispatcher which is invoked with context and message dictionaries each time a message is received. RPCDispatcher is one such dispatcher which understands the format of RPC messages. The dispatcher looks at the namespace, version and method values in the message and matches those against a list of available endpoints. Endpoints may have a target attribute describing the namespace and version of the methods exposed by that object. The RPCDispatcher may have an access_policy attribute which determines which of the endpoint methods are to be dispatched. The default access_policy dispatches all public methods on an endpoint object. """ def __init__(self, endpoints, serializer, access_policy=None): """Construct a rpc server dispatcher. :param endpoints: list of endpoint objects for dispatching to :param serializer: optional message serializer """ cfg.CONF.register_opts(_dispatcher_opts) oslo_rpc_server_ping = None for ep in endpoints: # Check if we have an attribute named 'target' target = getattr(ep, 'target', None) if target and not isinstance(target, msg_target.Target): errmsg = "'target' is a reserved Endpoint attribute used" + \ " for namespace and version filtering. It must" + \ " be of type oslo_messaging.Target. Do not" + \ " define an Endpoint method named 'target'" raise TypeError("%s: endpoint=%s" % (errmsg, ep)) # Check if we have an attribute named 'oslo_rpc_server_ping' oslo_rpc_server_ping = getattr(ep, 'oslo_rpc_server_ping', None) if oslo_rpc_server_ping: errmsg = "'oslo_rpc_server_ping' is a reserved Endpoint" + \ " attribute which can be use to ping the" + \ " endpoint. Please avoid using any oslo_* " + \ " naming." LOG.warning("%s (endpoint=%s)" % (errmsg, ep)) self.endpoints = endpoints # Add ping endpoint if enabled in config if cfg.CONF.rpc_ping_enabled: if oslo_rpc_server_ping: LOG.warning("rpc_ping_enabled=True in config but " "oslo_rpc_server_ping is already declared " "in an other Endpoint. 
Not enabling rpc_ping " "Endpoint.") else: self.endpoints.append(PingEndpoint()) self.serializer = serializer or msg_serializer.NoOpSerializer() self._default_target = msg_target.Target() if access_policy is not None: if issubclass(access_policy, RPCAccessPolicyBase): self.access_policy = access_policy() else: raise TypeError('access_policy must be a subclass of ' 'RPCAccessPolicyBase') else: self.access_policy = DefaultRPCAccessPolicy() @staticmethod def _is_namespace(target, namespace): return namespace in target.accepted_namespaces @staticmethod def _is_compatible(target, version): endpoint_version = target.version or '1.0' return utils.version_is_compatible(endpoint_version, version) def _do_dispatch(self, endpoint, method, ctxt, args): ctxt = self.serializer.deserialize_context(ctxt) new_args = dict() for argname, arg in args.items(): new_args[argname] = self.serializer.deserialize_entity(ctxt, arg) func = getattr(endpoint, method) result = func(ctxt, **new_args) return self.serializer.serialize_entity(ctxt, result) def _watchdog(self, event, incoming): # NOTE(danms): If the client declared that they are going to # time out after N seconds, send the call-monitor heartbeat # every N/2 seconds to make sure there is plenty of time to # account for inbound and outbound queuing delays. Client # timeouts must be integral and positive, otherwise we log and # ignore. try: client_timeout = int(incoming.client_timeout) cm_heartbeat_interval = client_timeout / 2 except ValueError: client_timeout = cm_heartbeat_interval = 0 if cm_heartbeat_interval < 1: LOG.warning('Client provided an invalid timeout value of %r' % ( incoming.client_timeout)) return while not event.wait(cm_heartbeat_interval): LOG.debug( 'Sending call-monitor heartbeat for active call to %(method)s ' '(interval=%(interval)i)' % ( {'method': incoming.message.get('method'), 'interval': cm_heartbeat_interval})) try: incoming.heartbeat() except Exception as exc: # The heartbeat message failed to send. Likely the broker or # client has died. Nothing to do here but exit the watchdog # thread. If the client is still alive (dead broker) then its # RPC will timeout as expected. LOG.debug("Call-monitor heartbeat failed: %(exc)s" % ({'exc': exc})) break def dispatch(self, incoming): """Dispatch an RPC message to the appropriate endpoint method. :param incoming: incoming message :type incoming: IncomingMessage :raises: NoSuchMethod, UnsupportedVersion """ message = incoming.message ctxt = incoming.ctxt method = message.get('method') args = message.get('args', {}) namespace = message.get('namespace') version = message.get('version', '1.0') # NOTE(danms): This event and watchdog thread are used to send # call-monitoring heartbeats for this message while the call # is executing if it runs for some time. The thread will wait # for the event to be signaled, which we do explicitly below # after dispatching the method call. completion_event = eventletutils.Event() watchdog_thread = threading.Thread(target=self._watchdog, args=(completion_event, incoming)) if incoming.client_timeout: # NOTE(danms): The client provided a timeout, so we start # the watchdog thread. If the client is old or didn't send # a timeout, we just never start the watchdog thread. 
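            # (Descriptive note: for example, a client that sent its call
            # with call_monitor_timeout=60 arrives here with
            # incoming.client_timeout == 60, so the watchdog above will
            # send a call-monitor heartbeat roughly every 30 seconds for
            # as long as the dispatched endpoint method keeps running.)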
watchdog_thread.start() found_compatible = False for endpoint in self.endpoints: target = getattr(endpoint, 'target', None) if not target: target = self._default_target if not (self._is_namespace(target, namespace) and self._is_compatible(target, version)): continue if hasattr(endpoint, method): if self.access_policy.is_allowed(endpoint, method): try: return self._do_dispatch(endpoint, method, ctxt, args) finally: completion_event.set() if incoming.client_timeout: watchdog_thread.join() found_compatible = True if found_compatible: raise NoSuchMethod(method) else: raise UnsupportedVersion(version, method=method) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/rpc/server.py0000664000175000017500000002555200000000000022246 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ An RPC server exposes a number of endpoints, each of which contains a set of methods which may be invoked remotely by clients over a given transport. To create an RPC server, you supply a transport, target and a list of endpoints. A transport can be obtained simply by calling the get_rpc_transport() method:: transport = messaging.get_rpc_transport(conf) which will load the appropriate transport driver according to the user's messaging configuration. See get_rpc_transport() for more details. The target supplied when creating an RPC server expresses the topic, server name and - optionally - the exchange to listen on. See Target for more details on these attributes. Multiple RPC Servers may listen to the same topic (and exchange) simultaneously. See RPCClient for details regarding how RPC requests are distributed to the Servers in this case. Each endpoint object may have a target attribute which may have namespace and version fields set. By default, we use the 'null namespace' and version 1.0. Incoming method calls will be dispatched to the first endpoint with the requested method, a matching namespace and a compatible version number. The first parameter to method invocations is always the request context supplied by the client. The remaining parameters are the arguments supplied to the method by the client. Endpoint methods may return a value. If so the RPC Server will send the returned value back to the requesting client via the transport. The executor parameter controls how incoming messages will be received and dispatched. Refer to the Executor documentation for descriptions of the types of executors. *Note:* If the "eventlet" executor is used, the threading and time libraries need to be monkeypatched. The RPC reply operation is best-effort: the server will consider the message containing the reply successfully sent once it is accepted by the messaging transport. The server does not guarantee that the reply is processed by the RPC client. If the send fails an error will be logged and the server will continue processing incoming RPC requests.
Parameters to the method invocation and values returned from the method are Python primitive types. However the actual encoding of the data in the message may not be in primitive form (e.g. the message payload may be a dictionary encoded as an ASCII string using JSON). A serializer object is used to convert incoming encoded message data to primitive types. The serializer is also used to convert the return value from primitive types to an encoding suitable for the message payload. RPC servers have start(), stop() and wait() methods to begin handling requests, stop handling requests, and wait for all in-process requests to complete after the Server has been stopped. A simple example of an RPC server with multiple endpoints might be:: # NOTE(changzhi): We are using eventlet executor and # time.sleep(1), therefore, the server code needs to be # monkey-patched. import eventlet eventlet.monkey_patch() from oslo_config import cfg import oslo_messaging import time class ServerControlEndpoint(object): target = oslo_messaging.Target(namespace='control', version='2.0') def __init__(self, server): self.server = server def stop(self, ctx): if self.server: self.server.stop() class TestEndpoint(object): def test(self, ctx, arg): return arg transport = oslo_messaging.get_rpc_transport(cfg.CONF) target = oslo_messaging.Target(topic='test', server='server1') endpoints = [ ServerControlEndpoint(None), TestEndpoint(), ] server = oslo_messaging.get_rpc_server(transport, target, endpoints, executor='eventlet') try: server.start() while True: time.sleep(1) except KeyboardInterrupt: print("Stopping server") server.stop() server.wait() """ import logging import sys import time from oslo_messaging import exceptions from oslo_messaging.rpc import dispatcher as rpc_dispatcher from oslo_messaging import server as msg_server from oslo_messaging import transport as msg_transport __all__ = [ 'get_rpc_server', 'expected_exceptions', 'expose' ] LOG = logging.getLogger(__name__) class RPCServer(msg_server.MessageHandlingServer): def __init__(self, transport, target, dispatcher, executor=None): super(RPCServer, self).__init__(transport, dispatcher, executor) if not isinstance(transport, msg_transport.RPCTransport): LOG.warning("Using notification transport for RPC. Please use " "get_rpc_transport to obtain an RPC transport " "instance.") self._target = target def _create_listener(self): return self.transport._listen(self._target, 1, None) def _process_incoming(self, incoming): message = incoming[0] rpc_method = message.message.get('method') start = time.time() LOG.debug("Received incoming message with id %(msg_id)s and " "method: %(method)s.", {"msg_id": message.msg_id, "method": rpc_method}) # TODO(sileht): We should remove that at some point and do # this directly in the driver try: message.acknowledge() except Exception: LOG.exception("Cannot acknowledge message, skipping processing") return failure = None try: res = self.dispatcher.dispatch(message) except rpc_dispatcher.ExpectedException as e: # current sys.exc_info() content can be overridden # by another exception raised by a log handler during # LOG.debug(). So keep a copy and delete it later.
failure = e.exc_info LOG.debug('Expected exception during message handling (%s)', e) except rpc_dispatcher.NoSuchMethod as e: failure = sys.exc_info() if e.method.endswith('_ignore_errors'): LOG.debug('Method %s not found', e.method) else: LOG.exception('Exception during message handling') except Exception: failure = sys.exc_info() LOG.exception('Exception during message handling') try: if failure is None: message.reply(res) LOG.debug("Replied success message with id %(msg_id)s and " "method: %(method)s. Time elapsed: %(elapsed).3f", {"msg_id": message.msg_id, "method": rpc_method, "elapsed": (time.time() - start)}) else: message.reply(failure=failure) LOG.debug("Replied failure for incoming message with " "id %(msg_id)s and method: %(method)s. " "Time elapsed: %(elapsed).3f", {"msg_id": message.msg_id, "method": rpc_method, "elapsed": (time.time() - start)}) except exceptions.MessageUndeliverable as e: LOG.exception( "MessageUndeliverable error, " "source exception: %s, routing_key: %s, exchange: %s: ", e.exception, e.routing_key, e.exchange ) except Exception: LOG.exception("Cannot send reply for message") finally: # NOTE(dhellmann): Remove circular object reference # between the current stack frame and the traceback in # exc_info. del failure def get_rpc_server(transport, target, endpoints, executor=None, serializer=None, access_policy=None, server_cls=RPCServer): """Construct an RPC server. :param transport: the messaging transport :type transport: Transport :param target: the exchange, topic and server to listen on :type target: Target :param endpoints: a list of endpoint objects :type endpoints: list :param executor: name of message executor - available values are 'eventlet' and 'threading' :type executor: str :param serializer: an optional entity serializer :type serializer: Serializer :param access_policy: an optional access policy. Defaults to DefaultRPCAccessPolicy :type access_policy: RPCAccessPolicyBase :param server_cls: The server class to instantiate :type server_cls: class """ dispatcher = rpc_dispatcher.RPCDispatcher(endpoints, serializer, access_policy) return server_cls(transport, target, dispatcher, executor) def expected_exceptions(*exceptions): """Decorator for RPC endpoint methods that raise expected exceptions. Marking an endpoint method with this decorator allows the declaration of expected exceptions that the RPC server should not consider fatal, and not log as if they were generated in a real error scenario. Note that this will cause listed exceptions to be wrapped in an ExpectedException, which is used internally by the RPC server. The RPC client will see the original exception type. """ def outer(func): def inner(*args, **kwargs): try: return func(*args, **kwargs) # Take advantage of the fact that we can catch # multiple exception types using a tuple of # exception classes, with subclass detection # for free. Any exception that is not in, or # derived from, the args passed to us will not # be caught here and will propagate as normal. except exceptions: raise rpc_dispatcher.ExpectedException() return inner return outer def expose(func): """Decorator for RPC endpoint methods that are exposed to the RPC client.
If the dispatcher's access_policy is set to ExplicitRPCAccessPolicy then endpoint methods need to be explicitly exposed:: # foo() cannot be invoked by an RPC client def foo(self): pass # bar() can be invoked by an RPC client @rpc.expose def bar(self): pass """ func.exposed = True return func ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/rpc/transport.py0000664000175000017500000000406300000000000022766 0ustar00zuulzuul00000000000000# Copyright 2017 OpenStack Foundation. # All Rights Reserved. # Copyright 2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging import transport as msg_transport __all__ = [ 'get_rpc_transport' ] def get_rpc_transport(conf, url=None, allowed_remote_exmods=None, transport_cls=msg_transport.RPCTransport): """A factory method for Transport objects for RPCs. This method should be used to ensure the correct messaging functionality for RPCs. RPCs and Notifications may use separate messaging systems that utilize different drivers, different access permissions, message delivery, etc. Presently, this function works exactly the same as get_transport. Its use is recommended, as it disambiguates the intended use of the transport and may in the future be extended with functionality related to the separation of messaging backends. :param conf: the user configuration :type conf: cfg.ConfigOpts :param url: a transport URL, see :py:class:`transport.TransportURL` :type url: str or TransportURL :param allowed_remote_exmods: a list of modules which a client using this transport will deserialize remote exceptions from :type allowed_remote_exmods: list :param transport_cls: the transport class to instantiate :type transport_cls: class """ return msg_transport._get_transport( conf, url, allowed_remote_exmods, transport_cls=transport_cls) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/serializer.py0000664000175000017500000000462500000000000022323 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
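# (A minimal sketch of a custom serializer built on the Serializer
# interface defined below; the JSON round-trip shown is an assumption
# made for illustration and is not part of this module.)
#
#     from oslo_serialization import jsonutils
#
#     class JsonStringSerializer(Serializer):
#         def serialize_entity(self, ctxt, entity):
#             return jsonutils.dumps(entity)
#
#         def deserialize_entity(self, ctxt, entity):
#             return jsonutils.loads(entity)
#
#         def serialize_context(self, ctxt):
#             return dict(ctxt)
#
#         def deserialize_context(self, ctxt):
#             return dict(ctxt)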
"""Provides the definition of a message serialization handler""" import abc from oslo_serialization import jsonutils __all__ = ['Serializer', 'NoOpSerializer', 'JsonPayloadSerializer'] class Serializer(object, metaclass=abc.ABCMeta): """Generic (de-)serialization definition base class.""" @abc.abstractmethod def serialize_entity(self, ctxt, entity): """Serialize something to primitive form. :param ctxt: Request context, in deserialized form :param entity: Entity to be serialized :returns: Serialized form of entity """ @abc.abstractmethod def deserialize_entity(self, ctxt, entity): """Deserialize something from primitive form. :param ctxt: Request context, in deserialized form :param entity: Primitive to be deserialized :returns: Deserialized form of entity """ @abc.abstractmethod def serialize_context(self, ctxt): """Serialize a request context into a dictionary. :param ctxt: Request context :returns: Serialized form of context """ @abc.abstractmethod def deserialize_context(self, ctxt): """Deserialize a dictionary into a request context. :param ctxt: Request context dictionary :returns: Deserialized form of entity """ class NoOpSerializer(Serializer): """A serializer that does nothing.""" def serialize_entity(self, ctxt, entity): return entity def deserialize_entity(self, ctxt, entity): return entity def serialize_context(self, ctxt): return ctxt def deserialize_context(self, ctxt): return ctxt class JsonPayloadSerializer(NoOpSerializer): @staticmethod def serialize_entity(context, entity): return jsonutils.to_primitive(entity, convert_instances=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/server.py0000664000175000017500000004112200000000000021451 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import functools import inspect import logging import threading import traceback from oslo_config import cfg from oslo_service import service from oslo_utils import eventletutils from oslo_utils import timeutils from stevedore import driver from oslo_messaging._drivers import base as driver_base from oslo_messaging import _utils as utils from oslo_messaging import exceptions __all__ = [ 'ExecutorLoadFailure', 'MessageHandlingServer', 'MessagingServerError', 'ServerListenError', ] LOG = logging.getLogger(__name__) # The default number of seconds of waiting after which we will emit a log # message DEFAULT_LOG_AFTER = 30 _pool_opts = [ cfg.IntOpt('executor_thread_pool_size', default=64, deprecated_name="rpc_thread_pool_size", help='Size of executor thread pool when' ' executor is threading or eventlet.'), ] class MessagingServerError(exceptions.MessagingException): """Base class for all MessageHandlingServer exceptions.""" class ExecutorLoadFailure(MessagingServerError): """Raised if an executor can't be loaded.""" def __init__(self, executor, ex): msg = 'Failed to load executor "%s": %s' % (executor, ex) super(ExecutorLoadFailure, self).__init__(msg) self.executor = executor self.ex = ex class ServerListenError(MessagingServerError): """Raised if we failed to listen on a target.""" def __init__(self, target, ex): msg = 'Failed to listen on target "%s": %s' % (target, ex) super(ServerListenError, self).__init__(msg) self.target = target self.ex = ex class TaskTimeout(MessagingServerError): """Raised if we timed out waiting for a task to complete.""" class _OrderedTask(object): """A task which must be executed in a particular order. A caller may wait for this task to complete by calling `wait_for_completion`. A caller may run this task with `run_once`, which will ensure that however many times the task is called it only runs once. Simultaneous callers will block until the running task completes, which means that any caller can be sure that the task has completed after run_once returns. """ INIT = 0 # The task has not yet started RUNNING = 1 # The task is running somewhere COMPLETE = 2 # The task has run somewhere def __init__(self, name): """Create a new _OrderedTask. :param name: The name of this task. Used in log messages. """ super(_OrderedTask, self).__init__() self._name = name self._cond = threading.Condition() self._state = self.INIT def _wait(self, condition, msg, log_after, timeout_timer): """Wait while condition() is true. Write a log message if condition() has not become false within `log_after` seconds. Raise TaskTimeout if timeout_timer expires while waiting. """ log_timer = None if log_after != 0: log_timer = timeutils.StopWatch(duration=log_after) log_timer.start() while condition(): if log_timer is not None and log_timer.expired(): LOG.warning('Possible hang: %s', msg) LOG.debug(''.join(traceback.format_stack())) # Only log once. After that we wait indefinitely without # logging. log_timer = None if timeout_timer is not None and timeout_timer.expired(): raise TaskTimeout(msg) timeouts = [] if log_timer is not None: timeouts.append(log_timer.leftover()) if timeout_timer is not None: timeouts.append(timeout_timer.leftover()) wait = None if timeouts: wait = min(timeouts) self._cond.wait(wait) @property def complete(self): return self._state == self.COMPLETE def wait_for_completion(self, caller, log_after, timeout_timer): """Wait until this task has completed. :param caller: The name of the task which is waiting.
:param log_after: Emit a log message if waiting longer than `log_after` seconds. :param timeout_timer: Raise TaskTimeout if StopWatch object `timeout_timer` expires while waiting. """ with self._cond: msg = '%s is waiting for %s to complete' % (caller, self._name) self._wait(lambda: not self.complete, msg, log_after, timeout_timer) def run_once(self, fn, log_after, timeout_timer): """Run a task exactly once. If it is currently running in another thread, wait for it to complete. If it has already run, return immediately without running it again. :param fn: The task to run. It must be a callable taking no arguments. It may optionally return another callable, which also takes no arguments, which will be executed after completion has been signaled to other threads. :param log_after: Emit a log message if waiting longer than `log_after` seconds. :param timeout_timer: Raise TaskTimeout if StopWatch object `timeout_timer` expires while waiting. """ with self._cond: if self._state == self.INIT: self._state = self.RUNNING # Note that nothing waits on RUNNING, so no need to notify # We need to release the condition lock before calling out to # prevent deadlocks. Reacquire it immediately afterwards. self._cond.release() try: post_fn = fn() finally: self._cond.acquire() self._state = self.COMPLETE self._cond.notify_all() if post_fn is not None: # Release the condition lock before calling out to prevent # deadlocks. Reacquire it immediately afterwards. self._cond.release() try: post_fn() finally: self._cond.acquire() elif self._state == self.RUNNING: msg = ('%s is waiting for another thread to complete' % self._name) self._wait(lambda: self._state == self.RUNNING, msg, log_after, timeout_timer) class _OrderedTaskRunner(object): """Mixin for a class which executes ordered tasks.""" def __init__(self, *args, **kwargs): super(_OrderedTaskRunner, self).__init__(*args, **kwargs) # Get a list of methods on this object which have the _ordered # attribute self._tasks = [name for (name, member) in inspect.getmembers(self) if inspect.ismethod(member) and getattr(member, '_ordered', False)] self.reset_states() self._reset_lock = threading.Lock() def reset_states(self): # Create new task states for tasks in reset self._states = {task: _OrderedTask(task) for task in self._tasks} @staticmethod def decorate_ordered(fn, state, after, reset_after): @functools.wraps(fn) def wrapper(self, *args, **kwargs): # If the reset_after state has already completed, reset state so # we can run again. # NOTE(mdbooth): This is ugly and requires external locking to be # deterministic when using multiple threads. Consider a thread that # does: server.stop(), server.wait(). If another thread causes a # reset between stop() and wait(), this will not have the intended # behaviour. It is safe without external locking, if the caller # instantiates a new object. with self._reset_lock: if (reset_after is not None and self._states[reset_after].complete): self.reset_states() # Store the states we started with in case the state wraps on us # while we're sleeping. We must wait and run_once in the same # epoch. If the epoch ended while we were sleeping, run_once will # safely do nothing. 
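            # (Descriptive note: with the decorations used by
            # MessageHandlingServer below -- start() is
            # @ordered(reset_after='stop'), stop() is @ordered(after='start')
            # and wait() is @ordered(after='stop') -- a wait() call blocks
            # until stop() has run, and a start() call after a completed
            # stop() resets all states to begin a new epoch.)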
states = self._states log_after = kwargs.pop('log_after', DEFAULT_LOG_AFTER) timeout = kwargs.pop('timeout', None) timeout_timer = None if timeout is not None: timeout_timer = timeutils.StopWatch(duration=timeout) timeout_timer.start() # Wait for the given preceding state to complete if after is not None: states[after].wait_for_completion(state, log_after, timeout_timer) # Run this state states[state].run_once(lambda: fn(self, *args, **kwargs), log_after, timeout_timer) return wrapper def ordered(after=None, reset_after=None): """Decorator for a method which will be executed as an ordered task. The method will be called exactly once, however many times it is called. If it is called multiple times simultaneously it will only be called once, but all callers will wait until execution is complete. If `after` is given, this method will not run until `after` has completed. If `reset_after` is given and the target method has completed, allow this task to run again by resetting all task states. :param after: Optionally, the name of another `ordered` method. Wait for the completion of `after` before executing this method. :param reset_after: Optionally, the name of another `ordered` method. Reset all states when calling this method if `reset_after` has completed. """ def _ordered(fn): # Set an attribute on the method so we can find it later setattr(fn, '_ordered', True) state = fn.__name__ return _OrderedTaskRunner.decorate_ordered(fn, state, after, reset_after) return _ordered class MessageHandlingServer(service.ServiceBase, _OrderedTaskRunner, metaclass=abc.ABCMeta): """Server for handling messages. Connect a transport to a dispatcher that knows how to process the message using an executor that knows how the app wants to create new tasks. """ def __init__(self, transport, dispatcher, executor=None): """Construct a message handling server. The dispatcher parameter is a DispatcherBase instance which is used for routing requests to endpoints for processing. The executor parameter controls how incoming messages will be received and dispatched. The executor is automatically detected from the execution environment. It handles many messages in parallel. If your application needs asynchronous behaviour, consider using the eventlet executor. :param transport: the messaging transport :type transport: Transport :param dispatcher: has a dispatch() method which is invoked for each incoming request :type dispatcher: DispatcherBase :param executor: name of message executor - available values are 'eventlet' and 'threading' :type executor: str """ if executor and executor not in ("threading", "eventlet"): raise ExecutorLoadFailure( executor, "Executor should be None, 'eventlet' or 'threading'") if not executor: executor = utils.get_executor_with_context() self.conf = transport.conf self.conf.register_opts(_pool_opts) self.transport = transport self.dispatcher = dispatcher self.executor_type = executor if self.executor_type == "eventlet": eventletutils.warn_eventlet_not_patched( expected_patched_modules=['thread'], what="the 'oslo.messaging eventlet executor'") self.listener = None try: mgr = driver.DriverManager('oslo.messaging.executors', self.executor_type) except RuntimeError as ex: raise ExecutorLoadFailure(self.executor_type, ex) self._executor_cls = mgr.driver self._work_executor = None self._started = False super(MessageHandlingServer, self).__init__() def _on_incoming(self, incoming): """Handles on_incoming event :param incoming: incoming request.
""" self._work_executor.submit(self._process_incoming, incoming) @abc.abstractmethod def _process_incoming(self, incoming): """Perform processing incoming request :param incoming: incoming request. """ @abc.abstractmethod def _create_listener(self): """Creates listener object for polling requests :return: MessageListenerAdapter """ @ordered(reset_after='stop') def start(self, override_pool_size=None): """Start handling incoming messages. This method causes the server to begin polling the transport for incoming messages and passing them to the dispatcher. Message processing will continue until the stop() method is called. The executor controls how the server integrates with the applications I/O handling strategy - it may choose to poll for messages in a new process, thread or co-operatively scheduled coroutine or simply by registering a callback with an event loop. Similarly, the executor may choose to dispatch messages in a new thread, coroutine or simply the current thread. """ if self._started: LOG.warning('The server has already been started. Ignoring ' 'the redundant call to start().') return self._started = True executor_opts = {} executor_opts["max_workers"] = ( override_pool_size or self.conf.executor_thread_pool_size ) self._work_executor = self._executor_cls(**executor_opts) try: self.listener = self._create_listener() except driver_base.TransportDriverError as ex: raise ServerListenError(self.target, ex) self.listener.start(self._on_incoming) @ordered(after='start') def stop(self): """Stop handling incoming messages. Once this method returns, no new incoming messages will be handled by the server. However, the server may still be in the process of handling some messages, and underlying driver resources associated to this server are still in use. See 'wait' for more details. """ if self.listener: self.listener.stop() self._started = False @ordered(after='stop') def wait(self): """Wait for message processing to complete. After calling stop(), there may still be some existing messages which have not been completely processed. The wait() method blocks until all message processing has completed. Once it's finished, the underlying driver resources associated to this server are released (like closing useless network connections). """ self._work_executor.shutdown(wait=True) # Close listener connection after processing all messages if self.listener: self.listener.cleanup() def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. """ # TODO(sergey.vilgelm): implement this method pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/target.py0000664000175000017500000001045700000000000021440 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class Target(object): """Identifies the destination of messages. 
A Target encapsulates all the information to identify where a message should be sent or what messages a server is listening for. Different subsets of the information encapsulated in a Target object are relevant to various aspects of the API: an RPC Server's target: topic and server are required; exchange is optional an RPC endpoint's target: namespace and version are optional an RPC client sending a message: topic is required, all other attributes optional a Notification Server's target: topic is required, exchange is optional; all other attributes ignored a Notifier's target: topic is required, exchange is optional; all other attributes ignored Its attributes are: :param exchange: A scope for topics. Leave unspecified to default to the control_exchange configuration option. :type exchange: str :param topic: A name which identifies the set of interfaces exposed by a server. Multiple servers may listen on a topic and messages will be dispatched to one of the servers selected in a best-effort round-robin fashion (unless fanout is ``True``). :type topic: str :param namespace: Identifies a particular RPC interface (i.e. set of methods) exposed by a server. The default interface has no namespace identifier and is referred to as the null namespace. :type namespace: str :param version: RPC interfaces have a major.minor version number associated with them. A minor number increment indicates a backwards compatible change and an incompatible change is indicated by a major number bump. Servers may implement multiple major versions and clients may indicate that their messages require a particular minimum minor version. :type version: str :param server: RPC Clients can request that a message be directed to a specific server, rather than just one of a pool of servers listening on the topic. :type server: str :param fanout: Clients may request that a copy of the message be delivered to all servers listening on a topic by setting fanout to ``True``, rather than just one of them. :type fanout: bool :param legacy_namespaces: A server always accepts messages specified via the 'namespace' parameter, and may also accept messages defined via this parameter. This option should be used to switch namespaces safely during rolling upgrades.
:type legacy_namespaces: list of strings """ def __init__(self, exchange=None, topic=None, namespace=None, version=None, server=None, fanout=None, legacy_namespaces=None): self.exchange = exchange self.topic = topic self.namespace = namespace self.version = version self.server = server self.fanout = fanout self.accepted_namespaces = [namespace] + (legacy_namespaces or []) def __call__(self, **kwargs): for a in ('exchange', 'topic', 'namespace', 'version', 'server', 'fanout'): kwargs.setdefault(a, getattr(self, a)) return Target(**kwargs) def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not self == other def __repr__(self): attrs = [] for a in ['exchange', 'topic', 'namespace', 'version', 'server', 'fanout']: v = getattr(self, a) if v: attrs.append((a, v)) values = ', '.join(['%s=%s' % i for i in attrs]) return '<Target ' + values + '>' def __hash__(self): return id(self) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724686539.134673 oslo.messaging-14.9.0/oslo_messaging/tests/0000775000175000017500000000000000000000000020733 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/__init__.py0000664000175000017500000000170100000000000023043 0ustar00zuulzuul00000000000000# Copyright 2014 eNovance # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet eventlet.monkey_patch() # oslotest prepares mock for six in oslotest/__init__.py as follows: # six.add_move(six.MovedModule('mock', 'mock', 'unittest.mock')) and # oslo.messaging imports oslotest before importing test submodules to # setup six.moves for mock, then "from unittest import mock" works well. import oslotest ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724686539.134673 oslo.messaging-14.9.0/oslo_messaging/tests/drivers/0000775000175000017500000000000000000000000022411 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/drivers/__init__.py0000664000175000017500000000000000000000000024510 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/drivers/test_amqp_driver.py0000664000175000017500000027752700000000000026343 0ustar00zuulzuul00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import copy import logging import os import queue import select import shlex import shutil import socket import subprocess import sys import tempfile import threading import time from unittest import mock import uuid from oslo_utils import eventletutils from oslo_utils import importutils from string import Template import testtools import oslo_messaging from oslo_messaging.tests import utils as test_utils # TODO(kgiusti) Conditionally run these tests only if the necessary # dependencies are installed. This should be removed once the proton libraries # are available in the base repos for all supported platforms. pyngus = importutils.try_import("pyngus") if pyngus: from oslo_messaging._drivers.amqp1_driver.addressing \ import AddresserFactory from oslo_messaging._drivers.amqp1_driver.addressing \ import LegacyAddresser from oslo_messaging._drivers.amqp1_driver.addressing \ import RoutableAddresser import oslo_messaging._drivers.impl_amqp1 as amqp_driver # The Cyrus-based SASL tests can only be run if the installed version of proton # has been built with Cyrus SASL support. _proton = importutils.try_import("proton") CYRUS_ENABLED = (pyngus and pyngus.VERSION >= (2, 0, 0) and _proton and getattr(_proton.SASL, "extended", lambda: False)()) # same with SSL SSL_ENABLED = (_proton and getattr(_proton.SSL, "present", lambda: False)()) LOG = logging.getLogger(__name__) def _wait_until(predicate, timeout): deadline = timeout + time.time() while not predicate() and deadline > time.time(): time.sleep(0.1) class _ListenerThread(threading.Thread): """Run a blocking listener in a thread.""" def __init__(self, listener, msg_count, msg_ack=True): super(_ListenerThread, self).__init__() self.listener = listener self.msg_count = msg_count self._msg_ack = msg_ack self.messages = queue.Queue() self.daemon = True self.started = eventletutils.Event() self._done = eventletutils.Event() self.start() self.started.wait() def run(self): LOG.debug("Listener started") self.started.set() while not self._done.is_set(): for in_msg in self.listener.poll(timeout=0.5): self.messages.put(in_msg) self.msg_count -= 1 self.msg_count == 0 and self._done.set() if self._msg_ack: in_msg.acknowledge() if in_msg.message.get('method') == 'echo': in_msg.reply(reply={'correlation-id': in_msg.message.get('id')}) else: in_msg.requeue() LOG.debug("Listener stopped") def get_messages(self): """Returns a list of all received messages.""" msgs = [] try: while True: m = self.messages.get(False) msgs.append(m) except queue.Empty: pass return msgs def kill(self, timeout=30): self._done.set() self.join(timeout) class _SlowResponder(_ListenerThread): # an RPC listener that pauses delay seconds before replying def __init__(self, listener, delay, msg_count=1): self._delay = delay super(_SlowResponder, self).__init__(listener, msg_count) def run(self): LOG.debug("_SlowResponder started") self.started.set() while not self._done.is_set(): for in_msg in self.listener.poll(timeout=0.5): time.sleep(self._delay) in_msg.acknowledge() in_msg.reply(reply={'correlation-id': in_msg.message.get('id')}) self.messages.put(in_msg) self.msg_count -= 1 self.msg_count == 0 and self._done.set() class _CallMonitor(_ListenerThread): # an RPC listener that generates heartbeats before # replying. 
def __init__(self, listener, delay, hb_count, msg_count=1): self._delay = delay self._hb_count = hb_count super(_CallMonitor, self).__init__(listener, msg_count) def run(self): LOG.debug("_CallMonitor started") self.started.set() while not self._done.is_set(): for in_msg in self.listener.poll(timeout=0.5): hb_rate = in_msg.client_timeout / 2.0 deadline = time.time() + self._delay while deadline > time.time(): if self._done.wait(hb_rate): return if self._hb_count > 0: in_msg.heartbeat() self._hb_count -= 1 in_msg.acknowledge() in_msg.reply(reply={'correlation-id': in_msg.message.get('id')}) self.messages.put(in_msg) self.msg_count -= 1 self.msg_count == 0 and self._done.set() @testtools.skipUnless(pyngus, "proton modules not present") class TestProtonDriverLoad(test_utils.BaseTestCase): def setUp(self): super(TestProtonDriverLoad, self).setUp() self.messaging_conf.transport_url = 'amqp://' def test_driver_load(self): transport = oslo_messaging.get_transport(self.conf) self.assertIsInstance(transport._driver, amqp_driver.ProtonDriver) class _AmqpBrokerTestCase(test_utils.BaseTestCase): """Creates a single FakeBroker for use by the tests""" @testtools.skipUnless(pyngus, "proton modules not present") def setUp(self): super(_AmqpBrokerTestCase, self).setUp() self._broker = FakeBroker(self.conf.oslo_messaging_amqp) self._broker_addr = "amqp://%s:%d" % (self._broker.host, self._broker.port) self._broker_url = oslo_messaging.TransportURL.parse( self.conf, self._broker_addr) def tearDown(self): super(_AmqpBrokerTestCase, self).tearDown() if self._broker: self._broker.stop() class _AmqpBrokerTestCaseAuto(_AmqpBrokerTestCase): """Like _AmqpBrokerTestCase, but starts the broker""" @testtools.skipUnless(pyngus, "proton modules not present") def setUp(self): super(_AmqpBrokerTestCaseAuto, self).setUp() self._broker.start() class TestAmqpSend(_AmqpBrokerTestCaseAuto): """Test sending and receiving messages.""" def test_driver_unconnected_cleanup(self): """Verify the driver can cleanly shutdown even if never connected.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) driver.cleanup() def test_listener_cleanup(self): """Verify unused listener can cleanly shutdown.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = driver.listen(target, None, None)._poll_style_listener self.assertIsInstance(listener, amqp_driver.ProtonListener) driver.cleanup() def test_send_no_reply(self): driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) rc = driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) self.assertIsNone(rc) listener.join(timeout=30) self.assertFalse(listener.is_alive()) self.assertEqual({"msg": "value"}, listener.messages.get().message) predicate = lambda: (self._broker.sender_link_ack_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def test_send_exchange_with_reply(self): driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target1 = oslo_messaging.Target(topic="test-topic", exchange="e1") listener1 = _ListenerThread( driver.listen(target1, None, None)._poll_style_listener, 1) target2 = oslo_messaging.Target(topic="test-topic", exchange="e2") listener2 = _ListenerThread( driver.listen(target2, None, None)._poll_style_listener, 1) rc = driver.send(target1, {"context": "whatever"}, {"method": "echo", 
"id": "e1"}, wait_for_reply=True, timeout=30) self.assertIsNotNone(rc) self.assertEqual('e1', rc.get('correlation-id')) rc = driver.send(target2, {"context": "whatever"}, {"method": "echo", "id": "e2"}, wait_for_reply=True, timeout=30) self.assertIsNotNone(rc) self.assertEqual('e2', rc.get('correlation-id')) listener1.join(timeout=30) self.assertFalse(listener1.is_alive()) listener2.join(timeout=30) self.assertFalse(listener2.is_alive()) driver.cleanup() def test_messaging_patterns(self): """Verify the direct, shared, and fanout message patterns work.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target1 = oslo_messaging.Target(topic="test-topic", server="server1") listener1 = _ListenerThread( driver.listen(target1, None, None)._poll_style_listener, 4) target2 = oslo_messaging.Target(topic="test-topic", server="server2") listener2 = _ListenerThread( driver.listen(target2, None, None)._poll_style_listener, 3) shared_target = oslo_messaging.Target(topic="test-topic") fanout_target = oslo_messaging.Target(topic="test-topic", fanout=True) # this should go to only one server: driver.send(shared_target, {"context": "whatever"}, {"method": "echo", "id": "either-1"}, wait_for_reply=True) self.assertEqual(1, self._broker.topic_count) self.assertEqual(1, self._broker.direct_count) # reply # this should go to the other server: driver.send(shared_target, {"context": "whatever"}, {"method": "echo", "id": "either-2"}, wait_for_reply=True) self.assertEqual(2, self._broker.topic_count) self.assertEqual(2, self._broker.direct_count) # reply # these should only go to listener1: driver.send(target1, {"context": "whatever"}, {"method": "echo", "id": "server1-1"}, wait_for_reply=True) driver.send(target1, {"context": "whatever"}, {"method": "echo", "id": "server1-2"}, wait_for_reply=True) self.assertEqual(6, self._broker.direct_count) # 2X(send+reply) # this should only go to listener2: driver.send(target2, {"context": "whatever"}, {"method": "echo", "id": "server2"}, wait_for_reply=True) self.assertEqual(8, self._broker.direct_count) # both listeners should get a copy: driver.send(fanout_target, {"context": "whatever"}, {"method": "echo", "id": "fanout"}) listener1.join(timeout=30) self.assertFalse(listener1.is_alive()) listener2.join(timeout=30) self.assertFalse(listener2.is_alive()) self.assertEqual(1, self._broker.fanout_count) listener1_ids = [x.message.get('id') for x in listener1.get_messages()] listener2_ids = [x.message.get('id') for x in listener2.get_messages()] self.assertTrue('fanout' in listener1_ids and 'fanout' in listener2_ids) self.assertTrue('server1-1' in listener1_ids and 'server1-1' not in listener2_ids) self.assertTrue('server1-2' in listener1_ids and 'server1-2' not in listener2_ids) self.assertTrue('server2' in listener2_ids and 'server2' not in listener1_ids) if 'either-1' in listener1_ids: self.assertTrue('either-2' in listener2_ids and 'either-2' not in listener1_ids and 'either-1' not in listener2_ids) else: self.assertTrue('either-2' in listener1_ids and 'either-2' not in listener2_ids and 'either-1' in listener2_ids) predicate = lambda: (self._broker.sender_link_ack_count == 12) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def test_send_timeout(self): """Verify send timeout - no reply sent.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) # the listener will drop this message: 
self.assertRaises(oslo_messaging.MessagingTimeout, driver.send, target, {"context": "whatever"}, {"method": "drop"}, wait_for_reply=True, timeout=1.0) listener.join(timeout=30) self.assertFalse(listener.is_alive()) driver.cleanup() def test_released_send(self): """Verify exception thrown if send Nacked.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="no listener") # the broker will send a nack (released) since there is no active # listener for the target: self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send, target, {"context": "whatever"}, {"method": "drop"}, wait_for_reply=True, retry=0, timeout=1.0) driver.cleanup() def test_send_not_acked(self): """Verify exception thrown ack dropped.""" self.config(pre_settled=[], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) # set this directly so we can use a value < minimum allowed driver._default_send_timeout = 2 target = oslo_messaging.Target(topic="!no-ack!") # the broker will silently discard: self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send, target, {"context": "whatever"}, {"method": "drop"}, retry=0, wait_for_reply=True) driver.cleanup() def test_no_ack_cast(self): """Verify no exception is thrown if acks are turned off""" # set casts to ignore ack self.config(pre_settled=['rpc-cast'], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) # set this directly so we can use a value < minimum allowed driver._default_send_timeout = 2 target = oslo_messaging.Target(topic="!no-ack!") # the broker will silently discard this cast, but since ack'ing is # disabled the send does not fail driver.send(target, {"context": "whatever"}, {"method": "drop"}, wait_for_reply=False) driver.cleanup() def test_call_late_reply(self): """What happens if reply arrives after timeout?""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _SlowResponder( driver.listen(target, None, None)._poll_style_listener, delay=3) self.assertRaises(oslo_messaging.MessagingTimeout, driver.send, target, {"context": "whatever"}, {"method": "echo", "id": "???"}, wait_for_reply=True, timeout=1.0) listener.join(timeout=30) self.assertFalse(listener.is_alive()) predicate = lambda: (self._broker.sender_link_ack_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def test_call_failed_reply(self): """Send back an exception generated at the listener""" class _FailedResponder(_ListenerThread): def __init__(self, listener): super(_FailedResponder, self).__init__(listener, 1) def run(self): self.started.set() while not self._done.is_set(): for in_msg in self.listener.poll(timeout=0.5): try: raise RuntimeError("Oopsie!") except RuntimeError: in_msg.reply(reply=None, failure=sys.exc_info()) self._done.set() driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _FailedResponder( driver.listen(target, None, None)._poll_style_listener) self.assertRaises(RuntimeError, driver.send, target, {"context": "whatever"}, {"method": "echo"}, wait_for_reply=True, timeout=5.0) listener.join(timeout=30) self.assertFalse(listener.is_alive()) driver.cleanup() def test_call_reply_timeout(self): """What happens if the replier times out?""" class _TimeoutListener(_ListenerThread): def __init__(self, listener): super(_TimeoutListener, self).__init__(listener, 1) def 
run(self): self.started.set() while not self._done.is_set(): for in_msg in self.listener.poll(timeout=0.5): # reply will never be acked (simulate drop): in_msg._reply_to = "!no-ack!" in_msg.reply(reply={'correlation-id': in_msg.message.get("id")}) self._done.set() driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) driver._default_reply_timeout = 1 target = oslo_messaging.Target(topic="test-topic") listener = _TimeoutListener( driver.listen(target, None, None)._poll_style_listener) self.assertRaises(oslo_messaging.MessagingTimeout, driver.send, target, {"context": "whatever"}, {"method": "echo"}, wait_for_reply=True, timeout=3) listener.join(timeout=30) self.assertFalse(listener.is_alive()) driver.cleanup() def test_listener_requeue(self): "Emulate Server requeue on listener incoming messages" self.config(pre_settled=[], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) driver.require_features(requeue=True) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1, msg_ack=False) rc = driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) self.assertIsNone(rc) listener.join(timeout=30) self.assertFalse(listener.is_alive()) predicate = lambda: (self._broker.sender_link_requeue_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def test_sender_minimal_credit(self): # ensure capacity is replenished when only 1 credit is configured self.config(reply_link_credit=1, rpc_server_credit=1, group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic", server="server") listener = _ListenerThread(driver.listen(target, None, None)._poll_style_listener, 4) for i in range(4): threading.Thread(target=driver.send, args=(target, {"context": "whatever"}, {"method": "echo"}), kwargs={'wait_for_reply': True}).start() predicate = lambda: (self._broker.direct_count == 8) _wait_until(predicate, 30) self.assertTrue(predicate()) listener.join(timeout=30) driver.cleanup() def test_sender_link_maintenance(self): # ensure links are purged from cache self.config(default_sender_link_timeout=1, group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic-maint") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 3) # the send should create a receiver link on the broker rc = driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) self.assertIsNone(rc) predicate = lambda: (self._broker.receiver_link_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) self.assertTrue(listener.is_alive()) self.assertEqual({"msg": "value"}, listener.messages.get().message) predicate = lambda: (self._broker.receiver_link_count == 0) _wait_until(predicate, 30) self.assertTrue(predicate()) # the next send should create a separate receiver link on the broker rc = driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) self.assertIsNone(rc) predicate = lambda: (self._broker.receiver_link_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) self.assertTrue(listener.is_alive()) self.assertEqual({"msg": "value"}, listener.messages.get().message) predicate = lambda: (self._broker.receiver_link_count == 0) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def 
    def test_call_monitor_ok(self):
        # verify keepalive by delaying the reply > heartbeat interval
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        target = oslo_messaging.Target(topic="test-topic")
        listener = _CallMonitor(
            driver.listen(target, None, None)._poll_style_listener,
            delay=11, hb_count=100)
        rc = driver.send(target, {"context": True},
                         {"method": "echo", "id": "1"},
                         wait_for_reply=True,
                         timeout=60,
                         call_monitor_timeout=5)
        self.assertIsNotNone(rc)
        self.assertEqual("1", rc.get('correlation-id'))
        listener.join(timeout=30)
        self.assertFalse(listener.is_alive())
        driver.cleanup()

    def test_call_monitor_bad_no_heartbeat(self):
        # verify call fails if keepalives stop coming
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        target = oslo_messaging.Target(topic="test-topic")
        listener = _CallMonitor(
            driver.listen(target, None, None)._poll_style_listener,
            delay=11, hb_count=1)
        self.assertRaises(oslo_messaging.MessagingTimeout,
                          driver.send, target, {"context": True},
                          {"method": "echo", "id": "1"},
                          wait_for_reply=True,
                          timeout=60,
                          call_monitor_timeout=5)
        listener.kill()
        self.assertFalse(listener.is_alive())
        driver.cleanup()

    def test_call_monitor_bad_call_timeout(self):
        # verify call fails if deadline hit regardless of heartbeat activity
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        target = oslo_messaging.Target(topic="test-topic")
        listener = _CallMonitor(
            driver.listen(target, None, None)._poll_style_listener,
            delay=20, hb_count=100)
        self.assertRaises(oslo_messaging.MessagingTimeout,
                          driver.send, target, {"context": True},
                          {"method": "echo", "id": "1"},
                          wait_for_reply=True,
                          timeout=11,
                          call_monitor_timeout=5)
        listener.kill()
        self.assertFalse(listener.is_alive())
        driver.cleanup()
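
# The tests above synchronize with the fake broker via the _wait_until()
# helper defined earlier in this module. For orientation only, a minimal
# equivalent is sketched below (_wait_until_sketch is a hypothetical name;
# the module's real helper may differ in detail). It relies on the 'time'
# import already present in this file.
def _wait_until_sketch(predicate, timeout, interval=0.1):
    # poll the predicate until it returns True or the deadline expires
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)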
class TestAmqpNotification(_AmqpBrokerTestCaseAuto):
    """Test sending and receiving notifications."""

    def test_notification(self):
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        notifications = [(oslo_messaging.Target(topic="topic-1"), 'info'),
                         (oslo_messaging.Target(topic="topic-1"), 'error'),
                         (oslo_messaging.Target(topic="topic-2"), 'debug')]
        nl = driver.listen_for_notifications(
            notifications, None, None, None)._poll_style_listener

        # send one for each supported version:
        msg_count = len(notifications) * 2
        listener = _ListenerThread(nl, msg_count)
        targets = ['topic-1.info',
                   'topic-1.bad',       # will raise MessageDeliveryFailure
                   'bad-topic.debug',   # will raise MessageDeliveryFailure
                   'topic-1.error',
                   'topic-2.debug']

        excepted_targets = []
        for version in (1.0, 2.0):
            for t in targets:
                try:
                    driver.send_notification(oslo_messaging.Target(topic=t),
                                             "context", {'target': t},
                                             version, retry=0)
                except oslo_messaging.MessageDeliveryFailure:
                    excepted_targets.append(t)

        listener.join(timeout=30)
        self.assertFalse(listener.is_alive())
        topics = [x.message.get('target') for x in listener.get_messages()]
        self.assertEqual(msg_count, len(topics))
        self.assertEqual(2, topics.count('topic-1.info'))
        self.assertEqual(2, topics.count('topic-1.error'))
        self.assertEqual(2, topics.count('topic-2.debug'))
        self.assertEqual(4, self._broker.dropped_count)
        self.assertEqual(2, excepted_targets.count('topic-1.bad'))
        self.assertEqual(2, excepted_targets.count('bad-topic.debug'))
        driver.cleanup()

    def test_released_notification(self):
        """Broker sends a Nack (released)."""
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          driver.send_notification,
                          oslo_messaging.Target(topic="bad address"),
                          "context", {'target': "bad address"},
                          2.0, retry=0)
        driver.cleanup()

    def test_notification_not_acked(self):
        """Simulate drop of the ack from the broker."""
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        # set this directly so we can use a value < minimum allowed
        driver._default_notify_timeout = 2
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          driver.send_notification,
                          oslo_messaging.Target(topic="!no-ack!"),
                          "context", {'target': "!no-ack!"},
                          2.0, retry=0)
        driver.cleanup()

    def test_no_ack_notification(self):
        """Verify no exception is thrown if acks are turned off."""
        # add a couple of illegal values for coverage of the warning
        self.config(pre_settled=['notify', 'fleabag', 'poochie'],
                    group="oslo_messaging_amqp")
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        # set this directly so we can use a value < minimum allowed
        driver._default_notify_timeout = 2
        driver.send_notification(oslo_messaging.Target(topic="!no-ack!"),
                                 "context", {'target': "!no-ack!"}, 2.0)
        driver.cleanup()
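
# TestAuthentication below hand-encodes SASL PLAIN credentials. Per
# RFC 4616 the PLAIN initial response is the NUL-separated triple
# authzid NUL authcid NUL password, which is why the fixture uses the
# literal "\0joe\0secret". An illustrative helper (not used by the tests;
# the name is hypothetical) makes the framing explicit:
def _plain_initial_response(username, password, authzid=""):
    # authorization id, authentication id, and password, NUL-separated
    return "%s\0%s\0%s" % (authzid, username, password)

assert _plain_initial_response("joe", "secret") == "\0joe\0secret"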
""" addr = "amqp://joe:badpass@%s:%d" % (self._broker.host, self._broker.port) url = oslo_messaging.TransportURL.parse(self.conf, addr) driver = amqp_driver.ProtonDriver(self.conf, url) target = oslo_messaging.Target(topic="test-topic") _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send, target, {"context": True}, {"method": "echo"}, wait_for_reply=True, retry=2) driver.cleanup() @testtools.skipUnless(CYRUS_ENABLED, "Cyrus SASL not supported") class TestCyrusAuthentication(test_utils.BaseTestCase): """Test the driver's Cyrus SASL integration""" _conf_dir = None # Note: don't add ANONYMOUS or EXTERNAL mechs without updating the # test_authentication_bad_mechs test below _mechs = "DIGEST-MD5 SCRAM-SHA-1 CRAM-MD5 PLAIN" @classmethod def setUpClass(cls): # The Cyrus library can only be initialized once per _process_ # Create a SASL configuration and user database, # add a user 'joe' with password 'secret': cls._conf_dir = "/tmp/amqp1_tests_%s" % os.getpid() # no, we cannot use tempfile.mkdtemp() as it will 'helpfully' remove # the temp dir after the first test is run os.makedirs(cls._conf_dir) db = os.path.join(cls._conf_dir, 'openstack.sasldb') _t = "echo secret | saslpasswd2 -c -p -f ${db} -u myrealm joe" cmd = Template(_t).substitute(db=db) try: subprocess.check_call(args=cmd, shell=True) except Exception: shutil.rmtree(cls._conf_dir, ignore_errors=True) cls._conf_dir = None return # configure the SASL server: conf = os.path.join(cls._conf_dir, 'openstack.conf') t = Template("""sasldb_path: ${db} pwcheck_method: auxprop auxprop_plugin: sasldb mech_list: ${mechs} """) with open(conf, 'w') as f: f.write(t.substitute(db=db, mechs=cls._mechs)) @classmethod def tearDownClass(cls): if cls._conf_dir: shutil.rmtree(cls._conf_dir, ignore_errors=True) def setUp(self): # fire up a test broker with the SASL config: super(TestCyrusAuthentication, self).setUp() if TestCyrusAuthentication._conf_dir is None: self.skipTest("Cyrus SASL tools not installed") _mechs = TestCyrusAuthentication._mechs _dir = TestCyrusAuthentication._conf_dir self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sasl_mechanisms=_mechs, user_credentials=["\0joe@myrealm\0secret"], sasl_config_dir=_dir, sasl_config_name="openstack") self._broker.start() self.messaging_conf.transport_url = 'amqp://' self.conf = self.messaging_conf.conf def tearDown(self): if self._broker: self._broker.stop() self._broker = None super(TestCyrusAuthentication, self).tearDown() def _authentication_test(self, addr, retry=None): url = oslo_messaging.TransportURL.parse(self.conf, addr) driver = amqp_driver.ProtonDriver(self.conf, url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) try: rc = driver.send(target, {"context": True}, {"method": "echo"}, wait_for_reply=True, retry=retry) self.assertIsNotNone(rc) listener.join(timeout=30) self.assertFalse(listener.is_alive()) finally: driver.cleanup() def test_authentication_ok(self): """Verify that username and password given in TransportHost are accepted by the broker. """ addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, self._broker.port) self._authentication_test(addr) def test_authentication_failure(self): """Verify that a bad password given in TransportHost is rejected by the broker. 
""" addr = "amqp://joe@myrealm:badpass@%s:%d" % (self._broker.host, self._broker.port) try: self._authentication_test(addr, retry=2) except oslo_messaging.MessageDeliveryFailure as e: # verify the exception indicates the failure was an authentication # error self.assertTrue('amqp:unauthorized-access' in str(e)) else: self.assertIsNone("Expected authentication failure") def test_authentication_bad_mechs(self): """Verify that the connection fails if the client's SASL mechanisms do not match the broker's. """ self.config(sasl_mechanisms="EXTERNAL ANONYMOUS", group="oslo_messaging_amqp") addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, self._broker.port) self.assertRaises(oslo_messaging.MessageDeliveryFailure, self._authentication_test, addr, retry=0) def test_authentication_default_realm(self): """Verify that default realm is used if none present in username""" addr = "amqp://joe:secret@%s:%d" % (self._broker.host, self._broker.port) self.config(sasl_default_realm="myrealm", group="oslo_messaging_amqp") self._authentication_test(addr) def test_authentication_ignore_default_realm(self): """Verify that default realm is not used if realm present in username """ addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, self._broker.port) self.config(sasl_default_realm="bad-realm", group="oslo_messaging_amqp") self._authentication_test(addr) @testtools.skipUnless(pyngus, "proton modules not present") class TestFailover(test_utils.BaseTestCase): def setUp(self): super(TestFailover, self).setUp() # configure different addressing modes on the brokers to test failing # over from one type of backend to another self.config(addressing_mode='dynamic', group="oslo_messaging_amqp") self._brokers = self._gen_brokers() self._primary = 0 self._backup = 1 hosts = [] for broker in self._brokers: hosts.append(oslo_messaging.TransportHost(hostname=broker.host, port=broker.port)) self._broker_url = self._gen_transport_url(hosts) def tearDown(self): super(TestFailover, self).tearDown() for broker in self._brokers: if broker.is_alive(): broker.stop() def _gen_brokers(self): return [FakeBroker(self.conf.oslo_messaging_amqp, product="qpid-cpp"), FakeBroker(self.conf.oslo_messaging_amqp, product="routable")] def _gen_transport_url(self, hosts): return oslo_messaging.TransportURL(self.conf, transport="amqp", hosts=hosts) def _failover(self, fail_broker): self._brokers[0].start() self._brokers[1].start() # self.config(trace=True, group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="my-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 2) # wait for listener links to come up on either broker # 4 == 3 links per listener + 1 for the global reply queue predicate = lambda: ((self._brokers[0].sender_link_count == 4) or (self._brokers[1].sender_link_count == 4)) _wait_until(predicate, 30) self.assertTrue(predicate()) if self._brokers[1].sender_link_count == 4: self._primary = 1 self._backup = 0 rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "echo-1"}, wait_for_reply=True, timeout=30) self.assertIsNotNone(rc) self.assertEqual('echo-1', rc.get('correlation-id')) # 1 request msg, 1 response: self.assertEqual(1, self._brokers[self._primary].topic_count) self.assertEqual(1, self._brokers[self._primary].direct_count) # invoke failover method fail_broker(self._brokers[self._primary]) # wait for listener links to re-establish on broker 1 # 4 = 3 links per listener + 1 for 
@testtools.skipUnless(pyngus, "proton modules not present")
class TestFailover(test_utils.BaseTestCase):

    def setUp(self):
        super(TestFailover, self).setUp()
        # configure different addressing modes on the brokers to test failing
        # over from one type of backend to another
        self.config(addressing_mode='dynamic', group="oslo_messaging_amqp")
        self._brokers = self._gen_brokers()
        self._primary = 0
        self._backup = 1
        hosts = []
        for broker in self._brokers:
            hosts.append(oslo_messaging.TransportHost(hostname=broker.host,
                                                      port=broker.port))
        self._broker_url = self._gen_transport_url(hosts)

    def tearDown(self):
        super(TestFailover, self).tearDown()
        for broker in self._brokers:
            if broker.is_alive():
                broker.stop()

    def _gen_brokers(self):
        return [FakeBroker(self.conf.oslo_messaging_amqp,
                           product="qpid-cpp"),
                FakeBroker(self.conf.oslo_messaging_amqp,
                           product="routable")]

    def _gen_transport_url(self, hosts):
        return oslo_messaging.TransportURL(self.conf,
                                           transport="amqp",
                                           hosts=hosts)

    def _failover(self, fail_broker):
        self._brokers[0].start()
        self._brokers[1].start()

        # self.config(trace=True, group="oslo_messaging_amqp")
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)

        target = oslo_messaging.Target(topic="my-topic")
        listener = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 2)

        # wait for listener links to come up on either broker
        # 4 == 3 links per listener + 1 for the global reply queue
        predicate = lambda: ((self._brokers[0].sender_link_count == 4) or
                             (self._brokers[1].sender_link_count == 4))
        _wait_until(predicate, 30)
        self.assertTrue(predicate())

        if self._brokers[1].sender_link_count == 4:
            self._primary = 1
            self._backup = 0

        rc = driver.send(target, {"context": "whatever"},
                         {"method": "echo", "id": "echo-1"},
                         wait_for_reply=True,
                         timeout=30)
        self.assertIsNotNone(rc)
        self.assertEqual('echo-1', rc.get('correlation-id'))

        # 1 request msg, 1 response:
        self.assertEqual(1, self._brokers[self._primary].topic_count)
        self.assertEqual(1, self._brokers[self._primary].direct_count)

        # invoke failover method
        fail_broker(self._brokers[self._primary])

        # wait for listener links to re-establish on broker 1
        # 4 == 3 links per listener + 1 for the global reply queue
        predicate = lambda: \
            self._brokers[self._backup].sender_link_count == 4
        _wait_until(predicate, 30)
        self.assertTrue(predicate())

        rc = driver.send(target,
                         {"context": "whatever"},
                         {"method": "echo", "id": "echo-2"},
                         wait_for_reply=True,
                         timeout=2)
        self.assertIsNotNone(rc)
        self.assertEqual('echo-2', rc.get('correlation-id'))

        # 1 request msg, 1 response:
        self.assertEqual(1, self._brokers[self._backup].topic_count)
        self.assertEqual(1, self._brokers[self._backup].direct_count)

        listener.join(timeout=30)
        self.assertFalse(listener.is_alive())

        # note: stopping the broker first tests cleaning up the driver
        # while it has no active connection
        self._brokers[self._backup].stop()
        driver.cleanup()

    def test_broker_crash(self):
        """Simulate a failure of one broker."""
        def _meth(broker):
            # fail broker:
            broker.stop()
            time.sleep(0.5)
        self._failover(_meth)

    def test_broker_shutdown(self):
        """Simulate a normal shutdown of a broker."""
        def _meth(broker):
            broker.stop(clean=True)
            time.sleep(0.5)
        self._failover(_meth)

    def test_heartbeat_failover(self):
        """Simulate broker heartbeat timeout."""
        def _meth(broker):
            # keep-alive heartbeats from the primary broker will stop, which
            # should force failover to the backup broker in about two seconds
            broker.pause()
        self.config(idle_timeout=2, group="oslo_messaging_amqp")
        self._failover(_meth)
        self._brokers[self._primary].stop()

    def test_listener_failover(self):
        """Verify that Listeners sharing the same topic are re-established
        after failover.
        """
        self._brokers[0].start()
        # self.config(trace=True, group="oslo_messaging_amqp")
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)

        target = oslo_messaging.Target(topic="my-topic")
        bcast = oslo_messaging.Target(topic="my-topic", fanout=True)
        listener1 = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 2)
        listener2 = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 2)

        # wait for 7 sending links to become active on the broker.
        # 7 == 3 links per Listener + 1 global reply link
        predicate = lambda: self._brokers[0].sender_link_count == 7
        _wait_until(predicate, 30)
        self.assertTrue(predicate())

        driver.send(bcast, {"context": "whatever"},
                    {"method": "ignore", "id": "echo-1"})
        # 1 message per listener
        predicate = lambda: self._brokers[0].fanout_sent_count == 2
        _wait_until(predicate, 30)
        self.assertTrue(predicate())

        # start broker 1 then shutdown broker 0:
        self._brokers[1].start()
        self._brokers[0].stop(clean=True)

        # wait again for 7 sending links to re-establish on broker 1
        predicate = lambda: self._brokers[1].sender_link_count == 7
        _wait_until(predicate, 30)
        self.assertTrue(predicate())

        driver.send(bcast, {"context": "whatever"},
                    {"method": "ignore", "id": "echo-2"})
        # 1 message per listener
        predicate = lambda: self._brokers[1].fanout_sent_count == 2
        _wait_until(predicate, 30)
        self.assertTrue(predicate())

        listener1.join(timeout=30)
        listener2.join(timeout=30)
        self.assertFalse(listener1.is_alive() or listener2.is_alive())

        driver.cleanup()
        self._brokers[1].stop()
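
# TestLinkRecovery below exercises the driver's retry semantics, where
# (per the inline comment accompanying retry=2 in TestMessageRetransmit)
# retry=N permits the initial attempt plus up to N retransmissions. The
# policy reduced to its essence (an illustrative sketch only -- the
# driver's real loop also manages link credit, timers, and failover):
def _retry_policy_sketch(send_once, retries):
    last_exc = None
    for _ in range(retries + 1):  # one initial attempt + N retries
        try:
            return send_once()
        except Exception as exc:
            last_exc = exc
    raise last_exc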
@testtools.skipUnless(pyngus, "proton modules not present")
class TestLinkRecovery(_AmqpBrokerTestCase):

    def _send_retry(self, reject, retries):
        self._reject = reject

        def on_active(link):
            if self._reject > 0:
                link.close()
                self._reject -= 1
            else:
                link.add_capacity(10)

        self._broker.on_receiver_active = on_active
        self._broker.start()
        self.config(link_retry_delay=1, group="oslo_messaging_amqp")
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        target = oslo_messaging.Target(topic="test-topic")
        listener = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 1)
        try:
            rc = driver.send(target, {"context": "whatever"},
                             {"method": "echo", "id": "e1"},
                             wait_for_reply=True, retry=retries)
            self.assertIsNotNone(rc)
            self.assertEqual(rc.get('correlation-id'), 'e1')
        except Exception:
            listener.kill()
            driver.cleanup()
            raise
        listener.join(timeout=30)
        self.assertFalse(listener.is_alive())
        self.assertEqual(listener.messages.get().message.get('method'),
                         "echo")
        driver.cleanup()

    def test_send_retry_ok(self):
        # verify a sender with retry=3 survives 2 link failures:
        self._send_retry(reject=2, retries=3)

    def test_send_retry_fail(self):
        # verify the sender fails once its retries are exhausted
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          self._send_retry,
                          reject=3, retries=2)

    def test_listener_recovery(self):
        # verify a listener recovers if all links fail:
        self._addrs = {'unicast.test-topic': 2,
                       'broadcast.test-topic.all': 2,
                       'exclusive.test-topic.server': 2}
        self._recovered = eventletutils.Event()
        self._count = 0

        def _on_active(link):
            t = link.target_address
            if t in self._addrs:
                if self._addrs[t] > 0:
                    link.close()
                    self._addrs[t] -= 1
                else:
                    self._count += 1
                    if self._count == len(self._addrs):
                        self._recovered.set()

        self._broker.on_sender_active = _on_active
        self._broker.start()
        self.config(link_retry_delay=1, group="oslo_messaging_amqp")
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        target = oslo_messaging.Target(topic="test-topic", server="server")
        listener = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 3)
        # wait for recovery
        self.assertTrue(self._recovered.wait(timeout=30))
        # verify server RPC:
        rc = driver.send(target, {"context": "whatever"},
                         {"method": "echo", "id": "e1"},
                         wait_for_reply=True)
        self.assertIsNotNone(rc)
        self.assertEqual(rc.get('correlation-id'), 'e1')
        # verify balanced RPC:
        target.server = None
        rc = driver.send(target, {"context": "whatever"},
                         {"method": "echo", "id": "e2"},
                         wait_for_reply=True)
        self.assertIsNotNone(rc)
        self.assertEqual(rc.get('correlation-id'), 'e2')
        # verify fanout:
        target.fanout = True
        driver.send(target, {"context": "whatever"},
                    {"msg": "value"},
                    wait_for_reply=False)
        listener.join(timeout=30)
        self.assertTrue(self._broker.fanout_count == 1)
        self.assertFalse(listener.is_alive())
        self.assertEqual(listener.messages.get().message.get('method'),
                         "echo")
        driver.cleanup()

    def test_sender_credit_blocked(self):
        # ensure send requests resume once credit is provided
        self._blocked_links = set()

        def _on_active(link):
            # refuse to grant credit for the broadcast link
            if self._broker._addresser._is_multicast(link.source_address):
                self._blocked_links.add(link)
            else:
                # unblock all links when the RPC call is made
                link.add_capacity(10)
                for li in self._blocked_links:
                    li.add_capacity(10)

        self._broker.on_receiver_active = _on_active
        self._broker.on_credit_exhausted = lambda link: None
        self._broker.start()
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        target = oslo_messaging.Target(topic="test-topic", server="server")
        listener = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 4)

        target.fanout = True
        target.server = None
        # these threads will share the same link
        for i in range(3):
            t = threading.Thread(target=driver.send,
                                 args=(target, {"context": "whatever"},
                                       {"msg": "n=%d" % i}),
                                 kwargs={'wait_for_reply': False})
            t.start()
            # casts return once the message is put on the active link
            t.join(timeout=30)

        time.sleep(1)
        # ensure messages are going nowhere
        self.assertEqual(self._broker.fanout_sent_count, 0)
        # this will trigger the release of credit for the previous links
        target.fanout = False
        rc = driver.send(target, {"context": "whatever"},
                         {"method": "echo", "id": "e1"},
                         wait_for_reply=True)
        self.assertIsNotNone(rc)
        self.assertEqual(rc.get('correlation-id'), 'e1')
        listener.join(timeout=30)
        self.assertTrue(self._broker.fanout_count == 3)
        self.assertFalse(listener.is_alive())
        driver.cleanup()
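
# The credit behavior exercised by test_sender_credit_blocked above follows
# the AMQP 1.0 flow-control model: the receiver grants N credits and the
# sender may emit at most N unsettled messages. A toy model of the counter
# (illustrative only; the class name is hypothetical):
class _CreditWindowSketch(object):
    def __init__(self, capacity=0):
        self.credit = capacity

    def grant(self, n):
        # broker side: corresponds to link.add_capacity(n) in the tests
        self.credit += n

    def try_send(self):
        # sender side: a send must wait (or queue) while credit == 0
        if self.credit == 0:
            return False
        self.credit -= 1
        return True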
@testtools.skipUnless(pyngus, "proton modules not present")
class TestAddressing(test_utils.BaseTestCase):
    # Verify the addressing modes supported by the driver

    def _address_test(self, rpc_target, targets_priorities):
        # verify proper messaging semantics for a given addressing mode
        broker = FakeBroker(self.conf.oslo_messaging_amqp)
        broker.start()
        url = oslo_messaging.TransportURL.parse(self.conf,
                                                "amqp://%s:%d" %
                                                (broker.host, broker.port))
        driver = amqp_driver.ProtonDriver(self.conf, url)

        rl = []
        for server in ["Server1", "Server2"]:
            _ = driver.listen(rpc_target(server=server), None,
                              None)._poll_style_listener
            # 3 == 1 msg to server + 1 fanout msg + 1 anycast msg
            rl.append(_ListenerThread(_, 3))

        nl = []
        for n in range(2):
            _ = driver.listen_for_notifications(targets_priorities, None,
                                                None,
                                                None)._poll_style_listener
            nl.append(_ListenerThread(_, len(targets_priorities)))

        driver.send(rpc_target(server="Server1"),
                    {"context": "whatever"}, {"msg": "Server1"})
        driver.send(rpc_target(server="Server2"),
                    {"context": "whatever"}, {"msg": "Server2"})
        driver.send(rpc_target(fanout=True),
                    {"context": "whatever"}, {"msg": "Fanout"})
        # FakeBroker should evenly distribute these across the servers
        driver.send(rpc_target(server=None),
                    {"context": "whatever"}, {"msg": "Anycast1"})
        driver.send(rpc_target(server=None),
                    {"context": "whatever"}, {"msg": "Anycast2"})

        expected = []
        for n in targets_priorities:
            # this is how the notifier creates an address:
            topic = "%s.%s" % (n[0].topic, n[1])
            target = oslo_messaging.Target(topic=topic)
            driver.send_notification(target, {"context": "whatever"},
                                     {"msg": topic}, 2.0)
            expected.append(topic)

        for li in rl:
            li.join(timeout=30)

        # anycast will not evenly distribute an odd number of msgs
        predicate = lambda: len(expected) == (nl[0].messages.qsize() +
                                              nl[1].messages.qsize())
        _wait_until(predicate, 30)
        for li in nl:
            li.kill(timeout=30)

        s1_payload = [m.message.get('msg') for m in rl[0].get_messages()]
        s2_payload = [m.message.get('msg') for m in rl[1].get_messages()]
        self.assertTrue("Server1" in s1_payload and
                        "Server2" not in s1_payload)
        self.assertTrue("Server2" in s2_payload and
                        "Server1" not in s2_payload)
        self.assertEqual(s1_payload.count("Fanout"), 1)
        self.assertEqual(s2_payload.count("Fanout"), 1)
        self.assertEqual((s1_payload + s2_payload).count("Anycast1"), 1)
        self.assertEqual((s1_payload + s2_payload).count("Anycast2"), 1)

        n1_payload = [m.message.get('msg') for m in nl[0].get_messages()]
        n2_payload = [m.message.get('msg') for m in nl[1].get_messages()]
        self.assertEqual((n1_payload + n2_payload).sort(), expected.sort())

        driver.cleanup()
        broker.stop()
        return broker.message_log

    def test_routable_address(self):
        # verify routable address mode
        self.config(addressing_mode='routable', group="oslo_messaging_amqp")
        _opts = self.conf.oslo_messaging_amqp
        notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'),
                         (oslo_messaging.Target(topic="test-topic"), 'error'),
                         (oslo_messaging.Target(topic="test-topic"), 'debug')]

        msgs = self._address_test(oslo_messaging.Target(exchange="ex",
                                                        topic="test-topic"),
                                  notifications)
        addrs = [m.address for m in msgs]

        notify_addrs = [a for a in addrs
                        if a.startswith(_opts.notify_address_prefix)]
        self.assertEqual(len(notify_addrs), len(notifications))
        # expect all notifications to be 'anycast'
        self.assertEqual(len(notifications),
                         len([a for a in notify_addrs
                              if _opts.anycast_address in a]))

        rpc_addrs = [a for a in addrs
                     if a.startswith(_opts.rpc_address_prefix)]
        # 2 anycast messages
        self.assertEqual(2,
                         len([a for a in rpc_addrs
                              if _opts.anycast_address in a]))
        # 1 fanout sent
        self.assertEqual(1,
                         len([a for a in rpc_addrs
                              if _opts.multicast_address in a]))
        # 2 unicast messages (1 for each server)
        self.assertEqual(2,
                         len([a for a in rpc_addrs
                              if _opts.unicast_address in a]))

    def test_legacy_address(self):
        # verify legacy address mode
        self.config(addressing_mode='legacy', group="oslo_messaging_amqp")
        _opts = self.conf.oslo_messaging_amqp
        notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'),
                         (oslo_messaging.Target(topic="test-topic"), 'error'),
                         (oslo_messaging.Target(topic="test-topic"), 'debug')]

        msgs = self._address_test(oslo_messaging.Target(exchange="ex",
                                                        topic="test-topic"),
                                  notifications)
        addrs = [m.address for m in msgs]

        server_addrs = [a for a in addrs
                        if a.startswith(_opts.server_request_prefix)]
        broadcast_addrs = [a for a in addrs
                           if a.startswith(_opts.broadcast_prefix)]
        group_addrs = [a for a in addrs
                       if a.startswith(_opts.group_request_prefix)]

        # 2 server address messages sent
        self.assertEqual(len(server_addrs), 2)
        # 1 fanout address message sent
        self.assertEqual(len(broadcast_addrs), 1)
        # group messages: 2 rpc + all notifications
        self.assertEqual(len(group_addrs), 2 + len(notifications))
group="oslo_messaging_amqp") self.config(anycast_address="ANY-CAST", group="oslo_messaging_amqp") self.config(default_notification_exchange="NOTIFY-EXCHANGE", group="oslo_messaging_amqp") self.config(default_rpc_exchange="RPC-EXCHANGE", group="oslo_messaging_amqp") notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'), (oslo_messaging.Target(topic="test-topic"), 'error'), (oslo_messaging.Target(topic="test-topic"), 'debug')] msgs = self._address_test(oslo_messaging.Target(exchange=None, topic="test-topic"), notifications) addrs = [m.address for m in msgs] notify_addrs = [a for a in addrs if a.startswith("NOTIFY-PREFIX")] self.assertEqual(len(notify_addrs), len(notifications)) # expect all notifications to be 'anycast' self.assertEqual(len(notifications), len([a for a in notify_addrs if "ANY-CAST" in a])) # and all should contain the default exchange: self.assertEqual(len(notifications), len([a for a in notify_addrs if "NOTIFY-EXCHANGE" in a])) rpc_addrs = [a for a in addrs if a.startswith("RPC-PREFIX")] # 2 RPC anycast messages self.assertEqual(2, len([a for a in rpc_addrs if "ANY-CAST" in a])) # 1 RPC fanout sent self.assertEqual(1, len([a for a in rpc_addrs if "MULTI-CAST" in a])) # 2 RPC unicast messages (1 for each server) self.assertEqual(2, len([a for a in rpc_addrs if "UNI-CAST" in a])) self.assertEqual(len(rpc_addrs), len([a for a in rpc_addrs if "RPC-EXCHANGE" in a])) def _dynamic_test(self, product): # return the addresser used when connected to 'product' broker = FakeBroker(self.conf.oslo_messaging_amqp, product=product) broker.start() url = oslo_messaging.TransportURL.parse(self.conf, "amqp://%s:%d" % (broker.host, broker.port)) driver = amqp_driver.ProtonDriver(self.conf, url) # need to send a message to initate the connection to the broker target = oslo_messaging.Target(topic="test-topic", server="Server") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) listener.join(timeout=30) addresser = driver._ctrl.addresser driver.cleanup() broker.stop() # clears the driver's addresser return addresser def test_dynamic_addressing(self): # simply check that the correct addresser is provided based on the # identity of the messaging back-end self.config(addressing_mode='dynamic', group="oslo_messaging_amqp") self.assertIsInstance(self._dynamic_test("router"), RoutableAddresser) self.assertIsInstance(self._dynamic_test("qpid-cpp"), LegacyAddresser) @testtools.skipUnless(pyngus, "proton modules not present") class TestMessageRetransmit(_AmqpBrokerTestCase): # test message is retransmitted if safe to do so def _test_retransmit(self, nack_method): self._nack_count = 2 def _on_message(message, handle, link): if self._nack_count: self._nack_count -= 1 nack_method(link, handle) else: self._broker.forward_message(message, handle, link) self._broker.on_message = _on_message self._broker.start() self.config(link_retry_delay=1, pre_settled=[], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread(driver.listen(target, None, None)._poll_style_listener, 1) try: rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "blah"}, wait_for_reply=True, retry=2) # initial send + up to 2 resends except Exception: # Some test runs are expected to raise an exception, # clean up the listener since no message was received listener.kill(timeout=30) 
@testtools.skipUnless(pyngus, "proton modules not present")
class TestMessageRetransmit(_AmqpBrokerTestCase):
    # test message is retransmitted if safe to do so

    def _test_retransmit(self, nack_method):
        self._nack_count = 2

        def _on_message(message, handle, link):
            if self._nack_count:
                self._nack_count -= 1
                nack_method(link, handle)
            else:
                self._broker.forward_message(message, handle, link)

        self._broker.on_message = _on_message
        self._broker.start()
        self.config(link_retry_delay=1, pre_settled=[],
                    group="oslo_messaging_amqp")
        driver = amqp_driver.ProtonDriver(self.conf, self._broker_url)
        target = oslo_messaging.Target(topic="test-topic")
        listener = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 1)
        try:
            rc = driver.send(target, {"context": "whatever"},
                             {"method": "echo", "id": "blah"},
                             wait_for_reply=True,
                             retry=2)  # initial send + up to 2 resends
        except Exception:
            # Some test runs are expected to raise an exception,
            # clean up the listener since no message was received
            listener.kill(timeout=30)
            raise
        else:
            self.assertIsNotNone(rc)
            self.assertEqual(0, self._nack_count)
            self.assertEqual(rc.get('correlation-id'), 'blah')
            listener.join(timeout=30)
        finally:
            self.assertFalse(listener.is_alive())
            driver.cleanup()

    def test_released(self):
        # should retry and succeed
        self._test_retransmit(lambda link, handle:
                              link.message_released(handle))

    def test_modified(self):
        # should retry and succeed
        self._test_retransmit(lambda link, handle:
                              link.message_modified(handle, False, False,
                                                    {}))

    def test_modified_failed(self):
        # since delivery_failed is set to True, should fail
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          self._test_retransmit,
                          lambda link, handle:
                          link.message_modified(handle, True, False, {}))

    def test_rejected(self):
        # rejected - should fail
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          self._test_retransmit,
                          lambda link, handle:
                          link.message_rejected(handle, {}))
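
# Summary of the AMQP 1.0 terminal delivery outcomes exercised by the four
# tests above and whether the driver treats them as safe to retransmit
# (derived from the test expectations; the dict is illustrative only and
# not used by the tests):
_RETRANSMIT_SAFE_SKETCH = {
    'released': True,                         # never reached the node
    'modified, delivery_failed=False': True,  # safe to retry
    'modified, delivery_failed=True': False,  # MessageDeliveryFailure
    'rejected': False,                        # MessageDeliveryFailure
}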
@testtools.skipUnless(SSL_ENABLED, "OpenSSL not supported")
class TestSSL(TestFailover):
    """Test the driver's OpenSSL integration."""

    def setUp(self):
        self._broker = None
        # Create the CA, server, and client SSL certificates:
        self._tmpdir = tempfile.mkdtemp(prefix='amqp1')
        files = ['ca_key', 'ca_cert', 's_key', 's_req', 's_cert',
                 's2_key', 's2_req', 's2_cert',
                 'c_key', 'c_req', 'c_cert',
                 'bad_cert', 'bad_req', 'bad_key']
        conf = dict(zip(files, [os.path.join(self._tmpdir, "%s.pem" % f)
                                for f in files]))
        conf['pw'] = 'password'
        conf['s_name'] = '127.0.0.1'
        conf['s2_name'] = '127.0.0.2'
        conf['c_name'] = 'client.com'
        self._ssl_config = conf
        ssl_setup = [
            # create self-signed CA certificate:
            Template('openssl req -x509 -nodes -newkey rsa:2048'
                     ' -subj "/CN=Trusted.CA.com" -keyout ${ca_key}'
                     ' -out ${ca_cert}').substitute(conf),
            # create Server keys and certificates:
            Template('openssl genrsa -out ${s_key} 2048').substitute(conf),
            Template('openssl req -new -key ${s_key} -subj /CN=${s_name}'
                     ' -passin pass:${pw} -out ${s_req}').substitute(conf),
            Template('openssl x509 -req -in ${s_req} -CA ${ca_cert}'
                     ' -CAkey ${ca_key} -CAcreateserial -out'
                     ' ${s_cert}').substitute(conf),
            Template('openssl genrsa -out ${s2_key} 2048').substitute(conf),
            Template('openssl req -new -key ${s2_key} -subj /CN=${s2_name}'
                     ' -passin pass:${pw} -out ${s2_req}').substitute(conf),
            Template('openssl x509 -req -in ${s2_req} -CA ${ca_cert}'
                     ' -CAkey ${ca_key} -CAcreateserial -out'
                     ' ${s2_cert}').substitute(conf),
            # create a "bad" Server cert for testing CN validation:
            Template('openssl genrsa -out ${bad_key} 2048').substitute(conf),
            Template('openssl req -new -key ${bad_key} -subj /CN=Invalid'
                     ' -passin pass:${pw} -out ${bad_req}').substitute(conf),
            Template('openssl x509 -req -in ${bad_req} -CA ${ca_cert}'
                     ' -CAkey ${ca_key} -CAcreateserial -out'
                     ' ${bad_cert}').substitute(conf),
            # create Client key and certificate for client authentication:
            Template('openssl genrsa -out ${c_key} 2048').substitute(conf),
            Template('openssl req -new -key ${c_key} -subj /CN=${c_name}'
                     ' -passin pass:${pw} -out'
                     ' ${c_req}').substitute(conf),
            Template('openssl x509 -req -in ${c_req} -CA ${ca_cert}'
                     ' -CAkey ${ca_key} -CAcreateserial -out'
                     ' ${c_cert}').substitute(conf)
        ]
        for cmd in ssl_setup:
            try:
                subprocess.check_call(args=shlex.split(cmd))
            except Exception:
                shutil.rmtree(self._tmpdir, ignore_errors=True)
                self._tmpdir = None
                self.skipTest("OpenSSL tools not installed - skipping")
        super(TestSSL, self).setUp()
        self.config(ssl_ca_file=self._ssl_config['ca_cert'],
                    group='oslo_messaging_amqp')

    def _gen_brokers(self):
        s2_conf = self._ssl_config.copy()
        for item in ['name', 'key', 'req', 'cert']:
            s2_conf["s_%s" % item] = s2_conf["s2_%s" % item]
        return [FakeBroker(self.conf.oslo_messaging_amqp,
                           sock_addr=self._ssl_config['s_name'],
                           ssl_config=self._ssl_config),
                FakeBroker(self.conf.oslo_messaging_amqp,
                           sock_addr=s2_conf['s_name'],
                           ssl_config=s2_conf)]

    def _gen_transport_url(self, hosts):
        url = "amqp://%s" % (",".join(map(lambda x: "%s:%d" %
                                          (x.hostname, x.port), hosts)))
        return oslo_messaging.TransportURL.parse(self.conf, url)

    def _ssl_server_ok(self, url):
        self._broker.start()
        tport_url = oslo_messaging.TransportURL.parse(self.conf, url)
        driver = amqp_driver.ProtonDriver(self.conf, tport_url)
        target = oslo_messaging.Target(topic="test-topic")
        listener = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 1)
        driver.send(target, {"context": "whatever"},
                    {"method": "echo", "a": "b"},
                    wait_for_reply=True,
                    timeout=30)
        listener.join(timeout=30)
        self.assertFalse(listener.is_alive())
        driver.cleanup()

    def test_server_ok(self):
        # test client authenticates server
        self._broker = FakeBroker(self.conf.oslo_messaging_amqp,
                                  sock_addr=self._ssl_config['s_name'],
                                  ssl_config=self._ssl_config)
        url = "amqp://%s:%d" % (self._broker.host, self._broker.port)
        self._ssl_server_ok(url)

    def test_server_ignore_vhost_ok(self):
        # test client authenticates server and ignores vhost
        self._broker = FakeBroker(self.conf.oslo_messaging_amqp,
                                  sock_addr=self._ssl_config['s_name'],
                                  ssl_config=self._ssl_config)
        url = "amqp://%s:%d/my-vhost" % (self._broker.host,
                                         self._broker.port)
        self._ssl_server_ok(url)

    def test_server_check_vhost_ok(self):
        # test client authenticates server using vhost as CN
        # Use 'Invalid' from bad_cert CN
        self.config(ssl_verify_vhost=True, group='oslo_messaging_amqp')
        self._ssl_config['s_cert'] = self._ssl_config['bad_cert']
        self._ssl_config['s_key'] = self._ssl_config['bad_key']
        self._broker = FakeBroker(self.conf.oslo_messaging_amqp,
                                  sock_addr=self._ssl_config['s_name'],
                                  ssl_config=self._ssl_config)
        url = "amqp://%s:%d/Invalid" % (self._broker.host,
                                        self._broker.port)
        self._ssl_server_ok(url)

    @mock.patch('ssl.get_default_verify_paths')
    def test_server_ok_with_ssl_set_in_transport_url(self,
                                                     mock_verify_paths):
        # test client authenticates server
        self._broker = FakeBroker(self.conf.oslo_messaging_amqp,
                                  sock_addr=self._ssl_config['s_name'],
                                  ssl_config=self._ssl_config)
        url = oslo_messaging.TransportURL.parse(
            self.conf,
            "amqp://%s:%d?ssl=1" % (self._broker.host, self._broker.port))
        self._broker.start()
        mock_verify_paths.return_value = mock.Mock(
            cafile=self._ssl_config['ca_cert'])
        driver = amqp_driver.ProtonDriver(self.conf, url)
        target = oslo_messaging.Target(topic="test-topic")
        listener = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 1)
        driver.send(target, {"context": "whatever"},
                    {"method": "echo", "a": "b"},
                    wait_for_reply=True,
                    timeout=30)
        listener.join(timeout=30)
        self.assertFalse(listener.is_alive())
        driver.cleanup()
    def test_bad_server_fail(self):
        # test client does not connect to an invalid server
        self._ssl_config['s_cert'] = self._ssl_config['bad_cert']
        self._ssl_config['s_key'] = self._ssl_config['bad_key']
        self._broker = FakeBroker(self.conf.oslo_messaging_amqp,
                                  sock_addr=self._ssl_config['s_name'],
                                  ssl_config=self._ssl_config)
        url = oslo_messaging.TransportURL.parse(self.conf,
                                                "amqp://%s:%d" %
                                                (self._broker.host,
                                                 self._broker.port))
        self._broker.start()
        self.config(ssl_ca_file=self._ssl_config['ca_cert'],
                    group='oslo_messaging_amqp')
        driver = amqp_driver.ProtonDriver(self.conf, url)
        target = oslo_messaging.Target(topic="test-topic")
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          driver.send, target,
                          {"context": "whatever"},
                          {"method": "echo", "a": "b"},
                          wait_for_reply=False,
                          retry=1)
        driver.cleanup()

    def test_client_auth_ok(self):
        # test server authenticates client
        self._ssl_config['authenticate_client'] = True
        self._broker = FakeBroker(self.conf.oslo_messaging_amqp,
                                  sock_addr=self._ssl_config['s_name'],
                                  ssl_config=self._ssl_config)
        url = oslo_messaging.TransportURL.parse(self.conf,
                                                "amqp://%s:%d" %
                                                (self._broker.host,
                                                 self._broker.port))
        self._broker.start()
        self.config(ssl_ca_file=self._ssl_config['ca_cert'],
                    ssl_cert_file=self._ssl_config['c_cert'],
                    ssl_key_file=self._ssl_config['c_key'],
                    ssl_key_password=self._ssl_config['pw'],
                    group='oslo_messaging_amqp')
        driver = amqp_driver.ProtonDriver(self.conf, url)
        target = oslo_messaging.Target(topic="test-topic")
        listener = _ListenerThread(
            driver.listen(target, None, None)._poll_style_listener, 1)
        driver.send(target, {"context": "whatever"},
                    {"method": "echo", "a": "b"},
                    wait_for_reply=True,
                    timeout=30)
        listener.join(timeout=30)
        self.assertFalse(listener.is_alive())
        driver.cleanup()

    def tearDown(self):
        if self._broker:
            self._broker.stop()
            self._broker = None
        if self._tmpdir:
            shutil.rmtree(self._tmpdir, ignore_errors=True)
        super(TestSSL, self).tearDown()


@testtools.skipUnless(pyngus, "proton modules not present")
class TestVHost(_AmqpBrokerTestCaseAuto):
    """Verify the pseudo virtual host behavior."""

    def _vhost_test(self):
        """Verify that all messaging for a particular vhost stays on that
        vhost.
        """
        self.config(pseudo_vhost=True, group="oslo_messaging_amqp")
        vhosts = ["None", "HOSTA", "HOSTB", "HOSTC"]
        target = oslo_messaging.Target(topic="test-topic")
        fanout = oslo_messaging.Target(topic="test-topic", fanout=True)
        listeners = {}
        ldrivers = {}
        sdrivers = {}
        replies = {}
        msgs = {}

        for vhost in vhosts:
            url = copy.copy(self._broker_url)
            url.virtual_host = vhost if vhost != "None" else None
            ldriver = amqp_driver.ProtonDriver(self.conf, url)
            listeners[vhost] = _ListenerThread(
                ldriver.listen(target, None, None)._poll_style_listener,
                10)
            ldrivers[vhost] = ldriver
            sdrivers[vhost] = amqp_driver.ProtonDriver(self.conf, url)
            replies[vhost] = []
            msgs[vhost] = []

        # send a fanout and a single rpc call to each listener
        for vhost in vhosts:
            if vhost == "HOSTC":
                # expect no messages to HOSTC
                continue
            sdrivers[vhost].send(fanout,
                                 {"context": vhost},
                                 {"vhost": vhost, "fanout": True,
                                  "id": vhost})
            replies[vhost].append(
                sdrivers[vhost].send(target,
                                     {"context": vhost},
                                     {"method": "echo", "id": vhost},
                                     wait_for_reply=True))

        time.sleep(1)

        for vhost in vhosts:
            msgs[vhost] += listeners[vhost].get_messages()
            if vhost == "HOSTC":
                # HOSTC should get nothing
                self.assertEqual(0, len(msgs[vhost]))
                self.assertEqual(0, len(replies[vhost]))
                continue
            self.assertEqual(2, len(msgs[vhost]))
            for m in msgs[vhost]:
                # the id must match the vhost
                self.assertEqual(vhost, m.message.get("id"))
            self.assertEqual(1, len(replies[vhost]))
            for m in replies[vhost]:
                # same for the correlation id
                self.assertEqual(vhost, m.get("correlation-id"))

        for vhost in vhosts:
            listeners[vhost].kill()
            ldrivers[vhost].cleanup()
            sdrivers[vhost].cleanup()

    def test_vhost_routing(self):
        """Test vhost using routable addresses."""
        self.config(addressing_mode='routable', group="oslo_messaging_amqp")
        self._vhost_test()

    def test_vhost_legacy(self):
        """Test vhost using legacy addresses."""
        self.config(addressing_mode='legacy', group="oslo_messaging_amqp")
        self._vhost_test()
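
# FakeBroker.forward_message() below round-robins anycast messages by
# rotating the per-address route list. The rotation in isolation (an
# illustrative sketch; the function name is hypothetical):
def _round_robin_sketch(links):
    link = links.pop(0)   # take the current head link...
    links.append(link)    # ...and rotate it to the back of the list
    return link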
class FakeBroker(threading.Thread):
    """A test AMQP message 'broker'."""

    if pyngus:
        class Connection(pyngus.ConnectionEventHandler):
            """A single AMQP connection."""

            def __init__(self, server, socket_, name, product,
                         sasl_mechanisms, user_credentials,
                         sasl_config_dir, sasl_config_name):
                """Create a Connection using socket_."""
                self.socket = socket_
                self.name = name
                self.server = server
                self.sasl_mechanisms = sasl_mechanisms
                self.user_credentials = user_credentials
                properties = {'x-server': True}
                # setup SASL:
                if self.sasl_mechanisms:
                    properties['x-sasl-mechs'] = self.sasl_mechanisms
                    if "ANONYMOUS" not in self.sasl_mechanisms:
                        properties['x-require-auth'] = True
                if sasl_config_dir:
                    properties['x-sasl-config-dir'] = sasl_config_dir
                if sasl_config_name:
                    properties['x-sasl-config-name'] = sasl_config_name
                # setup SSL
                if self.server._ssl_config:
                    ssl = self.server._ssl_config
                    properties['x-ssl-server'] = True
                    properties['x-ssl-identity'] = (ssl['s_cert'],
                                                    ssl['s_key'],
                                                    ssl['pw'])
                    # check for client authentication
                    if ssl.get('authenticate_client'):
                        properties['x-ssl-ca-file'] = ssl['ca_cert']
                        properties['x-ssl-verify-mode'] = 'verify-peer'
                        properties['x-ssl-peer-name'] = ssl['c_name']
                # misc connection properties
                if product:
                    properties['properties'] = {'product': product}

                self.connection = server.container.create_connection(
                    name, self, properties)
                self.connection.user_context = self
                if pyngus.VERSION < (2, 0, 0):
                    # older versions of pyngus don't recognize the sasl
                    # connection properties, so configure them manually:
                    if sasl_mechanisms:
                        self.connection.pn_sasl.mechanisms(sasl_mechanisms)
                        self.connection.pn_sasl.server()
                self.connection.open()
                self.sender_links = set()
                self.receiver_links = set()
                self.dead_links = set()

            def destroy(self):
                """Destroy the test connection."""
                for link in self.sender_links | self.receiver_links:
                    link.destroy()
                self.sender_links.clear()
                self.receiver_links.clear()
                self.dead_links.clear()
                self.connection.destroy()
                self.connection = None
                self.socket.close()
                self.socket = None

            def fileno(self):
                """Allows use of this in a select() call."""
                return self.socket.fileno()

            def process_input(self):
                """Called when socket is read-ready."""
                try:
                    pyngus.read_socket_input(self.connection, self.socket)
                    self.connection.process(time.time())
                except socket.error:
                    self._socket_error()

            def send_output(self):
                """Called when socket is write-ready."""
                try:
                    pyngus.write_socket_output(self.connection,
                                               self.socket)
                    self.connection.process(time.time())
                except socket.error:
                    self._socket_error()

            def _socket_error(self):
                self.connection.close_input()
                self.connection.close_output()
                # the broker will clean up in its main loop

            # Pyngus ConnectionEventHandler callbacks:

            def connection_active(self, connection):
                self.server.connection_count += 1

            def connection_remote_closed(self, connection, reason):
                """Peer has closed the connection."""
                self.connection.close()

            def connection_closed(self, connection):
                """Connection close completed."""
                self.server.connection_count -= 1

            def connection_failed(self, connection, error):
                """Connection failure detected."""
                self.connection_closed(connection)

            def sender_requested(self, connection, link_handle,
                                 name, requested_source,
                                 properties):
                """Create a new message source."""
                addr = requested_source or "source-" + uuid.uuid4().hex
                link = FakeBroker.SenderLink(self.server, self,
                                             link_handle, addr)
                self.sender_links.add(link)
            def receiver_requested(self, connection, link_handle,
                                   name, requested_target,
                                   properties):
                """Create a new message consumer."""
                addr = requested_target or "target-" + uuid.uuid4().hex
                FakeBroker.ReceiverLink(self.server, self,
                                        link_handle, addr)

            def sasl_step(self, connection, pn_sasl):
                # only called if not using Cyrus SASL
                if 'PLAIN' in self.sasl_mechanisms:
                    credentials = pn_sasl.recv()
                    if not credentials:
                        return  # wait until some arrives
                    if credentials not in self.user_credentials:
                        # failed
                        return pn_sasl.done(pn_sasl.AUTH)
                pn_sasl.done(pn_sasl.OK)

        class SenderLink(pyngus.SenderEventHandler):
            """An AMQP sending link."""

            def __init__(self, server, conn, handle, src_addr=None):
                self.server = server
                self.conn = conn
                cnn = conn.connection
                self.link = cnn.accept_sender(handle,
                                              source_override=src_addr,
                                              event_handler=self)
                conn.sender_links.add(self)
                self.link.open()
                self.routed = False

            def destroy(self):
                """Destroy the link."""
                conn = self.conn
                self.conn = None
                conn.sender_links.remove(self)
                conn.dead_links.discard(self)
                if self.link:
                    self.link.destroy()
                    self.link = None

            def send_message(self, message):
                """Send a message over this link."""
                def pyngus_callback(link, handle, state, info):
                    if state == pyngus.SenderLink.ACCEPTED:
                        self.server.sender_link_ack_count += 1
                    elif state == pyngus.SenderLink.RELEASED:
                        self.server.sender_link_requeue_count += 1

                self.link.send(message, delivery_callback=pyngus_callback)

            def _cleanup(self):
                if self.routed:
                    self.server.remove_route(self.link.source_address,
                                             self)
                    self.routed = False
                self.conn.dead_links.add(self)

            # Pyngus SenderEventHandler callbacks:

            def sender_active(self, sender_link):
                self.server.sender_link_count += 1
                self.server.add_route(self.link.source_address, self)
                self.routed = True
                self.server.on_sender_active(sender_link)

            def sender_remote_closed(self, sender_link, error):
                self.link.close()

            def sender_closed(self, sender_link):
                self.server.sender_link_count -= 1
                self._cleanup()

            def sender_failed(self, sender_link, error):
                self.sender_closed(sender_link)

        class ReceiverLink(pyngus.ReceiverEventHandler):
            """An AMQP Receiving link."""

            def __init__(self, server, conn, handle, addr=None):
                self.server = server
                self.conn = conn
                cnn = conn.connection
                self.link = cnn.accept_receiver(handle,
                                                target_override=addr,
                                                event_handler=self)
                conn.receiver_links.add(self)
                self.link.open()

            def destroy(self):
                """Destroy the link."""
                conn = self.conn
                self.conn = None
                conn.receiver_links.remove(self)
                conn.dead_links.discard(self)
                if self.link:
                    self.link.destroy()
                    self.link = None

            # ReceiverEventHandler callbacks:

            def receiver_active(self, receiver_link):
                self.server.receiver_link_count += 1
                self.server.on_receiver_active(receiver_link)

            def receiver_remote_closed(self, receiver_link, error):
                self.link.close()

            def receiver_closed(self, receiver_link):
                self.server.receiver_link_count -= 1
                self.conn.dead_links.add(self)

            def receiver_failed(self, receiver_link, error):
                self.receiver_closed(receiver_link)

            def message_received(self, receiver_link, message, handle):
                """Forward this message out the proper sending link."""
                self.server.on_message(message, handle, receiver_link)
                if self.link.capacity < 1:
                    self.server.on_credit_exhausted(self.link)

    def __init__(self, cfg,
                 sock_addr="", sock_port=0,
                 product=None,
                 default_exchange="Test-Exchange",
                 sasl_mechanisms="ANONYMOUS",
                 user_credentials=None,
                 sasl_config_dir=None,
                 sasl_config_name=None,
                 ssl_config=None):
        """Create a fake broker listening on sock_addr:sock_port."""
        if not pyngus:
            raise AssertionError("pyngus module not present")
        threading.Thread.__init__(self)
        self._product = product
        self._sasl_mechanisms = sasl_mechanisms
        self._sasl_config_dir = sasl_config_dir
        self._sasl_config_name = sasl_config_name
        self._user_credentials = user_credentials
        self._ssl_config = ssl_config
        self._wakeup_pipe = os.pipe()
        self._my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._my_socket.bind((sock_addr, sock_port))
        self.host, self.port = self._my_socket.getsockname()
        self.container = pyngus.Container("test_server_%s:%d"
                                          % (self.host, self.port))

        # create an addresser using the test client's config and expected
        # message bus so the broker can parse the message addresses
        af = AddresserFactory(default_exchange,
                              cfg.addressing_mode,
                              legacy_server_prefix=cfg.server_request_prefix,
                              legacy_broadcast_prefix=cfg.broadcast_prefix,
                              legacy_group_prefix=cfg.group_request_prefix,
                              rpc_prefix=cfg.rpc_address_prefix,
                              notify_prefix=cfg.notify_address_prefix,
                              multicast=cfg.multicast_address,
                              unicast=cfg.unicast_address,
                              anycast=cfg.anycast_address)
        props = {'product': product} if product else {}
        self._addresser = af(props)

        self._connections = {}
        self._sources = {}
        self._pause = eventletutils.Event()
        # count of messages forwarded, by messaging pattern
        self.direct_count = 0
        self.topic_count = 0
        self.fanout_count = 0
        self.fanout_sent_count = 0
        self.dropped_count = 0
        # counts for active links and connections:
        self.connection_count = 0
        self.sender_link_count = 0
        self.receiver_link_count = 0
        self.sender_link_ack_count = 0
        self.sender_link_requeue_count = 0
        # log of all messages received by the broker
        self.message_log = []
        # callback hooks
        self.on_sender_active = lambda link: None
        self.on_receiver_active = lambda link: link.add_capacity(10)
        self.on_credit_exhausted = lambda link: link.add_capacity(10)
        self.on_message = lambda message, handle, link: \
            self.forward_message(message, handle, link)

    def start(self):
        """Start the server."""
        LOG.debug("Starting Test Broker on %s:%d", self.host, self.port)
        self._shutdown = False
        self._closing = False
        self.daemon = True
        self._pause.set()
        self._my_socket.listen(10)
        super(FakeBroker, self).start()

    def pause(self):
        self._pause.clear()
        os.write(self._wakeup_pipe[1], b'!')

    def unpause(self):
        self._pause.set()

    def stop(self, clean=False):
        """Stop the server."""
        # If clean is True, attempt a clean shutdown by closing all open
        # links/connections first.  Otherwise force an immediate disconnect.
        LOG.debug("Stopping test Broker %s:%d", self.host, self.port)
        if clean:
            self._closing = 1
        else:
            self._shutdown = True
        self._pause.set()
        os.write(self._wakeup_pipe[1], b'!')
        self.join()
        LOG.debug("Test Broker %s:%d stopped", self.host, self.port)

    def run(self):
        """Process I/O and timer events until the broker is stopped."""
        LOG.debug("Test Broker on %s:%d started", self.host, self.port)
        while not self._shutdown:
            self._pause.wait()
            readers, writers, timers = self.container.need_processing()

            # map pyngus Connections back to _TestConnections:
            readfd = [c.user_context for c in readers]
            readfd.extend([self._my_socket, self._wakeup_pipe[0]])
            writefd = [c.user_context for c in writers]

            timeout = None
            if timers:
                # [0] == next expiring timer
                deadline = timers[0].next_tick
                now = time.time()
                timeout = 0 if deadline <= now else deadline - now

            readable, writable, ignore = select.select(readfd,
                                                       writefd,
                                                       [],
                                                       timeout)
            worked = set()
            for r in readable:
                if r is self._my_socket:
                    # new inbound connection request received
                    sock, addr = self._my_socket.accept()
                    if not self._closing:
                        # create a new Connection for it:
                        name = str(addr)
                        conn = FakeBroker.Connection(
                            self, sock, name,
                            self._product,
                            self._sasl_mechanisms,
                            self._user_credentials,
                            self._sasl_config_dir,
                            self._sasl_config_name)
                        self._connections[conn.name] = conn
                    else:
                        sock.close()  # drop it
                elif r is self._wakeup_pipe[0]:
                    os.read(self._wakeup_pipe[0], 512)
                else:
                    r.process_input()
                    worked.add(r)

            for t in timers:
                now = time.time()
                if t.next_tick > now:
                    break
                t.process(now)
                conn = t.user_context
                worked.add(conn)

            for w in writable:
                w.send_output()
                worked.add(w)

            # clean up any closed connections or links:
            while worked:
                conn = worked.pop()
                if conn.connection.closed:
                    del self._connections[conn.name]
                    conn.destroy()
                else:
                    while conn.dead_links:
                        conn.dead_links.pop().destroy()

            if self._closing and not self._connections:
                self._shutdown = True
            elif self._closing == 1:
                # start closing connections
                self._closing = 2
                for conn in self._connections.values():
                    conn.connection.close()

        # Shutting down.  Any open links are just disconnected - the peer
        # will see a socket close.
        self._my_socket.close()
        for conn in self._connections.values():
            conn.destroy()
        self._connections = None
        self.container.destroy()
        self.container = None
        return 0

    def add_route(self, address, link):
        # route from address -> link[, link ...]
        if address not in self._sources:
            self._sources[address] = [link]
        elif link not in self._sources[address]:
            self._sources[address].append(link)

    def remove_route(self, address, link):
        if address in self._sources:
            if link in self._sources[address]:
                self._sources[address].remove(link)
                if not self._sources[address]:
                    del self._sources[address]

    def forward_message(self, message, handle, rlink):
        # returns True if the message was routed
        self.message_log.append(message)
        dest = message.address
        if dest not in self._sources:
            # can't forward
            self.dropped_count += 1
            # observe the magic "don't ack" address
            if '!no-ack!' not in dest:
                rlink.message_released(handle)
            return
        LOG.debug("Forwarding [%s]", dest)
        # route "behavior" determined by address prefix:
        if self._addresser._is_multicast(dest):
            self.fanout_count += 1
            for link in self._sources[dest]:
                self.fanout_sent_count += 1
                LOG.debug("Broadcast to %s", dest)
                link.send_message(message)
        elif self._addresser._is_anycast(dest):
            # round-robin:
            self.topic_count += 1
            link = self._sources[dest].pop(0)
            link.send_message(message)
            LOG.debug("Send to %s", dest)
            self._sources[dest].append(link)
        else:
            # unicast:
            self.direct_count += 1
            LOG.debug("Unicast to %s", dest)
            self._sources[dest][0].send_message(message)
        rlink.message_accepted(handle)
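
# FakeBroker.pause() and stop() above wake the select() loop in run() by
# writing a byte to a pipe whose read end sits in the select read set --
# the classic "self-pipe trick" for interrupting a blocking select() from
# another thread. In miniature (an illustrative standalone sketch using
# the os and select modules already imported by this file):
def _self_pipe_demo_sketch():
    r, w = os.pipe()
    os.write(w, b'!')                       # from the controlling thread
    readable, _, _ = select.select([r], [], [], 1.0)
    assert r in readable
    os.read(r, 512)                         # drain the wakeup byte
    os.close(r)
    os.close(w)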
oslo.messaging-14.9.0/oslo_messaging/tests/drivers/test_impl_kafka.py

# Copyright (C) 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testscenarios
from unittest import mock

from confluent_kafka import KafkaException

import oslo_messaging
from oslo_messaging._drivers import impl_kafka as kafka_driver
from oslo_messaging.tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios


class TestKafkaDriverLoad(test_utils.BaseTestCase):

    def setUp(self):
        super(TestKafkaDriverLoad, self).setUp()
        self.messaging_conf.transport_url = 'kafka:/'

    def test_driver_load(self):
        transport = oslo_messaging.get_notification_transport(self.conf)
        self.assertIsInstance(transport._driver, kafka_driver.KafkaDriver)


class TestKafkaTransportURL(test_utils.BaseTestCase):

    scenarios = [
        ('port', dict(url='kafka://localhost:1234',
                      expected=dict(hostaddrs=['localhost:1234'],
                                    username=None,
                                    password=None,
                                    vhost=None))),
        ('vhost', dict(url='kafka://localhost:1234/my_host',
                       expected=dict(hostaddrs=['localhost:1234'],
                                     username=None,
                                     password=None,
                                     vhost='my_host'))),
        ('two', dict(url='kafka://localhost:1234,localhost2:1234',
                     expected=dict(hostaddrs=['localhost:1234',
                                              'localhost2:1234'],
                                   username=None,
                                   password=None,
                                   vhost=None))),
        ('user', dict(url='kafka://stack:stacksecret@localhost:9092/my_host',
                      expected=dict(hostaddrs=['localhost:9092'],
                                    username='stack',
                                    password='stacksecret',
                                    vhost='my_host'))),
        ('user2', dict(url='kafka://stack:stacksecret@localhost:9092,'
                           'stack2:stacksecret2@localhost:1234/my_host',
                       expected=dict(hostaddrs=['localhost:9092',
                                                'localhost:1234'],
                                     username='stack',
                                     password='stacksecret',
                                     vhost='my_host'))),
        ('ipv4', dict(url='kafka://127.0.0.1:1234',
                      expected=dict(hostaddrs=['127.0.0.1:1234'],
                                    username=None,
                                    password=None,
                                    vhost=None))),
        ('ipv6', dict(url='kafka://[::1]:1234',
                      expected=dict(hostaddrs=['[::1]:1234'],
                                    username=None,
                                    password=None,
                                    vhost=None))),
    ]

    def setUp(self):
        super(TestKafkaTransportURL, self).setUp()
        self.messaging_conf.transport_url = 'kafka:/'

    def test_transport_url(self):
        transport = oslo_messaging.get_notification_transport(self.conf,
                                                              self.url)
        self.addCleanup(transport.cleanup)
        driver = transport._driver
        self.assertIsInstance(driver, kafka_driver.KafkaDriver)
        self.assertEqual(self.expected['hostaddrs'], driver.pconn.hostaddrs)
        self.assertEqual(self.expected['username'], driver.pconn.username)
        self.assertEqual(self.expected['password'], driver.pconn.password)
        self.assertEqual(self.expected['vhost'], driver.virtual_host)


class TestKafkaDriver(test_utils.BaseTestCase):
    """Unit Test cases to test the kafka driver."""

    def setUp(self):
        super(TestKafkaDriver, self).setUp()
        self.messaging_conf.transport_url = 'kafka:/'
        transport = oslo_messaging.get_notification_transport(self.conf)
        self.driver = transport._driver

    def test_send(self):
        target = oslo_messaging.Target(topic="topic_test")
        self.assertRaises(NotImplementedError,
                          self.driver.send, target, {}, {})

    def test_send_notification(self):
        target = oslo_messaging.Target(topic="topic_test")

        with mock.patch("confluent_kafka.Producer") as producer:
            self.driver.send_notification(
                target, {}, {"payload": ["test_1"]},
                None, retry=3)
            producer.assert_called_once_with({
                'bootstrap.servers': '',
                'linger.ms': mock.ANY,
                'batch.num.messages': mock.ANY,
                'compression.codec': 'none',
                'security.protocol': 'PLAINTEXT',
                'sasl.mechanism': 'PLAIN',
                'sasl.username': mock.ANY,
                'sasl.password': mock.ANY,
                'ssl.ca.location': '',
                'ssl.certificate.location': '',
                'ssl.key.location': '',
                'ssl.key.password': '',
            })

    def test_send_notification_retries_on_buffer_error(self):
        target = oslo_messaging.Target(topic="topic_test")

        with mock.patch("confluent_kafka.Producer") as producer:
            fake_producer = mock.MagicMock()
            fake_producer.produce = mock.Mock(
                side_effect=[BufferError, BufferError, None])
            producer.return_value = fake_producer
            self.driver.send_notification(
                target, {}, {"payload": ["test_1"]},
                None, retry=3)
            assert fake_producer.produce.call_count == 3

    def test_send_notification_stops_on_kafka_error(self):
        target = oslo_messaging.Target(topic="topic_test")

        with mock.patch("confluent_kafka.Producer") as producer:
            fake_producer = mock.MagicMock()
            fake_producer.produce = mock.Mock(
                side_effect=[KafkaException, None])
            producer.return_value = fake_producer
            self.driver.send_notification(
                target, {}, {"payload": ["test_1"]},
                None, retry=3)
            assert fake_producer.produce.call_count == 1

    def test_listen(self):
        target = oslo_messaging.Target(topic="topic_test")
        self.assertRaises(NotImplementedError,
                          self.driver.listen, target, None, None)

    def test_listen_for_notifications(self):
        targets_and_priorities = [
            (oslo_messaging.Target(topic="topic_test_1"), "sample"),
        ]

        with mock.patch("confluent_kafka.Consumer") as consumer:
            self.driver.listen_for_notifications(
                targets_and_priorities, "kafka_test", 1000, 10)
            consumer.assert_called_once_with({
                'bootstrap.servers': '',
                'enable.partition.eof': False,
                'group.id': 'kafka_test',
                'enable.auto.commit': mock.ANY,
                'max.partition.fetch.bytes': mock.ANY,
                'security.protocol': 'PLAINTEXT',
                'sasl.mechanism': 'PLAIN',
                'sasl.username': mock.ANY,
                'sasl.password': mock.ANY,
                'ssl.ca.location': '',
                'ssl.certificate.location': '',
                'ssl.key.location': '',
                'ssl.key.password': '',
                'default.topic.config': {'auto.offset.reset': 'latest'}
            })

    def test_cleanup(self):
        listeners = [mock.MagicMock(), mock.MagicMock()]
        self.driver.listeners.extend(listeners)
        self.driver.cleanup()
        for listener in listeners:
            listener.close.assert_called_once_with()
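
# The config dicts asserted above map one-to-one onto confluent_kafka
# settings. For orientation, a standalone producer using the same option
# names would look like the sketch below (the broker address and topic are
# illustrative, and the function is not used by the tests):
def _example_producer_sketch():
    import confluent_kafka
    producer = confluent_kafka.Producer(
        {'bootstrap.servers': 'localhost:9092',
         'security.protocol': 'PLAINTEXT'})
    # enqueue a message and block until it is delivered (or errors out)
    producer.produce('topic_test', b'{"payload": ["test_1"]}')
    producer.flush()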
oslo_messaging.get_notification_transport(self.conf) self.driver = transport._driver def test_notify(self): with mock.patch("confluent_kafka.Producer") as producer: self.driver.pconn.notify_send("fake_topic", {"fake_ctxt": "fake_param"}, {"fake_text": "fake_message_1"}, 10) assert producer.call_count == 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/drivers/test_impl_rabbit.py0000664000175000017500000013734300000000000026321 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import ssl import sys import threading import time import uuid import fixtures import kombu import kombu.connection import kombu.transport.memory from oslo_serialization import jsonutils from oslo_utils import eventletutils import testscenarios import oslo_messaging from oslo_messaging._drivers import amqpdriver from oslo_messaging._drivers import common as driver_common from oslo_messaging._drivers import impl_rabbit as rabbit_driver from oslo_messaging.exceptions import ConfigurationError from oslo_messaging.exceptions import MessageDeliveryFailure from oslo_messaging.tests import utils as test_utils from oslo_messaging.transport import DriverLoadFailure from unittest import mock load_tests = testscenarios.load_tests_apply_scenarios class TestHeartbeat(test_utils.BaseTestCase): @mock.patch('oslo_messaging._drivers.impl_rabbit.LOG') @mock.patch('kombu.connection.Connection.heartbeat_check') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.' '_heartbeat_supported_and_enabled', return_value=True) @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.' 
'ensure_connection') def _do_test_heartbeat_sent(self, fake_ensure_connection, fake_heartbeat_support, fake_heartbeat, fake_logger, heartbeat_side_effect=None, info=None): event = eventletutils.Event() def heartbeat_check(rate=2): event.set() if heartbeat_side_effect: raise heartbeat_side_effect fake_heartbeat.side_effect = heartbeat_check transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) conn = transport._driver._get_connection() conn.ensure(method=lambda: True) event.wait() conn._heartbeat_stop() # check that the heartbeat has been called self.assertLess(0, fake_heartbeat.call_count) if not heartbeat_side_effect: self.assertEqual(1, fake_ensure_connection.call_count) self.assertEqual(2, fake_logger.debug.call_count) self.assertEqual(0, fake_logger.info.call_count) else: self.assertEqual(2, fake_ensure_connection.call_count) self.assertEqual(2, fake_logger.debug.call_count) self.assertEqual(1, fake_logger.info.call_count) self.assertIn(mock.call(info, mock.ANY), fake_logger.info.mock_calls) def test_test_heartbeat_sent_default(self): self._do_test_heartbeat_sent() def test_test_heartbeat_sent_connection_fail(self): self._do_test_heartbeat_sent( heartbeat_side_effect=kombu.exceptions.OperationalError, info='A recoverable connection/channel error occurred, ' 'trying to reconnect: %s') def test_run_heartbeat_in_pthread(self): self.config(heartbeat_in_pthread=True, group="oslo_messaging_rabbit") self._do_test_heartbeat_sent() class TestRabbitQos(test_utils.BaseTestCase): def connection_with(self, prefetch, purpose): self.config(rabbit_qos_prefetch_count=prefetch, group="oslo_messaging_rabbit") transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') transport._driver._get_connection(purpose) @mock.patch('kombu.transport.memory.Channel.basic_qos') def test_qos_sent_on_listen_connection(self, fake_basic_qos): self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_LISTEN) fake_basic_qos.assert_called_once_with(0, 1, False) @mock.patch('kombu.transport.memory.Channel.basic_qos') def test_qos_not_sent_when_cfg_zero(self, fake_basic_qos): self.connection_with(prefetch=0, purpose=driver_common.PURPOSE_LISTEN) fake_basic_qos.assert_not_called() @mock.patch('kombu.transport.memory.Channel.basic_qos') def test_qos_not_sent_on_send_connection(self, fake_basic_qos): self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_SEND) fake_basic_qos.assert_not_called() class TestRabbitDriverLoad(test_utils.BaseTestCase): scenarios = [ ('rabbit', dict(transport_url='rabbit:/guest:guest@localhost:5672//')), ('kombu', dict(transport_url='kombu:/guest:guest@localhost:5672//')), ('rabbit+memory', dict(transport_url='kombu+memory:/')) ] @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection' '.ensure_connection') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset') def test_driver_load(self, fake_ensure, fake_reset): self.config(heartbeat_timeout_threshold=60, group='oslo_messaging_rabbit') self.messaging_conf.transport_url = self.transport_url transport = oslo_messaging.get_transport(self.conf) self.addCleanup(transport.cleanup) driver = transport._driver self.assertIsInstance(driver, rabbit_driver.RabbitDriver) @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection' '.ensure_connection') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset') def test_driver_load_max_less_than_min(self, fake_ensure, fake_reset): self.config( rpc_conn_pool_size=1, conn_pool_min_size=2, group='oslo_messaging_rabbit')
self.messaging_conf.transport_url = self.transport_url error = self.assertRaises( DriverLoadFailure, oslo_messaging.get_transport, self.conf) self.assertIn( "rpc_conn_pool_size: 1 must be greater than or equal " "to conn_pool_min_size: 2", str(error)) class TestRabbitDriverLoadSSL(test_utils.BaseTestCase): scenarios = [ ('no_ssl', dict(options=dict(), expected=False)), ('no_ssl_with_options', dict(options=dict(ssl_version='TLSv1'), expected=False)), ('just_ssl', dict(options=dict(ssl=True), expected=True)), ('ssl_with_options', dict(options=dict(ssl=True, ssl_version='TLSv1', ssl_key_file='foo', ssl_cert_file='bar', ssl_ca_file='foobar'), expected=dict(ssl_version=3, keyfile='foo', certfile='bar', ca_certs='foobar', cert_reqs=ssl.CERT_REQUIRED))), ] @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection' '.ensure_connection') @mock.patch('kombu.connection.Connection') def test_driver_load(self, connection_klass, fake_ensure): self.config(group="oslo_messaging_rabbit", **self.options) transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) connection = transport._driver._get_connection() connection_klass.assert_called_once_with( 'memory:///', transport_options={ 'client_properties': { 'capabilities': { 'connection.blocked': True, 'consumer_cancel_notify': True, 'authentication_failure_close': True, }, 'connection_name': connection.name}, 'confirm_publish': True, 'on_blocked': mock.ANY, 'on_unblocked': mock.ANY}, ssl=self.expected, login_method='AMQPLAIN', heartbeat=60, failover_strategy='round-robin' ) class TestRabbitDriverLoadSSLWithFIPS(test_utils.BaseTestCase): scenarios = [ ('ssl_fips_mode', dict(options=dict(ssl=True, ssl_enforce_fips_mode=True), expected=True)), ] @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection' '.ensure_connection') @mock.patch('kombu.connection.Connection') def test_driver_load_with_fips_supported(self, connection_klass, fake_ensure): self.config(ssl=True, ssl_enforce_fips_mode=True, group="oslo_messaging_rabbit") transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) with mock.patch.object(ssl, 'FIPS_mode', create=True, return_value=True): with mock.patch.object(ssl, 'FIPS_mode_set', create=True): connection = transport._driver._get_connection() connection_klass.assert_called_once_with( 'memory:///', transport_options={ 'client_properties': { 'capabilities': { 'connection.blocked': True, 'consumer_cancel_notify': True, 'authentication_failure_close': True, }, 'connection_name': connection.name}, 'confirm_publish': True, 'on_blocked': mock.ANY, 'on_unblocked': mock.ANY}, ssl=self.expected, login_method='AMQPLAIN', heartbeat=60, failover_strategy='round-robin' ) @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection' '.ensure_connection') @mock.patch('oslo_messaging._drivers.impl_rabbit.ssl') @mock.patch('kombu.connection.Connection') def test_fips_unsupported(self, connection_klass, fake_ssl, fake_ensure): self.config(ssl=True, ssl_enforce_fips_mode=True, group="oslo_messaging_rabbit") transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) del fake_ssl.FIPS_mode # We do this test only if FIPS mode is not supported to # ensure that we hard fail. 
self.assertRaises( ConfigurationError, transport._driver._get_connection) class TestRabbitPublisher(test_utils.BaseTestCase): @mock.patch('kombu.messaging.Producer.publish') def test_send_with_timeout(self, fake_publish): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') exchange_mock = mock.Mock() with transport._driver._get_connection( driver_common.PURPOSE_SEND) as pool_conn: conn = pool_conn.connection conn._publish(exchange_mock, 'msg', routing_key='routing_key', timeout=1) fake_publish.assert_called_with( 'msg', expiration=1, exchange=exchange_mock, compression=self.conf.oslo_messaging_rabbit.kombu_compression, mandatory=False, routing_key='routing_key') @mock.patch('kombu.messaging.Producer.publish') def test_send_no_timeout(self, fake_publish): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') exchange_mock = mock.Mock() with transport._driver._get_connection( driver_common.PURPOSE_SEND) as pool_conn: conn = pool_conn.connection conn._publish(exchange_mock, 'msg', routing_key='routing_key') fake_publish.assert_called_with( 'msg', expiration=None, mandatory=False, compression=self.conf.oslo_messaging_rabbit.kombu_compression, exchange=exchange_mock, routing_key='routing_key') def test_declared_queue_publisher(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) e_passive = kombu.entity.Exchange( name='foobar', type='topic', passive=True) e_active = kombu.entity.Exchange( name='foobar', type='topic', passive=False) with transport._driver._get_connection( driver_common.PURPOSE_SEND) as pool_conn: conn = pool_conn.connection exc = conn.connection.channel_errors[0] def try_send(exchange): conn._ensure_publishing( conn._publish_and_creates_default_queue, exchange, {}, routing_key='foobar') with mock.patch('kombu.transport.virtual.Channel.close'): # Ensure the exchange does not exist self.assertRaises(oslo_messaging.MessageDeliveryFailure, try_send, e_passive) # Create it try_send(e_active) # Ensure it creates it try_send(e_passive) with mock.patch('kombu.messaging.Producer.publish', side_effect=exc): with mock.patch('kombu.transport.virtual.Channel.close'): # Ensure the exchange is already in cache self.assertIn('foobar', conn._declared_exchanges) # Reset connection self.assertRaises(oslo_messaging.MessageDeliveryFailure, try_send, e_passive) # Ensure the cache is empty self.assertEqual(0, len(conn._declared_exchanges)) try_send(e_active) self.assertIn('foobar', conn._declared_exchanges) def test_send_exception_remap(self): bad_exc = Exception("Non-oslo.messaging exception") transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') exchange_mock = mock.Mock() with transport._driver._get_connection( driver_common.PURPOSE_SEND) as pool_conn: conn = pool_conn.connection with mock.patch('kombu.messaging.Producer.publish', side_effect=bad_exc): self.assertRaises(MessageDeliveryFailure, conn._ensure_publishing, conn._publish, exchange_mock, 'msg') class TestRabbitConsume(test_utils.BaseTestCase): def test_consume_timeout(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) deadline = time.time() + 6 with transport._driver._get_connection( driver_common.PURPOSE_LISTEN) as conn: self.assertRaises(driver_common.Timeout, conn.consume, timeout=3) # the kombu memory transport doesn't really raise errors, # so just simulate a real driver's behavior conn.connection.connection.recoverable_channel_errors = (IOError,)
conn.declare_fanout_consumer("notif.info", lambda msg: True) with mock.patch('kombu.connection.Connection.drain_events', side_effect=IOError): self.assertRaises(driver_common.Timeout, conn.consume, timeout=3) self.assertEqual(0, int(deadline - time.time())) def test_consume_from_missing_queue(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://') self.addCleanup(transport.cleanup) with transport._driver._get_connection( driver_common.PURPOSE_LISTEN) as conn: with mock.patch('kombu.Queue.consume') as consume, mock.patch( 'kombu.Queue.declare') as declare: conn.declare_topic_consumer(exchange_name='test', topic='test', callback=lambda msg: True) import amqp consume.side_effect = [amqp.NotFound, None] conn.connection.connection.recoverable_connection_errors = () conn.connection.connection.recoverable_channel_errors = () self.assertEqual(1, declare.call_count) conn.connection.connection.drain_events = mock.Mock() # Ensure that a queue will be re-declared if the consume method # of kombu.Queue raises amqp.NotFound conn.consume() self.assertEqual(2, declare.call_count) def test_consume_from_missing_queue_with_io_error_on_redeclaration(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://') self.addCleanup(transport.cleanup) with transport._driver._get_connection( driver_common.PURPOSE_LISTEN) as conn: with mock.patch('kombu.Queue.consume') as consume, mock.patch( 'kombu.Queue.declare') as declare: conn.declare_topic_consumer(exchange_name='test', topic='test', callback=lambda msg: True) import amqp consume.side_effect = [amqp.NotFound, None] declare.side_effect = [IOError, None] conn.connection.connection.recoverable_connection_errors = ( IOError,) conn.connection.connection.recoverable_channel_errors = () self.assertEqual(1, declare.call_count) conn.connection.connection.drain_events = mock.Mock() # Ensure that a queue will be re-declared after the # 'queue not found' exception despite the connection error.
conn.consume() self.assertEqual(3, declare.call_count) def test_connection_ack_have_disconnected_kombu_connection(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) with transport._driver._get_connection( driver_common.PURPOSE_LISTEN) as conn: channel = conn.connection.channel with mock.patch('kombu.connection.Connection.connected', new_callable=mock.PropertyMock, return_value=False): self.assertRaises(driver_common.Timeout, conn.connection.consume, timeout=0.01) # Ensure a new channel has been set up self.assertNotEqual(channel, conn.connection.channel) class TestRabbitTransportURL(test_utils.BaseTestCase): scenarios = [ ('none', dict(url=None, expected=["amqp://guest:guest@localhost:5672/"])), ('memory', dict(url='kombu+memory:////', expected=["memory:///"])), ('empty', dict(url='rabbit:///', expected=['amqp://guest:guest@localhost:5672/'])), ('localhost', dict(url='rabbit://localhost/', expected=['amqp://:@localhost:5672/'])), ('virtual_host', dict(url='rabbit:///vhost', expected=['amqp://guest:guest@localhost:5672/vhost'])), ('no_creds', dict(url='rabbit://host/virtual_host', expected=['amqp://:@host:5672/virtual_host'])), ('no_port', dict(url='rabbit://user:password@host/virtual_host', expected=['amqp://user:password@host:5672/virtual_host'])), ('full_url', dict(url='rabbit://user:password@host:10/virtual_host', expected=['amqp://user:password@host:10/virtual_host'])), ('full_two_url', dict(url='rabbit://user:password@host:10,' 'user2:password2@host2:12/virtual_host', expected=["amqp://user:password@host:10/virtual_host", "amqp://user2:password2@host2:12/virtual_host"] )), ('rabbit_ipv6', dict(url='rabbit://u:p@[fd00:beef:dead:55::133]:10/vhost', expected=['amqp://u:p@[fd00:beef:dead:55::133]:10/vhost'])), ('rabbit_ipv4', dict(url='rabbit://user:password@10.20.30.40:10/vhost', expected=['amqp://user:password@10.20.30.40:10/vhost'])), ('rabbit_no_vhost_slash', dict(url='rabbit://user:password@10.20.30.40:10', expected=['amqp://user:password@10.20.30.40:10/'])), ] def setUp(self): super(TestRabbitTransportURL, self).setUp() self.messaging_conf.transport_url = 'rabbit:/' self.config(heartbeat_timeout_threshold=0, group='oslo_messaging_rabbit') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection' '.ensure_connection') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset') def test_transport_url(self, fake_reset, fake_ensure): transport = oslo_messaging.get_transport(self.conf, self.url) self.addCleanup(transport.cleanup) driver = transport._driver urls = driver._get_connection()._url.split(";") self.assertEqual(sorted(self.expected), sorted(urls)) class TestSendReceive(test_utils.BaseTestCase): _n_senders = [ ('single_sender', dict(n_senders=1)), ('multiple_senders', dict(n_senders=10)), ] _context = [ ('empty_context', dict(ctxt={})), ('with_context', dict(ctxt={'user': 'mark'})), ] _reply = [ ('rx_id', dict(rx_id=True, reply=None)), ('none', dict(rx_id=False, reply=None)), ('empty_list', dict(rx_id=False, reply=[])), ('empty_dict', dict(rx_id=False, reply={})), ('false', dict(rx_id=False, reply=False)), ('zero', dict(rx_id=False, reply=0)), ] _failure = [ ('success', dict(failure=False)), ('failure', dict(failure=True, expected=False)), ('expected_failure', dict(failure=True, expected=True)), ] _timeout = [ ('no_timeout', dict(timeout=None, call_monitor_timeout=None)), ('timeout', dict(timeout=0.01, # FIXME(markmc): timeout=0 is broken?
call_monitor_timeout=None)), ('call_monitor_timeout', dict(timeout=0.01, call_monitor_timeout=0.02)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders, cls._context, cls._reply, cls._failure, cls._timeout) def test_send_receive(self): self.config(kombu_missing_consumer_retry_timeout=0.5, group="oslo_messaging_rabbit") self.config(heartbeat_timeout_threshold=0, group="oslo_messaging_rabbit") transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic='testtopic') listener = driver.listen(target, None, None)._poll_style_listener senders = [] replies = [] msgs = [] # FIXME(danms): Surely this is not the right way to do this... self.ctxt['client_timeout'] = self.call_monitor_timeout def send_and_wait_for_reply(i): try: timeout = self.timeout cm_timeout = self.call_monitor_timeout replies.append(driver.send(target, self.ctxt, {'tx_id': i}, wait_for_reply=True, timeout=timeout, call_monitor_timeout=cm_timeout)) self.assertFalse(self.failure) self.assertIsNone(self.timeout) except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e: replies.append(e) self.assertTrue(self.failure or self.timeout is not None) while len(senders) < self.n_senders: senders.append(threading.Thread(target=send_and_wait_for_reply, args=(len(senders), ))) for i in range(len(senders)): senders[i].start() received = listener.poll()[0] self.assertIsNotNone(received) self.assertEqual(self.ctxt, received.ctxt) self.assertEqual({'tx_id': i}, received.message) msgs.append(received) # reply in reverse, except reply to the first guy second from last order = list(range(len(senders) - 1, -1, -1)) if len(order) > 1: order[-1], order[-2] = order[-2], order[-1] for i in order: if self.timeout is None: if self.failure: try: raise ZeroDivisionError except Exception: failure = sys.exc_info() msgs[i].reply(failure=failure) elif self.rx_id: msgs[i].reply({'rx_id': i}) else: msgs[i].reply(self.reply) senders[i].join() self.assertEqual(len(senders), len(replies)) for i, reply in enumerate(replies): if self.timeout is not None: self.assertIsInstance(reply, oslo_messaging.MessagingTimeout) elif self.failure: self.assertIsInstance(reply, ZeroDivisionError) elif self.rx_id: self.assertEqual({'rx_id': order[i]}, reply) else: self.assertEqual(self.reply, reply) TestSendReceive.generate_scenarios() class TestPollAsync(test_utils.BaseTestCase): def test_poll_timeout(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic='testtopic') listener = driver.listen(target, None, None)._poll_style_listener received = listener.poll(timeout=0.050) self.assertEqual([], received) class TestRacyWaitForReply(test_utils.BaseTestCase): def test_send_receive(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic='testtopic') listener = driver.listen(target, None, None)._poll_style_listener senders = [] replies = [] msgs = [] wait_conditions = [] orig_reply_waiter = amqpdriver.ReplyWaiter.wait def reply_waiter(self, msg_id, timeout, call_monitor_timeout, reply_q): if wait_conditions: cond = wait_conditions.pop() with cond: cond.notify() with cond: cond.wait() return orig_reply_waiter(self, msg_id, timeout, call_monitor_timeout, reply_q) 
self.useFixture(fixtures.MockPatchObject( amqpdriver.ReplyWaiter, 'wait', reply_waiter)) def send_and_wait_for_reply(i, wait_for_reply): replies.append(driver.send(target, {}, {'tx_id': i}, wait_for_reply=wait_for_reply, timeout=None)) while len(senders) < 2: t = threading.Thread(target=send_and_wait_for_reply, args=(len(senders), True)) t.daemon = True senders.append(t) # test the case when msg_id is not set t = threading.Thread(target=send_and_wait_for_reply, args=(len(senders), False)) t.daemon = True senders.append(t) # Start the first guy, receive his message, but delay his polling notify_condition = threading.Condition() wait_conditions.append(notify_condition) with notify_condition: senders[0].start() notify_condition.wait() msgs.extend(listener.poll()) self.assertEqual({'tx_id': 0}, msgs[-1].message) # Start the second guy, receive his message senders[1].start() msgs.extend(listener.poll()) self.assertEqual({'tx_id': 1}, msgs[-1].message) # Reply to both in order, making the second thread queue # the reply meant for the first thread msgs[0].reply({'rx_id': 0}) msgs[1].reply({'rx_id': 1}) # Wait for the second thread to finish senders[1].join() # Start the 3rd guy, receive his message senders[2].start() msgs.extend(listener.poll()) self.assertEqual({'tx_id': 2}, msgs[-1].message) # Verify the _send_reply was not invoked by driver: with mock.patch.object(msgs[2], '_send_reply') as method: msgs[2].reply({'rx_id': 2}) self.assertEqual(0, method.call_count) # Wait for the 3rd thread to finish senders[2].join() # Let the first thread continue with notify_condition: notify_condition.notify() # Wait for the first thread to finish senders[0].join() # Verify replies were received out of order self.assertEqual(len(senders), len(replies)) self.assertEqual({'rx_id': 1}, replies[0]) self.assertIsNone(replies[1]) self.assertEqual({'rx_id': 0}, replies[2]) def _declare_queue(target): connection = kombu.connection.BrokerConnection(transport='memory') # Kludge to speed up tests.
connection.transport.polling_interval = 0.0 connection.connect() channel = connection.channel() # work around 'memory' transport bug in 1.1.3 channel._new_queue('ae.undeliver') if target.fanout: exchange = kombu.entity.Exchange(name=target.topic + '_fanout', type='fanout', durable=False, auto_delete=True) queue = kombu.entity.Queue(name=target.topic + '_fanout_12345', channel=channel, exchange=exchange, routing_key=target.topic) elif target.server: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) topic = '%s.%s' % (target.topic, target.server) queue = kombu.entity.Queue(name=topic, channel=channel, exchange=exchange, routing_key=topic) else: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) queue = kombu.entity.Queue(name=target.topic, channel=channel, exchange=exchange, routing_key=target.topic) queue.declare() return connection, channel, queue class TestRequestWireFormat(test_utils.BaseTestCase): _target = [ ('topic_target', dict(topic='testtopic', server=None, fanout=False)), ('server_target', dict(topic='testtopic', server='testserver', fanout=False)), ('fanout_target', dict(topic='testtopic', server=None, fanout=True)), ] _msg = [ ('empty_msg', dict(msg={}, expected={})), ('primitive_msg', dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})), ('complex_msg', dict(msg={'a': {'b': datetime.datetime(1920, 2, 3, 4, 5, 6, 7)}}, expected={'a': {'b': '1920-02-03T04:05:06.000007'}})), ] _context = [ ('empty_ctxt', dict(ctxt={}, expected_ctxt={})), ('user_project_ctxt', dict(ctxt={'user': 'mark', 'project': 'snarkybunch'}, expected_ctxt={'_context_user': 'mark', '_context_project': 'snarkybunch'})), ] _compression = [ ('gzip_compression', dict(compression='gzip')), ('without_compression', dict(compression=None)) ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._msg, cls._context, cls._target, cls._compression) def setUp(self): super(TestRequestWireFormat, self).setUp() self.uuids = [] self.orig_uuid4 = uuid.uuid4 self.useFixture(fixtures.MonkeyPatch('uuid.uuid4', self.mock_uuid4)) def mock_uuid4(self): self.uuids.append(self.orig_uuid4()) return self.uuids[-1] def test_request_wire_format(self): self.conf.oslo_messaging_rabbit.kombu_compression = self.compression transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic=self.topic, server=self.server, fanout=self.fanout) connection, channel, queue = _declare_queue(target) self.addCleanup(connection.release) driver.send(target, self.ctxt, self.msg) msgs = [] def callback(msg): msg = channel.message_to_python(msg) msg.ack() msgs.append(msg.payload) queue.consume(callback=callback, consumer_tag='1', nowait=False) connection.drain_events() self.assertEqual(1, len(msgs)) self.assertIn('oslo.message', msgs[0]) received = msgs[0] received['oslo.message'] = jsonutils.loads(received['oslo.message']) # FIXME(markmc): add _msg_id and _reply_q check expected_msg = { '_unique_id': self.uuids[0].hex, } expected_msg.update(self.expected) expected_msg.update(self.expected_ctxt) expected = { 'oslo.version': '2.0', 'oslo.message': expected_msg, } self.assertEqual(expected, received) TestRequestWireFormat.generate_scenarios() def _create_producer(target): connection = kombu.connection.BrokerConnection(transport='memory') # Kludge to speed up tests. 
connection.transport.polling_interval = 0.0 connection.connect() channel = connection.channel() # work around 'memory' transport bug in 1.1.3 channel._new_queue('ae.undeliver') if target.fanout: exchange = kombu.entity.Exchange(name=target.topic + '_fanout', type='fanout', durable=False, auto_delete=True) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=target.topic) elif target.server: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) topic = '%s.%s' % (target.topic, target.server) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=topic) else: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=target.topic) return connection, producer class TestReplyWireFormat(test_utils.BaseTestCase): _target = [ ('topic_target', dict(topic='testtopic', server=None, fanout=False)), ('server_target', dict(topic='testtopic', server='testserver', fanout=False)), ('fanout_target', dict(topic='testtopic', server=None, fanout=True)), ] _msg = [ ('empty_msg', dict(msg={}, expected={})), ('primitive_msg', dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})), ('complex_msg', dict(msg={'a': {'b': '1920-02-03T04:05:06.000007'}}, expected={'a': {'b': '1920-02-03T04:05:06.000007'}})), ] _context = [ ('empty_ctxt', dict(ctxt={}, expected_ctxt={'client_timeout': None})), ('user_project_ctxt', dict(ctxt={'_context_user': 'mark', '_context_project': 'snarkybunch'}, expected_ctxt={'user': 'mark', 'project': 'snarkybunch', 'client_timeout': None})), ] _compression = [ ('gzip_compression', dict(compression='gzip')), ('without_compression', dict(compression=None)) ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._msg, cls._context, cls._target, cls._compression) def test_reply_wire_format(self): self.conf.oslo_messaging_rabbit.kombu_compression = self.compression transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic=self.topic, server=self.server, fanout=self.fanout) listener = driver.listen(target, None, None)._poll_style_listener connection, producer = _create_producer(target) self.addCleanup(connection.release) msg = { 'oslo.version': '2.0', 'oslo.message': {} } msg['oslo.message'].update(self.msg) msg['oslo.message'].update(self.ctxt) msg['oslo.message'].update({ '_msg_id': uuid.uuid4().hex, '_unique_id': uuid.uuid4().hex, '_reply_q': 'reply_' + uuid.uuid4().hex, '_timeout': None, }) msg['oslo.message'] = jsonutils.dumps(msg['oslo.message']) producer.publish(msg) received = listener.poll()[0] self.assertIsNotNone(received) self.assertEqual(self.expected_ctxt, received.ctxt) self.assertEqual(self.expected, received.message) TestReplyWireFormat.generate_scenarios() class RpcKombuHATestCase(test_utils.BaseTestCase): def setUp(self): super(RpcKombuHATestCase, self).setUp() transport_url = 'rabbit:/host1,host2,host3,host4,host5/' self.messaging_conf.transport_url = transport_url self.config(rabbit_retry_interval=0.01, rabbit_retry_backoff=0.01, kombu_reconnect_delay=0, heartbeat_timeout_threshold=0, group="oslo_messaging_rabbit") self.useFixture(fixtures.MockPatch( 'kombu.connection.Connection.connection')) self.useFixture(fixtures.MockPatch( 'kombu.connection.Connection.channel')) # TODO(stephenfin): Drop hasattr when 
we drop support for kombu < 4.6.8 if hasattr(kombu.connection.Connection, '_connection_factory'): self.useFixture(fixtures.MockPatch( 'kombu.connection.Connection._connection_factory')) # starting from the first broker in the list url = oslo_messaging.TransportURL.parse(self.conf, None) self.connection = rabbit_driver.Connection(self.conf, url, driver_common.PURPOSE_SEND) # TODO(stephenfin): Remove when we drop support for kombu < 4.6.8 if hasattr(kombu.connection.Connection, 'connect'): self.useFixture(fixtures.MockPatch( 'kombu.connection.Connection.connect')) self.addCleanup(self.connection.close) def test_ensure_four_retry(self): mock_callback = mock.Mock(side_effect=IOError) self.assertRaises(oslo_messaging.MessageDeliveryFailure, self.connection.ensure, mock_callback, retry=4) # TODO(stephenfin): Remove when we drop support for kombu < 5.2.4 expected = 5 if kombu.VERSION < (5, 2, 4): expected = 6 self.assertEqual(expected, mock_callback.call_count) def test_ensure_one_retry(self): mock_callback = mock.Mock(side_effect=IOError) self.assertRaises(oslo_messaging.MessageDeliveryFailure, self.connection.ensure, mock_callback, retry=1) # TODO(stephenfin): Remove when we drop support for kombu < 5.2.4 expected = 2 if kombu.VERSION < (5, 2, 4): expected = 3 self.assertEqual(expected, mock_callback.call_count) def test_ensure_no_retry(self): mock_callback = mock.Mock(side_effect=IOError) self.assertRaises( oslo_messaging.MessageDeliveryFailure, self.connection.ensure, mock_callback, retry=0, ) # TODO(stephenfin): Remove when we drop support for kombu < 5.2.4 expected = 1 if kombu.VERSION < (5, 2, 4): expected = 2 self.assertEqual(expected, mock_callback.call_count) class ConnectionLockTestCase(test_utils.BaseTestCase): def _thread(self, lock, sleep, heartbeat=False): def thread_task(): if heartbeat: with lock.for_heartbeat(): time.sleep(sleep) else: with lock: time.sleep(sleep) t = threading.Thread(target=thread_task) t.daemon = True t.start() start = time.time() def get_elapsed_time(): t.join() return time.time() - start return get_elapsed_time def test_workers_only(self): lock = rabbit_driver.ConnectionLock() t1 = self._thread(lock, 1) t2 = self._thread(lock, 1) self.assertAlmostEqual(1, t1(), places=0) self.assertAlmostEqual(2, t2(), places=0) def test_worker_and_heartbeat(self): lock = rabbit_driver.ConnectionLock() t1 = self._thread(lock, 1) t2 = self._thread(lock, 1, heartbeat=True) self.assertAlmostEqual(1, t1(), places=0) self.assertAlmostEqual(2, t2(), places=0) def test_workers_and_heartbeat(self): lock = rabbit_driver.ConnectionLock() t1 = self._thread(lock, 1) t2 = self._thread(lock, 1) t3 = self._thread(lock, 1) t4 = self._thread(lock, 1, heartbeat=True) t5 = self._thread(lock, 1) self.assertAlmostEqual(1, t1(), places=0) self.assertAlmostEqual(2, t4(), places=0) self.assertAlmostEqual(3, t2(), places=0) self.assertAlmostEqual(4, t3(), places=0) self.assertAlmostEqual(5, t5(), places=0) def test_heartbeat(self): lock = rabbit_driver.ConnectionLock() t1 = self._thread(lock, 1, heartbeat=True) t2 = self._thread(lock, 1) self.assertAlmostEqual(1, t1(), places=0) self.assertAlmostEqual(2, t2(), places=0) class TestPollTimeoutLimit(test_utils.BaseTestCase): def test_poll_timeout_limit(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic='testtopic') listener = driver.listen(target, None, None)._poll_style_listener thread = threading.Thread(target=listener.poll)
thread.daemon = True thread.start() time.sleep(amqpdriver.ACK_REQUEUE_EVERY_SECONDS_MAX * 2) try: # timeout should not grow past the maximum self.assertEqual(amqpdriver.ACK_REQUEUE_EVERY_SECONDS_MAX, listener._current_timeout) finally: # gracefully stop waiting driver.send(target, {}, {'tx_id': 'test'}) thread.join() class TestMsgIdCache(test_utils.BaseTestCase): @mock.patch('kombu.message.Message.reject') def test_reply_wire_format(self, reject_mock): self.conf.oslo_messaging_rabbit.kombu_compression = None transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic='testtopic', server=None, fanout=False) listener = driver.listen(target, None, None)._poll_style_listener connection, producer = _create_producer(target) self.addCleanup(connection.release) msg = { 'oslo.version': '2.0', 'oslo.message': {} } msg['oslo.message'].update({ '_msg_id': uuid.uuid4().hex, '_unique_id': uuid.uuid4().hex, '_reply_q': 'reply_' + uuid.uuid4().hex, '_timeout': None, }) msg['oslo.message'] = jsonutils.dumps(msg['oslo.message']) producer.publish(msg) received = listener.poll()[0] self.assertIsNotNone(received) self.assertEqual({}, received.message) # publish the same message a second time producer.publish(msg) received = listener.poll(timeout=1) # duplicate message is ignored self.assertEqual(len(received), 0) # we should not reject duplicate message reject_mock.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/drivers/test_pool.py0000664000175000017500000000706400000000000025002 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import uuid import fixtures import testscenarios from oslo_messaging._drivers import pool from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class PoolTestCase(test_utils.BaseTestCase): _max_size = [ ('default_size', dict(max_size=None, n_iters=4)), ('set_max_size', dict(max_size=10, n_iters=10)), ] _create_error = [ ('no_create_error', dict(create_error=False)), ('create_error', dict(create_error=True)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._max_size, cls._create_error) class TestPool(pool.Pool): def create(self, retry=None): return uuid.uuid4() class ThreadWaitWaiter(object): """A gross hack. Stub out the condition variable's wait() method and spin until it has been called by each thread. 
""" def __init__(self, cond, n_threads, test): self.cond = cond self.test = test self.n_threads = n_threads self.n_waits = 0 self.orig_wait = cond.wait def count_waits(**kwargs): self.n_waits += 1 self.orig_wait(**kwargs) self.test.useFixture(fixtures.MockPatchObject( self.cond, 'wait', count_waits)) def wait(self): while self.n_waits < self.n_threads: pass self.test.useFixture(fixtures.MockPatchObject( self.cond, 'wait', self.orig_wait)) def test_pool(self): kwargs = {} if self.max_size is not None: kwargs['max_size'] = self.max_size p = self.TestPool(**kwargs) if self.create_error: def create_error(retry=None): raise RuntimeError orig_create = p.create self.useFixture(fixtures.MockPatchObject( p, 'create', create_error)) self.assertRaises(RuntimeError, p.get) self.useFixture(fixtures.MockPatchObject( p, 'create', orig_create)) objs = [] for i in range(self.n_iters): objs.append(p.get()) self.assertIsInstance(objs[i], uuid.UUID) def wait_for_obj(): o = p.get() self.assertIn(o, objs) waiter = self.ThreadWaitWaiter(p._cond, self.n_iters, self) threads = [] for i in range(self.n_iters): t = threading.Thread(target=wait_for_obj) t.start() threads.append(t) waiter.wait() for o in objs: p.put(o) for t in threads: t.join() for o in objs: p.put(o) for o in p.iter_free(): self.assertIn(o, objs) objs.remove(o) self.assertEqual([], objs) PoolTestCase.generate_scenarios() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724686539.134673 oslo.messaging-14.9.0/oslo_messaging/tests/functional/0000775000175000017500000000000000000000000023075 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/functional/__init__.py0000664000175000017500000000000000000000000025174 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724686539.134673 oslo.messaging-14.9.0/oslo_messaging/tests/functional/notify/0000775000175000017500000000000000000000000024405 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/functional/notify/__init__.py0000664000175000017500000000000000000000000026504 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/functional/notify/test_logger.py0000664000175000017500000000603000000000000027274 0ustar00zuulzuul00000000000000# Copyright 2015 NetEase Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging import uuid import testscenarios import oslo_messaging from oslo_messaging.tests.functional import utils load_tests = testscenarios.load_tests_apply_scenarios class LoggingNotificationHandlerTestCase(utils.SkipIfNoTransportURL): """Test case for `oslo_messaging.LoggingNotificationHandler` Build up a logger using this handler, then test logging under the messaging and messagingv2 drivers. Make sure the expected logging notifications are received. """ _priority = [ ('debug', dict(priority='debug')), ('info', dict(priority='info')), ('warn', dict(priority='warn')), ('error', dict(priority='error')), ('critical', dict(priority='critical')), ] _driver = [ ('messaging', dict(driver='messaging')), ('messagingv2', dict(driver='messagingv2')), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._priority, cls._driver) def test_logging(self): # NOTE(gtt): Using different topic to make tests run in parallel topic = 'test_logging_%s_driver_%s' % (self.priority, self.driver) if self.notify_url.startswith("kafka://"): self.conf.set_override('consumer_group', str(uuid.uuid4()), group='oslo_messaging_kafka') self.config(driver=[self.driver], topics=[topic], group='oslo_messaging_notifications') listener = self.useFixture( utils.NotificationFixture(self.conf, self.notify_url, [topic])) log_notify = oslo_messaging.LoggingNotificationHandler(self.notify_url) log = logging.getLogger(topic) log.setLevel(logging.DEBUG) log.addHandler(log_notify) log_method = getattr(log, self.priority) log_method('Test logging at priority: %s' % self.priority) events = listener.get_events(timeout=15) self.assertEqual(1, len(events)) info_event = events[0] self.assertEqual(self.priority, info_event[0]) self.assertEqual('logrecord', info_event[1]) for key in ['name', 'thread', 'extra', 'process', 'funcName', 'levelno', 'processName', 'pathname', 'lineno', 'msg', 'exc_info', 'levelname']: self.assertIn(key, info_event[2]) LoggingNotificationHandlerTestCase.generate_scenarios() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/functional/test_functional.py0000664000175000017500000006027700000000000026660 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
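# A self-contained sketch (illustrative only; the topic, server and endpoint
# names are placeholders) of the bare RPC round trip that the fixtures in
# this module wrap for the test cases below: a server exposes an endpoint,
# and a blocking call() delivers the method's return value back to the
# client.
def _example_rpc_round_trip(conf, url='rabbit://guest:guest@localhost:5672/'):
    import oslo_messaging

    class _Endpoint(object):
        def add(self, ctxt, increment):
            return 40 + increment

    transport = oslo_messaging.get_rpc_transport(conf, url=url)
    target = oslo_messaging.Target(topic='demo_topic', server='demo_server')
    server = oslo_messaging.get_rpc_server(transport, target, [_Endpoint()],
                                           executor='threading')
    server.start()
    client = oslo_messaging.RPCClient(transport, target, timeout=5)
    result = client.call({}, 'add', increment=2)  # blocks for the reply
    server.stop()
    server.wait()
    return result  # 42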
import os import requests import subprocess import time import uuid import concurrent.futures from oslo_config import cfg from testtools import matchers import oslo_messaging from oslo_messaging.tests.functional import utils class CallTestCase(utils.SkipIfNoTransportURL): def setUp(self): super(CallTestCase, self).setUp(conf=cfg.ConfigOpts()) if self.rpc_url.startswith("kafka://"): self.skipTest("kafka does not support RPC API") self.conf.prog = "test_prog" self.conf.project = "test_project" self.config(heartbeat_timeout_threshold=0, group='oslo_messaging_rabbit') def test_specific_server(self): group = self.useFixture(utils.RpcServerGroupFixture( self.conf, self.rpc_url) ) client = group.client(1) client.append(text='open') self.assertEqual('openstack', client.append(text='stack')) client.add(increment=2) self.assertEqual(12, client.add(increment=10)) self.assertEqual(9, client.subtract(increment=3)) self.assertEqual('openstack', group.servers[1].endpoint.sval) self.assertEqual(9, group.servers[1].endpoint.ival) for i in [0, 2]: self.assertEqual('', group.servers[i].endpoint.sval) self.assertEqual(0, group.servers[i].endpoint.ival) def test_server_in_group(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.rpc_url) ) client = group.client() data = [c for c in 'abcdefghijklmn'] for i in data: client.append(text=i) for s in group.servers: self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0)) actual = [[c for c in s.endpoint.sval] for s in group.servers] self.assertThat(actual, utils.IsValidDistributionOf(data)) def test_different_exchanges(self): # If the different exchanges are not honoured, then the # teardown may hang unless we broadcast all control messages # to each server group1 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.rpc_url, use_fanout_ctrl=True)) group2 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.rpc_url, exchange="a", use_fanout_ctrl=True)) group3 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.rpc_url, exchange="b", use_fanout_ctrl=True)) client1 = group1.client(1) data1 = [c for c in 'abcdefghijklmn'] for i in data1: client1.append(text=i) client2 = group2.client() data2 = [c for c in 'opqrstuvwxyz'] for i in data2: client2.append(text=i) actual1 = [[c for c in s.endpoint.sval] for s in group1.servers] self.assertThat(actual1, utils.IsValidDistributionOf(data1)) actual1 = [c for c in group1.servers[1].endpoint.sval] self.assertThat([actual1], utils.IsValidDistributionOf(data1)) for s in group1.servers: expected = len(data1) if group1.servers.index(s) == 1 else 0 self.assertEqual(expected, len(s.endpoint.sval)) self.assertEqual(0, s.endpoint.ival) actual2 = [[c for c in s.endpoint.sval] for s in group2.servers] for s in group2.servers: self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0)) self.assertEqual(0, s.endpoint.ival) self.assertThat(actual2, utils.IsValidDistributionOf(data2)) for s in group3.servers: self.assertEqual(0, len(s.endpoint.sval)) self.assertEqual(0, s.endpoint.ival) def test_timeout(self): transport = self.useFixture( utils.RPCTransportFixture(self.conf, self.rpc_url) ) target = oslo_messaging.Target(topic="no_such_topic") c = utils.ClientStub(transport.transport, target, timeout=1) self.assertThat(c.ping, matchers.raises(oslo_messaging.MessagingTimeout)) def test_exception(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.rpc_url) ) client = group.client(1) client.add(increment=2) self.assertRaises(ValueError, client.subtract, 
increment=3) def test_timeout_with_concurrently_queues(self): transport = self.useFixture( utils.RPCTransportFixture(self.conf, self.rpc_url) ) target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()), server="server_" + str(uuid.uuid4())) server = self.useFixture( utils.RpcServerFixture(self.conf, self.rpc_url, target, executor="threading")) client = utils.ClientStub(transport.transport, target, cast=False, timeout=5) def short_periodical_tasks(): for i in range(10): client.add(increment=1) time.sleep(1) with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: future = executor.submit(client.long_running_task, seconds=10) executor.submit(short_periodical_tasks) self.assertRaises(oslo_messaging.MessagingTimeout, future.result) self.assertEqual(10, server.endpoint.ival) def test_mandatory_call(self): if not self.rpc_url.startswith("rabbit://"): self.skipTest("backend does not support call monitoring") transport = self.useFixture(utils.RPCTransportFixture(self.conf, self.rpc_url)) target = oslo_messaging.Target(topic='topic_' + str(uuid.uuid4()), server='server_' + str(uuid.uuid4())) # test for mandatory flag using transport-options, see: # https://blueprints.launchpad.net/oslo.messaging/+spec/transport-options # first test with `at_least_once=False` raises a "MessagingTimeout" # error since there is no check that the queue actually exists. # (Default behavior) options = oslo_messaging.TransportOptions(at_least_once=False) client1 = utils.ClientStub(transport.transport, target, cast=False, timeout=1, transport_options=options) self.assertRaises(oslo_messaging.MessagingTimeout, client1.delay) # second test with `at_least_once=True` raises a "MessageUndeliverable" # caused by the mandatory flag. # the MessageUndeliverable error is raised immediately without waiting # for any timeout options2 = oslo_messaging.TransportOptions(at_least_once=True) client2 = utils.ClientStub(transport.transport, target, cast=False, timeout=60, transport_options=options2) self.assertRaises(oslo_messaging.MessageUndeliverable, client2.delay) def test_monitor_long_call(self): if not (self.rpc_url.startswith("rabbit://") or self.rpc_url.startswith("amqp://")): self.skipTest("backend does not support call monitoring") transport = self.useFixture(utils.RPCTransportFixture(self.conf, self.rpc_url)) target = oslo_messaging.Target(topic='topic_' + str(uuid.uuid4()), server='server_' + str(uuid.uuid4())) class _endpoint(object): def delay(self, ctxt, seconds): time.sleep(seconds) return seconds self.useFixture( utils.RpcServerFixture(self.conf, self.rpc_url, target, executor='threading', endpoint=_endpoint())) # First case, no monitoring, ensure we time out normally when the # server side runs long client1 = utils.ClientStub(transport.transport, target, cast=False, timeout=1) self.assertRaises(oslo_messaging.MessagingTimeout, client1.delay, seconds=4) # Second case, set a short call monitor timeout and a very # long overall timeout. If we didn't honor the call monitor # timeout, we would wait an hour, past the test timeout. If # the server was not sending message heartbeats, we'd time out # after two seconds.
client2 = utils.ClientStub(transport.transport, target, cast=False, timeout=3600, call_monitor_timeout=2) self.assertEqual(4, client2.delay(seconds=4)) def test_endpoint_version_namespace(self): # verify endpoint version and namespace are checked target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()), server="server_" + str(uuid.uuid4()), namespace="Name1", version="7.5") class _endpoint(object): def __init__(self, target): self.target = target() def test(self, ctxt, echo): return echo transport = self.useFixture( utils.RPCTransportFixture(self.conf, self.rpc_url) ) self.useFixture( utils.RpcServerFixture(self.conf, self.rpc_url, target, executor="threading", endpoint=_endpoint(target))) client1 = utils.ClientStub(transport.transport, target, cast=False, timeout=5) self.assertEqual("Hi there", client1.test(echo="Hi there")) # unsupported version target2 = target() target2.version = "7.6" client2 = utils.ClientStub(transport.transport, target2, cast=False, timeout=5) self.assertRaises(oslo_messaging.rpc.client.RemoteError, client2.test, echo="Expect failure") # no matching namespace target3 = oslo_messaging.Target(topic=target.topic, server=target.server, version=target.version, namespace="Name2") client3 = utils.ClientStub(transport.transport, target3, cast=False, timeout=5) self.assertRaises(oslo_messaging.rpc.client.RemoteError, client3.test, echo="Expect failure") def test_bad_endpoint(self): # 'target' attribute is reserved and should be of type Target class _endpoint(object): def target(self, ctxt, echo): return echo target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()), server="server_" + str(uuid.uuid4())) transport = self.useFixture( utils.RPCTransportFixture(self.conf, self.rpc_url) ) self.assertRaises(TypeError, oslo_messaging.get_rpc_server, transport=transport.transport, target=target, endpoints=[_endpoint()], executor="threading") class CastTestCase(utils.SkipIfNoTransportURL): # Note: casts return immediately, so these tests utilise a special # internal sync() cast to ensure prior casts are complete before # making the necessary assertions. 
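    # Roughly, that pattern looks like this (illustrative only; `client` and
    # `group` come from the RpcServerGroupFixture helpers used below):
    #
    #     client.append(text='open')  # cast: returns before the server acts
    #     client.sync()               # queue a marker cast behind the others
    #     group.sync(1)               # block until server 1 saw the marker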
def setUp(self): super(CastTestCase, self).setUp() if self.rpc_url.startswith("kafka://"): self.skipTest("kafka does not support RPC API") def test_specific_server(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.rpc_url) ) client = group.client(1, cast=True) client.append(text='open') client.append(text='stack') client.add(increment=2) client.add(increment=10) time.sleep(0.3) client.sync() group.sync(1) self.assertIn(group.servers[1].endpoint.sval, ["openstack", "stackopen"]) self.assertEqual(12, group.servers[1].endpoint.ival) for i in [0, 2]: self.assertEqual('', group.servers[i].endpoint.sval) self.assertEqual(0, group.servers[i].endpoint.ival) def test_server_in_group(self): if self.rpc_url.startswith("amqp:"): self.skipTest("QPID-6307") group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.rpc_url) ) client = group.client(cast=True) for i in range(20): client.add(increment=1) for i in range(len(group.servers)): # expect each server to get a sync client.sync() group.sync(server="all") total = 0 for s in group.servers: ival = s.endpoint.ival self.assertThat(ival, matchers.GreaterThan(0)) self.assertThat(ival, matchers.LessThan(20)) total += ival self.assertEqual(20, total) def test_fanout(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.rpc_url) ) client = group.client('all', cast=True) client.append(text='open') client.append(text='stack') client.add(increment=2) client.add(increment=10) time.sleep(0.3) client.sync() group.sync(server='all') for s in group.servers: self.assertIn(s.endpoint.sval, ["openstack", "stackopen"]) self.assertEqual(12, s.endpoint.ival) class NotifyTestCase(utils.SkipIfNoTransportURL): # NOTE(sileht): Each test must not use the same topics # to be run in parallel # NOTE(ansmith): kafka partition assignment delay requires # longer timeouts for test completion def test_simple(self): get_timeout = 1 if self.notify_url.startswith("kafka://"): get_timeout = 5 self.conf.set_override('consumer_group', 'test_simple', group='oslo_messaging_kafka') listener = self.useFixture( utils.NotificationFixture(self.conf, self.notify_url, ['test_simple'])) notifier = listener.notifier('abc') notifier.info({}, 'test', 'Hello World!') event = listener.events.get(timeout=get_timeout) self.assertEqual('info', event[0]) self.assertEqual('test', event[1]) self.assertEqual('Hello World!', event[2]) self.assertEqual('abc', event[3]) def test_multiple_topics(self): get_timeout = 1 if self.notify_url.startswith("kafka://"): get_timeout = 5 self.conf.set_override('consumer_group', 'test_multiple_topics', group='oslo_messaging_kafka') listener = self.useFixture( utils.NotificationFixture(self.conf, self.notify_url, ['a', 'b'])) a = listener.notifier('pub-a', topics=['a']) b = listener.notifier('pub-b', topics=['b']) sent = { 'pub-a': [a, 'test-a', 'payload-a'], 'pub-b': [b, 'test-b', 'payload-b'] } for e in sent.values(): e[0].info({}, e[1], e[2]) received = {} while len(received) < len(sent): e = listener.events.get(timeout=get_timeout) received[e[3]] = e for key in received: actual = received[key] expected = sent[key] self.assertEqual('info', actual[0]) self.assertEqual(expected[1], actual[1]) self.assertEqual(expected[2], actual[2]) def test_multiple_servers(self): timeout = 0.5 if self.notify_url.startswith("amqp:"): self.skipTest("QPID-6307") if self.notify_url.startswith("kafka://"): self.skipTest("Kafka: needs to be fixed") timeout = 5 self.conf.set_override('consumer_group', 'test_multiple_servers', 
group='oslo_messaging_kafka') listener_a = self.useFixture( utils.NotificationFixture(self.conf, self.notify_url, ['test-topic'])) listener_b = self.useFixture( utils.NotificationFixture(self.conf, self.notify_url, ['test-topic'])) n = listener_a.notifier('pub') events_out = [('test-%s' % c, 'payload-%s' % c) for c in 'abcdefgh'] for event_type, payload in events_out: n.info({}, event_type, payload) events_in = [[(e[1], e[2]) for e in listener_a.get_events(timeout)], [(e[1], e[2]) for e in listener_b.get_events(timeout)]] self.assertThat(events_in, utils.IsValidDistributionOf(events_out)) for stream in events_in: self.assertThat(len(stream), matchers.GreaterThan(0)) def test_independent_topics(self): get_timeout = 0.5 if self.notify_url.startswith("kafka://"): get_timeout = 5 self.conf.set_override('consumer_group', 'test_independent_topics_a', group='oslo_messaging_kafka') listener_a = self.useFixture( utils.NotificationFixture(self.conf, self.notify_url, ['1'])) if self.notify_url.startswith("kafka://"): self.conf.set_override('consumer_group', 'test_independent_topics_b', group='oslo_messaging_kafka') listener_b = self.useFixture( utils.NotificationFixture(self.conf, self.notify_url, ['2'])) a = listener_a.notifier('pub-1', topics=['1']) b = listener_b.notifier('pub-2', topics=['2']) a_out = [('test-1-%s' % c, 'payload-1-%s' % c) for c in 'abcdefgh'] for event_type, payload in a_out: a.info({}, event_type, payload) b_out = [('test-2-%s' % c, 'payload-2-%s' % c) for c in 'ijklmnop'] for event_type, payload in b_out: b.info({}, event_type, payload) def check_received(listener, publisher, messages): actuals = sorted([listener.events.get(timeout=get_timeout) for __ in range(len(a_out))]) expected = sorted([['info', m[0], m[1], publisher] for m in messages]) self.assertEqual(expected, actuals) check_received(listener_a, "pub-1", a_out) check_received(listener_b, "pub-2", b_out) def test_all_categories(self): get_timeout = 1 if self.notify_url.startswith("kafka://"): get_timeout = 5 self.conf.set_override('consumer_group', 'test_all_categories', group='oslo_messaging_kafka') listener = self.useFixture(utils.NotificationFixture( self.conf, self.notify_url, ['test_all_categories'])) n = listener.notifier('abc') cats = ['debug', 'audit', 'info', 'warn', 'error', 'critical'] events = [(getattr(n, c), c, 'type-' + c, c + '-data') for c in cats] for e in events: e[0]({}, e[2], e[3]) # order between events with different categories is not guaranteed received = {} for expected in events: e = listener.events.get(timeout=get_timeout) received[e[0]] = e for expected in events: actual = received[expected[1]] self.assertEqual(expected[1], actual[0]) self.assertEqual(expected[2], actual[1]) self.assertEqual(expected[3], actual[2]) def test_simple_batch(self): get_timeout = 3 batch_timeout = 2 if self.notify_url.startswith("amqp:"): backend = os.environ.get("AMQP1_BACKEND") if backend == "qdrouterd": # end-to-end acknowledgement with router intermediary # sender pends until batch_size or timeout reached self.skipTest("qdrouterd backend") if self.notify_url.startswith("kafka://"): get_timeout = 10 batch_timeout = 5 self.conf.set_override('consumer_group', 'test_simple_batch', group='oslo_messaging_kafka') listener = self.useFixture( utils.BatchNotificationFixture(self.conf, self.notify_url, ['test_simple_batch'], batch_size=100, batch_timeout=batch_timeout)) notifier = listener.notifier('abc') for i in range(0, 205): notifier.info({}, 'test%s' % i, 'Hello World!') events = 
listener.get_events(timeout=get_timeout)
        self.assertEqual(3, len(events))
        self.assertEqual(100, len(events[0][1]))
        self.assertEqual(100, len(events[1][1]))
        self.assertEqual(5, len(events[2][1]))

    def test_compression(self):
        get_timeout = 1
        if self.notify_url.startswith("amqp:"):
            self.conf.set_override('kombu_compression', 'gzip',
                                   group='oslo_messaging_rabbit')
        if self.notify_url.startswith("kafka://"):
            get_timeout = 5
            self.conf.set_override('compression_codec', 'gzip',
                                   group='oslo_messaging_kafka')
            self.conf.set_override('consumer_group', 'test_compression',
                                   group='oslo_messaging_kafka')
        listener = self.useFixture(
            utils.NotificationFixture(self.conf, self.notify_url,
                                      ['test_compression']))
        notifier = listener.notifier('abc')
        notifier.info({}, 'test', 'Hello World!')
        event = listener.events.get(timeout=get_timeout)
        self.assertEqual('info', event[0])
        self.assertEqual('test', event[1])
        self.assertEqual('Hello World!', event[2])
        self.assertEqual('abc', event[3])


class MetricsTestCase(utils.SkipIfNoTransportURL):
    def setUp(self):
        super(MetricsTestCase, self).setUp(conf=cfg.ConfigOpts())
        if self.rpc_url.startswith("kafka://"):
            self.skipTest("kafka does not support RPC API")
        self.config(metrics_enabled=True, group='oslo_messaging_metrics')

    def test_functional(self):
        # verify call metrics are sent and reflected in oslo.metrics
        self.config(metrics_socket_file='/var/tmp/metrics_collector.sock',
                    group='oslo_messaging_metrics')
        metric_server = subprocess.Popen(["python3", "-m", "oslo_metrics"])
        time.sleep(1)
        group = self.useFixture(
            utils.RpcServerGroupFixture(self.conf, self.rpc_url))
        client = group.client(1)
        client.add(increment=1)
        time.sleep(1)
        r = requests.get('http://localhost:3000', timeout=10)
        for line in r.text.split('\n'):
            if 'client_invocation_start_total{' in line:
                self.assertEqual('1.0', line[-3:])
            elif 'client_invocation_end_total{' in line:
                self.assertEqual('1.0', line[-3:])
            elif 'client_processing_seconds_count{' in line:
                self.assertEqual('1.0', line[-3:])
        metric_server.terminate()

oslo.messaging-14.9.0/oslo_messaging/tests/functional/test_rabbitmq.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
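# An illustrative, hedged sketch (not part of the original module): the
# failover tests below manage a clustered RabbitMQ through pifpaf.  Assuming
# pifpaf and the rabbitmq server binaries are installed locally, the same
# driver can be exercised outside of a test case roughly like this:
#
#     from pifpaf.drivers import rabbitmq
#
#     def demo_cluster():
#         # spawns a 3-node cluster; env exposes PIFPAF_URL and the
#         # PIFPAF_RABBITMQ_NODENAME<N> variables used by the tests below
#         driver = rabbitmq.RabbitMQDriver(cluster=True, port=5692)
#         driver.setUp()
#         try:
#             print(driver.env["PIFPAF_URL"])
#         finally:
#             driver.cleanUp()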
import os
import signal
import time

import fixtures
from pifpaf.drivers import rabbitmq

from oslo_messaging.tests.functional import utils
from oslo_messaging.tests import utils as test_utils


class ConnectedPortMatcher(object):
    def __init__(self, port):
        self.port = port

    def __eq__(self, data):
        return data.get("port") == self.port

    def __repr__(self):
        return "<ConnectedPortMatcher port=%d>" % self.port


class RabbitMQFailoverTests(test_utils.BaseTestCase):
    DRIVERS = [
        "rabbit",
    ]

    def test_failover_scenario(self):
        self._test_failover_scenario()

    def test_failover_scenario_enable_cancel_on_failover(self):
        self._test_failover_scenario(enable_cancel_on_failover=True)

    def _test_failover_scenario(self, enable_cancel_on_failover=False):
        # NOTE(sileht): run this test only if the functional suite runs with
        # a driver that uses rabbitmq as backend
        self.driver = os.environ.get('TRANSPORT_DRIVER')
        if self.driver not in self.DRIVERS:
            self.skipTest("TRANSPORT_DRIVER is not set to a rabbit driver")

        # NOTE(sileht): Allow only one response at a time, to
        # have only one tcp connection for reply and ensure it will failover
        # correctly
        self.config(heartbeat_timeout_threshold=1,
                    rpc_conn_pool_size=1,
                    kombu_reconnect_delay=0,
                    rabbit_retry_interval=0,
                    rabbit_retry_backoff=0,
                    enable_cancel_on_failover=enable_cancel_on_failover,
                    group='oslo_messaging_rabbit')

        self.pifpaf = self.useFixture(rabbitmq.RabbitMQDriver(cluster=True,
                                                              port=5692))
        self.url = self.pifpaf.env["PIFPAF_URL"]
        self.n1 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME1"]
        self.n2 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME2"]
        self.n3 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME3"]

        # ensure connections will be established to the first node
        self.pifpaf.stop_node(self.n2)
        self.pifpaf.stop_node(self.n3)

        self.servers = self.useFixture(utils.RpcServerGroupFixture(
            self.conf, self.url, endpoint=self, names=["server"]))

        # Don't randomize rabbit hosts
        self.useFixture(fixtures.MockPatch(
            'oslo_messaging._drivers.impl_rabbit.random',
            side_effect=lambda x: x))

        # NOTE(sileht): this connects server connections and reply
        # connection to nodename n1
        self.client = self.servers.client(0)
        self.client.ping()
        self._check_ports(self.pifpaf.port)

        # Switch to node n2
        self.pifpaf.start_node(self.n2)
        self.assertEqual("callback done", self.client.kill_and_process())
        self.assertEqual("callback done", self.client.just_process())
        self._check_ports(self.pifpaf.get_port(self.n2))

        # Switch to node n3
        self.pifpaf.start_node(self.n3)
        time.sleep(0.1)
        self.pifpaf.kill_node(self.n2, signal=signal.SIGKILL)
        time.sleep(0.1)
        self.assertEqual("callback done", self.client.just_process())
        self._check_ports(self.pifpaf.get_port(self.n3))

        # Switch back to node n1
        self.pifpaf.start_node(self.n1)
        time.sleep(0.1)
        self.pifpaf.kill_node(self.n3, signal=signal.SIGKILL)
        time.sleep(0.1)
        self.assertEqual("callback done", self.client.just_process())
        self._check_ports(self.pifpaf.get_port(self.n1))

    def kill_and_process(self, *args, **kargs):
        self.pifpaf.kill_node(self.n1, signal=signal.SIGKILL)
        time.sleep(0.1)
        return "callback done"

    def just_process(self, *args, **kargs):
        return "callback done"

    def _check_ports(self, port):
        rpc_server = self.servers.servers[0].server
        connection_contexts = [
            # rpc server
            rpc_server.listener._poll_style_listener.conn,
            # rpc client
            self.client.client.transport._driver._get_connection(),
            # rpc client replies waiter
            self.client.client.transport._driver._reply_q_conn,
        ]
        ports = [cctxt.connection.channel.connection.sock.getpeername()[1]
                 for cctxt in connection_contexts]
        self.assertEqual([port] * len(ports), ports,
                         "expected: %s, rpc-server: %s, rpc-client: %s, "
                         "rpc-replies: %s" % tuple([port] + ports))

oslo.messaging-14.9.0/oslo_messaging/tests/functional/utils.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import queue
import time
import uuid

import fixtures
from oslo_config import cfg

import oslo_messaging
from oslo_messaging._drivers.kafka_driver import kafka_options
from oslo_messaging.notify import notifier
from oslo_messaging.tests import utils as test_utils


class TestServerEndpoint(object):
    """The endpoint used by the MessagingServer during functional testing."""

    def __init__(self):
        self.ival = 0
        self.sval = ''

    def add(self, ctxt, increment):
        self.ival += increment
        return self.ival

    def subtract(self, ctxt, increment):
        if self.ival < increment:
            raise ValueError("ival can't go negative!")
        self.ival -= increment
        return self.ival

    def append(self, ctxt, text):
        self.sval += text
        return self.sval

    def long_running_task(self, ctxt, seconds):
        time.sleep(seconds)


class TransportFixture(fixtures.Fixture):
    """Fixture defined to setup the oslo_messaging transport."""

    def __init__(self, conf, url):
        self.conf = conf
        self.url = url

    def setUp(self):
        super(TransportFixture, self).setUp()
        self.transport = oslo_messaging.get_transport(self.conf,
                                                      url=self.url)

    def cleanUp(self):
        try:
            self.transport.cleanup()
        except fixtures.TimeoutException:
            pass
        super(TransportFixture, self).cleanUp()

    def wait(self):
        # allow time for the server to connect to the broker
        time.sleep(0.5)


class RPCTransportFixture(TransportFixture):
    """Fixture defined to setup RPC transport."""

    def setUp(self):
        super(RPCTransportFixture, self).setUp()
        self.transport = oslo_messaging.get_rpc_transport(self.conf,
                                                          url=self.url)


class NotificationTransportFixture(TransportFixture):
    """Fixture defined to setup notification transport."""

    def setUp(self):
        super(NotificationTransportFixture, self).setUp()
        self.transport = oslo_messaging.get_notification_transport(
            self.conf, url=self.url)


class RpcServerFixture(fixtures.Fixture):
    """Fixture to setup the TestServerEndpoint."""

    def __init__(self, conf, url, target, endpoint=None, ctrl_target=None,
                 executor='eventlet'):
        super(RpcServerFixture, self).__init__()
        self.conf = conf
        self.url = url
        self.target = target
        self.endpoint = endpoint or TestServerEndpoint()
        self.executor = executor
        self.syncq = queue.Queue()
        self.ctrl_target = ctrl_target or self.target

    def setUp(self):
        super(RpcServerFixture, self).setUp()
        endpoints = [self.endpoint, self]
        transport = self.useFixture(RPCTransportFixture(self.conf, self.url))
        self.server = oslo_messaging.get_rpc_server(
            transport=transport.transport,
            target=self.target,
            endpoints=endpoints,
            executor=self.executor)
        self._ctrl = oslo_messaging.get_rpc_client(transport.transport,
                                                   self.ctrl_target)
        self._start()
        transport.wait()

    def cleanUp(self):
        self._stop()
        super(RpcServerFixture, self).cleanUp()

    def _start(self):
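        # Run the RPC server in a helper thread so the fixture's cleanUp()
        # can stop and join it deterministically (see _stop() below).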
self.thread = test_utils.ServerThreadHelper(self.server) self.thread.start() def _stop(self): self.thread.stop() self.thread.join(timeout=30) if self.thread.is_alive(): raise Exception("Server did not shutdown correctly") def ping(self, ctxt): pass def sync(self, ctxt): self.syncq.put('x') class RpcServerGroupFixture(fixtures.Fixture): def __init__(self, conf, url, topic=None, names=None, exchange=None, use_fanout_ctrl=False, endpoint=None): self.conf = conf self.url = url # NOTE(sileht): topic and server_name must be unique # to be able to run all tests in parallel self.topic = topic or str(uuid.uuid4()) self.names = names or ["server_%i_%s" % (i, str(uuid.uuid4())[:8]) for i in range(3)] self.exchange = exchange self.targets = [self._target(server=n) for n in self.names] self.use_fanout_ctrl = use_fanout_ctrl self.endpoint = endpoint def setUp(self): super(RpcServerGroupFixture, self).setUp() self.servers = [self.useFixture(self._server(t)) for t in self.targets] def _target(self, server=None, fanout=False): t = oslo_messaging.Target(exchange=self.exchange, topic=self.topic) t.server = server t.fanout = fanout return t def _server(self, target): ctrl = None if self.use_fanout_ctrl: ctrl = self._target(fanout=True) server = RpcServerFixture(self.conf, self.url, target, endpoint=self.endpoint, ctrl_target=ctrl) return server def client(self, server=None, cast=False): if server is None: target = self._target() else: if server == 'all': target = self._target(fanout=True) elif 0 <= server < len(self.targets): target = self.targets[server] else: raise ValueError("Invalid value for server: %r" % server) transport = self.useFixture(RPCTransportFixture(self.conf, self.url)) client = ClientStub(transport.transport, target, cast=cast, timeout=5) transport.wait() return client def sync(self, server=None): if server is None: for i in range(len(self.servers)): self.client(i).ping() else: if server == 'all': for s in self.servers: s.syncq.get(timeout=5) elif 0 <= server < len(self.targets): self.servers[server].syncq.get(timeout=5) else: raise ValueError("Invalid value for server: %r" % server) class RpcCall(object): def __init__(self, client, method, context): self.client = client self.method = method self.context = context def __call__(self, **kwargs): self.context['time'] = time.ctime() self.context['cast'] = False result = self.client.call(self.context, self.method, **kwargs) return result class RpcCast(RpcCall): def __call__(self, **kwargs): self.context['time'] = time.ctime() self.context['cast'] = True self.client.cast(self.context, self.method, **kwargs) class ClientStub(object): def __init__(self, transport, target, cast=False, name=None, transport_options=None, **kwargs): self.name = name or "functional-tests" self.cast = cast self.client = oslo_messaging.get_rpc_client( transport=transport, target=target, transport_options=transport_options, **kwargs) def __getattr__(self, name): context = {"application": self.name} if self.cast: return RpcCast(self.client, name, context) else: return RpcCall(self.client, name, context) class InvalidDistribution(object): def __init__(self, original, received): self.original = original self.received = received self.missing = [] self.extra = [] self.wrong_order = [] def describe(self): text = "Sent %s, got %s; " % (self.original, self.received) e1 = ["%r was missing" % m for m in self.missing] e2 = ["%r was not expected" % m for m in self.extra] e3 = ["%r expected before %r" % (m[0], m[1]) for m in self.wrong_order] return text + ", ".join(e1 + e2 + e3) def 
__len__(self): return len(self.extra) + len(self.missing) + len(self.wrong_order) def get_details(self): return {} class IsValidDistributionOf(object): """Test whether a given list can be split into particular sub-lists. All items in the original list must be in exactly one sub-list, and must appear in that sub-list in the same order with respect to any other items as in the original list. """ def __init__(self, original): self.original = original def __str__(self): return 'IsValidDistribution(%s)' % self.original def match(self, actual): errors = InvalidDistribution(self.original, actual) received = [[idx for idx in act] for act in actual] def _remove(obj, lists): for li in lists: if obj in li: front = li[0] li.remove(obj) return front return None for item in self.original: o = _remove(item, received) if not o: errors.missing += item elif item != o: errors.wrong_order.append([item, o]) for li in received: errors.extra += li return errors or None class SkipIfNoTransportURL(test_utils.BaseTestCase): def setUp(self, conf=cfg.CONF): super(SkipIfNoTransportURL, self).setUp(conf=conf) self.rpc_url = os.environ.get('RPC_TRANSPORT_URL') self.notify_url = os.environ.get('NOTIFY_TRANSPORT_URL') if not (self.rpc_url or self.notify_url): self.skipTest("No transport url configured") transport_url = oslo_messaging.TransportURL.parse(conf, self.notify_url) kafka_options.register_opts(conf, transport_url) class NotificationFixture(fixtures.Fixture): def __init__(self, conf, url, topics, batch=None): super(NotificationFixture, self).__init__() self.conf = conf self.url = url self.topics = topics self.events = queue.Queue() self.name = str(id(self)) self.batch = batch def setUp(self): super(NotificationFixture, self).setUp() targets = [oslo_messaging.Target(topic=t) for t in self.topics] # add a special topic for internal notifications targets.append(oslo_messaging.Target(topic=self.name)) transport = self.useFixture(NotificationTransportFixture(self.conf, self.url)) self.server = self._get_server(transport, targets) self._ctrl = self.notifier('internal', topics=[self.name]) self._start() transport.wait() def cleanUp(self): self._stop() super(NotificationFixture, self).cleanUp() def _get_server(self, transport, targets): return oslo_messaging.get_notification_listener( transport.transport, targets, [self], 'eventlet') def _start(self): self.thread = test_utils.ServerThreadHelper(self.server) self.thread.start() def _stop(self): self.thread.stop() self.thread.join(timeout=30) if self.thread.is_alive(): raise Exception("Server did not shutdown properly") def notifier(self, publisher, topics=None): transport = self.useFixture(NotificationTransportFixture(self.conf, self.url)) n = notifier.Notifier(transport.transport, publisher, driver='messaging', topics=topics or self.topics) transport.wait() return n def debug(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['debug', event_type, payload, publisher]) def audit(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['audit', event_type, payload, publisher]) def info(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['info', event_type, payload, publisher]) def warn(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['warn', event_type, payload, publisher]) def error(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['error', event_type, payload, publisher]) def critical(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['critical', 
event_type, payload, publisher])

    def sample(self, ctxt, publisher, event_type, payload, metadata):
        pass  # Just used for internal shutdown control

    def get_events(self, timeout=0.5):
        results = []
        try:
            while True:
                results.append(self.events.get(timeout=timeout))
        except queue.Empty:
            pass
        return results


class BatchNotificationFixture(NotificationFixture):
    def __init__(self, conf, url, topics, batch_size=5, batch_timeout=2):
        super(BatchNotificationFixture, self).__init__(conf, url, topics)
        self.batch_size = batch_size
        self.batch_timeout = batch_timeout

    def _get_server(self, transport, targets):
        return oslo_messaging.get_batch_notification_listener(
            transport.transport, targets, [self], 'eventlet',
            batch_timeout=self.batch_timeout,
            batch_size=self.batch_size)

    def debug(self, messages):
        self.events.put(['debug', messages])

    def audit(self, messages):
        self.events.put(['audit', messages])

    def info(self, messages):
        self.events.put(['info', messages])

    def warn(self, messages):
        self.events.put(['warn', messages])

    def error(self, messages):
        self.events.put(['error', messages])

    def critical(self, messages):
        self.events.put(['critical', messages])

    def sample(self, messages):
        pass  # Just used for internal shutdown control

oslo.messaging-14.9.0/oslo_messaging/tests/notify/test_dispatcher.py

# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
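# A minimal, hedged sketch (not from the original tree) of what
# BatchNotificationFixture wires together above: a batch listener flushes a
# list of messages to its endpoints every ``batch_size`` messages or every
# ``batch_timeout`` seconds, whichever comes first.  The transport URL and
# topic below are assumptions for illustration only.
#
#     from oslo_config import cfg
#     import oslo_messaging
#
#     class BatchEndpoint(object):
#         def info(self, messages):
#             # receives a list of message dicts, not a single notification
#             print(len(messages), "messages in this batch")
#
#     conf = cfg.ConfigOpts()
#     transport = oslo_messaging.get_notification_transport(
#         conf, url='rabbit://localhost:5672/')
#     targets = [oslo_messaging.Target(topic='notifications')]
#     listener = oslo_messaging.get_batch_notification_listener(
#         transport, targets, [BatchEndpoint()], executor='threading',
#         batch_size=100, batch_timeout=5)
#     listener.start()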
from oslo_utils import timeutils import testscenarios import oslo_messaging from oslo_messaging.notify import dispatcher as notify_dispatcher from oslo_messaging.tests import utils as test_utils from unittest import mock load_tests = testscenarios.load_tests_apply_scenarios notification_msg = dict( publisher_id="publisher_id", event_type="compute.start", payload={"info": "fuu"}, message_id="uuid", timestamp=str(timeutils.utcnow()) ) class TestDispatcher(test_utils.BaseTestCase): scenarios = [ ('no_endpoints', dict(endpoints=[], endpoints_expect_calls=[], priority='info', ex=None, return_value=oslo_messaging.NotificationResult.HANDLED)), ('one_endpoints', dict(endpoints=[['warn']], endpoints_expect_calls=['warn'], priority='warn', ex=None, return_value=oslo_messaging.NotificationResult.HANDLED)), ('two_endpoints_only_one_match', dict(endpoints=[['warn'], ['info']], endpoints_expect_calls=[None, 'info'], priority='info', ex=None, return_value=oslo_messaging.NotificationResult.HANDLED)), ('two_endpoints_both_match', dict(endpoints=[['debug', 'info'], ['info', 'debug']], endpoints_expect_calls=['debug', 'debug'], priority='debug', ex=None, return_value=oslo_messaging.NotificationResult.HANDLED)), ('no_return_value', dict(endpoints=[['warn']], endpoints_expect_calls=['warn'], priority='warn', ex=None, return_value=None)), ('requeue', dict(endpoints=[['debug', 'warn']], endpoints_expect_calls=['debug'], priority='debug', msg=notification_msg, ex=None, return_value=oslo_messaging.NotificationResult.REQUEUE)), ('exception', dict(endpoints=[['debug', 'warn']], endpoints_expect_calls=['debug'], priority='debug', msg=notification_msg, ex=Exception, return_value=oslo_messaging.NotificationResult.HANDLED)), ] def test_dispatcher(self): endpoints = [] for endpoint_methods in self.endpoints: e = mock.Mock(spec=endpoint_methods) endpoints.append(e) for m in endpoint_methods: method = getattr(e, m) if self.ex: method.side_effect = self.ex() else: method.return_value = self.return_value msg = notification_msg.copy() msg['priority'] = self.priority dispatcher = notify_dispatcher.NotificationDispatcher(endpoints, None) incoming = mock.Mock(ctxt={}, message=msg) res = dispatcher.dispatch(incoming) expected_res = ( notify_dispatcher.NotificationResult.REQUEUE if (self.return_value == notify_dispatcher.NotificationResult.REQUEUE or self.ex is not None) else notify_dispatcher.NotificationResult.HANDLED ) self.assertEqual(expected_res, res) # check endpoint callbacks are called or not for i, endpoint_methods in enumerate(self.endpoints): for m in endpoint_methods: if m == self.endpoints_expect_calls[i]: method = getattr(endpoints[i], m) method.assert_called_once_with( {}, msg['publisher_id'], msg['event_type'], msg['payload'], { 'timestamp': mock.ANY, 'message_id': mock.ANY }) else: self.assertEqual(0, endpoints[i].call_count) @mock.patch('oslo_messaging.notify.dispatcher.LOG') def test_dispatcher_unknown_prio(self, mylog): msg = notification_msg.copy() msg['priority'] = 'what???' 
dispatcher = notify_dispatcher.NotificationDispatcher( [mock.Mock()], None) res = dispatcher.dispatch(mock.Mock(ctxt={}, message=msg)) self.assertIsNone(res) mylog.warning.assert_called_once_with('Unknown priority "%s"', 'what???') class TestDispatcherFilter(test_utils.BaseTestCase): scenarios = [ ('publisher_id_match', dict(filter_rule=dict(publisher_id='^compute.*'), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=True)), ('publisher_id_nomatch', dict(filter_rule=dict(publisher_id='^compute.*'), publisher_id='network01.manager', event_type='instance.create.start', context={}, match=False)), ('event_type_match', dict(filter_rule=dict(event_type=r'^instance\.create'), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=True)), ('event_type_nomatch', dict(filter_rule=dict(event_type=r'^instance\.delete'), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), # this is only for simulation ('event_type_not_string', dict(filter_rule=dict(event_type=r'^instance\.delete'), publisher_id='compute01.manager', event_type=['instance.swim', 'instance.fly'], context={}, match=False)), ('context_match', dict(filter_rule=dict(context={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={'user': 'admin'}, match=True)), ('context_key_missing', dict(filter_rule=dict(context={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={'project': 'admin'}, metadata={}, match=False)), ('metadata_match', dict(filter_rule=dict(metadata={'message_id': '^99'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=True)), ('metadata_key_missing', dict(filter_rule=dict(metadata={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), ('payload_match', dict(filter_rule=dict(payload={'state': '^active$'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=True)), ('payload_no_match', dict(filter_rule=dict(payload={'state': '^deleted$'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), ('payload_key_missing', dict(filter_rule=dict(payload={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), ('payload_value_none', dict(filter_rule=dict(payload={'virtual_size': '2048'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), ('mix_match', dict(filter_rule=dict(event_type=r'^instance\.create', publisher_id='^compute', context={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={'user': 'admin'}, match=True)), ] def test_filters(self): notification_filter = oslo_messaging.NotificationFilter( **self.filter_rule) endpoint = mock.Mock(spec=['info'], filter_rule=notification_filter) dispatcher = notify_dispatcher.NotificationDispatcher( [endpoint], serializer=None) message = {'payload': {'state': 'active', 'virtual_size': None}, 'priority': 'info', 'publisher_id': self.publisher_id, 'event_type': self.event_type, 'timestamp': '2014-03-03 18:21:04.369234', 'message_id': '99863dda-97f0-443a-a0c1-6ed317b7fd45'} incoming = mock.Mock(ctxt=self.context, message=message) dispatcher.dispatch(incoming) if self.match: self.assertEqual(1, endpoint.info.call_count) else: self.assertEqual(0, endpoint.info.call_count) 
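# A hedged usage sketch (not part of the upstream file): endpoints can limit
# what they receive by attaching a ``filter_rule``, exactly as the scenarios
# above exercise.  The regexes below are illustrative assumptions.
#
#     import oslo_messaging
#
#     class ComputeCreateEndpoint(object):
#         filter_rule = oslo_messaging.NotificationFilter(
#             publisher_id='^compute.*',
#             event_type=r'^instance\.create',
#             context={'user': '^adm'})
#
#         def info(self, ctxt, publisher_id, event_type, payload, metadata):
#             # only notifications matching every rule above get this far
#             return oslo_messaging.NotificationResult.HANDLED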
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/notify/test_listener.py0000664000175000017500000005151700000000000025512 0ustar00zuulzuul00000000000000 # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import threading from oslo_config import cfg import testscenarios import oslo_messaging from oslo_messaging.notify import dispatcher from oslo_messaging.notify import notifier as msg_notifier from oslo_messaging.tests import utils as test_utils from unittest import mock load_tests = testscenarios.load_tests_apply_scenarios class RestartableServerThread(object): def __init__(self, server): self.server = server self.thread = None def start(self): if self.thread is None: self.thread = test_utils.ServerThreadHelper(self.server) self.thread.start() def stop(self): if self.thread is not None: self.thread.stop() self.thread.join(timeout=15) ret = self.thread.is_alive() self.thread = None return ret return True class ListenerSetupMixin(object): class ThreadTracker(object): def __init__(self): self._received_msgs = 0 self.threads = [] self.lock = threading.Condition() def info(self, *args, **kwargs): # NOTE(sileht): this run into an other thread with self.lock: self._received_msgs += 1 self.lock.notify_all() def wait_for_messages(self, expect_messages): with self.lock: while self._received_msgs < expect_messages: self.lock.wait() def stop(self): for thread in self.threads: thread.stop() self.threads = [] def start(self, thread): self.threads.append(thread) thread.start() def setUp(self): self.trackers = {} self.addCleanup(self._stop_trackers) def _stop_trackers(self): for pool in self.trackers: self.trackers[pool].stop() self.trackers = {} def _setup_listener(self, transport, endpoints, targets=None, pool=None, batch=False): if pool is None: tracker_name = '__default__' else: tracker_name = pool if targets is None: targets = [oslo_messaging.Target(topic='testtopic')] tracker = self.trackers.setdefault( tracker_name, self.ThreadTracker()) if batch: listener = oslo_messaging.get_batch_notification_listener( transport, targets=targets, endpoints=[tracker] + endpoints, allow_requeue=True, pool=pool, executor='eventlet', batch_size=batch[0], batch_timeout=batch[1]) else: listener = oslo_messaging.get_notification_listener( transport, targets=targets, endpoints=[tracker] + endpoints, allow_requeue=True, pool=pool, executor='eventlet') thread = RestartableServerThread(listener) tracker.start(thread) return thread def wait_for_messages(self, expect_messages, tracker_name='__default__'): self.trackers[tracker_name].wait_for_messages(expect_messages) def _setup_notifier(self, transport, topics=['testtopic'], publisher_id='testpublisher'): return oslo_messaging.Notifier(transport, topics=topics, driver='messaging', publisher_id=publisher_id) class TestNotifyListener(test_utils.BaseTestCase, ListenerSetupMixin): def __init__(self, *args): super(TestNotifyListener, self).__init__(*args) 
ListenerSetupMixin.__init__(self) def setUp(self): super(TestNotifyListener, self).setUp(conf=cfg.ConfigOpts()) ListenerSetupMixin.setUp(self) self.useFixture(fixtures.MonkeyPatch( 'oslo_messaging._drivers.impl_fake.FakeExchangeManager._exchanges', new_value={})) def test_constructor(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') target = oslo_messaging.Target(topic='foo') endpoints = [object()] listener = oslo_messaging.get_notification_listener( transport, [target], endpoints, executor='threading') self.assertIs(listener.conf, self.conf) self.assertIs(listener.transport, transport) self.assertIsInstance(listener.dispatcher, dispatcher.NotificationDispatcher) self.assertIs(listener.dispatcher.endpoints, endpoints) self.assertEqual('threading', listener.executor_type) def test_no_target_topic(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') listener = oslo_messaging.get_notification_listener( transport, [oslo_messaging.Target()], [mock.Mock()]) try: listener.start() except Exception as ex: self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) else: self.assertTrue(False) def test_unknown_executor(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') try: oslo_messaging.get_notification_listener(transport, [], [], executor='foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure) self.assertEqual('foo', ex.executor) else: self.assertTrue(False) def test_batch_timeout(self): transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None listener_thread = self._setup_listener(transport, [endpoint], batch=(5, 1)) notifier = self._setup_notifier(transport) cxt = test_utils.TestContext() for _ in range(12): notifier.info(cxt, 'an_event.start', 'test message') self.wait_for_messages(3) self.assertFalse(listener_thread.stop()) messages = [dict(ctxt=cxt, publisher_id='testpublisher', event_type='an_event.start', payload='test message', metadata={'message_id': mock.ANY, 'timestamp': mock.ANY})] endpoint.info.assert_has_calls([mock.call(messages * 5), mock.call(messages * 5), mock.call(messages * 2)]) def test_batch_size(self): transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None listener_thread = self._setup_listener(transport, [endpoint], batch=(5, None)) notifier = self._setup_notifier(transport) ctxt = test_utils.TestContext() for _ in range(10): notifier.info(ctxt, 'an_event.start', 'test message') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) messages = [dict(ctxt=ctxt, publisher_id='testpublisher', event_type='an_event.start', payload='test message', metadata={'message_id': mock.ANY, 'timestamp': mock.ANY})] endpoint.info.assert_has_calls([mock.call(messages * 5), mock.call(messages * 5)]) def test_batch_size_exception_path(self): transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.side_effect = [None, Exception('boom!')] listener_thread = self._setup_listener(transport, [endpoint], batch=(5, None)) notifier = self._setup_notifier(transport) ctxt = test_utils.TestContext() for _ in range(10): notifier.info(ctxt, 'an_event.start', 'test message') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) messages = [dict(ctxt=ctxt, publisher_id='testpublisher', event_type='an_event.start', payload='test message', 
metadata={'message_id': mock.ANY, 'timestamp': mock.ANY})] endpoint.info.assert_has_calls([mock.call(messages * 5)]) def test_one_topic(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None listener_thread = self._setup_listener(transport, [endpoint]) notifier = self._setup_notifier(transport) cxt = test_utils.TestContext() notifier.info(cxt, 'an_event.start', 'test message') self.wait_for_messages(1) self.assertFalse(listener_thread.stop()) endpoint.info.assert_called_once_with( cxt, 'testpublisher', 'an_event.start', 'test message', {'message_id': mock.ANY, 'timestamp': mock.ANY}) def test_two_topics(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None targets = [oslo_messaging.Target(topic="topic1"), oslo_messaging.Target(topic="topic2")] listener_thread = self._setup_listener(transport, [endpoint], targets=targets) notifier = self._setup_notifier(transport, topics=['topic1']) cxt1 = test_utils.TestContext(user_name='bob') notifier.info(cxt1, 'an_event.start1', 'test') notifier = self._setup_notifier(transport, topics=['topic2']) cxt2 = test_utils.TestContext(user_name='bob2') notifier.info(cxt2, 'an_event.start2', 'test') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) endpoint.info.assert_has_calls([ mock.call(cxt1, 'testpublisher', 'an_event.start1', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call(cxt2, 'testpublisher', 'an_event.start2', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY})], any_order=True) def test_two_exchanges(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None targets = [oslo_messaging.Target(topic="topic", exchange="exchange1"), oslo_messaging.Target(topic="topic", exchange="exchange2")] listener_thread = self._setup_listener(transport, [endpoint], targets=targets) notifier = self._setup_notifier(transport, topics=["topic"]) def mock_notifier_exchange(name): def side_effect(target, ctxt, message, version, retry): target.exchange = name return transport._driver.send_notification(target, ctxt, message, version, retry=retry) transport._send_notification = mock.MagicMock( side_effect=side_effect) notifier.info(test_utils.TestContext(user_name='bob0'), 'an_event.start', 'test message default exchange') mock_notifier_exchange('exchange1') ctxt1 = test_utils.TestContext(user_name='bob1') notifier.info(ctxt1, 'an_event.start', 'test message exchange1') mock_notifier_exchange('exchange2') ctxt2 = test_utils.TestContext(user_name='bob2') notifier.info(ctxt2, 'an_event.start', 'test message exchange2') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) endpoint.info.assert_has_calls([ mock.call(ctxt1, 'testpublisher', 'an_event.start', 'test message exchange1', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call(ctxt2, 'testpublisher', 'an_event.start', 'test message exchange2', {'timestamp': mock.ANY, 'message_id': mock.ANY})], any_order=True) def test_two_endpoints(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint1 = mock.Mock() endpoint1.info.return_value = None endpoint2 = mock.Mock() endpoint2.info.return_value = oslo_messaging.NotificationResult.HANDLED listener_thread = self._setup_listener(transport, [endpoint1, endpoint2]) notifier = self._setup_notifier(transport) cxt = test_utils.TestContext() 
notifier.info(cxt, 'an_event.start', 'test') self.wait_for_messages(1) self.assertFalse(listener_thread.stop()) endpoint1.info.assert_called_once_with( cxt, 'testpublisher', 'an_event.start', 'test', { 'timestamp': mock.ANY, 'message_id': mock.ANY}) endpoint2.info.assert_called_once_with( cxt, 'testpublisher', 'an_event.start', 'test', { 'timestamp': mock.ANY, 'message_id': mock.ANY}) def test_requeue(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint = mock.Mock() endpoint.info = mock.Mock() def side_effect_requeue(*args, **kwargs): if endpoint.info.call_count == 1: return oslo_messaging.NotificationResult.REQUEUE return oslo_messaging.NotificationResult.HANDLED endpoint.info.side_effect = side_effect_requeue listener_thread = self._setup_listener(transport, [endpoint]) notifier = self._setup_notifier(transport) cxt = test_utils.TestContext() notifier.info(cxt, 'an_event.start', 'test') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) endpoint.info.assert_has_calls([ mock.call(cxt, 'testpublisher', 'an_event.start', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call(cxt, 'testpublisher', 'an_event.start', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY})]) def test_two_pools(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint1 = mock.Mock() endpoint1.info.return_value = None endpoint2 = mock.Mock() endpoint2.info.return_value = None targets = [oslo_messaging.Target(topic="topic")] listener1_thread = self._setup_listener(transport, [endpoint1], targets=targets, pool="pool1") listener2_thread = self._setup_listener(transport, [endpoint2], targets=targets, pool="pool2") notifier = self._setup_notifier(transport, topics=["topic"]) ctxts = [ test_utils.TestContext(user_name='bob0'), test_utils.TestContext(user_name='bob1') ] notifier.info(ctxts[0], 'an_event.start', 'test message0') notifier.info(ctxts[1], 'an_event.start', 'test message1') self.wait_for_messages(2, "pool1") self.wait_for_messages(2, "pool2") self.assertFalse(listener2_thread.stop()) self.assertFalse(listener1_thread.stop()) def mocked_endpoint_call(i, ctxts): return mock.call(ctxts[i], 'testpublisher', 'an_event.start', 'test message%d' % i, {'timestamp': mock.ANY, 'message_id': mock.ANY}) endpoint1.info.assert_has_calls([mocked_endpoint_call(0, ctxts), mocked_endpoint_call(1, ctxts)]) endpoint2.info.assert_has_calls([mocked_endpoint_call(0, ctxts), mocked_endpoint_call(1, ctxts)]) def test_two_pools_three_listener(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint1 = mock.Mock() endpoint1.info.return_value = None endpoint2 = mock.Mock() endpoint2.info.return_value = None endpoint3 = mock.Mock() endpoint3.info.return_value = None targets = [oslo_messaging.Target(topic="topic")] listener1_thread = self._setup_listener(transport, [endpoint1], targets=targets, pool="pool1") listener2_thread = self._setup_listener(transport, [endpoint2], targets=targets, pool="pool2") listener3_thread = self._setup_listener(transport, [endpoint3], targets=targets, pool="pool2") def mocked_endpoint_call(i, ctxt): return mock.call(ctxt, 'testpublisher', 'an_event.start', 'test message%d' % i, {'timestamp': mock.ANY, 'message_id': mock.ANY}) notifier = self._setup_notifier(transport, topics=["topic"]) mocked_endpoint1_calls = [] for i in range(0, 25): ctxt = test_utils.TestContext(user_name='bob%d' % i) notifier.info(ctxt, 'an_event.start', 'test message%d' % i) 
mocked_endpoint1_calls.append(mocked_endpoint_call(i, ctxt))
        self.wait_for_messages(25, 'pool2')
        listener2_thread.stop()

        for i in range(0, 25):
            cxt = test_utils.TestContext(user_name='bob%d' % i)
            notifier.info(cxt, 'an_event.start', 'test message%d' % i)
            mocked_endpoint1_calls.append(mocked_endpoint_call(i, cxt))
        self.wait_for_messages(50, 'pool2')
        listener2_thread.start()
        listener3_thread.stop()

        for i in range(0, 25):
            ctxt = test_utils.TestContext(user_name='bob%d' % i)
            notifier.info(ctxt, 'an_event.start', 'test message%d' % i)
            mocked_endpoint1_calls.append(mocked_endpoint_call(i, ctxt))
        self.wait_for_messages(75, 'pool2')
        listener3_thread.start()

        for i in range(0, 25):
            ctxt = test_utils.TestContext(user_name='bob%d' % i)
            notifier.info(ctxt, 'an_event.start', 'test message%d' % i)
            mocked_endpoint1_calls.append(mocked_endpoint_call(i, ctxt))
        self.wait_for_messages(100, 'pool1')
        self.wait_for_messages(100, 'pool2')

        self.assertFalse(listener3_thread.stop())
        self.assertFalse(listener2_thread.stop())
        self.assertFalse(listener1_thread.stop())

        self.assertEqual(100, endpoint1.info.call_count)
        endpoint1.info.assert_has_calls(mocked_endpoint1_calls)

        self.assertLessEqual(25, endpoint2.info.call_count)
        self.assertLessEqual(25, endpoint3.info.call_count)
        self.assertEqual(100, endpoint2.info.call_count +
                         endpoint3.info.call_count)
        for call in mocked_endpoint1_calls:
            self.assertIn(call, endpoint2.info.mock_calls +
                          endpoint3.info.mock_calls)


class TestListenerTransportWarning(test_utils.BaseTestCase):

    @mock.patch('oslo_messaging.notify.listener.LOG')
    def test_warning_when_rpc_transport(self, log):
        transport = oslo_messaging.get_rpc_transport(self.conf)
        target = oslo_messaging.Target(topic='foo')
        endpoints = [object()]
        oslo_messaging.get_notification_listener(
            transport, [target], endpoints)
        log.warning.assert_called_once_with(
            "Using RPC transport for notifications. Please use "
            "get_notification_transport to obtain a "
            "notification transport instance.")

oslo.messaging-14.9.0/oslo_messaging/tests/notify/test_log_handler.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

import fixtures

import oslo_messaging
from oslo_messaging.notify import log_handler
from oslo_messaging.tests import utils as test_utils
from unittest import mock


class PublishErrorsHandlerTestCase(test_utils.BaseTestCase):
    """Tests for log.PublishErrorsHandler"""

    def setUp(self):
        super(PublishErrorsHandlerTestCase, self).setUp()
        self.publisherrorshandler = log_handler.PublishErrorsHandler(
            logging.ERROR)

    def test_emit_cfg_log_notifier_in_notifier_drivers(self):
        drivers = ['messaging', 'log']
        self.config(driver=drivers,
                    group='oslo_messaging_notifications')
        self.stub_flg = True
        transport = oslo_messaging.get_notification_transport(self.conf)
        notifier = oslo_messaging.Notifier(transport)

        def fake_notifier(*args, **kwargs):
            self.stub_flg = False

        self.useFixture(fixtures.MockPatchObject(
            notifier, 'error', fake_notifier))

        logrecord = logging.LogRecord(name='name', level='WARN',
                                      pathname='/tmp', lineno=1,
                                      msg='Message', args=None,
                                      exc_info=None)
        self.publisherrorshandler.emit(logrecord)
        self.assertTrue(self.stub_flg)

    @mock.patch('oslo_messaging.notify.notifier.Notifier._notify')
    def test_emit_notification(self, mock_notify):
        logrecord = logging.LogRecord(name='name', level='ERROR',
                                      pathname='/tmp', lineno=1,
                                      msg='Message', args=None,
                                      exc_info=None)
        self.publisherrorshandler.emit(logrecord)
        self.assertEqual('error.publisher',
                         self.publisherrorshandler._notifier.publisher_id)
        mock_notify.assert_called_with({},
                                       'error_notification',
                                       {'error': 'Message'},
                                       'ERROR')

oslo.messaging-14.9.0/oslo_messaging/tests/notify/test_logger.py

# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
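# A hedged sketch (not in the original module) of the handler exercised
# below: LoggingNotificationHandler turns stdlib log records into
# oslo.messaging notifications sent over the given transport URL.  The
# 'test://' URL is the in-memory driver these tests rely on.
#
#     import logging
#     import oslo_messaging
#
#     handler = oslo_messaging.LoggingNotificationHandler('test://')
#     log = logging.getLogger('demo')
#     log.addHandler(handler)
#     log.error('disk full')  # emitted as a 'logrecord' notification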
import datetime import logging import logging.config import os import sys from oslo_utils import timeutils import testscenarios import oslo_messaging from oslo_messaging.tests import utils as test_utils from unittest import mock load_tests = testscenarios.load_tests_apply_scenarios # Stolen from oslo.log logging.AUDIT = logging.INFO + 1 logging.addLevelName(logging.AUDIT, 'AUDIT') class TestLogNotifier(test_utils.BaseTestCase): scenarios = [ ('debug', dict(priority='debug')), ('info', dict(priority='info')), ('warning', dict(priority='warning', queue='WARN')), ('warn', dict(priority='warn')), ('error', dict(priority='error')), ('critical', dict(priority='critical')), ('audit', dict(priority='audit')), ] def setUp(self): super(TestLogNotifier, self).setUp() self.addCleanup(oslo_messaging.notify._impl_test.reset) self.config(driver=['test'], group='oslo_messaging_notifications') # NOTE(jamespage) disable thread information logging for testing # as this can cause test failures when monkey_patch via eventlet logging.logThreads = 0 @mock.patch('oslo_utils.timeutils.utcnow') def test_logger(self, mock_utcnow): fake_transport = oslo_messaging.get_notification_transport(self.conf) with mock.patch('oslo_messaging.transport._get_transport', return_value=fake_transport): self.logger = oslo_messaging.LoggingNotificationHandler('test://') mock_utcnow.return_value = datetime.datetime.utcnow() levelno = getattr(logging, self.priority.upper(), 42) record = logging.LogRecord('foo', levelno, '/foo/bar', 42, 'Something happened', None, None) self.logger.emit(record) context = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][0] self.assertEqual({}, context) n = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][1] self.assertEqual(getattr(self, 'queue', self.priority.upper()), n['priority']) self.assertEqual('logrecord', n['event_type']) self.assertEqual(str(timeutils.utcnow()), n['timestamp']) self.assertIsNone(n['publisher_id']) self.assertEqual( {'process': os.getpid(), 'funcName': None, 'name': 'foo', 'thread': None, 'levelno': levelno, 'processName': 'MainProcess', 'pathname': '/foo/bar', 'lineno': 42, 'msg': 'Something happened', 'exc_info': None, 'levelname': logging.getLevelName(levelno), 'extra': None}, n['payload']) @mock.patch('oslo_utils.timeutils.utcnow') def test_logging_conf(self, mock_utcnow): fake_transport = oslo_messaging.get_notification_transport(self.conf) with mock.patch('oslo_messaging.transport._get_transport', return_value=fake_transport): logging.config.dictConfig({ 'version': 1, 'handlers': { 'notification': { 'class': 'oslo_messaging.LoggingNotificationHandler', 'level': self.priority.upper(), 'url': 'test://', }, }, 'loggers': { 'default': { 'handlers': ['notification'], 'level': self.priority.upper(), }, }, }) mock_utcnow.return_value = datetime.datetime.utcnow() levelno = getattr(logging, self.priority.upper()) logger = logging.getLogger('default') lineno = sys._getframe().f_lineno + 1 logger.log(levelno, 'foobar') n = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][1] self.assertEqual(getattr(self, 'queue', self.priority.upper()), n['priority']) self.assertEqual('logrecord', n['event_type']) self.assertEqual(str(timeutils.utcnow()), n['timestamp']) self.assertIsNone(n['publisher_id']) pathname = __file__ if pathname.endswith(('.pyc', '.pyo')): pathname = pathname[:-1] self.assertDictEqual( n['payload'], {'process': os.getpid(), 'funcName': 'test_logging_conf', 'name': 'default', 'thread': None, 'levelno': levelno, 'processName': 'MainProcess', 'pathname': pathname, 'lineno': 
lineno, 'msg': 'foobar',
             'exc_info': None,
             'levelname': logging.getLevelName(levelno),
             'extra': None})

oslo.messaging-14.9.0/oslo_messaging/tests/notify/test_middleware.py

# Copyright 2013-2014 eNovance
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import webob

from oslo_messaging.notify import middleware
from oslo_messaging.tests import utils
from unittest import mock


class FakeApp(object):
    def __call__(self, env, start_response):
        body = 'Some response'
        start_response('200 OK', [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(sum(map(len, body))))
        ])
        return [body]


class FakeFailingApp(object):
    def __call__(self, env, start_response):
        raise Exception("It happens!")


class NotifierMiddlewareTest(utils.BaseTestCase):

    def test_notification(self):
        m = middleware.RequestNotifier(FakeApp())
        req = webob.Request.blank('/foo/bar',
                                  environ={'REQUEST_METHOD': 'GET',
                                           'HTTP_X_AUTH_TOKEN': uuid.uuid4()})
        with mock.patch(
                'oslo_messaging.notify.notifier.Notifier._notify') as notify:
            m(req)
            # Check first notification with only 'request'
            call_args = notify.call_args_list[0][0]
            self.assertEqual('http.request', call_args[1])
            self.assertEqual('INFO', call_args[3])
            self.assertEqual(set(['request']),
                             set(call_args[2].keys()))
            request = call_args[2]['request']
            self.assertEqual('/foo/bar', request['PATH_INFO'])
            self.assertEqual('GET', request['REQUEST_METHOD'])
            self.assertIn('HTTP_X_SERVICE_NAME', request)
            self.assertNotIn('HTTP_X_AUTH_TOKEN', request)
            self.assertFalse(any(map(lambda s: s.startswith('wsgi.'),
                                     request.keys())),
                             "WSGI fields are filtered out")

            # Check second notification with request + response
            call_args = notify.call_args_list[1][0]
            self.assertEqual('http.response', call_args[1])
            self.assertEqual('INFO', call_args[3])
            self.assertEqual(set(['request', 'response']),
                             set(call_args[2].keys()))
            request = call_args[2]['request']
            self.assertEqual('/foo/bar', request['PATH_INFO'])
            self.assertEqual('GET', request['REQUEST_METHOD'])
            self.assertIn('HTTP_X_SERVICE_NAME', request)
            self.assertNotIn('HTTP_X_AUTH_TOKEN', request)
            self.assertFalse(any(map(lambda s: s.startswith('wsgi.'),
                                     request.keys())),
                             "WSGI fields are filtered out")
            response = call_args[2]['response']
            self.assertEqual('200 OK', response['status'])
            self.assertEqual('13', response['headers']['content-length'])

    def test_notification_response_failure(self):
        m = middleware.RequestNotifier(FakeFailingApp())
        req = webob.Request.blank('/foo/bar',
                                  environ={'REQUEST_METHOD': 'GET',
                                           'HTTP_X_AUTH_TOKEN': uuid.uuid4()})
        with mock.patch(
                'oslo_messaging.notify.notifier.Notifier._notify') as notify:
            try:
                m(req)
                self.fail("Application exception has not been re-raised")
            except Exception:
                pass
            # Check first notification with only 'request'
            call_args = notify.call_args_list[0][0]
            self.assertEqual('http.request', call_args[1])
            self.assertEqual('INFO', call_args[3])
            self.assertEqual(set(['request']),
                             set(call_args[2].keys()))
            request = call_args[2]['request']
            self.assertEqual('/foo/bar', request['PATH_INFO'])
            self.assertEqual('GET', request['REQUEST_METHOD'])
            self.assertIn('HTTP_X_SERVICE_NAME', request)
            self.assertNotIn('HTTP_X_AUTH_TOKEN', request)
            self.assertFalse(any(map(lambda s: s.startswith('wsgi.'),
                                     request.keys())),
                             "WSGI fields are filtered out")

            # Check second notification with 'request' and 'exception'
            call_args = notify.call_args_list[1][0]
            self.assertEqual('http.response', call_args[1])
            self.assertEqual('INFO', call_args[3])
            self.assertEqual(set(['request', 'exception']),
                             set(call_args[2].keys()))
            request = call_args[2]['request']
            self.assertEqual('/foo/bar', request['PATH_INFO'])
            self.assertEqual('GET', request['REQUEST_METHOD'])
            self.assertIn('HTTP_X_SERVICE_NAME', request)
            self.assertNotIn('HTTP_X_AUTH_TOKEN', request)
            self.assertFalse(any(map(lambda s: s.startswith('wsgi.'),
                                     request.keys())),
                             "WSGI fields are filtered out")
            exception = call_args[2]['exception']
            self.assertIn('middleware.py', exception['traceback'][0])
            self.assertIn('It happens!', exception['traceback'][-1])
            self.assertTrue(exception['value'] in
                            ("Exception('It happens!',)",
                             "Exception('It happens!')"))

    def test_process_request_fail(self):
        def notify_error(context, publisher_id, event_type,
                         priority, payload):
            raise Exception('error')
        with mock.patch('oslo_messaging.notify.notifier.Notifier._notify',
                        notify_error):
            m = middleware.RequestNotifier(FakeApp())
            req = webob.Request.blank('/foo/bar',
                                      environ={'REQUEST_METHOD': 'GET'})
            m.process_request(req)

    def test_process_response_fail(self):
        def notify_error(context, publisher_id, event_type,
                         priority, payload):
            raise Exception('error')
        with mock.patch('oslo_messaging.notify.notifier.Notifier._notify',
                        notify_error):
            m = middleware.RequestNotifier(FakeApp())
            req = webob.Request.blank('/foo/bar',
                                      environ={'REQUEST_METHOD': 'GET'})
            m.process_response(req, webob.response.Response())

    def test_ignore_req_opt(self):
        m = middleware.RequestNotifier(FakeApp(),
                                       ignore_req_list='get, PUT')
        req = webob.Request.blank('/skip/foo',
                                  environ={'REQUEST_METHOD': 'GET'})
        req1 = webob.Request.blank('/skip/foo',
                                   environ={'REQUEST_METHOD': 'PUT'})
        req2 = webob.Request.blank('/accept/foo',
                                   environ={'REQUEST_METHOD': 'POST'})
        with mock.patch(
                'oslo_messaging.notify.notifier.Notifier._notify') as notify:
            # Check ignored GET and PUT requests do not send a notification
            m(req)
            m(req1)
            self.assertEqual(0, len(notify.call_args_list))

            # Check non-ignored request does send notification
            m(req2)
            self.assertEqual(2, len(notify.call_args_list))
            call_args = notify.call_args_list[0][0]
            self.assertEqual('http.request', call_args[1])
            self.assertEqual('INFO', call_args[3])
            self.assertEqual(set(['request']),
                             set(call_args[2].keys()))
            request = call_args[2]['request']
            self.assertEqual('/accept/foo', request['PATH_INFO'])
            self.assertEqual('POST', request['REQUEST_METHOD'])

            call_args = notify.call_args_list[1][0]
            self.assertEqual('http.response', call_args[1])
            self.assertEqual('INFO', call_args[3])
            self.assertEqual(set(['request', 'response']),
                             set(call_args[2].keys()))

oslo.messaging-14.9.0/oslo_messaging/tests/notify/test_notifier.py

# Copyright 2013 Red Hat, Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import logging import sys import uuid import fixtures from kombu import connection from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_utils import timeutils from stevedore import dispatch from stevedore import extension import testscenarios import yaml import oslo_messaging from oslo_messaging.notify import _impl_log from oslo_messaging.notify import _impl_test from oslo_messaging.notify import messaging from oslo_messaging.notify import notifier as msg_notifier from oslo_messaging import serializer as msg_serializer from oslo_messaging.tests import utils as test_utils from unittest import mock load_tests = testscenarios.load_tests_apply_scenarios class JsonMessageMatcher(object): def __init__(self, message): self.message = message def __eq__(self, other): return self.message == jsonutils.loads(other) class _ReRaiseLoggedExceptionsFixture(fixtures.Fixture): """Record logged exceptions and re-raise in cleanup. The notifier just logs notification send errors so, for the sake of debugging test failures, we record any exceptions logged and re-raise them during cleanup. """ class FakeLogger(object): def __init__(self): self.exceptions = [] def exception(self, msg, *args, **kwargs): self.exceptions.append(sys.exc_info()[1]) def warning(self, msg, *args, **kwargs): return def setUp(self): super(_ReRaiseLoggedExceptionsFixture, self).setUp() self.logger = self.FakeLogger() def reraise_exceptions(): for ex in self.logger.exceptions: raise ex self.addCleanup(reraise_exceptions) class TestMessagingNotifier(test_utils.BaseTestCase): _v1 = [ ('v1', dict(v1=True)), ('not_v1', dict(v1=False)), ] _v2 = [ ('v2', dict(v2=True)), ('not_v2', dict(v2=False)), ] _publisher_id = [ ('ctor_pub_id', dict(ctor_pub_id='test', expected_pub_id='test')), ('prep_pub_id', dict(prep_pub_id='test.localhost', expected_pub_id='test.localhost')), ('override', dict(ctor_pub_id='test', prep_pub_id='test.localhost', expected_pub_id='test.localhost')), ] _topics = [ ('no_topics', dict(topics=[])), ('single_topic', dict(topics=['notifications'])), ('multiple_topic2', dict(topics=['foo', 'bar'])), ] _priority = [ ('audit', dict(priority='audit')), ('debug', dict(priority='debug')), ('info', dict(priority='info')), ('warn', dict(priority='warn')), ('error', dict(priority='error')), ('sample', dict(priority='sample')), ('critical', dict(priority='critical')), ] _payload = [ ('payload', dict(payload={'foo': 'bar'})), ] _context = [ ('ctxt', dict(ctxt=test_utils.TestContext(user_name='bob'))), ] _retry = [ ('unconfigured', dict()), ('None', dict(retry=None)), ('0', dict(retry=0)), ('5', dict(retry=5)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._v1, cls._v2, cls._publisher_id, cls._topics, cls._priority, cls._payload, cls._context, cls._retry) def setUp(self): super(TestMessagingNotifier, self).setUp() self.logger = self.useFixture(_ReRaiseLoggedExceptionsFixture()).logger 
        self.useFixture(fixtures.MockPatchObject(
            messaging, 'LOG', self.logger))
        self.useFixture(fixtures.MockPatchObject(
            msg_notifier, '_LOG', self.logger))

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_notifier(self, mock_utcnow):
        drivers = []
        if self.v1:
            drivers.append('messaging')
        if self.v2:
            drivers.append('messagingv2')

        self.config(driver=drivers,
                    topics=self.topics,
                    group='oslo_messaging_notifications')

        transport = oslo_messaging.get_notification_transport(self.conf,
                                                              url='fake:')

        if hasattr(self, 'ctor_pub_id'):
            notifier = oslo_messaging.Notifier(transport,
                                               publisher_id=self.ctor_pub_id)
        else:
            notifier = oslo_messaging.Notifier(transport)

        prepare_kwds = {}
        if hasattr(self, 'retry'):
            prepare_kwds['retry'] = self.retry
        if hasattr(self, 'prep_pub_id'):
            prepare_kwds['publisher_id'] = self.prep_pub_id
        if prepare_kwds:
            notifier = notifier.prepare(**prepare_kwds)

        transport._send_notification = mock.Mock()

        message_id = uuid.uuid4()
        uuid.uuid4 = mock.Mock(return_value=message_id)

        mock_utcnow.return_value = datetime.datetime.utcnow()

        message = {
            'message_id': str(message_id),
            'publisher_id': self.expected_pub_id,
            'event_type': 'test.notify',
            'priority': self.priority.upper(),
            'payload': self.payload,
            'timestamp': str(timeutils.utcnow()),
        }

        sends = []
        if self.v1:
            sends.append(dict(version=1.0))
        if self.v2:
            sends.append(dict(version=2.0))

        calls = []
        for send_kwargs in sends:
            for topic in self.topics:
                if hasattr(self, 'retry'):
                    send_kwargs['retry'] = self.retry
                else:
                    send_kwargs['retry'] = -1
                target = oslo_messaging.Target(topic='%s.%s' % (topic,
                                                                self.priority))
                calls.append(mock.call(target,
                                       self.ctxt,
                                       message,
                                       **send_kwargs))

        method = getattr(notifier, self.priority)
        method(self.ctxt, 'test.notify', self.payload)

        uuid.uuid4.assert_called_once_with()
        transport._send_notification.assert_has_calls(calls, any_order=True)

        self.assertTrue(notifier.is_enabled())


TestMessagingNotifier.generate_scenarios()


class TestMessagingNotifierRetry(test_utils.BaseTestCase):

    class TestingException(BaseException):
        pass

    def test_notifier_retry_connection_fails_rabbit(self):
        """This test sets a small retry number for notification sending and
        configures a non-reachable message bus. The expectation is that,
        after the configured number of retries, the driver gives up sending
        the message.
        """
        self.config(
            driver=["messagingv2"],
            topics=["test-retry"],
            retry=2,
            group="oslo_messaging_notifications")
        self.config(
            # just to speed up the test execution
            rabbit_retry_backoff=0,
            group="oslo_messaging_rabbit")

        transport = oslo_messaging.get_notification_transport(
            self.conf, url='rabbit://')
        notifier = oslo_messaging.Notifier(transport)

        orig_establish_connection = \
            connection.Connection._establish_connection
        calls = []

        def wrapped_establish_connection(*args, **kwargs):
            if len(calls) > 2:
                raise self.TestingException(
                    "Connection should only be retried twice due to "
                    "configuration")
            else:
                calls.append((args, kwargs))
                orig_establish_connection(*args, **kwargs)

        with mock.patch(
                'kombu.connection.Connection._establish_connection',
                new=wrapped_establish_connection
        ):
            with mock.patch(
                    'oslo_messaging.notify.messaging.LOG.exception'
            ) as mock_log:
                notifier.info(test_utils.TestContext(), "test", {})

        # one normal call plus two retries
        self.assertEqual(3, len(calls))
        # the error was caught and logged
        mock_log.assert_called_once()

    def test_notifier_retry_connection_fails_kafka(self):
        """This test sets a small retry number for notification sending and
        configures a non-reachable message bus. The expectation is that,
        after the configured number of retries, the driver gives up sending
        the message.
        """
        self.config(
            driver=["messagingv2"],
            topics=["test-retry"],
            retry=2,
            group='oslo_messaging_notifications')

        transport = oslo_messaging.get_notification_transport(
            self.conf, url='kafka://')
        notifier = oslo_messaging.Notifier(transport)

        # Kafka's message producer interface is async, and there is no way
        # from the oslo interface to force sending a pending message. So this
        # call simply returns without i) failing to deliver the message to
        # the non-existent Kafka bus ii) retrying the message delivery twice
        # as the configuration requested.
        notifier.info(test_utils.TestContext(), "test", {})


class TestSerializer(test_utils.BaseTestCase):

    def setUp(self):
        super(TestSerializer, self).setUp()
        self.addCleanup(_impl_test.reset)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_serializer(self, mock_utcnow):
        transport = oslo_messaging.get_notification_transport(self.conf,
                                                              url='fake:')

        serializer = msg_serializer.NoOpSerializer()

        notifier = oslo_messaging.Notifier(transport,
                                           'test.localhost',
                                           driver='test',
                                           topics=['test'],
                                           serializer=serializer)

        message_id = uuid.uuid4()
        uuid.uuid4 = mock.Mock(return_value=message_id)

        mock_utcnow.return_value = datetime.datetime.utcnow()

        serializer.serialize_context = mock.Mock()
        serializer.serialize_context.return_value = dict(user_name='alice')
        serializer.serialize_entity = mock.Mock()
        serializer.serialize_entity.return_value = 'sbar'

        ctxt = test_utils.TestContext(user_name='bob')
        notifier.info(ctxt, 'test.notify', 'bar')

        message = {
            'message_id': str(message_id),
            'publisher_id': 'test.localhost',
            'event_type': 'test.notify',
            'priority': 'INFO',
            'payload': 'sbar',
            'timestamp': str(timeutils.utcnow()),
        }

        self.assertEqual([(dict(user_name='alice'), message, 'INFO', -1)],
                         _impl_test.NOTIFICATIONS)
        # NOTE(JayF): This is also called when we create a TestContext
        uuid.uuid4.assert_has_calls([mock.call(), mock.call()])
        serializer.serialize_context.assert_called_once_with(ctxt)
        serializer.serialize_entity.assert_called_once_with(ctxt, 'bar')


class TestNotifierTopics(test_utils.BaseTestCase):

    def test_topics_from_config(self):
        self.config(driver=['log'], group='oslo_messaging_notifications')
        self.config(topics=['topic1', 'topic2'],
                    group='oslo_messaging_notifications')
        transport = oslo_messaging.get_notification_transport(self.conf,
                                                              url='fake:')
        notifier = oslo_messaging.Notifier(transport, 'test.localhost')
        self.assertEqual(['topic1', 'topic2'], notifier._topics)

    def test_topics_from_kwargs(self):
        self.config(driver=['log'], group='oslo_messaging_notifications')
        transport = oslo_messaging.get_notification_transport(self.conf,
                                                              url='fake:')
        notifier = oslo_messaging.Notifier(transport, 'test.localhost',
                                           topics=['topic1', 'topic2'])
        self.assertEqual(['topic1', 'topic2'], notifier._topics)


class TestLogNotifier(test_utils.BaseTestCase):

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_notifier(self, mock_utcnow):
        self.config(driver=['log'], group='oslo_messaging_notifications')
        transport = oslo_messaging.get_notification_transport(self.conf,
                                                              url='fake:')
        notifier = oslo_messaging.Notifier(transport, 'test.localhost')

        message_id = uuid.uuid4()
        uuid.uuid4 = mock.Mock()
        uuid.uuid4.return_value = message_id

        mock_utcnow.return_value = datetime.datetime.utcnow()

        logger = mock.Mock()

        message = {
            'message_id': str(message_id),
            'publisher_id': 'test.localhost',
            'event_type': 'test.notify',
            'priority': 'INFO',
            'payload': 'bar',
            'timestamp': str(timeutils.utcnow()),
        }

        with
mock.patch.object(logging, 'getLogger') as gl: gl.return_value = logger notifier.info(test_utils.TestContext(), 'test.notify', 'bar') # NOTE(JayF): TestContext calls this, too uuid.uuid4.assert_has_calls([mock.call(), mock.call()]) logging.getLogger.assert_called_once_with( 'oslo.messaging.notification.test.notify') logger.info.assert_called_once_with(JsonMessageMatcher(message)) self.assertTrue(notifier.is_enabled()) def test_sample_priority(self): # Ensure logger drops sample-level notifications. driver = _impl_log.LogDriver(None, None, None) logger = mock.Mock(spec=logging.getLogger('oslo.messaging.' 'notification.foo')) logger.sample = None msg = {'event_type': 'foo'} with mock.patch.object(logging, 'getLogger') as gl: gl.return_value = logger driver.notify(None, msg, "sample", None) logging.getLogger.assert_called_once_with('oslo.messaging.' 'notification.foo') def test_mask_passwords(self): # Ensure that passwords are masked with notifications driver = _impl_log.LogDriver(None, None, None) logger = mock.MagicMock() logger.info = mock.MagicMock() message = {'password': 'passw0rd', 'event_type': 'foo'} mask_str = jsonutils.dumps(strutils.mask_dict_password(message)) with mock.patch.object(logging, 'getLogger') as gl: gl.return_value = logger driver.notify(None, message, 'info', 0) logger.info.assert_called_once_with(mask_str) class TestNotificationConfig(test_utils.BaseTestCase): def test_retry_config(self): conf = self.messaging_conf.conf self.config(driver=['messaging'], group='oslo_messaging_notifications') conf.set_override('retry', 3, group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport) self.assertEqual(3, notifier.retry) def test_notifier_retry_config(self): conf = self.messaging_conf.conf self.config(driver=['messaging'], group='oslo_messaging_notifications') conf.set_override('retry', 3, group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport, retry=5) self.assertEqual(5, notifier.retry) class TestRoutingNotifier(test_utils.BaseTestCase): def setUp(self): super(TestRoutingNotifier, self).setUp() self.config(driver=['routing'], group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') self.notifier = oslo_messaging.Notifier(transport) self.router = self.notifier._driver_mgr['routing'].obj self.assertTrue(self.notifier.is_enabled()) def _fake_extension_manager(self, ext): return extension.ExtensionManager.make_test_instance( [extension.Extension('test', None, None, ext), ]) def _empty_extension_manager(self): return extension.ExtensionManager.make_test_instance([]) def test_should_load_plugin(self): self.router.used_drivers = set(["zoo", "blah"]) ext = mock.MagicMock() ext.name = "foo" self.assertFalse(self.router._should_load_plugin(ext)) ext.name = "zoo" self.assertTrue(self.router._should_load_plugin(ext)) def test_load_notifiers_no_config(self): # default routing_config="" self.router._load_notifiers() self.assertEqual({}, self.router.routing_groups) self.assertEqual(0, len(self.router.used_drivers)) def test_load_notifiers_no_extensions(self): self.config(routing_config="routing_notifier.yaml", group='oslo_messaging_notifications') routing_config = r"" config_file = mock.MagicMock() config_file.return_value = routing_config with mock.patch.object(self.router, '_get_notifier_config_file', config_file): with 
mock.patch('stevedore.dispatch.DispatchExtensionManager', return_value=self._empty_extension_manager()): with mock.patch('oslo_messaging.notify.' '_impl_routing.LOG') as mylog: self.router._load_notifiers() self.assertFalse(mylog.debug.called) self.assertEqual({}, self.router.routing_groups) def test_load_notifiers_config(self): self.config(routing_config="routing_notifier.yaml", group='oslo_messaging_notifications') routing_config = r""" group_1: rpc : foo group_2: rpc : blah """ config_file = mock.MagicMock() config_file.return_value = routing_config with mock.patch.object(self.router, '_get_notifier_config_file', config_file): with mock.patch('stevedore.dispatch.DispatchExtensionManager', return_value=self._fake_extension_manager( mock.MagicMock())): with mock.patch('oslo_messaging.notify.' '_impl_routing.LOG'): self.router._load_notifiers() groups = list(self.router.routing_groups.keys()) groups.sort() self.assertEqual(['group_1', 'group_2'], groups) def test_get_drivers_for_message_accepted_events(self): config = r""" group_1: rpc: accepted_events: - foo.* - blah.zoo.* - zip """ groups = yaml.safe_load(config) group = groups['group_1'] # No matching event ... self.assertEqual([], self.router._get_drivers_for_message( group, "unknown", "info")) # Child of foo ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, "foo.1", "info")) # Foo itself ... self.assertEqual([], self.router._get_drivers_for_message( group, "foo", "info")) # Child of blah.zoo self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, "blah.zoo.zing", "info")) def test_get_drivers_for_message_accepted_priorities(self): config = r""" group_1: rpc: accepted_priorities: - info - error """ groups = yaml.safe_load(config) group = groups['group_1'] # No matching priority self.assertEqual([], self.router._get_drivers_for_message( group, None, "unknown")) # Info ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, None, "info")) # Error (to make sure the list is getting processed) ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, None, "error")) def test_get_drivers_for_message_both(self): config = r""" group_1: rpc: accepted_priorities: - info accepted_events: - foo.* driver_1: accepted_priorities: - info driver_2: accepted_events: - foo.* """ groups = yaml.safe_load(config) group = groups['group_1'] # Valid event, but no matching priority self.assertEqual(['driver_2'], self.router._get_drivers_for_message( group, 'foo.blah', "unknown")) # Valid priority, but no matching event self.assertEqual(['driver_1'], self.router._get_drivers_for_message( group, 'unknown', "info")) # Happy day ... x = self.router._get_drivers_for_message(group, 'foo.blah', "info") x.sort() self.assertEqual(['driver_1', 'driver_2', 'rpc'], x) def test_filter_func(self): ext = mock.MagicMock() ext.name = "rpc" # Good ... 
self.assertTrue(self.router._filter_func(ext, {}, {}, 'info', None, ['foo', 'rpc'])) # Bad self.assertFalse(self.router._filter_func(ext, {}, {}, 'info', None, ['foo'])) def test_notify(self): self.router.routing_groups = {'group_1': None, 'group_2': None} drivers_mock = mock.MagicMock() drivers_mock.side_effect = [['rpc'], ['foo']] with mock.patch.object(self.router, 'plugin_manager') as pm: with mock.patch.object(self.router, '_get_drivers_for_message', drivers_mock): self.notifier.info(test_utils.TestContext(), 'my_event', {}) self.assertEqual(sorted(['rpc', 'foo']), sorted(pm.map.call_args[0][6])) def test_notify_filtered(self): self.config(routing_config="routing_notifier.yaml", group='oslo_messaging_notifications') routing_config = r""" group_1: rpc: accepted_events: - my_event rpc2: accepted_priorities: - info bar: accepted_events: - nothing """ config_file = mock.MagicMock() config_file.return_value = routing_config rpc_driver = mock.Mock() rpc2_driver = mock.Mock() bar_driver = mock.Mock() pm = dispatch.DispatchExtensionManager.make_test_instance( [extension.Extension('rpc', None, None, rpc_driver), extension.Extension('rpc2', None, None, rpc2_driver), extension.Extension('bar', None, None, bar_driver)], ) with mock.patch.object(self.router, '_get_notifier_config_file', config_file): with mock.patch('stevedore.dispatch.DispatchExtensionManager', return_value=pm): with mock.patch('oslo_messaging.notify.' '_impl_routing.LOG'): cxt = test_utils.TestContext() self.notifier.info(cxt, 'my_event', {}) self.assertFalse(bar_driver.info.called) rpc_driver.notify.assert_called_once_with( cxt, mock.ANY, 'INFO', -1) rpc2_driver.notify.assert_called_once_with( cxt, mock.ANY, 'INFO', -1) class TestNoOpNotifier(test_utils.BaseTestCase): def test_notifier(self): self.config(driver=['noop'], group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport, 'test.localhost') self.assertFalse(notifier.is_enabled()) class TestNotifierTransportWarning(test_utils.BaseTestCase): @mock.patch('oslo_messaging.notify.notifier._LOG') def test_warning_when_rpc_transport(self, log): transport = oslo_messaging.get_rpc_transport(self.conf) oslo_messaging.Notifier(transport, 'test.localhost') log.warning.assert_called_once_with( "Using RPC transport for notifications. Please use " "get_notification_transport to obtain a " "notification transport instance.") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1386733 oslo.messaging-14.9.0/oslo_messaging/tests/rpc/0000775000175000017500000000000000000000000021517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/rpc/__init__.py0000664000175000017500000000000000000000000023616 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/rpc/test_client.py0000664000175000017500000005532300000000000024416 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import testscenarios from unittest import mock import oslo_messaging from oslo_messaging import exceptions from oslo_messaging import serializer as msg_serializer from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TestCastCall(test_utils.BaseTestCase): scenarios = [ ('cast_no_ctxt_no_args', dict(call=False, ctxt={}, args={})), ('call_no_ctxt_no_args', dict(call=True, ctxt={}, args={})), ('cast_ctxt_and_args', dict(call=False, ctxt=dict(user='testuser', project='testtenant'), args=dict(bar='blaa', foobar=11.01))), ('call_ctxt_and_args', dict(call=True, ctxt=dict(user='testuser', project='testtenant'), args=dict(bar='blaa', foobar=11.01))), ] def test_cast_call(self): self.config(rpc_response_timeout=None) transport_options = oslo_messaging.TransportOptions() transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.get_rpc_client( transport, oslo_messaging.Target(), transport_options=transport_options) transport._send = mock.Mock() msg = dict(method='foo', args=self.args) kwargs = {'retry': None, 'transport_options': transport_options} if self.call: kwargs['wait_for_reply'] = True kwargs['timeout'] = None kwargs['call_monitor_timeout'] = None method = client.call if self.call else client.cast method(self.ctxt, 'foo', **self.args) self.assertFalse(transport_options.at_least_once) transport._send.assert_called_once_with(oslo_messaging.Target(), self.ctxt, msg, **kwargs) def test_cast_call_with_transport_options(self): self.config(rpc_response_timeout=None) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') transport_options = oslo_messaging.TransportOptions(at_least_once=True) client = oslo_messaging.get_rpc_client( transport, oslo_messaging.Target(), transport_options=transport_options) transport._send = mock.Mock() msg = dict(method='foo', args=self.args) kwargs = {'retry': None, 'transport_options': transport_options} if self.call: kwargs['wait_for_reply'] = True kwargs['timeout'] = None kwargs['call_monitor_timeout'] = None method = client.call if self.call else client.cast method(self.ctxt, 'foo', **self.args) self.assertTrue(transport_options.at_least_once) transport._send.assert_called_once_with(oslo_messaging.Target(), self.ctxt, msg, **kwargs) class TestCastToTarget(test_utils.BaseTestCase): _base = [ ('all_none', dict(ctor={}, prepare={}, expect={})), ('ctor_exchange', dict(ctor=dict(exchange='testexchange'), prepare={}, expect=dict(exchange='testexchange'))), ('prepare_exchange', dict(ctor={}, prepare=dict(exchange='testexchange'), expect=dict(exchange='testexchange'))), ('prepare_exchange_none', dict(ctor=dict(exchange='testexchange'), prepare=dict(exchange=None), expect={})), ('both_exchange', dict(ctor=dict(exchange='ctorexchange'), prepare=dict(exchange='testexchange'), expect=dict(exchange='testexchange'))), ('ctor_topic', dict(ctor=dict(topic='testtopic'), prepare={}, expect=dict(topic='testtopic'))), ('prepare_topic', dict(ctor={}, prepare=dict(topic='testtopic'), expect=dict(topic='testtopic'))), ('prepare_topic_none', 
dict(ctor=dict(topic='testtopic'), prepare=dict(topic=None), expect={})), ('both_topic', dict(ctor=dict(topic='ctortopic'), prepare=dict(topic='testtopic'), expect=dict(topic='testtopic'))), ('ctor_namespace', dict(ctor=dict(namespace='testnamespace'), prepare={}, expect=dict(namespace='testnamespace'))), ('prepare_namespace', dict(ctor={}, prepare=dict(namespace='testnamespace'), expect=dict(namespace='testnamespace'))), ('prepare_namespace_none', dict(ctor=dict(namespace='testnamespace'), prepare=dict(namespace=None), expect={})), ('both_namespace', dict(ctor=dict(namespace='ctornamespace'), prepare=dict(namespace='testnamespace'), expect=dict(namespace='testnamespace'))), ('ctor_version', dict(ctor=dict(version='1.1'), prepare={}, expect=dict(version='1.1'))), ('prepare_version', dict(ctor={}, prepare=dict(version='1.1'), expect=dict(version='1.1'))), ('prepare_version_none', dict(ctor=dict(version='1.1'), prepare=dict(version=None), expect={})), ('both_version', dict(ctor=dict(version='ctorversion'), prepare=dict(version='1.1'), expect=dict(version='1.1'))), ('ctor_server', dict(ctor=dict(server='testserver'), prepare={}, expect=dict(server='testserver'))), ('prepare_server', dict(ctor={}, prepare=dict(server='testserver'), expect=dict(server='testserver'))), ('prepare_server_none', dict(ctor=dict(server='testserver'), prepare=dict(server=None), expect={})), ('both_server', dict(ctor=dict(server='ctorserver'), prepare=dict(server='testserver'), expect=dict(server='testserver'))), ('ctor_fanout', dict(ctor=dict(fanout=True), prepare={}, expect=dict(fanout=True))), ('prepare_fanout', dict(ctor={}, prepare=dict(fanout=True), expect=dict(fanout=True))), ('prepare_fanout_none', dict(ctor=dict(fanout=True), prepare=dict(fanout=None), expect={})), ('both_fanout', dict(ctor=dict(fanout=True), prepare=dict(fanout=False), expect=dict(fanout=False))), ] _prepare = [ ('single_prepare', dict(double_prepare=False)), ('double_prepare', dict(double_prepare=True)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._base, cls._prepare) def setUp(self): super(TestCastToTarget, self).setUp(conf=cfg.ConfigOpts()) def test_cast_to_target(self): target = oslo_messaging.Target(**self.ctor) expect_target = oslo_messaging.Target(**self.expect) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.get_rpc_client(transport, target) transport._send = mock.Mock() msg = dict(method='foo', args={}) if 'namespace' in self.expect: msg['namespace'] = self.expect['namespace'] if 'version' in self.expect: msg['version'] = self.expect['version'] if self.prepare: client = client.prepare(**self.prepare) if self.double_prepare: client = client.prepare(**self.prepare) client.cast({}, 'foo') transport._send.assert_called_once_with(expect_target, {}, msg, retry=None, transport_options=None) TestCastToTarget.generate_scenarios() _notset = object() class TestCallTimeout(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(confval=None, ctor=None, prepare=_notset, expect=None, cm=None)), ('confval', dict(confval=21, ctor=None, prepare=_notset, expect=21, cm=None)), ('ctor', dict(confval=None, ctor=21.1, prepare=_notset, expect=21.1, cm=None)), ('ctor_zero', dict(confval=None, ctor=0, prepare=_notset, expect=0, cm=None)), ('prepare', dict(confval=None, ctor=None, prepare=21.1, expect=21.1, cm=None)), ('prepare_override', dict(confval=None, ctor=10.1, prepare=21.1, expect=21.1, cm=None)), ('prepare_zero', dict(confval=None, ctor=None, 
prepare=0, expect=0, cm=None)), ('call_monitor', dict(confval=None, ctor=None, prepare=60, expect=60, cm=30)), ] def test_call_timeout(self): self.config(rpc_response_timeout=self.confval) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.get_rpc_client( transport, oslo_messaging.Target(), timeout=self.ctor, call_monitor_timeout=self.cm) transport._send = mock.Mock() msg = dict(method='foo', args={}) kwargs = dict(wait_for_reply=True, timeout=self.expect, retry=None, call_monitor_timeout=self.cm, transport_options=None) if self.prepare is not _notset: client = client.prepare(timeout=self.prepare) client.call({}, 'foo') transport._send.assert_called_once_with(oslo_messaging.Target(), {}, msg, **kwargs) class TestCallRetry(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(ctor=None, prepare=_notset, expect=None)), ('ctor', dict(ctor=21, prepare=_notset, expect=21)), ('ctor_zero', dict(ctor=0, prepare=_notset, expect=0)), ('prepare', dict(ctor=None, prepare=21, expect=21)), ('prepare_override', dict(ctor=10, prepare=21, expect=21)), ('prepare_zero', dict(ctor=None, prepare=0, expect=0)), ] def test_call_retry(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.get_rpc_client( transport, oslo_messaging.Target(), retry=self.ctor) transport._send = mock.Mock() msg = dict(method='foo', args={}) kwargs = dict(wait_for_reply=True, timeout=60, retry=self.expect, call_monitor_timeout=None, transport_options=None) if self.prepare is not _notset: client = client.prepare(retry=self.prepare) client.call({}, 'foo') transport._send.assert_called_once_with(oslo_messaging.Target(), {}, msg, **kwargs) class TestCallFanout(test_utils.BaseTestCase): scenarios = [ ('target', dict(prepare=_notset, target={'fanout': True})), ('prepare', dict(prepare={'fanout': True}, target={})), ('both', dict(prepare={'fanout': True}, target={'fanout': True})), ] def test_call_fanout(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.get_rpc_client( transport, oslo_messaging.Target(**self.target)) if self.prepare is not _notset: client = client.prepare(**self.prepare) self.assertRaises(exceptions.InvalidTarget, client.call, {}, 'foo') class TestSerializer(test_utils.BaseTestCase): scenarios = [ ('cast', dict(call=False, ctxt=dict(user='bob'), args=dict(a='a', b='b', c='c'), retval=None)), ('call', dict(call=True, ctxt=dict(user='bob'), args=dict(a='a', b='b', c='c'), retval='d')), ] def test_call_serializer(self): self.config(rpc_response_timeout=None) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') serializer = msg_serializer.NoOpSerializer() client = oslo_messaging.get_rpc_client( transport, oslo_messaging.Target(), serializer=serializer) transport._send = mock.Mock() kwargs = dict(wait_for_reply=True, timeout=None) if self.call else {} kwargs['retry'] = None if self.call: kwargs['call_monitor_timeout'] = None transport._send.return_value = self.retval serializer.serialize_entity = mock.Mock() serializer.deserialize_entity = mock.Mock() serializer.serialize_context = mock.Mock() def _stub(ctxt, arg): return 's' + arg msg = dict(method='foo', args=dict()) for k, v in self.args.items(): msg['args'][k] = 's' + v serializer.serialize_entity.side_effect = _stub if self.call: serializer.deserialize_entity.return_value = 'd' + self.retval serializer.serialize_context.return_value = dict(user='alice') method = client.call if self.call else client.cast retval = 
method(self.ctxt, 'foo', **self.args) if self.retval is not None: self.assertEqual('d' + self.retval, retval) transport._send.assert_called_once_with(oslo_messaging.Target(), dict(user='alice'), msg, transport_options=None, **kwargs) expected_calls = [mock.call(self.ctxt, arg) for arg in self.args] self.assertEqual(expected_calls, serializer.serialize_entity.mock_calls) if self.call: serializer.deserialize_entity.assert_called_once_with(self.ctxt, self.retval) serializer.serialize_context.assert_called_once_with(self.ctxt) class TestVersionCap(test_utils.BaseTestCase): _call_vs_cast = [ ('call', dict(call=True)), ('cast', dict(call=False)), ] _cap_scenarios = [ ('all_none', dict(cap=None, prepare_cap=_notset, version=None, prepare_version=_notset, success=True)), ('ctor_cap_ok', dict(cap='1.1', prepare_cap=_notset, version='1.0', prepare_version=_notset, success=True)), ('ctor_cap_override_ok', dict(cap='2.0', prepare_cap='1.1', version='1.0', prepare_version='1.0', success=True)), ('ctor_cap_override_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, success=True)), ('ctor_cap_minor_fail', dict(cap='1.0', prepare_cap=_notset, version='1.1', prepare_version=_notset, success=False)), ('ctor_cap_major_fail', dict(cap='2.0', prepare_cap=_notset, version=None, prepare_version='1.0', success=False)), ('ctor_cap_none_version_ok', dict(cap=None, prepare_cap=_notset, version='1.0', prepare_version=_notset, success=True)), ('ctor_cap_version_none_fail', dict(cap='1.0', prepare_cap=_notset, version=None, prepare_version=_notset, success=False)), ] @classmethod def generate_scenarios(cls): cls.scenarios = ( testscenarios.multiply_scenarios(cls._call_vs_cast, cls._cap_scenarios)) def test_version_cap(self): self.config(rpc_response_timeout=None) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(version=self.version) client = oslo_messaging.get_rpc_client(transport, target, version_cap=self.cap) if self.success: transport._send = mock.Mock() if self.prepare_version is not _notset: target = target(version=self.prepare_version) msg = dict(method='foo', args={}) if target.version is not None: msg['version'] = target.version kwargs = {'retry': None} if self.call: kwargs['wait_for_reply'] = True kwargs['timeout'] = None kwargs['call_monitor_timeout'] = None prep_kwargs = {} if self.prepare_cap is not _notset: prep_kwargs['version_cap'] = self.prepare_cap if self.prepare_version is not _notset: prep_kwargs['version'] = self.prepare_version if prep_kwargs: client = client.prepare(**prep_kwargs) method = client.call if self.call else client.cast try: method({}, 'foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.RPCVersionCapError, ex) self.assertFalse(self.success) else: self.assertTrue(self.success) transport._send.assert_called_once_with(target, {}, msg, transport_options=None, **kwargs) TestVersionCap.generate_scenarios() class TestCanSendVersion(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(cap=None, prepare_cap=_notset, version=None, prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_ok', dict(cap='1.1', prepare_cap=_notset, version='1.0', prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_override_ok', dict(cap='2.0', prepare_cap='1.1', version='1.0', prepare_version='1.0', can_send_version=_notset, can_send=True)), ('ctor_cap_override_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, 
can_send_version=_notset, can_send=True)), ('ctor_cap_can_send_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, can_send_version='1.1', can_send=True)), ('ctor_cap_can_send_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, can_send_version=None, can_send=True)), ('ctor_cap_minor_fail', dict(cap='1.0', prepare_cap=_notset, version='1.1', prepare_version=_notset, can_send_version=_notset, can_send=False)), ('ctor_cap_major_fail', dict(cap='2.0', prepare_cap=_notset, version=None, prepare_version='1.0', can_send_version=_notset, can_send=False)), ('ctor_cap_none_version_ok', dict(cap=None, prepare_cap=_notset, version='1.0', prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_version_none_fail', dict(cap='1.0', prepare_cap=_notset, version=None, prepare_version=_notset, can_send_version=_notset, can_send=False)), ('ctor_cap_version_can_send_none_fail', dict(cap='1.0', prepare_cap=_notset, version='1.0', prepare_version=_notset, can_send_version=None, can_send=False)), ] def test_version_cap(self): self.config(rpc_response_timeout=None) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(version=self.version) client = oslo_messaging.get_rpc_client(transport, target, version_cap=self.cap) prep_kwargs = {} if self.prepare_cap is not _notset: prep_kwargs['version_cap'] = self.prepare_cap if self.prepare_version is not _notset: prep_kwargs['version'] = self.prepare_version if prep_kwargs: client = client.prepare(**prep_kwargs) if self.can_send_version is not _notset: can_send = client.can_send_version(version=self.can_send_version) call_context_can_send = client.prepare().can_send_version( version=self.can_send_version) self.assertEqual(can_send, call_context_can_send) else: can_send = client.can_send_version() self.assertEqual(self.can_send, can_send) def test_invalid_version_type(self): target = oslo_messaging.Target(topic='sometopic') transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.get_rpc_client(transport, target) self.assertRaises(exceptions.MessagingException, client.prepare, version='5') self.assertRaises(exceptions.MessagingException, client.prepare, version='5.a') self.assertRaises(exceptions.MessagingException, client.prepare, version='5.5.a') class TestTransportWarning(test_utils.BaseTestCase): @mock.patch('oslo_messaging.rpc.client.LOG') def test_warning_when_notifier_transport(self, log): transport = oslo_messaging.get_notification_transport(self.conf) oslo_messaging.get_rpc_client(transport, oslo_messaging.Target()) log.warning.assert_called_once_with( "Using notification transport for RPC. Please use " "get_rpc_transport to obtain an RPC transport " "instance.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/rpc/test_dispatcher.py0000664000175000017500000003352500000000000025266 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import testscenarios import time import oslo_messaging from oslo_messaging import rpc from oslo_messaging import serializer as msg_serializer from oslo_messaging.tests import utils as test_utils from unittest import mock load_tests = testscenarios.load_tests_apply_scenarios class _FakeEndpoint(object): def __init__(self, target=None): self.target = target def foo(self, ctxt, **kwargs): pass @rpc.expose def bar(self, ctxt, **kwargs): pass def _foobar(self, ctxt, **kwargs): pass class TestDispatcher(test_utils.BaseTestCase): scenarios = [ ('no_endpoints', dict(endpoints=[], access_policy=None, dispatch_to=None, ctxt={}, msg=dict(method='foo'), exposed_methods=['foo', 'bar', '_foobar'], success=False, ex=oslo_messaging.UnsupportedVersion)), ('default_target', dict(endpoints=[{}], access_policy=None, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo'), exposed_methods=['foo', 'bar', '_foobar'], success=True, ex=None)), ('default_target_ctxt_and_args', dict(endpoints=[{}], access_policy=oslo_messaging.LegacyRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='bar'), ctxt=dict(user='bob'), msg=dict(method='bar', args=dict(blaa=True)), exposed_methods=['foo', 'bar', '_foobar'], success=True, ex=None)), ('default_target_namespace', dict(endpoints=[{}], access_policy=oslo_messaging.LegacyRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', namespace=None), exposed_methods=['foo', 'bar', '_foobar'], success=True, ex=None)), ('default_target_version', dict(endpoints=[{}], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', version='1.0'), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('default_target_no_such_method', dict(endpoints=[{}], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=None, ctxt={}, msg=dict(method='foobar'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.NoSuchMethod)), ('namespace', dict(endpoints=[{}, dict(namespace='testns')], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=1, method='foo'), ctxt={}, msg=dict(method='foo', namespace='testns'), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('namespace_mismatch', dict(endpoints=[{}, dict(namespace='testns')], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=None, ctxt={}, msg=dict(method='foo', namespace='nstest'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.UnsupportedVersion)), ('version', dict(endpoints=[dict(version='1.5'), dict(version='3.4')], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=1, method='foo'), ctxt={}, msg=dict(method='foo', version='3.2'), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('version_mismatch', dict(endpoints=[dict(version='1.5'), dict(version='3.0')], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=None, ctxt={}, msg=dict(method='foo', version='3.2'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.UnsupportedVersion)), ('message_in_null_namespace_with_multiple_namespaces', dict(endpoints=[dict(namespace='testns', legacy_namespaces=[None])], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', namespace=None), exposed_methods=['foo', 'bar'], success=True, ex=None)), 
('message_in_wrong_namespace_with_multiple_namespaces', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None])], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=None, ctxt={}, msg=dict(method='foo', namespace='wrong'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.UnsupportedVersion)), ('message_with_endpoint_no_private_and_public_method', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None])], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', namespace='testns'), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('message_with_endpoint_no_private_and_private_method', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None], )], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='_foobar'), ctxt={}, msg=dict(method='_foobar', namespace='testns'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.NoSuchMethod)), ('message_with_endpoint_explicitly_exposed_without_exposed_method', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None], )], access_policy=oslo_messaging.ExplicitRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', namespace='testns'), exposed_methods=['bar'], success=False, ex=oslo_messaging.NoSuchMethod)), ('message_with_endpoint_explicitly_exposed_with_exposed_method', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None], )], access_policy=oslo_messaging.ExplicitRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='bar'), ctxt={}, msg=dict(method='bar', namespace='testns'), exposed_methods=['bar'], success=True, ex=None)), ] def test_dispatcher(self): def _set_endpoint_mock_properties(endpoint): endpoint.foo = mock.Mock(spec=dir(_FakeEndpoint.foo)) # mock doesn't pick up the decorated method. 
endpoint.bar = mock.Mock(spec=dir(_FakeEndpoint.bar)) endpoint.bar.exposed = mock.PropertyMock(return_value=True) endpoint._foobar = mock.Mock(spec=dir(_FakeEndpoint._foobar)) return endpoint endpoints = [_set_endpoint_mock_properties(mock.Mock( spec=_FakeEndpoint, target=oslo_messaging.Target(**e))) for e in self.endpoints] serializer = None dispatcher = oslo_messaging.RPCDispatcher(endpoints, serializer, self.access_policy) incoming = mock.Mock(ctxt=self.ctxt, message=self.msg, client_timeout=0) res = None try: res = dispatcher.dispatch(incoming) except Exception as ex: self.assertFalse(self.success, ex) self.assertIsNotNone(self.ex, ex) self.assertIsInstance(ex, self.ex, ex) if isinstance(ex, oslo_messaging.NoSuchMethod): self.assertEqual(self.msg.get('method'), ex.method) elif isinstance(ex, oslo_messaging.UnsupportedVersion): self.assertEqual(self.msg.get('version', '1.0'), ex.version) if ex.method: self.assertEqual(self.msg.get('method'), ex.method) else: self.assertTrue(self.success, "Unexpected success of operation during testing") self.assertIsNotNone(res) for n, endpoint in enumerate(endpoints): for method_name in self.exposed_methods: method = getattr(endpoint, method_name) if self.dispatch_to and n == self.dispatch_to['endpoint'] and \ method_name == self.dispatch_to['method'] and \ method_name in self.exposed_methods: method.assert_called_once_with( self.ctxt, **self.msg.get('args', {})) else: self.assertEqual(0, method.call_count, 'method: {}'.format(method)) class TestDispatcherWithPingEndpoint(test_utils.BaseTestCase): def test_dispatcher_with_ping(self): self.config(rpc_ping_enabled=True) dispatcher = oslo_messaging.RPCDispatcher([], None, None) incoming = mock.Mock(ctxt={}, message=dict(method='oslo_rpc_server_ping'), client_timeout=0) res = dispatcher.dispatch(incoming) self.assertEqual('pong', res) def test_dispatcher_with_ping_already_used(self): class MockEndpoint(object): def oslo_rpc_server_ping(self, ctxt, **kwargs): return 'not_pong' mockEndpoint = MockEndpoint() self.config(rpc_ping_enabled=True) dispatcher = oslo_messaging.RPCDispatcher([mockEndpoint], None, None) incoming = mock.Mock(ctxt={}, message=dict(method='oslo_rpc_server_ping'), client_timeout=0) res = dispatcher.dispatch(incoming) self.assertEqual('not_pong', res) class TestSerializer(test_utils.BaseTestCase): scenarios = [ ('no_args_or_retval', dict(ctxt={}, dctxt={}, args={}, retval=None)), ('args_and_retval', dict(ctxt=dict(user='bob'), dctxt=dict(user='alice'), args=dict(a='a', b='b', c='c'), retval='d')), ] def test_serializer(self): endpoint = _FakeEndpoint() serializer = msg_serializer.NoOpSerializer() dispatcher = oslo_messaging.RPCDispatcher([endpoint], serializer) endpoint.foo = mock.Mock() args = dict([(k, 'd' + v) for k, v in self.args.items()]) endpoint.foo.return_value = self.retval serializer.serialize_entity = mock.Mock() serializer.deserialize_entity = mock.Mock() serializer.deserialize_context = mock.Mock() serializer.deserialize_context.return_value = self.dctxt expected_side_effect = ['d' + arg for arg in self.args] serializer.deserialize_entity.side_effect = expected_side_effect serializer.serialize_entity.return_value = None if self.retval: serializer.serialize_entity.return_value = 's' + self.retval incoming = mock.Mock() incoming.ctxt = self.ctxt incoming.message = dict(method='foo', args=self.args) incoming.client_timeout = 0 retval = dispatcher.dispatch(incoming) if self.retval is not None: self.assertEqual('s' + self.retval, retval) 
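        # dispatch() is expected to have deserialized the incoming context
        # and each argument before invoking the endpoint, and to have
        # serialized the endpoint's return value on the way back; the
        # assertions below verify each serializer hook saw exactly those
        # inputs.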
endpoint.foo.assert_called_once_with(self.dctxt, **args) serializer.deserialize_context.assert_called_once_with(self.ctxt) expected_calls = [mock.call(self.dctxt, arg) for arg in self.args] self.assertEqual(expected_calls, serializer.deserialize_entity.mock_calls) serializer.serialize_entity.assert_called_once_with(self.dctxt, self.retval) class TestMonitorFailure(test_utils.BaseTestCase): """Test what happens when the call monitor watchdog hits an exception when sending the heartbeat. """ class _SleepyEndpoint(object): def __init__(self, target=None): self.target = target def sleep(self, ctxt, **kwargs): time.sleep(kwargs['timeout']) return True def test_heartbeat_failure(self): endpoints = [self._SleepyEndpoint()] dispatcher = oslo_messaging.RPCDispatcher(endpoints, serializer=None) # sleep long enough for the client_timeout to expire multiple times # the timeout is (client_timeout/2) and must be > 1.0 message = {'method': 'sleep', 'args': {'timeout': 3.5}} ctxt = {'test': 'value'} incoming = mock.Mock(ctxt=ctxt, message=message, client_timeout=2.0) incoming.heartbeat = mock.Mock(side_effect=Exception('BOOM!')) res = dispatcher.dispatch(incoming) self.assertTrue(res) # only one call to heartbeat should be made since the watchdog thread # should exit on the first exception thrown self.assertEqual(1, incoming.heartbeat.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/oslo_messaging/tests/rpc/test_server.py0000664000175000017500000010626700000000000024452 0ustar00zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
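# The test classes below exercise the RPC server API end to end over the
# in-memory 'fake:' driver. As a rough sketch of the pattern under test
# (conf, endpoint and the topic/server names are illustrative placeholders,
# not part of this suite):
#
#   transport = oslo_messaging.get_rpc_transport(conf, url='fake:')
#   target = oslo_messaging.Target(topic='testtopic', server='testserver')
#   server = oslo_messaging.get_rpc_server(transport, target, [endpoint],
#                                          executor='threading')
#   server.start()
#   ...
#   server.stop()
#   server.wait()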
import threading from unittest import mock import eventlet import fixtures from oslo_config import cfg from oslo_utils import eventletutils import testscenarios import oslo_messaging from oslo_messaging import rpc from oslo_messaging.rpc import dispatcher from oslo_messaging.rpc import server as rpc_server_module from oslo_messaging import server as server_module from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class ServerSetupMixin(object): class Server(object): def __init__(self, transport, topic, server, endpoint, serializer, exchange): self.controller = ServerSetupMixin.ServerController() target = oslo_messaging.Target(topic=topic, server=server, exchange=exchange) self.server = oslo_messaging.get_rpc_server(transport, target, [endpoint, self.controller], serializer=serializer) def wait(self): # Wait for the executor to process the stop message, indicating all # test messages have been processed self.controller.stopped.wait() # Check start() does nothing with a running server self.server.start() self.server.stop() self.server.wait() def start(self): self.server.start() class ServerController(object): def __init__(self): self.stopped = eventletutils.Event() def stop(self, ctxt): self.stopped.set() class TestSerializer(object): def serialize_entity(self, ctxt, entity): return ('s' + entity) if entity else entity def deserialize_entity(self, ctxt, entity): return ('d' + entity) if entity else entity def serialize_context(self, ctxt): return dict([(k, 's' + v) for k, v in ctxt.items()]) def deserialize_context(self, ctxt): return dict([(k, 'd' + v) for k, v in ctxt.items()]) def __init__(self): self.serializer = self.TestSerializer() def _setup_server(self, transport, endpoint, topic=None, server=None, exchange=None): server = self.Server(transport, topic=topic or 'testtopic', server=server or 'testserver', endpoint=endpoint, serializer=self.serializer, exchange=exchange) server.start() return server def _stop_server(self, client, server, topic=None, exchange=None): client.cast({}, 'stop') server.wait() def _setup_client(self, transport, topic='testtopic', exchange=None): target = oslo_messaging.Target(topic=topic, exchange=exchange) return oslo_messaging.get_rpc_client(transport, target=target, serializer=self.serializer) class TestRPCServer(test_utils.BaseTestCase, ServerSetupMixin): def __init__(self, *args): super(TestRPCServer, self).__init__(*args) ServerSetupMixin.__init__(self) def setUp(self): super(TestRPCServer, self).setUp(conf=cfg.ConfigOpts()) # FakeExchangeManager uses a class-level exchanges mapping; "reset" it # before tests assert amount of items stored self.useFixture(fixtures.MonkeyPatch( 'oslo_messaging._drivers.impl_fake.FakeExchangeManager._exchanges', new_value={})) def test_constructor(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(topic='foo', server='bar') endpoints = [object()] serializer = object() access_policy = dispatcher.DefaultRPCAccessPolicy server = oslo_messaging.get_rpc_server(transport, target, endpoints, serializer=serializer, access_policy=access_policy, executor='threading') self.assertIs(server.conf, self.conf) self.assertIs(server.transport, transport) self.assertIsInstance(server.dispatcher, oslo_messaging.RPCDispatcher) self.assertIs(server.dispatcher.endpoints, endpoints) self.assertIs(server.dispatcher.serializer, serializer) self.assertEqual('threading', server.executor_type) def 
test_constructor_with_eventlet_executor(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(topic='foo', server='bar') endpoints = [object()] serializer = object() access_policy = dispatcher.DefaultRPCAccessPolicy server = oslo_messaging.get_rpc_server(transport, target, endpoints, serializer=serializer, access_policy=access_policy, executor='eventlet') self.assertIs(server.conf, self.conf) self.assertIs(server.transport, transport) self.assertIsInstance(server.dispatcher, oslo_messaging.RPCDispatcher) self.assertIs(server.dispatcher.endpoints, endpoints) self.assertIs(server.dispatcher.serializer, serializer) self.assertEqual('eventlet', server.executor_type) def test_constructor_with_unrecognized_executor(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(topic='foo', server='bar') endpoints = [object()] serializer = object() access_policy = dispatcher.DefaultRPCAccessPolicy self.assertRaises( server_module.ExecutorLoadFailure, oslo_messaging.get_rpc_server, transport=transport, target=target, endpoints=endpoints, serializer=serializer, access_policy=access_policy, executor='boom') def test_server_wait_method(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(topic='foo', server='bar') endpoints = [object()] serializer = object() class MagicMockIgnoreArgs(mock.MagicMock): """MagicMock ignores arguments. A MagicMock which can never misinterpret the arguments passed to it during construction. """ def __init__(self, *args, **kwargs): super(MagicMockIgnoreArgs, self).__init__() server = oslo_messaging.get_rpc_server(transport, target, endpoints, serializer=serializer) # Mocking executor server._executor_cls = MagicMockIgnoreArgs server._create_listener = MagicMockIgnoreArgs() server.dispatcher = MagicMockIgnoreArgs() # Here assigning executor's listener object to listener variable # before calling wait method, because in wait method we are # setting executor to None. 
server.start() listener = server.listener server.stop() # call server wait method server.wait() self.assertEqual(1, listener.cleanup.call_count) def test_no_target_server(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') server = oslo_messaging.get_rpc_server( transport, oslo_messaging.Target(topic='testtopic'), []) try: server.start() except Exception as ex: self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) self.assertEqual('testtopic', ex.target.topic) else: self.assertTrue(False) def test_no_server_topic(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(server='testserver') server = oslo_messaging.get_rpc_server(transport, target, []) try: server.start() except Exception as ex: self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) self.assertEqual('testserver', ex.target.server) else: self.assertTrue(False) def _test_no_client_topic(self, call=True): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = self._setup_client(transport, topic=None) method = client.call if call else client.cast try: method({}, 'ping', arg='foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) self.assertIsNotNone(ex.target) else: self.assertTrue(False) def test_no_client_topic_call(self): self._test_no_client_topic(call=True) def test_no_client_topic_cast(self): self._test_no_client_topic(call=False) def test_client_call_timeout(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') finished = False wait = threading.Condition() class TestEndpoint(object): def ping(self, ctxt, arg): with wait: if not finished: wait.wait() server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) try: client.prepare(timeout=0).call({}, 'ping', arg='foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.MessagingTimeout, ex) else: self.assertTrue(False) with wait: finished = True wait.notify() self._stop_server(client, server_thread) def test_unknown_executor(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') try: oslo_messaging.get_rpc_server(transport, None, [], executor='foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure) self.assertEqual('foo', ex.executor) else: self.assertTrue(False) def test_cast(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def __init__(self): self.pings = [] def ping(self, ctxt, arg): self.pings.append(arg) endpoint = TestEndpoint() server_thread = self._setup_server(transport, endpoint) client = self._setup_client(transport) client.cast({}, 'ping', arg='foo') client.cast({}, 'ping', arg='bar') self._stop_server(client, server_thread) self.assertEqual(['dsfoo', 'dsbar'], endpoint.pings) def test_call(self): # NOTE(milan): using a separate transport instance for each the client # and the server to be able to check independent transport instances # can communicate over same exchange&topic transport_srv = oslo_messaging.get_rpc_transport(self.conf, url='fake:') transport_cli = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def ping(self, ctxt, arg): return arg server_thread = self._setup_server(transport_srv, TestEndpoint()) client = self._setup_client(transport_cli) self.assertIsNone(client.call({}, 'ping', arg=None)) self.assertEqual(0, client.call({}, 'ping', arg=0)) self.assertFalse(client.call({}, 'ping', 
arg=False)) self.assertEqual([], client.call({}, 'ping', arg=[])) self.assertEqual({}, client.call({}, 'ping', arg={})) self.assertEqual('dsdsfoo', client.call({}, 'ping', arg='foo')) self._stop_server(client, server_thread) def test_direct_call(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def ping(self, ctxt, arg): return arg server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) direct = client.prepare(server='testserver') self.assertIsNone(direct.call({}, 'ping', arg=None)) self.assertEqual(0, client.call({}, 'ping', arg=0)) self.assertFalse(client.call({}, 'ping', arg=False)) self.assertEqual([], client.call({}, 'ping', arg=[])) self.assertEqual({}, client.call({}, 'ping', arg={})) self.assertEqual('dsdsfoo', direct.call({}, 'ping', arg='foo')) self._stop_server(client, server_thread) def test_context(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def ctxt_check(self, ctxt, key): return ctxt[key] server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) self.assertEqual('dsdsb', client.call({'dsa': 'b'}, 'ctxt_check', key='a')) self._stop_server(client, server_thread) def test_failure(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def ping(self, ctxt, arg): raise ValueError(arg) debugs = [] errors = [] def stub_debug(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] debugs.append(str(msg) % a) def stub_error(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] errors.append(str(msg) % a) self.useFixture(fixtures.MockPatchObject( rpc_server_module.LOG, 'debug', stub_debug)) self.useFixture(fixtures.MockPatchObject( rpc_server_module.LOG, 'error', stub_error)) server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) try: client.call({}, 'ping', arg='foo') except Exception as ex: self.assertIsInstance(ex, ValueError) self.assertEqual('dsfoo', str(ex)) self.assertTrue(len(debugs) == 2) self.assertGreater(len(errors), 0) else: self.assertTrue(False) self._stop_server(client, server_thread) def test_expected_failure(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') debugs = [] errors = [] def stub_debug(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] debugs.append(str(msg) % a) def stub_error(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] errors.append(str(msg) % a) self.useFixture(fixtures.MockPatchObject( rpc_server_module.LOG, 'debug', stub_debug)) self.useFixture(fixtures.MockPatchObject( rpc_server_module.LOG, 'error', stub_error)) class TestEndpoint(object): @oslo_messaging.expected_exceptions(ValueError) def ping(self, ctxt, arg): raise ValueError(arg) server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) try: client.call({}, 'ping', arg='foo') except Exception as ex: self.assertIsInstance(ex, ValueError) self.assertEqual('dsfoo', str(ex)) self.assertGreater(len(debugs), 0) self.assertTrue(len(errors) == 0) else: self.assertTrue(False) self._stop_server(client, server_thread) @mock.patch('oslo_messaging.rpc.server.LOG') def test_warning_when_notifier_transport(self, log): transport = oslo_messaging.get_notification_transport(self.conf) target = 
        target = oslo_messaging.Target(topic='foo', server='bar')
        endpoints = [object()]
        serializer = object()

        oslo_messaging.get_rpc_server(transport, target,
                                      endpoints, serializer=serializer)
        log.warning.assert_called_once_with(
            "Using notification transport for RPC. Please use "
            "get_rpc_transport to obtain an RPC transport "
            "instance.")


class TestMultipleServers(test_utils.BaseTestCase, ServerSetupMixin):

    _exchanges = [
        ('same_exchange', dict(exchange1=None, exchange2=None)),
        ('diff_exchange', dict(exchange1='x1', exchange2='x2')),
    ]

    _topics = [
        ('same_topic', dict(topic1='t', topic2='t')),
        ('diff_topic', dict(topic1='t1', topic2='t2')),
    ]

    _server = [
        ('same_server', dict(server1=None, server2=None)),
        ('diff_server', dict(server1='s1', server2='s2')),
    ]

    _fanout = [
        ('not_fanout', dict(fanout1=None, fanout2=None)),
        ('fanout', dict(fanout1=True, fanout2=True)),
    ]

    _method = [
        ('call', dict(call1=True, call2=True)),
        ('cast', dict(call1=False, call2=False)),
    ]

    _endpoints = [
        ('one_endpoint',
         dict(multi_endpoints=False,
              expect1=['ds1', 'ds2'],
              expect2=['ds1', 'ds2'])),
        ('two_endpoints',
         dict(multi_endpoints=True,
              expect1=['ds1'],
              expect2=['ds2'])),
    ]

    @classmethod
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges,
                                                         cls._topics,
                                                         cls._server,
                                                         cls._fanout,
                                                         cls._method,
                                                         cls._endpoints)

        # fanout call not supported
        def filter_fanout_call(scenario):
            params = scenario[1]
            fanout = params['fanout1'] or params['fanout2']
            call = params['call1'] or params['call2']
            return not (call and fanout)

        # listening multiple times on same topic/server pair not supported
        def filter_same_topic_and_server(scenario):
            params = scenario[1]
            single_topic = params['topic1'] == params['topic2']
            single_server = params['server1'] == params['server2']
            return not (single_topic and single_server)

        # fanout to multiple servers on same topic and exchange:
        # each endpoint will receive both messages
        def fanout_to_servers(scenario):
            params = scenario[1]
            fanout = params['fanout1'] or params['fanout2']
            single_exchange = params['exchange1'] == params['exchange2']
            single_topic = params['topic1'] == params['topic2']
            multi_servers = params['server1'] != params['server2']
            if fanout and single_exchange and single_topic and multi_servers:
                params['expect1'] = params['expect1'][:] + params['expect1']
                params['expect2'] = params['expect2'][:] + params['expect2']
            return scenario

        # multiple endpoints on same topic and exchange:
        # either endpoint can get either message
        def single_topic_multi_endpoints(scenario):
            params = scenario[1]
            single_exchange = params['exchange1'] == params['exchange2']
            single_topic = params['topic1'] == params['topic2']
            if single_topic and single_exchange and params['multi_endpoints']:
                params['expect_either'] = (params['expect1'] +
                                           params['expect2'])
                params['expect1'] = params['expect2'] = []
            else:
                params['expect_either'] = []
            return scenario

        for f in [filter_fanout_call, filter_same_topic_and_server]:
            cls.scenarios = [i for i in cls.scenarios if f(i)]
        for m in [fanout_to_servers, single_topic_multi_endpoints]:
            cls.scenarios = [m(i) for i in cls.scenarios]

    def __init__(self, *args):
        super(TestMultipleServers, self).__init__(*args)
        ServerSetupMixin.__init__(self)

    def setUp(self):
        super(TestMultipleServers, self).setUp(conf=cfg.ConfigOpts())
        self.useFixture(fixtures.MonkeyPatch(
            'oslo_messaging._drivers.impl_fake.FakeExchangeManager'
            '._exchanges',
            new_value={}))
    def test_multiple_servers(self):
        transport1 = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
        if self.exchange1 != self.exchange2:
            transport2 = oslo_messaging.get_rpc_transport(self.conf,
                                                          url='fake:')
        else:
            transport2 = transport1

        class TestEndpoint(object):
            def __init__(self):
                self.pings = []

            def ping(self, ctxt, arg):
                self.pings.append(arg)

            def alive(self, ctxt):
                return 'alive'

        if self.multi_endpoints:
            endpoint1, endpoint2 = TestEndpoint(), TestEndpoint()
        else:
            endpoint1 = endpoint2 = TestEndpoint()

        server1 = self._setup_server(transport1, endpoint1,
                                     topic=self.topic1,
                                     exchange=self.exchange1,
                                     server=self.server1)
        server2 = self._setup_server(transport2, endpoint2,
                                     topic=self.topic2,
                                     exchange=self.exchange2,
                                     server=self.server2)

        client1 = self._setup_client(transport1,
                                     topic=self.topic1,
                                     exchange=self.exchange1)
        client2 = self._setup_client(transport2,
                                     topic=self.topic2,
                                     exchange=self.exchange2)

        client1 = client1.prepare(server=self.server1)
        client2 = client2.prepare(server=self.server2)

        if self.fanout1:
            client1.call({}, 'alive')
            client1 = client1.prepare(fanout=True)
        if self.fanout2:
            client2.call({}, 'alive')
            client2 = client2.prepare(fanout=True)

        (client1.call if self.call1 else client1.cast)({}, 'ping', arg='1')
        (client2.call if self.call2 else client2.cast)({}, 'ping', arg='2')

        self._stop_server(client1.prepare(fanout=None),
                          server1, topic=self.topic1,
                          exchange=self.exchange1)
        self._stop_server(client2.prepare(fanout=None),
                          server2, topic=self.topic2,
                          exchange=self.exchange2)

        def check(pings, expect):
            self.assertEqual(len(expect), len(pings))
            for a in expect:
                self.assertIn(a, pings)

        if self.expect_either:
            check(endpoint1.pings + endpoint2.pings, self.expect_either)
        else:
            check(endpoint1.pings, self.expect1)
            check(endpoint2.pings, self.expect2)


TestMultipleServers.generate_scenarios()
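
# A condensed, runnable sketch (not one of the original tests) of the
# lifecycle contract the locking tests below exercise: start(), stop() and
# wait() may arrive from different threads, and the server is expected to
# apply them in start -> stop -> wait order. The topic/server names are
# illustrative; the in-process 'fake' driver avoids any real broker.
def _lifecycle_sketch():
    conf = cfg.ConfigOpts()
    transport = oslo_messaging.get_rpc_transport(conf, url='fake:')
    target = oslo_messaging.Target(topic='demo-topic', server='demo-server')
    server = oslo_messaging.get_rpc_server(transport, target, [object()])
    server.start()   # creates the listener and the executor
    server.stop()    # stop accepting new messages
    server.wait()    # block until in-flight work finishes, then clean up
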
class TestServerLocking(test_utils.BaseTestCase):

    def setUp(self):
        super(TestServerLocking, self).setUp(conf=cfg.ConfigOpts())

        def _logmethod(name):
            def method(self, *args, **kwargs):
                with self._lock:
                    self._calls.append(name)
            return method

        executors = []

        class FakeExecutor(object):
            def __init__(self, *args, **kwargs):
                self._lock = threading.Lock()
                self._calls = []
                executors.append(self)

            submit = _logmethod('submit')
            shutdown = _logmethod('shutdown')

        self.executors = executors

        class MessageHandlingServerImpl(oslo_messaging.MessageHandlingServer):
            def _create_listener(self):
                return mock.Mock()

            def _process_incoming(self, incoming):
                pass

        self.server = MessageHandlingServerImpl(mock.Mock(), mock.Mock())
        self.server._executor_cls = FakeExecutor

    def test_start_stop_wait(self):
        # Test a simple execution of start, stop, wait in order
        eventlet.spawn(self.server.start)
        self.server.stop()
        self.server.wait()

        self.assertEqual(1, len(self.executors))
        self.assertEqual(['shutdown'], self.executors[0]._calls)
        self.assertTrue(self.server.listener.cleanup.called)

    def test_reversed_order(self):
        # Test that if we call wait, stop, start, these will be correctly
        # reordered
        eventlet.spawn(self.server.wait)
        # This is non-deterministic, but there's not a great deal we can do
        # about that
        eventlet.sleep(0)

        eventlet.spawn(self.server.stop)
        eventlet.sleep(0)

        eventlet.spawn(self.server.start)

        self.server.wait()

        self.assertEqual(1, len(self.executors))
        self.assertEqual(['shutdown'], self.executors[0]._calls)

    def test_wait_for_running_task(self):
        # Test that if 2 threads call a method simultaneously, both will
        # wait, but only 1 will call the underlying executor method.
        start_event = eventletutils.Event()
        finish_event = eventletutils.Event()
        running_event = eventletutils.Event()
        done_event = eventletutils.Event()

        _runner = [None]

        class SteppingFakeExecutor(self.server._executor_cls):
            def __init__(self, *args, **kwargs):
                # Tell the test which thread won the race
                _runner[0] = eventlet.getcurrent()
                running_event.set()

                start_event.wait()
                super(SteppingFakeExecutor, self).__init__(*args, **kwargs)
                done_event.set()

                finish_event.wait()

        self.server._executor_cls = SteppingFakeExecutor

        start1 = eventlet.spawn(self.server.start)
        start2 = eventlet.spawn(self.server.start)

        # Wait until one of the threads starts running
        running_event.wait()
        runner = _runner[0]
        waiter = start2 if runner == start1 else start1

        waiter_finished = eventletutils.Event()
        waiter.link(lambda _: waiter_finished.set())

        # At this point, runner is running start(), and waiter is waiting
        # for it to complete. runner has not yet logged anything.
        self.assertEqual(0, len(self.executors))
        self.assertFalse(waiter_finished.is_set())

        # Let the runner log the call
        start_event.set()
        done_event.wait()

        # We haven't signalled completion yet, so submit shouldn't have run
        self.assertEqual(1, len(self.executors))
        self.assertEqual([], self.executors[0]._calls)
        self.assertFalse(waiter_finished.is_set())

        # Let the runner complete
        finish_event.set()
        waiter.wait()
        runner.wait()

        # Check that both threads have finished, start was only called once,
        # and execute ran
        self.assertTrue(waiter_finished.is_set())
        self.assertEqual(1, len(self.executors))
        self.assertEqual([], self.executors[0]._calls)

    def test_start_stop_wait_stop_wait(self):
        # Test that we behave correctly when calling stop/wait more than
        # once. Subsequent calls should be noops.
        self.server.start()
        self.server.stop()
        self.server.wait()
        self.server.stop()
        self.server.wait()

        self.assertEqual(1, len(self.executors))
        self.assertEqual(['shutdown'], self.executors[0]._calls)
        self.assertTrue(self.server.listener.cleanup.called)

    def test_state_wrapping(self):
        # Test that we behave correctly if a thread waits, and the server
        # state has wrapped when it is next scheduled

        # Ensure that if 2 threads wait for the completion of 'start', the
        # first will wait until complete_event is signalled, but the second
        # will continue
        complete_event = eventletutils.Event()
        complete_waiting_callback = eventletutils.Event()

        start_state = self.server._states['start']
        old_wait_for_completion = start_state.wait_for_completion

        waited = [False]

        def new_wait_for_completion(*args, **kwargs):
            if not waited[0]:
                waited[0] = True
                complete_waiting_callback.set()
                complete_event.wait()
            old_wait_for_completion(*args, **kwargs)

        start_state.wait_for_completion = new_wait_for_completion

        # thread1 will wait for start to complete until we signal it
        thread1 = eventlet.spawn(self.server.stop)
        thread1_finished = eventletutils.Event()
        thread1.link(lambda _: thread1_finished.set())

        self.server.start()
        complete_waiting_callback.wait()

        # The server should have started, but stop should not have been
        # called
        self.assertEqual(1, len(self.executors))
        self.assertEqual([], self.executors[0]._calls)
        self.assertFalse(thread1_finished.is_set())

        self.server.stop()
        self.server.wait()

        # We should have gone through all the states, and thread1 should
        # still be waiting
        self.assertEqual(1, len(self.executors))
        self.assertEqual(['shutdown'], self.executors[0]._calls)
        self.assertFalse(thread1_finished.is_set())

        # Start again
        self.server.start()

        # We should now have a second executor (one per start call)
        self.assertEqual(2, len(self.executors))
        self.assertEqual(['shutdown'], self.executors[0]._calls)
        self.assertEqual([], self.executors[1]._calls)
        self.assertFalse(thread1_finished.is_set())

        # Allow thread1 to complete
        complete_event.set()
        thread1_finished.wait()

        # thread1 should now have finished, and stop should not have been
        # called again on either the first or second executor
        self.assertEqual(2, len(self.executors))
        self.assertEqual(['shutdown'], self.executors[0]._calls)
        self.assertEqual([], self.executors[1]._calls)
        self.assertTrue(thread1_finished.is_set())

    @mock.patch.object(server_module, 'DEFAULT_LOG_AFTER', 1)
    @mock.patch.object(server_module, 'LOG')
    def test_logging(self, mock_log):
        # Test that we generate a log message if we wait longer than
        # DEFAULT_LOG_AFTER
        log_event = eventletutils.Event()
        mock_log.warning.side_effect = lambda _, __: log_event.set()

        # Call stop without calling start. We should log a wait after 1
        # second
        thread = eventlet.spawn(self.server.stop)
        log_event.wait()

        # Redundant given that we already waited, but it's nice to assert
        self.assertTrue(mock_log.warning.called)
        thread.kill()

    @mock.patch.object(server_module, 'LOG')
    def test_logging_explicit_wait(self, mock_log):
        # Test that we generate a log message if we wait longer than
        # the number of seconds passed to log_after
        log_event = eventletutils.Event()
        mock_log.warning.side_effect = lambda _, __: log_event.set()

        # Call stop without calling start. We should log a wait after 1
        # second
        thread = eventlet.spawn(self.server.stop, log_after=1)
        log_event.wait()

        # Redundant given that we already waited, but it's nice to assert
        self.assertTrue(mock_log.warning.called)
        thread.kill()

    @mock.patch.object(server_module, 'LOG')
    def test_logging_with_timeout(self, mock_log):
        # Test that we log a message after log_after seconds if we've also
        # specified an absolute timeout
        log_event = eventletutils.Event()
        mock_log.warning.side_effect = lambda _, __: log_event.set()

        # Call stop without calling start. We should log a wait after 1
        # second
        thread = eventlet.spawn(self.server.stop, log_after=1, timeout=2)
        log_event.wait()

        # Redundant given that we already waited, but it's nice to assert
        self.assertTrue(mock_log.warning.called)
        thread.kill()

    def test_timeout_wait(self):
        # Test that we will eventually timeout when passing the timeout
        # option if a preceding condition is not satisfied.
        self.assertRaises(server_module.TaskTimeout,
                          self.server.stop, timeout=1)

    def test_timeout_running(self):
        # Test that we will eventually timeout if we're waiting for another
        # thread to complete this task

        # Start the server, which will also instantiate an executor
        self.server.start()
        self.server.stop()
        shutdown_called = eventletutils.Event()

        # Patch the executor's shutdown method to be very slow
        def slow_shutdown(wait):
            shutdown_called.set()
            eventlet.sleep(10)
        self.executors[0].shutdown = slow_shutdown

        # Call wait in a new thread
        thread = eventlet.spawn(self.server.wait)

        # Wait until the thread is in the slow shutdown method
        shutdown_called.wait()

        # Call wait again in the main thread with a timeout
        self.assertRaises(server_module.TaskTimeout,
                          self.server.wait, timeout=1)
        thread.kill()

    @mock.patch.object(server_module, 'LOG')
    def test_log_after_zero(self, mock_log):
        # Test that we do not log a message after DEFAULT_LOG_AFTER if the
        # caller gave log_after=0

        # Call stop without calling start.
        self.assertRaises(server_module.TaskTimeout,
                          self.server.stop, log_after=0, timeout=2)

        # We timed out. Ensure we didn't log anything.
        self.assertFalse(mock_log.warning.called)


class TestRPCExposeDecorator(test_utils.BaseTestCase):

    def foo(self):
        pass

    @rpc.expose
    def bar(self):
        """bar docstring"""
        pass

    def test_undecorated(self):
        self.assertRaises(AttributeError, lambda: self.foo.exposed)

    def test_decorated(self):
        self.assertEqual(True, self.bar.exposed)
        self.assertEqual("""bar docstring""", self.bar.__doc__)
        self.assertEqual('bar', self.bar.__name__)


# ==== oslo_messaging/tests/test_config_opts_proxy.py ====

# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_config import types

from oslo_messaging._drivers import common as drv_cmn
from oslo_messaging.tests import utils as test_utils
from oslo_messaging import transport


class TestConfigOptsProxy(test_utils.BaseTestCase):

    def test_rabbit(self):
        group = 'oslo_messaging_rabbit'
        self.config(rabbit_retry_interval=1,
                    rabbit_qos_prefetch_count=0,
                    group=group)
        dummy_opts = [cfg.ListOpt('list_str', item_type=types.String(),
                                  default=[]),
                      cfg.ListOpt('list_int', item_type=types.Integer(),
                                  default=[]),
                      cfg.DictOpt('dict', default={}),
                      cfg.BoolOpt('bool', default=False),
                      cfg.StrOpt('str', default='default')]
        self.conf.register_opts(dummy_opts, group=group)
        url = transport.TransportURL.parse(
            self.conf,
            "rabbit:///"
            "?rabbit_qos_prefetch_count=2"
            "&list_str=1&list_str=2&list_str=3"
            "&list_int=1&list_int=2&list_int=3"
            "&dict=x:1&dict=y:2&dict=z:3"
            "&bool=True"
        )
        conf = drv_cmn.ConfigOptsProxy(self.conf, url, group)
        self.assertRaises(cfg.NoSuchOptError,
                          conf.__getattr__,
                          'unknown_group')
        self.assertIsInstance(getattr(conf, group),
                              conf.GroupAttrProxy)
        self.assertEqual(1, conf.oslo_messaging_rabbit.rabbit_retry_interval)
        self.assertEqual(2,
                         conf.oslo_messaging_rabbit.rabbit_qos_prefetch_count)
        self.assertEqual(['1', '2', '3'],
                         conf.oslo_messaging_rabbit.list_str)
        self.assertEqual([1, 2, 3],
                         conf.oslo_messaging_rabbit.list_int)
        self.assertEqual({'x': '1', 'y': '2', 'z': '3'},
                         conf.oslo_messaging_rabbit.dict)
        self.assertEqual(True, conf.oslo_messaging_rabbit.bool)
        self.assertEqual('default', conf.oslo_messaging_rabbit.str)

    def test_not_in_group(self):
        group = 'oslo_messaging_rabbit'
        url = transport.TransportURL.parse(
            self.conf,
            "rabbit:///?unknown_opt=4"
        )
        self.assertRaises(cfg.NoSuchOptError,
                          drv_cmn.ConfigOptsProxy,
                          self.conf, url, group)

    def test_invalid_value(self):
        group = 'oslo_messaging_rabbit'
        self.config(kombu_reconnect_delay=1.0, group=group)
        url = transport.TransportURL.parse(
            self.conf,
            "rabbit:///?kombu_reconnect_delay=invalid_value"
        )
        self.assertRaises(ValueError,
                          drv_cmn.ConfigOptsProxy,
                          self.conf, url, group)
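
# A standalone sketch of the mechanism the test case above exercises:
# options given in the transport URL query string shadow the values
# registered for the driver's option group. 'my_opt' is an illustrative
# option name, and ConfigOptsProxy is an internal helper, so treat this as
# a behavioural sketch rather than a supported public API.
def _query_override_sketch():
    conf = cfg.ConfigOpts()
    group = 'oslo_messaging_rabbit'
    conf.register_opts([cfg.IntOpt('my_opt', default=1)], group=group)
    url = transport.TransportURL.parse(conf, 'rabbit:///?my_opt=42')
    proxy = drv_cmn.ConfigOptsProxy(conf, url, group)
    # The proxy should prefer the URL value (42) over the default (1),
    # converted with the registered option's type.
    return proxy.oslo_messaging_rabbit.my_opt
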
# ==== oslo_messaging/tests/test_exception_serialization.py ====

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from oslo_serialization import jsonutils
import testscenarios

import oslo_messaging
from oslo_messaging._drivers import common as exceptions
from oslo_messaging.tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios

EXCEPTIONS_MODULE = 'builtins'
OTHER_EXCEPTIONS_MODULE = 'exceptions'


class NovaStyleException(Exception):

    format = 'I am Nova'

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if not message:
            message = self.format % kwargs
        super(NovaStyleException, self).__init__(message)


class KwargsStyleException(NovaStyleException):

    format = 'I am %(who)s'


def add_remote_postfix(ex):
    ex_type = type(ex)
    message = str(ex)
    str_override = lambda self: message
    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
                       {'__str__': str_override,
                        '__unicode__': str_override})
    new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__
    try:
        ex.__class__ = new_ex_type
    except TypeError:
        ex.args = (message,) + ex.args[1:]
    return ex


class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase):

    _add_remote = [
        ('add_remote', dict(add_remote=True)),
        ('do_not_add_remote', dict(add_remote=False)),
    ]

    _exception_types = [
        ('bog_standard', dict(cls=Exception,
                              args=['test'],
                              kwargs={},
                              clsname='Exception',
                              modname=EXCEPTIONS_MODULE,
                              msg='test')),
        ('nova_style', dict(cls=NovaStyleException,
                            args=[],
                            kwargs={},
                            clsname='NovaStyleException',
                            modname=__name__,
                            msg='I am Nova')),
        ('nova_style_with_msg', dict(cls=NovaStyleException,
                                     args=['testing'],
                                     kwargs={},
                                     clsname='NovaStyleException',
                                     modname=__name__,
                                     msg='testing')),
        ('kwargs_style', dict(cls=KwargsStyleException,
                              args=[],
                              kwargs={'who': 'Oslo'},
                              clsname='KwargsStyleException',
                              modname=__name__,
                              msg='I am Oslo')),
    ]

    @classmethod
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._add_remote,
                                                         cls._exception_types)

    def test_serialize_remote_exception(self):
        try:
            try:
                raise self.cls(*self.args, **self.kwargs)
            except Exception as ex:
                # Note: in Python 3 the ex variable will be cleared at the
                # end of the except clause, so explicitly make an extra
                # copy of it
                cls_error = ex
                if self.add_remote:
                    ex = add_remote_postfix(ex)
                raise ex
        except Exception:
            exc_info = sys.exc_info()

        serialized = exceptions.serialize_remote_exception(exc_info)

        failure = jsonutils.loads(serialized)

        self.assertEqual(self.clsname, failure['class'], failure)
        self.assertEqual(self.modname, failure['module'])
        self.assertEqual(self.msg, failure['message'])
        self.assertEqual([self.msg], failure['args'])
        self.assertEqual(self.kwargs, failure['kwargs'])

        # Note: _Remote prefix not stripped from tracebacks
        tb = cls_error.__class__.__name__ + ': ' + self.msg
        self.assertIn(tb, ''.join(failure['tb']))


SerializeRemoteExceptionTestCase.generate_scenarios()
class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase):

    _standard_allowed = [__name__]

    scenarios = [
        ('bog_standard',
         dict(allowed=_standard_allowed,
              clsname='Exception', modname=EXCEPTIONS_MODULE,
              cls=Exception, args=['test'], kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='Exception',
              remote_args=('test\ntraceback\ntraceback\n', ),
              remote_kwargs={})),
        ('different_python_versions',
         dict(allowed=_standard_allowed,
              clsname='Exception', modname=OTHER_EXCEPTIONS_MODULE,
              cls=Exception, args=['test'], kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='Exception',
              remote_args=('test\ntraceback\ntraceback\n', ),
              remote_kwargs={})),
        ('nova_style',
         dict(allowed=_standard_allowed,
              clsname='NovaStyleException', modname=__name__,
              cls=NovaStyleException, args=[], kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='NovaStyleException_Remote',
              remote_args=('I am Nova', ),
              remote_kwargs={})),
        ('nova_style_with_msg',
         dict(allowed=_standard_allowed,
              clsname='NovaStyleException', modname=__name__,
              cls=NovaStyleException, args=['testing'], kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='NovaStyleException_Remote',
              remote_args=('testing', ),
              remote_kwargs={})),
        ('kwargs_style',
         dict(allowed=_standard_allowed,
              clsname='KwargsStyleException', modname=__name__,
              cls=KwargsStyleException, args=[], kwargs={'who': 'Oslo'},
              str='test\ntraceback\ntraceback\n',
              remote_name='KwargsStyleException_Remote',
              remote_args=('I am Oslo', ),
              remote_kwargs={})),
        ('not_allowed',
         dict(allowed=[],
              clsname='NovaStyleException', modname=__name__,
              cls=oslo_messaging.RemoteError, args=[], kwargs={},
              str=("Remote error: NovaStyleException test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              msg=("Remote error: NovaStyleException test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'NovaStyleException',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        ('unknown_module',
         dict(allowed=['notexist'],
              clsname='Exception', modname='notexist',
              cls=oslo_messaging.RemoteError, args=[], kwargs={},
              str=("Remote error: Exception test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              msg=("Remote error: Exception test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'Exception',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        ('unknown_exception',
         dict(allowed=[],
              clsname='FarcicalError', modname=EXCEPTIONS_MODULE,
              cls=oslo_messaging.RemoteError, args=[], kwargs={},
              str=("Remote error: FarcicalError test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              msg=("Remote error: FarcicalError test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'FarcicalError',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        ('unknown_kwarg',
         dict(allowed=[],
              clsname='Exception', modname=EXCEPTIONS_MODULE,
              cls=oslo_messaging.RemoteError, args=[],
              kwargs={'foobar': 'blaa'},
              str=("Remote error: Exception test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              msg=("Remote error: Exception test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'Exception',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        ('system_exit',
         dict(allowed=[],
              clsname='SystemExit', modname=EXCEPTIONS_MODULE,
              cls=oslo_messaging.RemoteError, args=[], kwargs={},
              str=("Remote error: SystemExit test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              msg=("Remote error: SystemExit test\n"
                   "[%r]." % 'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'SystemExit',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
    ]

    def test_deserialize_remote_exception(self):
        failure = {
            'class': self.clsname,
            'module': self.modname,
            'message': 'test',
            'tb': ['traceback\ntraceback\n'],
            'args': self.args,
            'kwargs': self.kwargs,
        }

        serialized = jsonutils.dumps(failure)

        ex = exceptions.deserialize_remote_exception(serialized,
                                                     self.allowed)

        self.assertIsInstance(ex, self.cls)
        self.assertEqual(self.remote_name, ex.__class__.__name__)
        self.assertEqual(self.str, str(ex))
        if hasattr(self, 'msg'):
            self.assertEqual(self.msg, str(ex))
            self.assertEqual((self.msg,) + self.remote_args, ex.args)
        else:
            self.assertEqual(self.remote_args, ex.args)


# ==== oslo_messaging/tests/test_expected_exceptions.py ====

# Copyright 2012 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import oslo_messaging
from oslo_messaging.tests import utils as test_utils


class TestExpectedExceptions(test_utils.BaseTestCase):

    def test_exception(self):
        e = None
        try:
            try:
                raise ValueError()
            except Exception:
                raise oslo_messaging.ExpectedException()
        except oslo_messaging.ExpectedException as e:  # noqa: F841
            self.assertIsInstance(e, oslo_messaging.ExpectedException)
            self.assertTrue(hasattr(e, 'exc_info'))
            self.assertIsInstance(e.exc_info[1], ValueError)

    def test_decorator_expected(self):
        class FooException(Exception):
            pass

        @oslo_messaging.expected_exceptions(FooException)
        def naughty():
            raise FooException()

        self.assertRaises(oslo_messaging.ExpectedException, naughty)

    def test_decorator_expected_subclass(self):
        class FooException(Exception):
            pass

        class BarException(FooException):
            pass

        @oslo_messaging.expected_exceptions(FooException)
        def naughty():
            raise BarException()

        self.assertRaises(oslo_messaging.ExpectedException, naughty)

    def test_decorator_unexpected(self):
        class FooException(Exception):
            pass

        @oslo_messaging.expected_exceptions(FooException)
        def really_naughty():
            raise ValueError()

        self.assertRaises(ValueError, really_naughty)
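
# A minimal sketch of the decorator's intended server-side use, following
# the behaviour pinned down above: exceptions named in expected_exceptions
# are wrapped in ExpectedException, which the RPC server logs at debug
# rather than error level. '_DemoNotFound' is an illustrative name.
class _DemoNotFound(Exception):
    pass


class _DemoEndpoint(object):
    @oslo_messaging.expected_exceptions(_DemoNotFound)
    def get_thing(self, ctxt, thing_id):
        # An anticipated failure: the decorator re-raises it wrapped in
        # oslo_messaging.ExpectedException.
        raise _DemoNotFound(thing_id)
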
# ==== oslo_messaging/tests/test_fixture.py ====

# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from oslo_messaging import conffixture
from oslo_messaging.tests import utils as test_utils


class TestConfFixture(test_utils.BaseTestCase):

    def test_fixture_wraps_set_override(self):
        conf = self.messaging_conf.conf
        self.assertIsNotNone(conf.set_override.wrapped)
        self.messaging_conf._teardown_decorator()
        self.assertFalse(hasattr(conf.set_override, 'wrapped'))

    def test_fixture_wraps_clear_override(self):
        conf = self.messaging_conf.conf
        self.assertIsNotNone(conf.clear_override.wrapped)
        self.messaging_conf._teardown_decorator()
        self.assertFalse(hasattr(conf.clear_override, 'wrapped'))

    def test_fixture_setup_teardown_decorator(self):
        conf = cfg.ConfigOpts()
        self.assertFalse(hasattr(conf.set_override, 'wrapped'))
        self.assertFalse(hasattr(conf.clear_override, 'wrapped'))
        fixture = conffixture.ConfFixture(conf)
        self.assertFalse(hasattr(conf.set_override, 'wrapped'))
        self.assertFalse(hasattr(conf.clear_override, 'wrapped'))
        self.useFixture(fixture)
        self.assertTrue(hasattr(conf.set_override, 'wrapped'))
        self.assertTrue(hasattr(conf.clear_override, 'wrapped'))
        fixture._teardown_decorator()
        self.assertFalse(hasattr(conf.set_override, 'wrapped'))
        self.assertFalse(hasattr(conf.clear_override, 'wrapped'))

    def test_fixture_properties(self):
        conf = self.messaging_conf.conf
        self.messaging_conf.transport_url = 'fake:/vhost'
        self.assertEqual('fake:/vhost',
                         self.messaging_conf.transport_url)
        self.assertEqual('fake:/vhost',
                         conf.transport_url)

    def test_old_notifications_config_override(self):
        conf = self.messaging_conf.conf
        conf.set_override(
            "notification_driver", ["messaging"])
        conf.set_override(
            "notification_transport_url", "http://xyz")
        conf.set_override(
            "notification_topics", ['topic1'])

        self.assertEqual(["messaging"],
                         conf.oslo_messaging_notifications.driver)
        self.assertEqual("http://xyz",
                         conf.oslo_messaging_notifications.transport_url)
        self.assertEqual(['topic1'],
                         conf.oslo_messaging_notifications.topics)

        conf.clear_override("notification_driver")
        conf.clear_override("notification_transport_url")
        conf.clear_override("notification_topics")

        self.assertEqual([],
                         conf.oslo_messaging_notifications.driver)
        self.assertIsNone(conf.oslo_messaging_notifications.transport_url)
        self.assertEqual(['notifications'],
                         conf.oslo_messaging_notifications.topics)
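
# A short sketch of standalone ConfFixture use, following the behaviour
# verified above: overrides made while the fixture is active are undone
# automatically at cleanup time. 'test_case' stands for any fixtures-aware
# TestCase (e.g. an oslotest BaseTestCase subclass).
def _conffixture_sketch(test_case):
    fixture = test_case.useFixture(conffixture.ConfFixture(cfg.CONF))
    fixture.transport_url = 'fake:/'   # wrapped set_override under the hood
    return fixture.conf
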
# ==== oslo_messaging/tests/test_opts.py ====

# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import stevedore
import testtools

from oslo_messaging import server
try:
    from oslo_messaging import opts
except ImportError:
    opts = None
from oslo_messaging.tests import utils as test_utils


@testtools.skipIf(opts is None, "Options not importable")
class OptsTestCase(test_utils.BaseTestCase):

    def _test_list_opts(self, result):
        self.assertEqual(5, len(result))

        groups = [g for (g, l) in result]
        self.assertIn(None, groups)
        self.assertIn('oslo_messaging_amqp', groups)
        self.assertIn('oslo_messaging_notifications', groups)
        self.assertIn('oslo_messaging_rabbit', groups)
        self.assertIn('oslo_messaging_kafka', groups)

    def test_list_opts(self):
        self._test_list_opts(opts.list_opts())

    def test_entry_point(self):
        result = None
        for ext in stevedore.ExtensionManager('oslo.config.opts',
                                              invoke_on_load=True):
            if ext.name == "oslo.messaging":
                result = ext.obj
                break

        self.assertIsNotNone(result)
        self._test_list_opts(result)

    def test_defaults(self):
        transport = mock.Mock()
        transport.conf = self.conf

        class MessageHandlingServerImpl(server.MessageHandlingServer):
            def _create_listener(self):
                pass

            def _process_incoming(self, incoming):
                pass

        MessageHandlingServerImpl(transport, mock.Mock())
        opts.set_defaults(self.conf, executor_thread_pool_size=100)
        self.assertEqual(100, self.conf.executor_thread_pool_size)
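
# A small sketch of the entry-point discovery test_entry_point relies on:
# any package advertising an 'oslo.config.opts' entry point can be asked
# for its (group, options) pairs. The names here match the lookup above.
def _find_opts(namespace='oslo.config.opts', name='oslo.messaging'):
    for ext in stevedore.ExtensionManager(namespace, invoke_on_load=True):
        if ext.name == name:
            return ext.obj  # a list of (group, opts) tuples
    return None
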
# ==== oslo_messaging/tests/test_target.py ====

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testscenarios

import oslo_messaging
from oslo_messaging.tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios


class TargetConstructorTestCase(test_utils.BaseTestCase):

    scenarios = [
        ('all_none', dict(kwargs=dict())),
        ('exchange', dict(kwargs=dict(exchange='testexchange'))),
        ('topic', dict(kwargs=dict(topic='testtopic'))),
        ('namespace', dict(kwargs=dict(namespace='testnamespace'))),
        ('version', dict(kwargs=dict(version='3.4'))),
        ('server', dict(kwargs=dict(server='testserver'))),
        ('fanout', dict(kwargs=dict(fanout=True))),
    ]

    def test_constructor(self):
        target = oslo_messaging.Target(**self.kwargs)
        for k in self.kwargs:
            self.assertEqual(self.kwargs[k], getattr(target, k))
        for k in ['exchange', 'topic', 'namespace',
                  'version', 'server', 'fanout']:
            if k in self.kwargs:
                continue
            self.assertIsNone(getattr(target, k))


class TargetCallableTestCase(test_utils.BaseTestCase):

    scenarios = [
        ('all_none', dict(attrs=dict(), kwargs=dict(), vals=dict())),
        ('exchange_attr', dict(attrs=dict(exchange='testexchange'),
                               kwargs=dict(),
                               vals=dict(exchange='testexchange'))),
        ('exchange_arg', dict(attrs=dict(),
                              kwargs=dict(exchange='testexchange'),
                              vals=dict(exchange='testexchange'))),
        ('topic_attr', dict(attrs=dict(topic='testtopic'),
                            kwargs=dict(),
                            vals=dict(topic='testtopic'))),
        ('topic_arg', dict(attrs=dict(),
                           kwargs=dict(topic='testtopic'),
                           vals=dict(topic='testtopic'))),
        ('namespace_attr', dict(attrs=dict(namespace='testnamespace'),
                                kwargs=dict(),
                                vals=dict(namespace='testnamespace'))),
        ('namespace_arg', dict(attrs=dict(),
                               kwargs=dict(namespace='testnamespace'),
                               vals=dict(namespace='testnamespace'))),
        ('version_attr', dict(attrs=dict(version='3.4'),
                              kwargs=dict(),
                              vals=dict(version='3.4'))),
        ('version_arg', dict(attrs=dict(),
                             kwargs=dict(version='3.4'),
                             vals=dict(version='3.4'))),
        ('server_attr', dict(attrs=dict(server='testserver'),
                             kwargs=dict(),
                             vals=dict(server='testserver'))),
        ('server_arg', dict(attrs=dict(),
                            kwargs=dict(server='testserver'),
                            vals=dict(server='testserver'))),
        ('fanout_attr', dict(attrs=dict(fanout=True),
                             kwargs=dict(),
                             vals=dict(fanout=True))),
        ('fanout_arg', dict(attrs=dict(),
                            kwargs=dict(fanout=True),
                            vals=dict(fanout=True))),
    ]

    def test_callable(self):
        target = oslo_messaging.Target(**self.attrs)
        target = target(**self.kwargs)
        for k in self.vals:
            self.assertEqual(self.vals[k], getattr(target, k))
        for k in ['exchange', 'topic', 'namespace',
                  'version', 'server', 'fanout']:
            if k in self.vals:
                continue
            self.assertIsNone(getattr(target, k))


class TargetReprTestCase(test_utils.BaseTestCase):

    scenarios = [
        ('all_none', dict(kwargs=dict(), repr='')),
        ('exchange', dict(kwargs=dict(exchange='testexchange'),
                          repr='exchange=testexchange')),
        ('topic', dict(kwargs=dict(topic='testtopic'),
                       repr='topic=testtopic')),
        ('namespace', dict(kwargs=dict(namespace='testnamespace'),
                           repr='namespace=testnamespace')),
        ('version', dict(kwargs=dict(version='3.4'),
                         repr='version=3.4')),
        ('server', dict(kwargs=dict(server='testserver'),
                        repr='server=testserver')),
        ('fanout', dict(kwargs=dict(fanout=True),
                        repr='fanout=True')),
        ('exchange_and_fanout', dict(kwargs=dict(exchange='testexchange',
                                                 fanout=True),
                                     repr='exchange=testexchange, '
                                          'fanout=True')),
    ]

    def test_repr(self):
        target = oslo_messaging.Target(**self.kwargs)
        self.assertEqual('<Target ' + self.repr + '>', str(target))
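
# An illustrative sketch (axis contents invented for the example) of the
# scenario multiplication EqualityTestCase.generate_scenarios() below uses:
# each axis is a list of (name, params) pairs, and multiply_scenarios
# builds the cross product, joining names with ','.
_example_axis_a = [('a_none', dict(a_value=None)),
                   ('a_foo', dict(a_value='foo'))]
_example_axis_b = [('b_none', dict(b_value=None)),
                   ('b_foo', dict(b_value='foo'))]
# Four scenarios result, e.g. ('a_none,b_foo', {'a_value': None,
# 'b_value': 'foo'}); testscenarios then runs the test once per scenario.
_example_scenarios = testscenarios.multiply_scenarios(_example_axis_a,
                                                      _example_axis_b)
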
_notset = object()


class EqualityTestCase(test_utils.BaseTestCase):

    @classmethod
    def generate_scenarios(cls):
        attr = [
            ('exchange', dict(attr='exchange')),
            ('topic', dict(attr='topic')),
            ('namespace', dict(attr='namespace')),
            ('version', dict(attr='version')),
            ('server', dict(attr='server')),
            ('fanout', dict(attr='fanout')),
        ]
        a = [
            ('a_notset', dict(a_value=_notset)),
            ('a_none', dict(a_value=None)),
            ('a_empty', dict(a_value='')),
            ('a_foo', dict(a_value='foo')),
            ('a_bar', dict(a_value='bar')),
        ]
        b = [
            ('b_notset', dict(b_value=_notset)),
            ('b_none', dict(b_value=None)),
            ('b_empty', dict(b_value='')),
            ('b_foo', dict(b_value='foo')),
            ('b_bar', dict(b_value='bar')),
        ]

        cls.scenarios = testscenarios.multiply_scenarios(attr, a, b)
        for s in cls.scenarios:
            s[1]['equals'] = (s[1]['a_value'] == s[1]['b_value'])

    def test_equality(self):
        a_kwargs = {self.attr: self.a_value}
        b_kwargs = {self.attr: self.b_value}

        a = oslo_messaging.Target(**a_kwargs)
        b = oslo_messaging.Target(**b_kwargs)

        if self.equals:
            self.assertEqual(a, b)
            self.assertFalse(a != b)
        else:
            self.assertNotEqual(a, b)
            self.assertFalse(a == b)


EqualityTestCase.generate_scenarios()


# ==== oslo_messaging/tests/test_transport.py ====

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
from unittest import mock

from oslo_config import cfg
from stevedore import driver
import testscenarios

import oslo_messaging
from oslo_messaging.tests import utils as test_utils
from oslo_messaging import transport

load_tests = testscenarios.load_tests_apply_scenarios


class _FakeDriver(object):

    def __init__(self, conf):
        self.conf = conf

    def send(self, *args, **kwargs):
        pass

    def send_notification(self, *args, **kwargs):
        pass

    def listen(self, target, batch_size, batch_timeout):
        pass


class _FakeManager(object):

    def __init__(self, driver):
        self.driver = driver


class GetTransportTestCase(test_utils.BaseTestCase):

    scenarios = [
        ('default',
         dict(url=None, transport_url=None,
              control_exchange=None, allowed=None,
              expect=dict(backend='rabbit',
                          exchange=None,
                          url='rabbit:',
                          allowed=[]))),
        ('transport_url',
         dict(url=None, transport_url='testtransport:',
              control_exchange=None, allowed=None,
              expect=dict(backend='testtransport',
                          exchange=None,
                          url='testtransport:',
                          allowed=[]))),
        ('url_param',
         dict(url='testtransport:', transport_url=None,
              control_exchange=None, allowed=None,
              expect=dict(backend='testtransport',
                          exchange=None,
                          url='testtransport:',
                          allowed=[]))),
        ('control_exchange',
         dict(url=None, transport_url='testbackend:',
              control_exchange='testexchange', allowed=None,
              expect=dict(backend='testbackend',
                          exchange='testexchange',
                          url='testbackend:',
                          allowed=[]))),
        ('allowed_remote_exmods',
         dict(url=None, transport_url='testbackend:',
              control_exchange=None, allowed=['foo', 'bar'],
              expect=dict(backend='testbackend',
                          exchange=None,
                          url='testbackend:',
                          allowed=['foo', 'bar']))),
    ]
    @mock.patch('oslo_messaging.transport.LOG')
    def test_get_transport(self, fake_logger):
        self.messaging_conf.reset()
        self.config(control_exchange=self.control_exchange)
        if self.transport_url:
            self.config(transport_url=self.transport_url)

        driver.DriverManager = mock.Mock()

        invoke_args = [self.conf,
                       oslo_messaging.TransportURL.parse(self.conf,
                                                         self.expect['url'])]
        invoke_kwds = dict(default_exchange=self.expect['exchange'],
                           allowed_remote_exmods=self.expect['allowed'])

        drvr = _FakeDriver(self.conf)
        driver.DriverManager.return_value = _FakeManager(drvr)

        kwargs = dict(url=self.url)
        if self.allowed is not None:
            kwargs['allowed_remote_exmods'] = self.allowed
        transport_ = oslo_messaging.get_transport(self.conf, **kwargs)

        self.assertIsNotNone(transport_)
        self.assertIs(transport_.conf, self.conf)
        self.assertIs(transport_._driver, drvr)
        self.assertIsInstance(transport_, transport.RPCTransport)

        driver.DriverManager.assert_called_once_with('oslo.messaging.drivers',
                                                     self.expect['backend'],
                                                     invoke_on_load=True,
                                                     invoke_args=invoke_args,
                                                     invoke_kwds=invoke_kwds)


class GetTransportSadPathTestCase(test_utils.BaseTestCase):

    scenarios = [
        ('invalid_transport_url',
         dict(url=None, transport_url='invalid',
              ex=dict(cls=oslo_messaging.InvalidTransportURL,
                      msg_contains='No scheme specified',
                      url='invalid'))),
        ('invalid_url_param',
         dict(url='invalid', transport_url=None,
              ex=dict(cls=oslo_messaging.InvalidTransportURL,
                      msg_contains='No scheme specified',
                      url='invalid'))),
        ('driver_load_failure',
         dict(url=None, transport_url='testbackend:/',
              ex=dict(cls=oslo_messaging.DriverLoadFailure,
                      msg_contains='Failed to load',
                      driver='testbackend'))),
    ]

    def test_get_transport_sad(self):
        self.config(transport_url=self.transport_url)

        ex_cls = self.ex.pop('cls')
        ex_msg_contains = self.ex.pop('msg_contains')

        ex = self.assertRaises(
            ex_cls, oslo_messaging.get_transport, self.conf, url=self.url)

        self.assertIn(ex_msg_contains, str(ex))
        for k, v in self.ex.items():
            self.assertTrue(hasattr(ex, k))
            self.assertEqual(v, str(getattr(ex, k)))


# FIXME(markmc): this could be used elsewhere
class _SetDefaultsFixture(fixtures.Fixture):

    def __init__(self, set_defaults, opts, *names):
        super(_SetDefaultsFixture, self).__init__()
        self.set_defaults = set_defaults
        self.opts = opts
        self.names = names

    def setUp(self):
        super(_SetDefaultsFixture, self).setUp()

        # FIXME(markmc): this comes from Id5c1f3ba
        def first(seq, default=None, key=None):
            if key is None:
                key = bool
            return next(filter(key, seq), default)

        def default(opts, name):
            return first(opts, key=lambda o: o.name == name).default

        orig_defaults = {}
        for n in self.names:
            orig_defaults[n] = default(self.opts, n)

        def restore_defaults():
            self.set_defaults(**orig_defaults)

        self.addCleanup(restore_defaults)


class TestSetDefaults(test_utils.BaseTestCase):

    def setUp(self):
        super(TestSetDefaults, self).setUp(conf=cfg.ConfigOpts())
        self.useFixture(_SetDefaultsFixture(
            oslo_messaging.set_transport_defaults,
            transport._transport_opts,
            'control_exchange'))

    def test_set_default_control_exchange(self):
        oslo_messaging.set_transport_defaults(control_exchange='foo')

        driver.DriverManager = mock.Mock()
        invoke_kwds = dict(default_exchange='foo',
                           allowed_remote_exmods=[])
        driver.DriverManager.return_value = \
            _FakeManager(_FakeDriver(self.conf))

        oslo_messaging.get_transport(self.conf)

        driver.DriverManager.assert_called_once_with(mock.ANY, mock.ANY,
                                                     invoke_on_load=mock.ANY,
                                                     invoke_args=mock.ANY,
                                                     invoke_kwds=invoke_kwds)


class TestTransportMethodArgs(test_utils.BaseTestCase):

    _target = oslo_messaging.Target(topic='topic', server='server')

    def test_send_defaults(self):
        t = transport.Transport(_FakeDriver(cfg.CONF))
        t._driver.send = mock.Mock()

        t._send(self._target, 'ctxt', 'message')
        t._driver.send.assert_called_once_with(self._target,
                                               'ctxt', 'message',
                                               wait_for_reply=None,
                                               timeout=None,
                                               call_monitor_timeout=None,
                                               retry=None,
                                               transport_options=None)

    def test_send_all_args(self):
        t = transport.Transport(_FakeDriver(cfg.CONF))
        t._driver.send = mock.Mock()

        t._send(self._target, 'ctxt', 'message',
                wait_for_reply='wait_for_reply',
                timeout='timeout',
                call_monitor_timeout='cm_timeout',
                retry='retry')

        t._driver.send.\
            assert_called_once_with(self._target,
                                    'ctxt', 'message',
                                    wait_for_reply='wait_for_reply',
                                    timeout='timeout',
                                    call_monitor_timeout='cm_timeout',
                                    retry='retry',
                                    transport_options=None)

    def test_send_notification(self):
        t = transport.Transport(_FakeDriver(cfg.CONF))
        t._driver.send_notification = mock.Mock()

        t._send_notification(self._target, 'ctxt', 'message', version=1.0)

        t._driver.send_notification.assert_called_once_with(self._target,
                                                            'ctxt', 'message',
                                                            1.0, retry=None)

    def test_send_notification_all_args(self):
        t = transport.Transport(_FakeDriver(cfg.CONF))
        t._driver.send_notification = mock.Mock()

        t._send_notification(self._target, 'ctxt', 'message', version=1.0,
                             retry=5)

        t._driver.send_notification.assert_called_once_with(self._target,
                                                            'ctxt', 'message',
                                                            1.0, retry=5)

    def test_listen(self):
        t = transport.Transport(_FakeDriver(cfg.CONF))
        t._driver.listen = mock.Mock()

        t._listen(self._target, 1, None)

        t._driver.listen.assert_called_once_with(self._target, 1, None)


class TestTransportUrlCustomisation(test_utils.BaseTestCase):

    def setUp(self):
        super(TestTransportUrlCustomisation, self).setUp()

        def transport_url_parse(url):
            return transport.TransportURL.parse(self.conf, url)

        self.url1 = transport_url_parse(
            "fake:/vhost1/localhost:5672/?x=1&y=2&z=3")
        self.url2 = transport_url_parse("fake:/vhost2/localhost:5672/?foo=bar")
        self.url3 = transport_url_parse(
            "fake:/vhost1/localhost:5672/?l=1&l=2&l=3")
        self.url4 = transport_url_parse(
            "fake:/vhost2/localhost:5672/?d=x:1&d=y:2&d=z:3")
        self.url5 = transport_url_parse("fake://noport:/?")

    def test_hash(self):
        urls = {}
        urls[self.url1] = self.url1
        urls[self.url2] = self.url2
        urls[self.url3] = self.url3
        urls[self.url4] = self.url4
        urls[self.url5] = self.url5
        self.assertEqual(3, len(urls))

    def test_eq(self):
        self.assertEqual(self.url1, self.url3)
        self.assertEqual(self.url2, self.url4)
        self.assertNotEqual(self.url1, self.url4)

    def test_query(self):
        self.assertEqual({'x': '1', 'y': '2', 'z': '3'}, self.url1.query)
        self.assertEqual({'foo': 'bar'}, self.url2.query)
        self.assertEqual({'l': '1,2,3'}, self.url3.query)
        self.assertEqual({'d': 'x:1,y:2,z:3'}, self.url4.query)

    def test_noport(self):
        self.assertIsNone(self.url5.hosts[0].port)
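
# A compact sketch of the query folding test_query above pins down:
# repeated query keys are collapsed into a single comma-joined value when
# the URL is parsed. 'conf' is any oslo.config ConfigOpts instance.
def _query_folding_sketch(conf):
    url = transport.TransportURL.parse(
        conf, 'fake:/vhost/localhost:5672/?l=1&l=2&l=3')
    return url.query  # expected: {'l': '1,2,3'}
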
class TestTransportHostCustomisation(test_utils.BaseTestCase):

    def setUp(self):
        super(TestTransportHostCustomisation, self).setUp()
        self.host1 = transport.TransportHost("host1", 5662, "user", "pass")
        self.host2 = transport.TransportHost("host1", 5662, "user", "pass")
        self.host3 = transport.TransportHost("host1", 5663, "user", "pass")
        self.host4 = transport.TransportHost("host1", 5662, "user2", "pass")
        self.host5 = transport.TransportHost("host1", 5662, "user", "pass2")
        self.host6 = transport.TransportHost("host2", 5662, "user", "pass")

    def test_hash(self):
        hosts = {}
        hosts[self.host1] = self.host1
        hosts[self.host2] = self.host2
        hosts[self.host3] = self.host3
        hosts[self.host4] = self.host4
        hosts[self.host5] = self.host5
        hosts[self.host6] = self.host6
        self.assertEqual(5, len(hosts))

    def test_eq(self):
        self.assertEqual(self.host1, self.host2)
        self.assertNotEqual(self.host1, self.host3)
        self.assertNotEqual(self.host1, self.host4)
        self.assertNotEqual(self.host1, self.host5)
        self.assertNotEqual(self.host1, self.host6)


# ==== oslo_messaging/tests/test_urls.py ====

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testscenarios

import oslo_messaging
from oslo_messaging.tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios


class TestParseURL(test_utils.BaseTestCase):

    scenarios = [
        ('transport',
         dict(url='foo:', expect=dict(transport='foo'))),
        ('virtual_host_slash',
         dict(url='foo:////',
              expect=dict(transport='foo', virtual_host='/'))),
        ('virtual_host',
         dict(url='foo:///bar',
              expect=dict(transport='foo', virtual_host='bar'))),
        ('host',
         dict(url='foo://host/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host'),
                          ]))),
        ('ipv6_host',
         dict(url='foo://[ffff::1]/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='ffff::1'),
                          ]))),
        ('port',
         dict(url='foo://host:1234/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host', port=1234),
                          ]))),
        ('ipv6_port',
         dict(url='foo://[ffff::1]:1234/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='ffff::1', port=1234),
                          ]))),
        ('username',
         dict(url='foo://u@host:1234/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host', port=1234, username='u'),
                          ]))),
        ('password',
         dict(url='foo://u:p@host:1234/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host', port=1234,
                                   username='u', password='p'),
                          ]))),
        ('creds_no_host',
         dict(url='foo://u:p@/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(username='u', password='p'),
                          ]))),
        ('multi_host',
         dict(url='foo://u:p@host1:1234,host2:4321/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host1', port=1234,
                                   username='u', password='p'),
                              dict(host='host2', port=4321),
                          ]))),
        ('multi_host_partial_creds',
         dict(url='foo://u:p@host1,host2/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host1', username='u', password='p'),
                              dict(host='host2'),
                          ]))),
        ('multi_creds',
         dict(url='foo://u1:p1@host1:1234,u2:p2@host2:4321/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host1', port=1234,
                                   username='u1', password='p1'),
                              dict(host='host2', port=4321,
                                   username='u2', password='p2'),
                          ]))),
        ('multi_creds_ipv6',
         dict(url='foo://u1:p1@[ffff::1]:1234,u2:p2@[ffff::2]:4321/bar',
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='ffff::1', port=1234,
                                   username='u1', password='p1'),
                              dict(host='ffff::2', port=4321,
                                   username='u2', password='p2'),
                          ]))),
        ('quoting',
         dict(url='foo://u%24:p%26@host:1234/%24',
              expect=dict(transport='foo',
                          virtual_host='$',
                          hosts=[
                              dict(host='host', port=1234,
                                   username='u$', password='p&'),
                          ]))),
    ]
    def test_parse_url(self):
        url = oslo_messaging.TransportURL.parse(self.conf, self.url)

        hosts = []
        for host in self.expect.get('hosts', []):
            hosts.append(oslo_messaging.TransportHost(host.get('host'),
                                                      host.get('port'),
                                                      host.get('username'),
                                                      host.get('password')))
        expected = oslo_messaging.TransportURL(self.conf,
                                               self.expect.get('transport'),
                                               self.expect.get('virtual_host'),
                                               hosts)

        self.assertEqual(expected, url)


class TestFormatURL(test_utils.BaseTestCase):

    scenarios = [
        ('transport',
         dict(transport='testtransport',
              virtual_host=None,
              hosts=[],
              expected='testtransport:///')),
        ('virtual_host',
         dict(transport='testtransport',
              virtual_host='/vhost',
              hosts=[],
              expected='testtransport:////vhost')),
        ('host',
         dict(transport='testtransport',
              virtual_host='/',
              hosts=[
                  dict(hostname='host',
                       port=10,
                       username='bob',
                       password='secret'),
              ],
              expected='testtransport://bob:secret@host:10//')),
        ('multi_host',
         dict(transport='testtransport',
              virtual_host='',
              hosts=[
                  dict(hostname='h1',
                       port=1000,
                       username='b1',
                       password='s1'),
                  dict(hostname='h2',
                       port=2000,
                       username='b2',
                       password='s2'),
              ],
              expected='testtransport://b1:s1@h1:1000,b2:s2@h2:2000/')),
        ('quoting',
         dict(transport='testtransport',
              virtual_host='/$',
              hosts=[
                  dict(hostname='host',
                       port=10,
                       username='b$',
                       password='s&'),
              ],
              expected='testtransport://b%24:s%26@host:10//%24')),
    ]

    def test_format_url(self):
        hosts = []
        for host in self.hosts:
            hosts.append(oslo_messaging.TransportHost(host.get('hostname'),
                                                      host.get('port'),
                                                      host.get('username'),
                                                      host.get('password')))
        url = oslo_messaging.TransportURL(self.conf,
                                          self.transport,
                                          self.virtual_host,
                                          hosts)

        self.assertEqual(self.expected, str(url))
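
# A short sketch tying the two test cases above together: parsing a quoted
# URL and stringifying it again should re-apply the quoting, so the round
# trip is expected to be lossless for reserved characters. 'conf' is any
# oslo.config ConfigOpts instance.
def _round_trip_sketch(conf):
    url = oslo_messaging.TransportURL.parse(
        conf, 'foo://u%24:p%26@host:1234/%24')
    return str(url)  # expected to match the original quoted form
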
# ==== oslo_messaging/tests/test_utils.py ====

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from oslo_messaging._drivers import common
from oslo_messaging import _utils as utils
from oslo_messaging.tests import utils as test_utils


class VersionIsCompatibleTestCase(test_utils.BaseTestCase):

    def test_version_is_compatible_same(self):
        self.assertTrue(utils.version_is_compatible('1.23', '1.23'))

    def test_version_is_compatible_newer_minor(self):
        self.assertTrue(utils.version_is_compatible('1.24', '1.23'))

    def test_version_is_compatible_older_minor(self):
        self.assertFalse(utils.version_is_compatible('1.22', '1.23'))

    def test_version_is_compatible_major_difference1(self):
        self.assertFalse(utils.version_is_compatible('2.23', '1.23'))

    def test_version_is_compatible_major_difference2(self):
        self.assertFalse(utils.version_is_compatible('1.23', '2.23'))

    def test_version_is_compatible_newer_rev(self):
        self.assertFalse(utils.version_is_compatible('1.23', '1.23.1'))

    def test_version_is_compatible_newer_rev_both(self):
        self.assertFalse(utils.version_is_compatible('1.23.1', '1.23.2'))

    def test_version_is_compatible_older_rev_both(self):
        self.assertTrue(utils.version_is_compatible('1.23.2', '1.23.1'))

    def test_version_is_compatible_older_rev(self):
        self.assertTrue(utils.version_is_compatible('1.24', '1.23.1'))

    def test_version_is_compatible_no_rev_is_zero(self):
        self.assertTrue(utils.version_is_compatible('1.23.0', '1.23'))


class TimerTestCase(test_utils.BaseTestCase):

    def test_no_duration_no_callback(self):
        t = common.DecayingTimer()
        t.start()
        remaining = t.check_return()
        self.assertIsNone(remaining)

    def test_no_duration_but_maximum(self):
        t = common.DecayingTimer()
        t.start()
        remaining = t.check_return(maximum=2)
        self.assertEqual(2, remaining)

    @mock.patch('oslo_utils.timeutils.now')
    def test_duration_expired_no_callback(self, now):
        now.return_value = 0
        t = common.DecayingTimer(2)
        t.start()

        now.return_value = 3
        remaining = t.check_return()
        self.assertEqual(0, remaining)

    @mock.patch('oslo_utils.timeutils.now')
    def test_duration_callback(self, now):
        now.return_value = 0
        t = common.DecayingTimer(2)
        t.start()

        now.return_value = 3
        callback = mock.Mock()
        remaining = t.check_return(callback)
        self.assertEqual(0, remaining)
        callback.assert_called_once_with()

    @mock.patch('oslo_utils.timeutils.now')
    def test_duration_callback_with_args(self, now):
        now.return_value = 0
        t = common.DecayingTimer(2)
        t.start()

        now.return_value = 3
        callback = mock.Mock()
        remaining = t.check_return(callback, 1, a='b')
        self.assertEqual(0, remaining)
        callback.assert_called_once_with(1, a='b')

    @mock.patch('oslo_utils.timeutils.now')
    def test_reset(self, now):
        now.return_value = 0
        t = common.DecayingTimer(3)
        t.start()

        now.return_value = 1
        remaining = t.check_return()
        self.assertEqual(2, remaining)

        t.restart()
        remaining = t.check_return()
        self.assertEqual(3, remaining)
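
# A brief sketch of the DecayingTimer contract checked above: the timer
# counts down from its duration once start() is called, check_return()
# reports the remaining budget (None if no duration was given), and an
# optional callback fires once the budget reaches zero.
def _timer_sketch():
    t = common.DecayingTimer(2)  # a two-second budget
    t.start()
    return t.check_return()      # close to 2 now; 0 once it has decayed
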
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Common utilities used in testing"""

import threading

from oslo_config import cfg
from oslo_context.context import RequestContext
from oslo_utils import eventletutils
from oslotest import base

TRUE_VALUES = ('true', '1', 'yes')


class BaseTestCase(base.BaseTestCase):

    def setUp(self, conf=cfg.CONF):
        super(BaseTestCase, self).setUp()

        from oslo_messaging import conffixture
        self.messaging_conf = self.useFixture(conffixture.ConfFixture(conf))
        self.messaging_conf.transport_url = 'fake:/'
        self.conf = self.messaging_conf.conf

        self.conf.project = 'project'
        self.conf.prog = 'prog'

    def config(self, **kw):
        """Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the tearDown() method.
        """
        group = kw.pop('group', None)
        for k, v in kw.items():
            self.conf.set_override(k, v, group)


class ServerThreadHelper(threading.Thread):
    def __init__(self, server):
        super(ServerThreadHelper, self).__init__()
        self.daemon = True
        self._server = server
        self._stop_event = eventletutils.Event()
        self._start_event = eventletutils.Event()

    def start(self):
        super(ServerThreadHelper, self).start()
        self._start_event.wait()

    def run(self):
        self._server.start()
        self._start_event.set()
        self._stop_event.wait()
        # Check start() does nothing with a running listener
        self._server.start()
        self._server.stop()
        self._server.wait()

    def stop(self):
        self._stop_event.set()


class TestContext(RequestContext):
    def redacted_copy(self):
        # NOTE(JayF): By returning our self here instead of redacting, we can
        #             continue using equality comparisons in unit tests.
        return self

oslo.messaging-14.9.0/oslo_messaging/transport.py

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
# Copyright (c) 2012 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
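# NOTE: illustrative sketch, not part of the original module. A typical
# consumer of this module wires it up roughly as follows (the ConfigOpts
# instance ``conf`` is assumed to be set up by the application):
#
#   import oslo_messaging
#   oslo_messaging.set_transport_defaults(control_exchange='myservice')
#   transport = oslo_messaging.get_rpc_transport(conf)
#   ...
#   transport.cleanup()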
import logging

from debtcollector import removals
from oslo_config import cfg
from stevedore import driver
from urllib import parse

from oslo_messaging import exceptions

__all__ = [
    'DriverLoadFailure',
    'InvalidTransportURL',
    'Transport',
    'TransportHost',
    'TransportURL',
    'TransportOptions',
    'get_transport',
    'set_transport_defaults',
]

LOG = logging.getLogger(__name__)

_transport_opts = [
    cfg.StrOpt('transport_url',
               default="rabbit://",
               secret=True,
               help='The network address and optional user credentials for '
                    'connecting to the messaging backend, in URL format. The '
                    'expected format is:\n\n'
                    'driver://[user:pass@]host:port[,[userN:passN@]hostN:'
                    'portN]/virtual_host?query\n\n'
                    'Example: rabbit://rabbitmq:password@127.0.0.1:5672//\n\n'
                    'For full details on the fields in the URL see the '
                    'documentation of oslo_messaging.TransportURL at '
                    'https://docs.openstack.org/oslo.messaging/latest/'
                    'reference/transport.html'),
    cfg.StrOpt('control_exchange',
               default='openstack',
               help='The default exchange under which topics are scoped. May '
                    'be overridden by an exchange name specified in the '
                    'transport_url option.'),
]


def set_transport_defaults(control_exchange):
    """Set defaults for messaging transport configuration options.

    :param control_exchange: the default exchange under which topics are
                             scoped
    :type control_exchange: str
    """
    cfg.set_defaults(_transport_opts,
                     control_exchange=control_exchange)


class Transport(object):
    """A messaging transport.

    This is a mostly opaque handle for an underlying messaging transport
    driver.

    RPCs and Notifications may use separate messaging systems that utilize
    different drivers, access permissions, message delivery, etc. To ensure
    the correct messaging functionality, the corresponding method should be
    used to construct a Transport object from transport configuration
    gleaned from the user's configuration and, optionally, a transport URL.

    The factory method for RPC Transport objects::

        def get_rpc_transport(conf, url=None,
                              allowed_remote_exmods=None)

    If a transport URL is supplied as a parameter, any transport
    configuration contained in it takes precedence. If no transport URL is
    supplied, but there is a transport URL supplied in the user's
    configuration then that URL will take the place of the URL parameter.

    The factory method for Notification Transport objects::

        def get_notification_transport(conf, url=None,
                                       allowed_remote_exmods=None)

    If no transport URL is provided, the URL in the notifications section
    of the config file will be used. If that URL is also absent, the same
    transport as specified in the user's default section will be used.

    The Transport has a single 'conf' property which is the cfg.ConfigOpts
    instance used to construct the transport object.
""" def __init__(self, driver): self.conf = driver.conf self._driver = driver def _require_driver_features(self, requeue=False): self._driver.require_features(requeue=requeue) def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None, call_monitor_timeout=None, retry=None, transport_options=None): if not target.topic: raise exceptions.InvalidTarget('A topic is required to send', target) return self._driver.send(target, ctxt, message, wait_for_reply=wait_for_reply, timeout=timeout, call_monitor_timeout=call_monitor_timeout, retry=retry, transport_options=transport_options) def _send_notification(self, target, ctxt, message, version, retry=None): if not target.topic: raise exceptions.InvalidTarget('A topic is required to send', target) self._driver.send_notification(target, ctxt, message, version, retry=retry) def _listen(self, target, batch_size, batch_timeout): if not (target.topic and target.server): raise exceptions.InvalidTarget('A server\'s target must have ' 'topic and server names specified', target) return self._driver.listen(target, batch_size, batch_timeout) def _listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): for target, priority in targets_and_priorities: if not target.topic: raise exceptions.InvalidTarget('A target must have ' 'topic specified', target) return self._driver.listen_for_notifications( targets_and_priorities, pool, batch_size, batch_timeout ) def cleanup(self): """Release all resources associated with this transport.""" self._driver.cleanup() class RPCTransport(Transport): """Transport object for RPC.""" def __init__(self, driver): super(RPCTransport, self).__init__(driver) class NotificationTransport(Transport): """Transport object for notifications.""" def __init__(self, driver): super(NotificationTransport, self).__init__(driver) class InvalidTransportURL(exceptions.MessagingException): """Raised if transport URL is invalid.""" def __init__(self, url, msg): super(InvalidTransportURL, self).__init__(msg) self.url = url class DriverLoadFailure(exceptions.MessagingException): """Raised if a transport driver can't be loaded.""" def __init__(self, driver, ex): msg = 'Failed to load transport driver "%s": %s' % (driver, ex) super(DriverLoadFailure, self).__init__(msg) self.driver = driver self.ex = ex def _get_transport(conf, url=None, allowed_remote_exmods=None, transport_cls=RPCTransport): allowed_remote_exmods = allowed_remote_exmods or [] conf.register_opts(_transport_opts) if not isinstance(url, TransportURL): url = TransportURL.parse(conf, url) kwargs = dict(default_exchange=conf.control_exchange, allowed_remote_exmods=allowed_remote_exmods) try: mgr = driver.DriverManager('oslo.messaging.drivers', url.transport.split('+')[0], invoke_on_load=True, invoke_args=[conf, url], invoke_kwds=kwargs) except RuntimeError as ex: raise DriverLoadFailure(url.transport, ex) return transport_cls(mgr.driver) @removals.remove( message='use get_rpc_transport or get_notification_transport' ) def get_transport(conf, url=None, allowed_remote_exmods=None): """A factory method for Transport objects. This method will construct a Transport object from transport configuration gleaned from the user's configuration and, optionally, a transport URL. If a transport URL is supplied as a parameter, any transport configuration contained in it takes precedence. If no transport URL is supplied, but there is a transport URL supplied in the user's configuration then that URL will take the place of the URL parameter. 
    In both cases, any configuration not supplied in the transport URL may
    be taken from individual configuration parameters in the user's
    configuration.

    An example transport URL might be::

        rabbit://me:passwd@host:5672/virtual_host

    and can either be passed as a string or a TransportURL object.

    :param conf: the user configuration
    :type conf: cfg.ConfigOpts
    :param url: a transport URL, see :py:class:`transport.TransportURL`
    :type url: str or TransportURL
    :param allowed_remote_exmods: a list of modules which a client using
        this transport will deserialize remote exceptions from
    :type allowed_remote_exmods: list
    """
    return _get_transport(conf, url, allowed_remote_exmods,
                          transport_cls=RPCTransport)


class TransportHost(object):
    """A host element of a parsed transport URL."""

    def __init__(self, hostname=None, port=None, username=None,
                 password=None):
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password

    def __hash__(self):
        return hash((self.hostname, self.port, self.username, self.password))

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        attrs = []
        for a in ['hostname', 'port', 'username', 'password']:
            v = getattr(self, a)
            if v:
                attrs.append((a, repr(v)))
        values = ', '.join(['%s=%s' % i for i in attrs])
        return '<TransportHost ' + values + '>'


class TransportOptions(object):

    def __init__(self, at_least_once=False):
        self._at_least_once = at_least_once

    @property
    def at_least_once(self):
        return self._at_least_once


class TransportURL(object):
    """A parsed transport URL.

    Transport URLs take the form::

        driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query

    where:

    driver
      Specifies the transport driver to use. Typically this is `rabbit` for
      the RabbitMQ broker. See the documentation for other available
      transport drivers.

    [user:pass@]host:port
      Specifies the network location of the broker. `user` and `pass` are
      the optional username and password used for authentication with the
      broker.

      `user` and `pass` may contain any of the following ASCII characters:
        * Alphabetic (a-z and A-Z)
        * Numeric (0-9)
        * Special characters: & = $ - _ . + ! * ( )

      `user` may include at most one `@` character for compatibility with
      some implementations of SASL.

      All other characters in `user` and `pass` must be encoded via '%nn'

      You may include multiple different network locations separated by
      commas. The client will connect to any of the available locations and
      will automatically fail over to another should the connection fail.

    virtual_host
      Specifies the "virtual host" within the broker. Support for virtual
      hosts is specific to the message bus used.

    query
      Permits passing driver-specific options which override the
      corresponding values from the configuration file.
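    For example (illustrative values only, not taken from a real
    deployment)::

        rabbit://joe:secret@broker1:5672,joe:secret@broker2:5672/my_vhost

    describes two RabbitMQ hosts with automatic failover between them,
    both accessed as user `joe` within the `my_vhost` virtual host.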
    :param conf: a ConfigOpts instance
    :type conf: oslo.config.cfg.ConfigOpts
    :param transport: a transport name for example 'rabbit'
    :type transport: str
    :param virtual_host: a virtual host path for example '/'
    :type virtual_host: str
    :param hosts: a list of TransportHost objects
    :type hosts: list
    :param query: a dictionary of URL query parameters
    :type query: dict
    """

    def __init__(self, conf, transport=None, virtual_host=None, hosts=None,
                 query=None):
        self.conf = conf
        self.conf.register_opts(_transport_opts)

        self.transport = transport
        self.virtual_host = virtual_host
        if hosts is None:
            self.hosts = []
        else:
            self.hosts = hosts
        if query is None:
            self.query = {}
        else:
            self.query = query

    def __hash__(self):
        return hash((tuple(self.hosts), self.transport, self.virtual_host))

    def __eq__(self, other):
        return (self.transport == other.transport and
                self.virtual_host == other.virtual_host and
                self.hosts == other.hosts)

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        attrs = []
        for a in ['transport', 'virtual_host', 'hosts']:
            v = getattr(self, a)
            if v:
                attrs.append((a, repr(v)))
        values = ', '.join(['%s=%s' % i for i in attrs])
        return '<TransportURL ' + values + '>'

    def __str__(self):
        netlocs = []

        for host in self.hosts:
            username = host.username
            password = host.password
            hostname = host.hostname
            port = host.port

            # Starting place for the network location
            netloc = ''

            # Build the username and password portion of the transport URL
            if username is not None or password is not None:
                if username is not None:
                    netloc += parse.quote(username, '')
                if password is not None:
                    netloc += ':%s' % parse.quote(password, '')
                netloc += '@'

            # Build the network location portion of the transport URL
            if hostname:
                if ':' in hostname:
                    netloc += '[%s]' % hostname
                else:
                    netloc += hostname
            if port is not None:
                netloc += ':%d' % port

            netlocs.append(netloc)

        # Assemble the transport URL
        url = '%s://%s/' % (self.transport, ','.join(netlocs))

        if self.virtual_host:
            url += parse.quote(self.virtual_host)

        if self.query:
            url += '?' + parse.urlencode(self.query, doseq=True)

        return url

    @classmethod
    def parse(cls, conf, url=None):
        """Parse a URL as defined by :py:class:`TransportURL` and return a
        TransportURL object.

        Assuming a URL takes the form of::

            transport://user:pass@host:port[,userN:passN@hostN:portN]/virtual_host?query

        then parse the URL and return a TransportURL object.

        Netloc is parsed following the sequence below:

        * It is first split by ',' in order to support multiple hosts
        * All hosts should be specified with username/password or not at
          the same time. In case of lack of specification, username and
          password will be omitted::

            user:pass@host1:port1,host2:port2

            [
                {"username": "user", "password": "pass",
                 "host": "host1:port1"},
                {"host": "host2:port2"}
            ]

        If the url is not provided conf.transport_url is parsed instead.
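        For example (an illustrative sketch, assuming a default
        ConfigOpts)::

            url = TransportURL.parse(conf, 'rabbit://u:p@h1:5672,h2/vh')

        would yield two TransportHost objects, the second without
        credentials, so the mixed-credentials warning described above
        would be logged.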
        :param conf: a ConfigOpts instance
        :type conf: oslo.config.cfg.ConfigOpts
        :param url: The URL to parse
        :type url: str
        :returns: A TransportURL
        """
        if not url:
            conf.register_opts(_transport_opts)
        url = url or conf.transport_url
        if not isinstance(url, str):
            raise InvalidTransportURL(url, 'Wrong URL type')

        url = parse.urlparse(url)

        if not url.scheme:
            raise InvalidTransportURL(url.geturl(), 'No scheme specified')

        transport = url.scheme

        query = {}
        if url.query:
            for key, values in parse.parse_qs(url.query).items():
                query[key] = ','.join(values)

        virtual_host = None
        if url.path.startswith('/'):
            virtual_host = parse.unquote(url.path[1:])

        hosts_with_credentials = []
        hosts_without_credentials = []
        hosts = []

        for host in url.netloc.split(','):
            if not host:
                continue

            hostname = host
            username = password = port = None

            if '@' in host:
                username, hostname = host.rsplit('@', 1)
                if ':' in username:
                    username, password = username.split(':', 1)
                    password = parse.unquote(password)
                username = parse.unquote(username)

            if not hostname:
                hostname = None
            elif hostname.startswith('['):
                # Find the closing ']' and extract the hostname
                host_end = hostname.find(']')
                if host_end < 0:
                    # NOTE(Vek): Identical to what Python 2.7's
                    # urlparse.urlparse() raises in this case
                    raise ValueError('Invalid IPv6 URL')

                port_text = hostname[host_end:]
                hostname = hostname[1:host_end]

                # Now we need the port; this is compliant with how urlparse
                # parses the port data
                port = None
                if ':' in port_text:
                    port = port_text.split(':', 1)[1]
            elif ':' in hostname:
                hostname, port = hostname.split(':', 1)

            if port == "":
                port = None
            if port is not None:
                port = int(port)

            if username is None or password is None:
                hosts_without_credentials.append(hostname)
            else:
                hosts_with_credentials.append(hostname)

            hosts.append(TransportHost(hostname=hostname,
                                       port=port,
                                       username=username,
                                       password=password))

        if (len(hosts_with_credentials) > 0 and
                len(hosts_without_credentials) > 0):
            LOG.warning("All hosts must be set with username/password or "
                        "not at the same time. Hosts with credentials "
                        "are: %(hosts_with_credentials)s. Hosts without "
                        "credentials are %(hosts_without_credentials)s.",
                        {'hosts_with_credentials': hosts_with_credentials,
                         'hosts_without_credentials':
                             hosts_without_credentials})

        return cls(conf, transport, virtual_host, hosts, query)

oslo.messaging-14.9.0/oslo_messaging/version.py

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
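# NOTE: illustrative usage, not part of the original module; pbr's
# VersionInfo object exposes the canonical version string, e.g.:
#
#   >>> from oslo_messaging import version
#   >>> version.version_info.version_string()
#   '14.9.0'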
import pbr.version

version_info = pbr.version.VersionInfo('oslo.messaging')

oslo.messaging-14.9.0/releasenotes/notes/RPC-call-monitoring-7977f047d069769a.yaml

---
prelude: >
    RPCClient now supports RPC call monitoring for detecting the
    loss of a server during an RPC call.
features:
  - |
    RPC call monitoring is a new RPCClient feature. Call monitoring
    causes the RPC server to periodically send keepalive messages back
    to the RPCClient while the RPC call is being processed. This can be
    used for early detection of a server failure without having to wait
    for the full call timeout to expire.

oslo.messaging-14.9.0/releasenotes/notes/add-enable_cancel_on_failover-22ac472b93dd3a23.yaml

---
fixes:
  - |
    Add a new option `enable_cancel_on_failover` for the rabbitmq driver
    which, when enabled, will cancel consumers when the queue appears to
    be down.

oslo.messaging-14.9.0/releasenotes/notes/add-ping-endpoint.yaml

---
features:
  - |
    RPC dispatcher can have an extra endpoint named ping. This endpoint
    can be enabled thanks to a specific configuration parameter:

      [DEFAULT]
      rpc_ping_enabled=true  # default is false

    The purpose of this new endpoint is to help operators do an RPC call
    (a ping) toward a specific RPC callback (e.g. a nova-compute, or a
    neutron-agent). This helps a lot with monitoring agents (for
    example, if agents are deployed in a kubernetes pod).
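# Illustrative sketch, not part of the release notes: with
# ``rpc_ping_enabled = True`` on the server side, an operator-side client
# can ping a specific RPC server roughly as follows. The endpoint method
# name ``oslo_rpc_server_ping`` and the target values are assumptions for
# the purpose of this example:
#
#   import oslo_messaging
#   target = oslo_messaging.Target(topic='compute', server='node-1')
#   client = oslo_messaging.get_rpc_client(transport, target)
#   client.call({}, 'oslo_rpc_server_ping')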
oslo.messaging-14.9.0/releasenotes/notes/add-quorum-control-configurations-beed79811ff97ba2.yaml

---
features:
  - |
    Add quorum configuration x-max-in-memory-length, x-max-in-memory-bytes
    and x-delivery-limit, which control the quorum queue memory usage and
    handle the message poisoning problem.

oslo.messaging-14.9.0/releasenotes/notes/add-ssl-support-for-kafka.yaml

---
features:
  - |
    | SSL support for oslo_messaging's kafka driver
    | The following configuration params were added

    * *ssl_client_cert_file* (default='')
    * *ssl_client_key_file* (default='')
    * *ssl_client_key_password* (default='')

oslo.messaging-14.9.0/releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml

---
other:
  - Switch to reno for managing release notes.

oslo.messaging-14.9.0/releasenotes/notes/adding_support_for_quorum_queues-3101d055b492289e.yaml

---
features:
  - |
    Adding support for quorum queues. Quorum queues are enabled if the
    ``rabbit_quorum_queue`` parameter is set (``x-queue-type: quorum``).
    Setting x-queue-type to quorum means that a replicated FIFO queue based
    on the Raft consensus algorithm will be used. It is available as of
    RabbitMQ 3.8.0. Quorum queues are durable by default, so the
    ``amqp_durable_queues`` option will be ignored. When quorum queues are
    enabled, the HA queues (``rabbit_ha_queues``), a.k.a. mirrored queues,
    should be disabled since a queue can't be both types at the same time.

oslo.messaging-14.9.0/releasenotes/notes/allow-transient-no-expire-ce7ae9d8c9d15751.yaml

---
features:
  - |
    Allow creation of transient queues with no expire. When an operator
    relies on rabbitmq policies, there is no point in setting the queue
    TTL in config. When rabbit_transient_queues_ttl is set to 0, no
    x-expire parameter will be set on queue declaration. In that specific
    situation, it is recommended to set an expire value using rabbitmq
    policies. See https://www.rabbitmq.com/parameters.html#policies

oslo.messaging-14.9.0/releasenotes/notes/auto-deleted-failed-quorum-ca6a3923c3ed999a.yaml

---
fixes:
  - |
    Auto-delete the failed quorum rabbit queues. When rabbit is failing
    for a specific quorum queue, delete the queue before trying to
    recreate it. This may happen if the queue is not recoverable on the
    rabbit side. See
    https://www.rabbitmq.com/quorum-queues.html#availability for more
    info on this specific case.
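# Illustrative sketch, not part of the release notes: enabling the quorum
# queue behaviour described in the notes above programmatically, assuming
# an oslo.config ConfigOpts instance ``conf`` with the rabbit options
# registered:
#
#   conf.set_override('rabbit_quorum_queue', True,
#                     group='oslo_messaging_rabbit')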
oslo.messaging-14.9.0/releasenotes/notes/blocking-executor-deprecated-895146c1c3bf2f51.yaml

---
deprecations:
  - The blocking executor has been deprecated for removal in Rocky. Its
    usage was never recommended for applications, and it has no test
    coverage. Applications should choose the appropriate threading model
    that maps their usage instead.

oslo.messaging-14.9.0/releasenotes/notes/blocking-executor-support-dropped-a3bc74c6825863f0.yaml

---
upgrade:
  - |
    The blocking executor has been deprecated for removal in Rocky and
    support is now dropped in Ussuri. Its usage was never recommended
    for applications, and it has no test coverage. Applications should
    choose the appropriate threading model that maps to their usage
    instead.

oslo.messaging-14.9.0/releasenotes/notes/bug-1917645-rabbit-use-retry-parameter-for-notifications-3f7c508ab4437579.yaml

---
fixes:
  - |
    As a fix for `bug 1917645
    <https://bugs.launchpad.net/oslo.messaging/+bug/1917645>`_ the rabbit
    backend is changed to use the ``[oslo_messaging_notifications]retry``
    parameter when the driver tries to connect to the message bus during
    notification sending. Before this fix the rabbit backend retried the
    connection forever, blocking the caller thread.

oslo.messaging-14.9.0/releasenotes/notes/bug-1981093-kafka-dont-log-in-tpool-execute-fa50ceee2d55ebae.yaml

---
fixes:
  - |
    [`bug 1981093
    <https://bugs.launchpad.net/oslo.messaging/+bug/1981093>`_]
    Pulls calls to logging functions out of
    ``impl_kafka._produce_message``. Since ``_produce_message`` is called
    through tpool.execute, calling logging functions inside
    ``_produce_message`` could cause subsequent calls to logging
    functions to deadlock.

oslo.messaging-14.9.0/releasenotes/notes/bug-1993149-e8b231791b65e938.yaml

---
upgrade:
  - |
    If kombu_reconnect_delay is specified in the [oslo_messaging_rabbit]
    section, ensure that it is less than 5.0, the value of
    ACK_REQUEUE_EVERY_SECONDS_MAX
fixes:
  - |
    Increased ACK_REQUEUE_EVERY_SECONDS_MAX to resolve issues with
    rabbitmq HA failover.

oslo.messaging-14.9.0/releasenotes/notes/bug-2068630-6ff92f213bc4eca0.yaml

---
fixes:
  - |
    Force queue deletion when it is not possible to redeclare a queue.
    See `bug 2068630
    <https://bugs.launchpad.net/oslo.messaging/+bug/2068630>`__ for
    details.
oslo.messaging-14.9.0/releasenotes/notes/bump-amqp-version-due-to-tls-issue-e877b152eb101c15.yaml

---
critical:
  - |
    In combination with amqp<=2.4.0, ``oslo.messaging`` was unreliable
    when configured with TLS (as is generally recommended). Users would
    see frequent errors such as this::

      MessagingTimeout: Timed out waiting for a reply to message ID
      ae039d1695984addbfaaef032ce4fda3

    Such issues would typically lead to downstream service timeouts,
    with no recourse available other than disabling TLS altogether (see
    `bug 1800957
    <https://bugs.launchpad.net/oslo.messaging/+bug/1800957>`_).

    The underlying issue is fixed in amqp version 2.4.1, which is now
    the minimum version that ``oslo.messaging`` requires.

oslo.messaging-14.9.0/releasenotes/notes/connection_ttl-2cf0fe6e1ab8c73c.yaml

---
features:
  - |
    | Idle connections in the pool will be expired and closed.
    | Default ttl is 1200s. The following configuration params were added

    * *conn_pool_ttl* (default 1200)
    * *conn_pool_min_size* (default 2)

oslo.messaging-14.9.0/releasenotes/notes/declare_fallback_durable_exchange-0db677de4fdf1e78.yaml

---
fixes:
  - |
    Force creating a non-durable control exchange when a precondition
    failure occurs that is related to a configuration mismatch.

oslo.messaging-14.9.0/releasenotes/notes/deprecate-ZeroMQ-driver-a8af25aaba867c5b.yaml

---
deprecations:
  - |
    ZeroMQ support has been deprecated. The ZeroMQ driver ``zmq://`` has
    been unmaintained for over a year and no longer functions properly.
    It is recommended to use one of the maintained backends instead,
    such as RabbitMQ or AMQP 1.0.

oslo.messaging-14.9.0/releasenotes/notes/deprecate-the-option-heartbeat_in_pthread-from-rabbit-driver-5757adb83701caa5.yaml

---
deprecations:
  - |
    The ``heartbeat_in_pthread`` option from the rabbitmq driver has
    been deprecated and it is recommended not to use the feature
    anymore. The option is strongly related to Eventlet, but Eventlet
    will be removed from OpenStack services in a future release. In
    addition, this feature has never worked with services using eventlet
    for the core service framework.

oslo.messaging-14.9.0/releasenotes/notes/deprecated-amqp1-driver-4bf57449bc2b7aad.yaml

---
deprecations:
  - |
    The AMQP1 driver is now deprecated. Its related functional tests are
    also disabled. Neither debian nor ubuntu in the latest releases have
    any binary built for qpid server, not even 3rd party.
    Only qpid proton, the client lib, is available.

oslo.messaging-14.9.0/releasenotes/notes/disable-mandatory-flag-a6210a534f3853f0.yaml

---
upgrade:
  - |
    Deprecating the ``direct_mandatory_flag``. It will not be possible
    to deactivate this functionality anymore.

oslo.messaging-14.9.0/releasenotes/notes/do-not-run-heartbeat-in-pthread-by-default-42e1299f59b841f8.yaml

---
upgrade:
  - |
    The ``[oslo_messaging_rabbit] heartbeat_in_pthread`` config option
    defaults to ``False`` again. For wsgi applications it is recommended
    to set this value to ``True``, but enabling it for non-wsgi services
    may break such a service. Please check
    https://bugs.launchpad.net/oslo.messaging/+bug/1934937 for more
    details.

oslo.messaging-14.9.0/releasenotes/notes/drop-python27-support-5ef2f365d8930483.yaml

---
upgrade:
  - |
    Support for Python 2.7 has been dropped. The minimum version of
    Python now supported is Python 3.6.

oslo.messaging-14.9.0/releasenotes/notes/enforce_fips_mode-07dd259eb8a73c2b.yaml

---
features:
  - |
    Adding a new option, ``[oslo_messaging_rabbit]
    ssl_enforce_fips_mode``, to the rabbitmq driver to enforce the
    OpenSSL FIPS mode if supported by the version of Python.
security:
  - |
    We are now able to enforce the OpenSSL FIPS mode by using
    ``[oslo_messaging_rabbit] ssl_enforce_fips_mode``.

oslo.messaging-14.9.0/releasenotes/notes/fix-access_policy-deafult-a6954a147cb002b0.yaml

---
upgrade:
  - |
    Change the default value of RPC dispatcher access_policy to
    DefaultRPCAccessPolicy.
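# Illustrative sketch, not part of the release notes: the default access
# policy mentioned above can also be passed explicitly when building a
# server (``transport``, ``target`` and ``endpoints`` are assumed to
# exist already):
#
#   import oslo_messaging
#   from oslo_messaging.rpc import dispatcher
#   server = oslo_messaging.get_rpc_server(
#       transport, target, endpoints,
#       access_policy=dispatcher.DefaultRPCAccessPolicy)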
oslo.messaging-14.9.0/releasenotes/notes/get-rpc-client-0b4aa62160864b29.yaml

---
features:
  - |
    Added new ``get_rpc_client`` function to instantiate the RPCClient
    class
deprecations:
  - |
    Instantiating the RPCClient class directly is deprecated in favor of
    using the new ``get_rpc_client`` function to expose a more common
    API similar to existing functions such as ``get_rpc_server`` and
    ``get_rpc_transport``

oslo.messaging-14.9.0/releasenotes/notes/get-rpc-helpers-cls-8911826ac08aef2a.yaml

---
features:
  - |
    The ``get_rpc_transport``, ``get_rpc_server`` and ``get_rpc_client``
    helper functions now have support for overriding the class that is
    instantiated.

oslo.messaging-14.9.0/releasenotes/notes/get_rpc_transport-4aa3511ad9754a60.yaml

---
features:
  - |
    Add get_rpc_transport call to make the API clear for the separation
    of RPC and Notification messaging backends.
deprecations:
  - |
    Deprecate get_transport and use get_rpc_transport or
    get_notification_transport to make the API usage clear for the
    separation of RPC and Notification messaging backends.

oslo.messaging-14.9.0/releasenotes/notes/handle-missing-queue-553a803f94976be7.yaml

---
features:
  - |
    Adding a retry strategy based on the mandatory flag. Missing
    exchanges and queues are now identified separately for logging
    purposes.

oslo.messaging-14.9.0/releasenotes/notes/heartbeat-rate-3-7ada9edbccc11a3f.yaml

---
fixes:
  - |
    Change the heartbeat_rate default from 2 to 3 in order to send AMQP
    heartbeat frames at the correct interval.

oslo.messaging-14.9.0/releasenotes/notes/kafka-client-library-change-fe16d5a34550db7f.yaml

---
fixes:
  - |
    Threading issues with the kafka-python consumer client were
    identified and documented. The driver has been updated to integrate
    the confluent-kafka python library. The confluent-kafka client
    leverages the high performance librdkafka C client and is safe for
    multiple thread use.
upgrade:
  - |
    With the change in the client library used, projects using the Kafka
    driver should use extras oslo.messaging[kafka] to pull in
    dependencies for the driver.
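# Illustrative sketch, not part of the release notes: the new
# ``get_rpc_client`` helper replaces direct RPCClient instantiation
# (``conf`` is an assumed, already-configured ConfigOpts instance):
#
#   import oslo_messaging
#   transport = oslo_messaging.get_rpc_transport(conf)
#   target = oslo_messaging.Target(topic='test', version='2.0')
#   client = oslo_messaging.get_rpc_client(transport, target)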
oslo.messaging-14.9.0/releasenotes/notes/no-log-if-ignore-errors-e2223b8a646b4c40.yaml

other:
  - |
    NoSuchMethod exception will not be logged for special non-existing
    methods whose names end with '_ignore_errors'. Such methods might be
    used as health probes for openstack services.

oslo.messaging-14.9.0/releasenotes/notes/option-rabbitmq-max_retries-has-been-deprecated-471f66a9e6d672a2.yaml

---
deprecations:
  - The rabbitmq driver option ``DEFAULT/max_retries`` has been
    deprecated for removal (at a later point in the future) as it did
    not make logical sense for notifications and for RPC.

oslo.messaging-14.9.0/releasenotes/notes/oslo-metrics-support-fe16343a637cc14b.yaml

---
features:
  - |
    | Introduce support for sending rpc client metrics to oslo.metrics.
    | This feature can be enabled by setting a configuration parameter:

      [oslo_messaging_metrics]
      metrics_enabled = True  # default is false

oslo.messaging-14.9.0/releasenotes/notes/pika-driver-has-been-deprecated-e2407fa53c91fe5c.yaml

---
deprecations:
  - The pika driver has been deprecated for removal in Rocky. This
    driver was developed as a replacement for the default rabbit driver.
    However, testing has not shown any appreciable improvement over the
    default rabbit driver in terms of performance and stability.

oslo.messaging-14.9.0/releasenotes/notes/rabbit-no-wait-for-ack-9e5de3e1320d7660.yaml

---
other:
  - |
    On rabbitmq, in the past, acknowledgement of messages was done
    within the application callback thread/greenlet. This thread was
    blocked until the message was acked. In Newton, we rewrote the
    message acknowledgement to ensure that we don't have two threads
    writing to the socket at the same time. Now all pending acks are
    done by the main thread. There is no longer a reason to block the
    application callback thread until the message is acked. Other
    drivers already release the application callback threads before the
    message is acknowledged. This is now also the case for rabbitmq.

oslo.messaging-14.9.0/releasenotes/notes/rabbit_queue_manager-363209285cbbe257.yaml

---
features:
  - |
    Add three new options (``use_queue_manager``, ``hostname``,
    ``processname``) to switch oslo.messaging from random queue names
    (for reply_q and fanouts) to consistent naming.
    The default value is False, so oslo.messaging will still use random
    queue names if nothing is set in the configuration file of services.
    When switching use_queue_manager to True, the uuid4 random string in
    the queue name is replaced with a combination of hostname,
    processname and a counter. The counter will be kept in shared memory
    (/dev/shm/x_y_qmanager). This way, when a service using
    oslo.messaging restarts (e.g. neutron), it will re-create the queues
    using the same names as the previous run, so no new queues are
    created and there is no need for rabbitmq to delete the previous
    queues. This is extremely useful for operators to debug which queue
    belongs to which server/process. It's also highly recommended to
    enable this feature when using quorum queues for transients (option
    named ``rabbit_transient_quorum_queue``) to avoid consuming all
    erlang atoms after some time.

oslo.messaging-14.9.0/releasenotes/notes/rabbit_quorum_typo-9c06a9fd8d767f53.yaml

---
fixes:
  - |
    Fixed a typo in the variable names
    ``rabbit_quorum_max_memory_length`` and
    ``rabbit_quorum_max_memory_bytes``. Please make changes in your
    config files to use the correct variables.

oslo.messaging-14.9.0/releasenotes/notes/rabbit_transient_quorum-fc3c3f88ead90034.yaml

---
features:
  - |
    Add an option to enable transient queues to use quorum. Transient
    queues in OpenStack are not so transient: they live for the whole
    process lifetime (e.g. until you restart a service, like
    nova-compute). Transient here means they belong to a specific
    process, compared to regular queues, which may be used by more
    processes. Usually, the transient queues are the "fanout" and
    "reply" queues.

    By default, without any rabbitmq policy tuning, they are neither
    durable nor highly available. By enabling quorum for transients,
    oslo.messaging will declare quorum queues instead of classic queues
    on rabbitmq. As a result, those queues will automatically become HA
    and durable. Note that this may have an impact on your cluster, as
    rabbit will need more cpu, ram and network bandwidth to manage the
    queues. This was tested at pretty large scale (2k hypervisors) with
    a cluster of 5 nodes.

    Also note that the current rabbitmq implementation relies on a fixed
    number of "erlang atoms" (5M by default), and one atom is consumed
    each time a quorum queue is created with a different name. If your
    deployment is doing a lot of queue deletion/creation, you may
    consume all your atoms more quickly.

    When enabling quorum for transients, you may also want to update
    your rabbitmq policies accordingly (e.g. make sure they apply on
    quorum).

    This option will stay disabled by default for now but may become the
    default in the future.

oslo.messaging-14.9.0/releasenotes/notes/removal-deprecated-options-6d4c5db90525c52d.yaml

---
upgrade:
  - |
    Remove deprecated configuration options from multiple drivers.

    * The rpc_backend option from the [DEFAULT] section has been
      removed.
    * The AMQP driver has removed the configuration options of
      allow_insecure_clients, username and password from the
      [oslo_messaging_amqp] section.
    * The Kafka driver has removed the configuration options of
      kafka_default_host and kafka_default_port from the
      [oslo_messaging_kafka] section.
    * The Rabbit driver has removed the configuration options of
      rabbit_host, rabbit_port, rabbit_hosts, rabbit_userid,
      rabbit_password, rabbit_virtual_host, rabbit_max_retries and
      rabbit_durable_queues from the [oslo_messaging_rabbit] section.

    Operators must switch to setting the transport_url directive in the
    [DEFAULT] section.

oslo.messaging-14.9.0/releasenotes/notes/remove-RequestContextSerializer-234c0496a7e0376b.yaml

---
upgrade:
  - RequestContextSerializer was deprecated since 4.6, and it isn't used
    by any other project, so we can remove it safely.

oslo.messaging-14.9.0/releasenotes/notes/remove-ZeroMQ-driver-e9e0bbbb7bd4f5e6.yaml

---
prelude: >
    The ZMQ-based driver for RPC communications has been removed
deprecations:
  - |
    The driver support for the ZeroMQ messaging library is removed.
    Users of the oslo.messaging RPC services must use the supported
    rabbit ("rabbit://...") or amqp1 ("amqp://...") drivers.

oslo.messaging-14.9.0/releasenotes/notes/remove-pika-1bae204ced2521a3.yaml

---
prelude: >
    The Pika-based driver for RabbitMQ has been removed.
upgrade:
  - |
    Users of the Pika-based driver must change the prefix of all the
    transport_url configuration options from "pika://..." to
    "rabbit://..." to use the default kombu based RabbitMQ driver.

oslo.messaging-14.9.0/releasenotes/notes/reply_q-timeout-e3c3bae636e8bc74.yaml

---
features:
  - |
    The name of the ``reply_q`` is now logged when a timeout occurs
    while waiting for a reply.

oslo.messaging-14.9.0/releasenotes/notes/retry-support-07996ef04dda9482.yaml

---
features:
  - |
    | Retry support for the oslo_messaging_notifications driver
    | Configuration param 'retry' is added. Default is -1, indefinite

    * *retry* (default=-1)

oslo.messaging-14.9.0/releasenotes/notes/run-heartbeat-in-pthread-by-default-28637b41ebf500dc.yaml

---
upgrade:
  - |
    The ``[oslo_messaging_rabbit] heartbeat_in_pthread`` config option
    now defaults to ``True``. Applications will run the RabbitMQ
    heartbeat in a Python thread by default.
deprecations:
  - |
    ``heartbeat_in_pthread`` has been deprecated and will be removed in
    a future release. If configured, this option should be unset.
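# Illustrative sketch, not part of the release notes: should the
# deprecated option above still be set, it can be cleared or pinned
# explicitly via oslo.config (``conf`` is an assumed ConfigOpts
# instance):
#
#   conf.set_override('heartbeat_in_pthread', False,
#                     group='oslo_messaging_rabbit')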
oslo.messaging-14.9.0/releasenotes/notes/stream-c3dd31ee98f6bbd7.yaml

---
features:
  - |
    Add an option to use stream queues for the rabbitmq driver instead
    of fanouts.

oslo.messaging-14.9.0/releasenotes/notes/undeprecate_heartbeat_in_pthread-48e2c1fc008cf208.yaml

---
upgrade:
  - |
    We undeprecated the ``heartbeat_in_pthread`` option. This option
    will remain available to allow customers to run the rabbitmq
    heartbeat in a python thread or not.

oslo.messaging-14.9.0/releasenotes/notes/use-extras-for-optional-deps-2a00e8007ef7a629.yaml

---
prelude: >
    Projects using any of the optional drivers can use extras to pull in
    dependencies for that driver.
upgrade:
  - |
    Projects using the AMQP 1.0 driver may now depend on
    oslo.messaging[amqp1]. Projects using the Kafka driver may now
    depend on oslo.messaging[kafka]

oslo.messaging-14.9.0/releasenotes/source/2023.1.rst

===========================
2023.1 Series Release Notes
===========================

.. release-notes::
   :branch: stable/2023.1

oslo.messaging-14.9.0/releasenotes/source/2023.2.rst

===========================
2023.2 Series Release Notes
===========================

.. release-notes::
   :branch: stable/2023.2

oslo.messaging-14.9.0/releasenotes/source/2024.1.rst

===========================
2024.1 Series Release Notes
===========================
.. release-notes::
   :branch: stable/2024.1

oslo.messaging-14.9.0/releasenotes/source/_static/.placeholder
oslo.messaging-14.9.0/releasenotes/source/_templates/.placeholder
oslo.messaging-14.9.0/releasenotes/source/conf.py

# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# oslo.messaging Release Notes documentation build configuration file,
# created by sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Project information --------------------------------------------------

# General information about the project.
copyright = '2016, oslo.messaging Developers'

# Release notes do not need a version in the title, they span
# multiple versions.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'openstackdocstheme',
    'reno.sphinxext',
]

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/oslo.messaging'
openstackdocs_bug_project = 'oslo.messaging'
openstackdocs_bug_tag = ''

# The master toctree document.
master_doc = 'index'

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']

oslo.messaging-14.9.0/releasenotes/source/index.rst

=============================
oslo.messaging Release Notes
=============================

.. toctree::
   :maxdepth: 1

   unreleased
   2024.1
   2023.2
   2023.1
   zed
   yoga
   xena
   wallaby
   victoria
   ussuri
   train
   stein
   rocky
   queens
   pike
   ocata
   newton

oslo.messaging-14.9.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po

# Andi Chandler , 2016. #zanata
# Andi Chandler , 2017. #zanata
# Andi Chandler , 2018. #zanata
# Andi Chandler , 2020. #zanata
# Andi Chandler , 2023. #zanata
msgid ""
msgstr ""
"Project-Id-Version: oslo.messaging\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-09-29 10:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2023-09-21 01:02+0000\n"
"Last-Translator: Andi Chandler \n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"

msgid ""
"(see `bug 1800957 <https://bugs.launchpad.net/oslo.messaging/+bug/1800957>`_)."
msgstr ""
"(see `bug 1800957 <https://bugs.launchpad.net/oslo.messaging/+bug/1800957>`_)."
msgid "*conn_pool_min_size* (default 2)" msgstr "*conn_pool_min_size* (default 2)" msgid "*conn_pool_ttl* (defaul 1200)" msgstr "*conn_pool_ttl* (defaul 1200)" msgid "*retry* (default=-1)" msgstr "*retry* (default=-1)" msgid "*ssl_client_cert_file* (default='')" msgstr "*ssl_client_cert_file* (default='')" msgid "*ssl_client_key_file* (default='')" msgstr "*ssl_client_key_file* (default='')" msgid "*ssl_client_key_password* (default='')" msgstr "*ssl_client_key_password* (default='')" msgid "10.2.4" msgstr "10.2.4" msgid "11.0.0" msgstr "11.0.0" msgid "12.0.0" msgstr "12.0.0" msgid "12.1.4" msgstr "12.1.4" msgid "12.1.6" msgstr "12.1.6" msgid "12.11.0" msgstr "12.11.0" msgid "12.12.0" msgstr "12.12.0" msgid "12.13.0" msgstr "12.13.0" msgid "12.13.1" msgstr "12.13.1" msgid "12.14.0" msgstr "12.14.0" msgid "12.3.0" msgstr "12.3.0" msgid "12.4.0" msgstr "12.4.0" msgid "12.5.2" msgstr "12.5.2" msgid "12.6.0" msgstr "12.6.0" msgid "12.7.0" msgstr "12.7.0" msgid "12.7.1" msgstr "12.7.1" msgid "12.7.3" msgstr "12.7.3" msgid "12.9.0" msgstr "12.9.0" msgid "12.9.3" msgstr "12.9.3" msgid "12.9.4-2" msgstr "12.9.4-2" msgid "13.0.0" msgstr "13.0.0" msgid "14.0.0" msgstr "14.0.0" msgid "14.0.1" msgstr "14.0.1" msgid "14.1.0" msgstr "14.1.0" msgid "14.2.0" msgstr "14.2.0" msgid "14.2.1" msgstr "14.2.1" msgid "14.3.0" msgstr "14.3.0" msgid "14.3.1" msgstr "14.3.1" msgid "14.4.0" msgstr "14.4.0" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "5.17.3" msgstr "5.17.3" msgid "5.20.0" msgstr "5.20.0" msgid "5.24.0" msgstr "5.24.0" msgid "5.24.2" msgstr "5.24.2" msgid "5.26.0" msgstr "5.26.0" msgid "5.27.0" msgstr "5.27.0" msgid "5.30.2" msgstr "5.30.2" msgid "5.30.8" msgstr "5.30.8" msgid "5.33.0" msgstr "5.33.0" msgid "5.34.1" msgstr "5.34.1" msgid "5.35.5" msgstr "5.35.5" msgid "5.6.0" msgstr "5.6.0" msgid "6.0.0" msgstr "6.0.0" msgid "6.2.0" msgstr "6.2.0" msgid "7.0.0" msgstr "7.0.0" msgid "8.0.0" msgstr "8.0.0" msgid "8.1.3" msgstr "8.1.3" msgid "9.0.0" msgstr "9.0.0" msgid "9.3.0" msgstr "9.3.0" msgid "9.5.0" msgstr "9.5.0" msgid "9.5.2-4" msgstr "9.5.2-4" msgid "" "A bug in the ``amqp`` python library can cause the connection to the " "RabbitMQ broker to hang when using SSL/TLS. This results in frequent errors " "such as this::" msgstr "" "A bug in the ``amqp`` python library can cause the connection to the " "RabbitMQ broker to hang when using SSL/TLS. This results in frequent errors " "such as this::" msgid "" "Add a new option `enable_cancel_on_failover` for rabbitmq driver which when " "enabled, will cancel consumers when queue appears to be down." msgstr "" "Add a new option `enable_cancel_on_failover` for RabbitMQ driver which when " "enabled, will cancel consumers when the queue appears to be down." msgid "" "Add get_rpc_transport call to make the API clear for the separation of RPC " "and Notification messaging backends." msgstr "" "Add get_rpc_transport call to make the API clear for the separation of RPC " "and Notification messaging backends." 
msgid "" "Add quorum configuration x-max-in-memory-length, x-max-in-memory-bytes, x-" "delivery-limit which control the quorum queue memory usage and handle the " "message poisoning problem" msgstr "" "Add quorum configuration x-max-in-memory-length, x-max-in-memory-bytes, x-" "delivery-limit which control the quorum queue memory usage and handle the " "message poisoning problem" msgid "" "Added new ``get_rpc_client`` function to instantiate the RPCClient class" msgstr "" "Added new ``get_rpc_client`` function to instantiate the RPCClient class" msgid "" "Adding a new option, ``[oslo_messaging_rabbit] ssl_enforce_fips_mode``, to " "the rabbitmq driver to enforce the OpenSSL FIPS mode if supported by the " "version of Python." msgstr "" "Adding a new option, ``[oslo_messaging_rabbit] ssl_enforce_fips_mode``, to " "the rabbitmq driver to enforce the OpenSSL FIPS mode if supported by the " "version of Python." msgid "" "Adding retry strategy based on the mandatory flag. Missing exchanges and " "queues are now identified separately for logging purposes." msgstr "" "Adding retry strategy based on the mandatory flag. Missing exchanges and " "queues are now identified separately for logging purposes." msgid "" "Adding support for quorum queues. Quorum queues are enabled if the " "``rabbit_quorum_queue`` parameter is sets (``x-queue-type: quorum``). " "Setting x-queue-type to quorum means that replicated FIFO queue based on the " "Raft consensus algorithm will be used. It is available as of RabbitMQ 3.8.0. " "The quorum queues are durable by default (``amqp_durable_queues``) will be " "ignored. when enabled the HA queues (``rabbit_ha_queues``) aka mirrored " "queues should be disabled since the queue can't be both types at the same " "time" msgstr "" "Adding support for quorum queues. Quorum queues are enabled if the " "``rabbit_quorum_queue`` parameter is sets (``x-queue-type: quorum``). " "Setting x-queue-type to quorum means that replicated FIFO queue based on the " "Raft consensus algorithm will be used. It is available as of RabbitMQ 3.8.0. " "The quorum queues are durable by default (``amqp_durable_queues``) will be " "ignored. when enabled the HA queues (``rabbit_ha_queues``) aka mirrored " "queues should be disabled since the queue can't be both types at the same " "time" msgid "" "As a fix for `bug 1917645 `_ the rabbit " "backend is changed to use the ``[oslo_messaging_notifications]retry`` " "parameter when driver tries to connect to the message bus during " "notification sending. Before this fix the rabbit backend retried the " "connection forever blocking the caller thread." msgstr "" "As a fix for `bug 1917645 `_ the rabbit " "backend is changed to use the ``[oslo_messaging_notifications]retry`` " "parameter when the driver tries to connect to the message bus during " "notification sending. Before this fix the rabbit backend retried the " "connection forever blocking the caller thread." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Change the default value of RPC dispatcher access_policy to " "DefaultRPCAccessPolicy." msgstr "" "Change the default value of RPC dispatcher access_policy to " "DefaultRPCAccessPolicy." msgid "Configuration param 'retry' is added. Default is -1, indefinite" msgstr "Configuration param 'retry' is added. Default is -1, indefinite" msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "Default ttl is 1200s. Next configuration params was added" msgstr "Default TTL is 1200s. 
Next configuration params was added" msgid "" "Deprecate get_transport and use get_rpc_transport or " "get_notification_transport to make the API usage clear for the separation of " "RPC and Notification messaging backends." msgstr "" "Deprecate get_transport and use get_rpc_transport or " "get_notification_transport to make the API usage clear for the separation of " "RPC and Notification messaging backends." msgid "" "Deprecating the ``direct_mandatory_flag``. It will not be possible to " "deactivate this functionality anymore." msgstr "" "Deprecating the ``direct_mandatory_flag``. It will not be possible to " "deactivate this functionality anymore." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Fixed typo in variable names ``rabbit_quorum_max_memory_length`` and " "``rabbit_quorum_max_memory_bytes``. Please make changes in your config files " "to correspond correct variables." msgstr "" "Fixed typo in variable names ``rabbit_quorum_max_memory_length`` and " "``rabbit_quorum_max_memory_bytes``. Please make changes in your config files " "to correspond correct variables." msgid "" "Force creating non durable control exchange when a precondition failed " "related to config that differ occuring." msgstr "" "Force creating non-durable control exchange when a precondition failed " "related to config that differs occurring." msgid "Idle connections in the pool will be expired and closed." msgstr "Idle connections in the pool will be expired and closed." msgid "" "If kombu_reconnect_delay is specified in the [oslo_messaging_rabbit] " "section, ensure that it is less than 5.0, the value of " "ACK_REQUEUE_EVERY_SECONDS_MAX" msgstr "" "If kombu_reconnect_delay is specified in the [oslo_messaging_rabbit] " "section, ensure that it is less than 5.0, the value of " "ACK_REQUEUE_EVERY_SECONDS_MAX" msgid "" "In combination with amqp<=2.4.0, ``oslo.messaging`` was unreliable when " "configured with TLS (as is generally recommended). Users would see frequent " "errors such as this::" msgstr "" "In combination with amqp<=2.4.0, ``oslo.messaging`` was unreliable when " "configured with TLS (as is generally recommended). Users would see frequent " "errors such as this::" msgid "" "Increased ACK_REQUEUE_EVERY_SECONDS_MAX to resolve issues with rabbitmq HA " "failover." msgstr "" "Increased ACK_REQUEUE_EVERY_SECONDS_MAX to resolve issues with RabbitMQ HA " "failover." msgid "" "Instantiating the RPCClient class directly is deprecated in favor of using " "the new ``get_rpc_client`` function to expose a more common API similar to " "existing functions such as ``get_rpc_server`` and ``get_rpc_transport``" msgstr "" "Instantiating the RPCClient class directly is deprecated in favour of using " "the new ``get_rpc_client`` function to expose a more common API similar to " "existing functions such as ``get_rpc_server`` and ``get_rpc_transport``" msgid "Introduce support for sending rpc client metrics to oslo.metrics." msgstr "Introduce support for sending RPC client metrics to oslo.metrics." msgid "" "It is recommended that deployments using SSL/TLS upgrade the amqp library to " "v2.4.1 or later." msgstr "" "It is recommended that deployments using SSL/TLS upgrade the amqp library to " "v2.4.1 or later." 
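# Translator note: a hedged example of the kombu_reconnect_delay constraint
# described above; the value shown is illustrative, not a default:
#
#   [oslo_messaging_rabbit]
#   # must stay below ACK_REQUEUE_EVERY_SECONDS_MAX (5.0)
#   kombu_reconnect_delay = 1.0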
msgid "Known Issues" msgstr "Known Issues" msgid "New Features" msgstr "New Features" msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "Next configuration params was added" msgstr "Next configuration params was added" msgid "" "NoSuchMethod exception will not be logged for special non-existing methods " "which names end with '_ignore_errors'. Such methods might be used as health " "probes for openstack services." msgstr "" "NoSuchMethod exception will not be logged for special non-existing methods " "which names end with '_ignore_errors'. Such methods might be used as health " "probes for OpenStack services." msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "On rabbitmq, in the past, acknownlegement of messages was done within the " "application callback thread/greenlet. This thread was blocked until the " "message was ack. In newton, we rewrote the message acknownlegement to ensure " "we haven't two threads writting the the socket at the same times. Now all " "pendings ack are done by the main thread. They are no more reason to block " "the application callback thread until the message is ack. Other driver " "already release the application callback threads before the message is " "acknownleged. This is also the case for rabbitmq, now." msgstr "" "On RabbitMQ, in the past, acknowledgement of messages was done within the " "application callback thread/greenlet. This thread was blocked until the " "message was acknowledged. In Newton, we rewrote the message acknowledgement " "to ensure we haven't two threads writing to the socket at the same time. Now " "all pending acknowledgements are done by the main thread. They are no more " "reason to block the application callback thread until the message is " "acknowledged. Other drivers already release the application callback threads " "before the message is acknowledged. This is also the case for RabbitMQ now." msgid "" "Operators must switch to setting the transport_url directive in the " "[DEFAULT] section." msgstr "" "Operators must switch to setting the transport_url directive in the " "[DEFAULT] section." msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Prelude" msgstr "Prelude" msgid "" "Projects using any of the optional drivers can use extras to pull in " "dependencies for that driver." msgstr "" "Projects using any of the optional drivers can use extras to pull in " "dependencies for that driver." msgid "" "Projects using the AMQP 1.0 driver may now depend on oslo.messaging[amqp1]. " "Projects using the Kafka driver may now depend on oslo.messaging[kafka]" msgstr "" "Projects using the AMQP 1.0 driver may now depend on oslo.messaging[amqp1]. " "Projects using the Kafka driver may now depend on oslo.messaging[kafka]" msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "RPC call monitoring is a new RPCClient feature. Call monitoring causes the " "RPC server to periodically send keepalive messages back to the RPCClient " "while the RPC call is being processed. This can be used for early detection " "of a server failure without having to wait for the full call timeout to " "expire." msgstr "" "RPC call monitoring is a new RPCClient feature. Call monitoring causes the " "RPC server to periodically send keepalive messages back to the RPCClient " "while the RPC call is being processed. 
This can be used for early detection " "of a server failure without having to wait for the full call timeout to " "expire." msgid "" "RPC dispatcher can have an extra endpoint named ping. This endpoint can be " "enabled thanks to a specific configuration parameter: [DEFAULT] " "rpc_ping_enabled=true # default is false" msgstr "" "RPC dispatcher can have an extra endpoint named ping. This endpoint can be " "enabled thanks to a specific configuration parameter: [DEFAULT] " "rpc_ping_enabled=true # default is false" msgid "" "RPCClient now supports RPC call monitoring for detecting the loss of a " "server during an RPC call." msgstr "" "RPCClient now supports RPC call monitoring for detecting the loss of a " "server during an RPC call." msgid "Remove deprecated configuration options from multiple drivers." msgstr "Remove deprecated configuration options from multiple drivers." msgid "" "RequestContextSerializer was deprecated since 4.6, and it isn't used by any " "other project, so we can remove it safely." msgstr "" "RequestContextSerializer was deprecated since 4.6, and it isn't used by any " "other project, so we can remove it safely." msgid "Retry support for oslo_messaging_notifications driver" msgstr "Retry support for oslo_messaging_notifications driver" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "SSL support for oslo_messaging's kafka driver" msgstr "SSL support for oslo_messaging's Kafka driver" msgid "Security Issues" msgstr "Security Issues" msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Such issues would typically lead to downstream service timeouts, with no " "recourse available other than disabling TLS altogether (see `bug 1800957 " "`_)." msgstr "" "Such issues would typically lead to downstream service timeouts, with no " "recourse available other than disabling TLS altogether (see `bug 1800957 " "`_)." msgid "" "Support for Python 2.7 has been dropped. The minimum version of Python now " "supported is Python 3.6." msgstr "" "Support for Python 2.7 has been dropped. The minimum version of Python now " "supported is Python 3.6." msgid "" "The AMQP driver has removed the configuration options of " "allow_insecure_clients, username and password from the [oslo_messaging_amqp] " "section." msgstr "" "The AMQP driver has removed the configuration options of " "allow_insecure_clients, username and password from the [oslo_messaging_amqp] " "section." msgid "" "The AMQP1 driver is now deprecated. Its related functional tests are also " "disabled. Neither debian nor ubuntu in the latest releases have any binary " "built for qpid server, not even 3rd party. Only qpid proton, the client lib, " "is available." msgstr "" "The AMQP1 driver is now deprecated. Its related functional tests are also " "disabled. Neither Debian nor Ubuntu in the latest releases have any binary " "built for qpid server, not even 3rd party. Only qpid proton, the client lib, " "is available." msgid "" "The Kafa driver has removed the configuration options of kafka_default_host " "and kafka_default_port from the [oslo_messaging_kafka] section." msgstr "" "The Kafka driver has removed the configuration options of kafka_default_host " "and kafka_default_port from the [oslo_messaging_kafka] section." msgid "The Pika-based driver for RabbitMQ has been removed." msgstr "The Pika-based driver for RabbitMQ has been removed." 
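# Translator note: enabling the optional ping endpoint mentioned above is a
# one-line config change; the option name and default are quoted from the
# release note itself:
#
#   [DEFAULT]
#   rpc_ping_enabled = true   # default is false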
msgid "" "The Rabbit driver has removed the configuration options of rabbit_host, " "rabbit_port, rabbit_hosts, rabbit_userid, rabbit_password, " "rabbit_virtual_host rabbit_max_retries and rabbit_durable_queues from the " "[oslo_messaging_rabbit] section." msgstr "" "The Rabbit driver has removed the configuration options of rabbit_host, " "rabbit_port, rabbit_hosts, rabbit_userid, rabbit_password, " "rabbit_virtual_host rabbit_max_retries and rabbit_durable_queues from the " "[oslo_messaging_rabbit] section." msgid "The ZMQ-based driver for RPC communications has been removed" msgstr "The ZMQ-based driver for RPC communications has been removed" msgid "" "The ``[oslo_messaging_rabbit] heartbeat_in_pthread`` config option defaults " "to ``False`` again. For wsgi applications it is recommended to set this " "value to ``True`` but enabling it for non-wsgi services may break such " "service. Please check https://bugs.launchpad.net/oslo.messaging/+bug/1934937 " "for more details." msgstr "" "The ``[oslo_messaging_rabbit] heartbeat_in_pthread`` config option defaults " "to ``False`` again. For wsgi applications it is recommended to set this " "value to ``True`` but enabling it for non-wsgi services may break such " "service. Please check https://bugs.launchpad.net/oslo.messaging/+bug/1934937 " "for more details." msgid "" "The ``[oslo_messaging_rabbit] heartbeat_in_pthread`` config option now " "defaults to ``True``. Applications will run RabbitMQ heartbeat in a Python " "thread by default." msgstr "" "The ``[oslo_messaging_rabbit] heartbeat_in_pthread`` config option now " "defaults to ``True``. Applications will run RabbitMQ heartbeat in a Python " "thread by default." msgid "" "The ``get_rpc_transport``, ``get_rpc_server`` and ``get_rpc_client`` helper " "functions now have support for overriding the class that is instantiated." msgstr "" "The ``get_rpc_transport``, ``get_rpc_server`` and ``get_rpc_client`` helper " "functions now have support for overriding the class that is instantiated." msgid "" "The blocking executor has been deprecated for removal in Rocky and support " "is now dropped in Ussuri. Its usage was never recommended for applications, " "and it has no test coverage. Applications should choose the appropriate " "threading model that maps to their usage instead." msgstr "" "The blocking executor has been deprecated for removal in Rocky and support " "is now dropped in Ussuri. Its usage was never recommended for applications, " "and it has no test coverage. Applications should choose the appropriate " "threading model that maps to their usage instead." msgid "" "The blocking executor has been deprecated for removal in Rocky. Its usage " "was never recommended for applications, and it has no test coverage. " "Applications should choose the appropriate threading model that maps their " "usage instead." msgstr "" "The blocking executor has been deprecated for removal in Rocky. Its usage " "was never recommended for applications, and it has no test coverage. " "Applications should choose the appropriate threading model that maps their " "usage instead." msgid "" "The driver support for the ZeroMQ messaging library is removed. Users of the " "oslo.messaging RPC services must use the supported rabbit (\"rabbit://...\") " "or amqp1 (\"amqp://...\" )drivers." msgstr "" "The driver support for the ZeroMQ messaging library is removed. Users of the " "oslo.messaging RPC services must use the supported Rabbit (\"rabbit://...\") " "or amqp1 (\"amqp://...\" )drivers." 
msgid "" "The pika driver has been deprecated for removal in Rocky. This driver was " "developed as a replacement for the default rabbit driver. However testing " "has not shown any appreciable improvement over the default rabbit driver in " "terms of performance and stability." msgstr "" "The Pika driver has been deprecated for removal in Rocky. This driver was " "developed as a replacement for the default rabbit driver. However testing " "has not shown any appreciable improvement over the default rabbit driver in " "terms of performance and stability." msgid "" "The purpose of this new endpoint is to help operators do a RPC call (a ping) " "toward a specific RPC callback (e.g. a nova-compute, or a neutron-agent). " "This is helping a lot for monitoring agents (for example, if agents are " "deployed in a kubernetes pod)." msgstr "" "The purpose of this new endpoint is to help operators do a RPC call (a ping) " "toward a specific RPC callback (e.g. a nova-compute, or a neutron agent). " "This is helping a lot with monitoring agents (for example, if agents are " "deployed in a Kubernetes pod)." msgid "" "The rabbitmq driver option ``DEFAULT/max_retries`` has been deprecated for " "removal (at a later point in the future) as it did not make logical sense " "for notifications and for RPC." msgstr "" "The RabbitMQ driver option ``DEFAULT/max_retries`` has been deprecated for " "removal (at a later point in the future) as it did not make logical sense " "for notifications and for RPC." msgid "The rpc_backend option from the [DEFAULT] section has been removed." msgstr "The rpc_backend option from the [DEFAULT] section has been removed." msgid "" "The underlying issue is fixed in amqp version 2.4.1, which is now the " "minimum version that ``oslo.messaging`` requires." msgstr "" "The underlying issue is fixed in amqp version 2.4.1, which is now the " "minimum version that ``oslo.messaging`` requires." msgid "" "This bug has been fixed in `v2.4.1 of amqp `_." msgstr "" "This bug has been fixed in `v2.4.1 of amqp `_." msgid "This feature can be enabled by setting a configuration parameter:" msgstr "This feature can be enabled by setting a configuration parameter:" msgid "" "Threading issues with the kafka-python consumer client were identified and " "documented. The driver has been updated to integrate the confluent-kafka " "python library. The confluent-kafka client leverages the high performance " "librdkafka C client and is safe for multiple thread use." msgstr "" "Threading issues with the kafka-python consumer client were identified and " "documented. The driver has been updated to integrate the confluent-kafka " "python library. The confluent-kafka client leverages the high performance " "librdkafka C client and is safe for multiple thread use." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Users of the Pika-based driver must change the prefix of all the " "transport_url configuration options from \"pika://...\" to \"rabbit://...\" " "to use the default kombu based RabbitMQ driver." msgstr "" "Users of the Pika-based driver must change the prefix of all the " "transport_url configuration options from \"pika://...\" to \"rabbit://...\" " "to use the default kombu based RabbitMQ driver." 
msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "We are now able to enforce the OpenSSL FIPS mode by using " "``[oslo_messaging_rabbit] ssl_enforce_fips_mode``." msgstr "" "We are now able to enforce the OpenSSL FIPS mode by using " "``[oslo_messaging_rabbit] ssl_enforce_fips_mode``." msgid "" "We undeprecated the ``heartbeat_in_pthread`` option. This option will remain " "available to allow customers to run the rabbitmq heartbeat in python thread " "or not." msgstr "" "We un-deprecated the ``heartbeat_in_pthread`` option. This option will " "remain available to allow customers to run the RabbitMQ heartbeat in Python " "thread or not." msgid "" "With the change in the client library used, projects using the Kafka driver " "should use extras oslo.messaging[kafka] to pull in dependencies for the " "driver." msgstr "" "With the change in the client library used, projects using the Kafka driver " "should use extras oslo.messaging[kafka] to pull in dependencies for the " "driver." msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "" "ZeroMQ support has been deprecated. The ZeroMQ driver ``zmq://`` has been " "unmaintained for over a year and no longer functions properly. It is " "recommended to use one of the maintained backends instead, such as RabbitMQ " "or AMQP 1.0." msgstr "" "ZeroMQ support has been deprecated. The ZeroMQ driver ``zmq://`` has been " "unmaintained for over a year and no longer functions properly. It is " "recommended to use one of the maintained backends instead, such as RabbitMQ " "or AMQP 1.0." msgid "" "[`bug 1981093 `_] " "Pulls calls to logging functions out of ``impl_kafka._produce_message``. " "Since ``_produce_message`` is called through tpool.execute, calling logging " "functions inside ``_produce_message`` could cause subsequent calls to " "logging functions to deadlock." msgstr "" "[`bug 1981093 `_] " "Pulls calls to logging functions out of ``impl_kafka._produce_message``. " "Since ``_produce_message`` is called through tpool.execute, calling logging " "functions inside ``_produce_message`` could cause subsequent calls to " "logging functions to deadlock." msgid "[oslo_messaging_metrics] metrics_enabled = True # default is false" msgstr "[oslo_messaging_metrics] metrics_enabled = True # default is false" msgid "" "``heartbeat_in_pthread`` has been deprecated and will be removed in a future " "release. If configured, this option should be unset." msgstr "" "``heartbeat_in_pthread`` has been deprecated and will be removed in a future " "release. If configured, this option should be unset." msgid "oslo.messaging Release Notes" msgstr "oslo.messaging Release Notes" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/newton.rst0000664000175000017500000000021400000000000022612 0ustar00zuulzuul00000000000000============================ Newton Series Release Notes ============================ .. 
release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/ocata.rst0000664000175000017500000000021000000000000022363 0ustar00zuulzuul00000000000000=========================== Ocata Series Release Notes =========================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000022233 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000022600 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000022425 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000022420 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000022424 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/unreleased.rst0000664000175000017500000000015600000000000023434 0ustar00zuulzuul00000000000000============================= Current Series Release Notes ============================= .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000022627 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000023115 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. 
release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000022733 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000022226 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000022232 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000022067 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/requirements.txt0000664000175000017500000000121400000000000020042 0ustar00zuulzuul00000000000000pbr>=2.0.0 # Apache-2.0 futurist>=1.2.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.context>=5.3.0 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.utils>=3.37.0 # Apache-2.0 oslo.serialization>=2.18.0 # Apache-2.0 oslo.service>=1.24.0 # Apache-2.0 stevedore>=1.20.0 # Apache-2.0 debtcollector>=1.2.0 # Apache-2.0 # for jsonutils cachetools>=2.0.0 # MIT License WebOb>=1.7.1 # MIT # for the routing notifier PyYAML>=3.13 # MIT # rabbit driver is the default # we set the amqp version to ensure heartbeat works amqp>=2.5.2 # BSD kombu>=4.6.6 # BSD # middleware oslo.middleware>=3.31.0 # Apache-2.0 # metrics oslo.metrics>=0.2.1 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1546743 oslo.messaging-14.9.0/setup.cfg0000664000175000017500000000363600000000000016411 0ustar00zuulzuul00000000000000[metadata] name = oslo.messaging author = OpenStack author_email = openstack-discuss@lists.openstack.org summary = Oslo Messaging API description_file = README.rst home_page = https://docs.openstack.org/oslo.messaging/latest/ python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Developers Intended Audience :: Information Technology License :: OSI Approved :: Apache Software License Operating System :: OS Independent Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython [extras] amqp1 = pyngus>=2.2.0 # Apache-2.0 kafka = confluent-kafka>=1.3.0 # Apache-2.0 [files] packages = oslo_messaging [entry_points] console_scripts = 
oslo-messaging-send-notification = oslo_messaging.notify.notifier:_send_notification oslo.messaging.drivers = rabbit = oslo_messaging._drivers.impl_rabbit:RabbitDriver amqp = oslo_messaging._drivers.impl_amqp1:ProtonDriver kafka = oslo_messaging._drivers.impl_kafka:KafkaDriver kombu = oslo_messaging._drivers.impl_rabbit:RabbitDriver fake = oslo_messaging._drivers.impl_fake:FakeDriver oslo.messaging.executors = eventlet = futurist:GreenThreadPoolExecutor threading = futurist:ThreadPoolExecutor oslo.messaging.notify.drivers = messagingv2 = oslo_messaging.notify.messaging:MessagingV2Driver messaging = oslo_messaging.notify.messaging:MessagingDriver log = oslo_messaging.notify._impl_log:LogDriver test = oslo_messaging.notify._impl_test:TestDriver noop = oslo_messaging.notify._impl_noop:NoOpDriver routing = oslo_messaging.notify._impl_routing:RoutingDriver oslo.config.opts = oslo.messaging = oslo_messaging.opts:list_opts [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/setup.py0000664000175000017500000000127100000000000016273 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/test-requirements.txt0000664000175000017500000000112600000000000021021 0ustar00zuulzuul00000000000000# Hacking already pins down pep8, pyflakes and flake8 hacking>=6.1.0,<=6.2.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD stestr>=2.0.0 # Apache-2.0 pre-commit>=2.6.0 # MIT testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.2.0 # MIT oslotest>=3.2.0 # Apache-2.0 pifpaf>=2.2.0 # Apache-2.0 # for test_impl_kafka confluent-kafka>=1.3.0 # Apache-2.0 coverage>=4.0 # Apache-2.0 # AMQP 1.0 support depends on the Qpid Proton AMQP 1.0 # development libraries. pyngus>=2.2.0 # Apache-2.0 # Bandit security code scanner bandit>=1.7.0,<1.8.0 # Apache-2.0 eventlet>=0.23.0 # MIT greenlet>=0.4.15 # MIT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724686539.1546743 oslo.messaging-14.9.0/tools/0000775000175000017500000000000000000000000015720 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/tools/functions.sh0000664000175000017500000000061100000000000020262 0ustar00zuulzuul00000000000000 wait_for_line () { while read line do echo "$line" | grep -q "$1" && break echo "$line" | grep "$2" && exit 1 done < "$3" # Read the fifo for ever otherwise process would block cat "$3" >/dev/null & } function clean_exit(){ local error_code="$?" 
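# Kill any background processes the test started (e.g. a broker launched
# via pifpaf), remove the scratch directory passed in as $1, and preserve
# the exit status captured above.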
for job in `jobs -p` do kill -9 $job done rm -rf "$1" return $error_code } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/tools/messages_length.yaml0000664000175000017500000006103700000000000021763 0ustar00zuulzuul00000000000000# The numbers below present the length of the messages (in string equivalent) # that were sent through the MQ backend (RabbitMQ) during the # boot_and_delete_server Rally scenario run (50 times, concurrency equal to 3). # The information was gathered via adding log to the _send method of # AMQPDriverBase class after all lines related to the msg object modifications. # Message length was gathered to introduce real-like message generator for # simulator.py oslo.messaging tool, that could introduce traffic closer to the # real control plane load and estimate both message length and size (in bytes) # going through the MQ layer. test_data: string_lengths: 806, 992, 992, 1116, 1116, 1191, 1595, 1199, 1043, 1210, 1220, 1191, 1123, 1624, 2583, 1153, 4412, 1642, 1210, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 6386, 6368, 6386, 6368, 6386, 11292, 2136, 5407, 6368, 11292, 2136, 5407, 2116, 2116, 11292, 2136, 5398, 5407, 4357, 5431, 2116, 2116, 5398, 4407, 5431, 2116, 2116, 5398, 4457, 5431, 4387, 2627, 4387, 2094, 2038, 2627, 2094, 2038, 5438, 4387, 5438, 2310, 2310, 2627, 2094, 2496, 2038, 5451, 2310, 5438, 2496, 2496, 2240, 2099, 2240, 1500, 2099, 2626, 5451, 2240, 2626, 1555, 1555, 1702, 1500, 5451, 1702, 2450, 2450, 1570, 1155, 4539, 1570, 4539, 1641, 2099, 1641, 2626, 1555, 1702, 2450, 1570, 3518, 5710, 1641, 2226, 2643, 3382, 6671, 3518, 2531, 2226, 2643, 2124, 3382, 5500, 3518, 2531, 2226, 2643, 965, 2124, 3382, 5500, 6858, 2531, 1177, 965, 2124, 5687, 1177, 965, 1575, 1500, 1500, 2549, 7745, 1575, 5687, 7688, 2183, 1177, 2549, 965, 6574, 7688, 2183, 7270, 2128, 7270, 2128, 1575, 6535, 2549, 6574, 6480, 2643, 2584, 6535, 1220, 2644, 7688, 2183, 1500, 1676, 2611, 1500, 6480, 2611, 2643, 1624, 2241, 1153, 4696, 7270, 2128, 2584, 2644, 1590, 2611, 2611, 1555, 2241, 1555, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4480, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4504, 5431, 4434, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 2549, 6574, 7688, 2183, 1500, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1575, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4532, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4532, 5431, 4434, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 1500, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4532, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 2099, 2626, 5451, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 5687, 1177, 965, 1575, 2549, 6574, 7688, 2183, 7270, 1500, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 
3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1500, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 1500, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6516, 2300, 6516, 5839, 6156, 6512, 1597, 1500, 1026, 1676, 1500, 6516, 4505, 1220, 2300, 6516, 1624, 6535, 1153, 4668, 5839, 2228, 6156, 1590, 6480, 2643, 6512, 2228, 2584, 1611, 2644, 1102, 1701, 2611, 4354, 2449, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1575, 2582, 2398, 6386, 2226, 6368, 2093, 3420, 6576, 2142, 4452, 11292, 2136, 6536, 5407, 6386, 6368, 2298, 2116, 2116, 2608, 5398, 1855, 1880, 2175, 4526, 5431, 11292, 2136, 5407, 4456, 2627, 2094, 2038, 2116, 2310, 2496, 5438, 2116, 2240, 5398, 5451, 4604, 5431, 2099, 2626, 1555, 4506, 2627, 1702, 2094, 2038, 5438, 2310, 2450, 2496, 4539, 2240, 1641, 2099, 1500, 1570, 6386, 2626, 5451, 1555, 6368, 1500, 1702, 2450, 11292, 2136, 1570, 5407, 3518, 2116, 2116, 5398, 4539, 2226, 1641, 4604, 2643, 5431, 3382, 3518, 5500, 4506, 2531, 2627, 2094, 2038, 5438, 2226, 2310, 2124, 2643, 3382, 5451, 2496, 5500, 2240, 2531, 2099, 2626, 1555, 5687, 2124, 1177, 1702, 965, 2450, 1570, 4539, 1641, 1575, 3518, 2226, 2643, 3382, 5500, 1575, 5687, 2531, 1177, 965, 6574, 2549, 2124, 1500, 1500, 7688, 2183, 7270, 2128, 1575, 5687, 1177, 2549, 6574, 965, 6535, 7688, 2183, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1500, 1702, 1500, 2450, 1570, 3308, 2043, 3518, 7270, 2128, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 1575, 2549, 6574, 4604, 6535, 6536, 7688, 2183, 2298, 6480, 2643, 2608, 1855, 1880, 2175, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 7270, 2128, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4604, 5431, 2142, 4604, 6535, 6536, 4506, 2627, 2094, 2038, 2298, 6480, 2643, 2310, 5438, 2608, 2496, 1855, 1880, 2175, 2584, 2240, 2644, 2099, 2626, 5451, 2611, 1555, 2611, 2241, 1702, 2450, 1555, 1570, 1702, 2450, 1570, 3308, 2043, 3518, 4539, 1641, 3518, 2582, 2398, 6386, 2226, 6368, 2093, 3420, 6576, 2226, 2643, 3382, 5500, 2142, 4604, 11292, 2136, 6536, 5407, 2531, 2116, 2116, 2124, 5398, 2298, 2608, 1855, 1880, 2175, 4604, 5431, 5687, 1177, 4506, 965, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 1500, 4539, 1641, 1500, 1575, 2549, 6574, 3518, 7688, 2183, 2226, 2643, 3382, 5500, 2531, 2124, 7270, 2128, 6386, 6368, 11292, 2136, 5407, 5687, 1177, 2116, 
2116, 5398, 965, 4604, 6535, 5431, 6480, 2643, 4506, 2584, 2627, 2094, 2644, 2038, 5438, 2611, 2310, 2611, 5451, 2496, 2241, 2240, 1575, 1555, 1702, 2450, 2099, 1570, 2626, 3308, 1555, 2043, 3518, 1702, 4539, 1575, 2450, 1641, 1570, 2549, 1500, 6574, 1500, 1220, 2582, 2398, 2226, 2093, 7688, 2183, 3420, 1624, 6576, 1676, 3518, 1153, 4717, 2142, 1590, 4501, 2226, 6536, 1611, 2643, 7270, 2128, 1102, 1701, 3382, 5500, 2449, 2298, 2608, 1855, 2531, 1880, 2175, 2124, 6535, 6480, 2643, 2584, 5687, 2644, 1177, 2611, 965, 2611, 2241, 1555, 1702, 2450, 6386, 6368, 1570, 3308, 2043, 3518, 11292, 2136, 5407, 2116, 2582, 2116, 2398, 5398, 2226, 2093, 4551, 3420, 6576, 5431, 1575, 1500, 6574, 1500, 4481, 2549, 1575, 2627, 2142, 2094, 2038, 5438, 2310, 2496, 4579, 6536, 2240, 2099, 7688, 2183, 2626, 5451, 1555, 2298, 1702, 2450, 1570, 2608, 1855, 1880, 2175, 7270, 2128, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 5687, 2241, 1177, 965, 1555, 6386, 6368, 1702, 2450, 1570, 11292, 2136, 3308, 5407, 2043, 3518, 2116, 2116, 5398, 2582, 4579, 2398, 5431, 2226, 2093, 3420, 4481, 1500, 6576, 2627, 2094, 2038, 5438, 1500, 2142, 2310, 1575, 1575, 2496, 2240, 6574, 2099, 4579, 2626, 1555, 2549, 5451, 1702, 6536, 2450, 1570, 7688, 2183, 2298, 2608, 1855, 1880, 2175, 3518, 5710, 2226, 1641, 2643, 3382, 6671, 7270, 2128, 2531, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 2124, 4629, 5431, 6535, 4531, 2627, 2094, 2038, 2310, 6480, 2643, 2496, 5438, 6858, 2584, 1177, 2240, 965, 2644, 1500, 2611, 5451, 2611, 2241, 2099, 1500, 2626, 1555, 1555, 1702, 2450, 1702, 1575, 1570, 2450, 4539, 1570, 1641, 3308, 2043, 3518, 1575, 3518, 2549, 7745, 2582, 2398, 2226, 2643, 2226, 7688, 2093, 2183, 3382, 3420, 5500, 6576, 2531, 2124, 2142, 4629, 6536, 2298, 2608, 7270, 2128, 1855, 1880, 2175, 5687, 1177, 965, 6535, 6480, 2643, 2584, 2644, 6386, 6368, 2611, 2611, 2241, 11292, 2136, 5407, 1555, 1500, 1702, 2116, 2116, 1500, 5398, 2450, 1570, 3308, 4629, 2043, 5431, 3518, 1575, 4531, 2549, 2627, 2094, 2038, 5438, 6574, 2582, 2310, 2496, 2398, 5451, 2240, 7688, 2183, 2226, 1575, 2093, 3420, 2099, 2626, 1555, 6576, 1702, 2450, 2142, 1570, 4629, 6536, 4539, 1641, 2298, 2608, 1855, 1880, 2175, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 1500, 2531, 1500, 2124, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 5687, 6386, 1177, 1555, 6368, 965, 1702, 2450, 11292, 1570, 2136, 3308, 5407, 2043, 3518, 2116, 2116, 5398, 1575, 2582, 4679, 2398, 2226, 5431, 2093, 3420, 6576, 4581, 2627, 2094, 2038, 2310, 1575, 2496, 2549, 2142, 5438, 6574, 2240, 4679, 6536, 7688, 2183, 5451, 2099, 2626, 2298, 1555, 2608, 1855, 1880, 2175, 1702, 2450, 1570, 7270, 4539, 1500, 2128, 1641, 1500, 1597, 1066, 3518, 2226, 2643, 3382, 5500, 1220, 2531, 1624, 2124, 1153, 1676, 4818, 6386, 6535, 6368, 1624, 6480, 2643, 2584, 1611, 2644, 5687, 2611, 11292, 2136, 2611, 2241, 1177, 965, 1102, 1701, 5407, 2449, 1555, 1575, 1702, 2116, 2450, 2116, 1570, 5398, 3308, 2043, 3518, 4602, 5431, 2582, 2398, 4532, 2226, 2627, 2094, 2038, 2093, 5438, 2310, 3420, 2496, 6576, 1575, 2240, 5451, 2549, 2142, 6574, 4630, 6536, 2099, 2626, 1500, 7688, 2183, 1500, 4539, 1555, 2298, 1641, 2608, 1702, 1855, 1880, 2175, 2450, 1570, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 2531, 6386, 6368, 6535, 2124, 6480, 2643, 11292, 2136, 2584, 5407, 2644, 2611, 2611, 2241, 2116, 2116, 5687, 5398, 1177, 1555, 965, 1575, 1702, 2450, 4630, 1570, 3308, 5431, 2043, 3518, 4532, 2627, 2094, 2038, 5438, 2310, 2496, 2582, 2398, 2240, 5451, 2226, 2093, 1500, 2099, 
3420, 6576, 2626, 1500, 1555, 1575, 6574, 2549, 2142, 1702, 4630, 4539, 2450, 1641, 6536, 1570, 7688, 2183, 2298, 2608, 1855, 1880, 2175, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 5687, 2241, 1177, 965, 1555, 1702, 6386, 2450, 6368, 1570, 3308, 2043, 1575, 1500, 3518, 11292, 2136, 5407, 1500, 2582, 2116, 2398, 2116, 2226, 5398, 2093, 3420, 6576, 4680, 5431, 2142, 4680, 6536, 4582, 1575, 2627, 2094, 2038, 5438, 6574, 2549, 2310, 5451, 2496, 2298, 2240, 2608, 1855, 1880, 2175, 7688, 2183, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 7270, 2128, 2531, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4680, 5431, 4582, 1500, 2627, 2094, 2038, 2310, 2124, 2496, 5438, 1500, 2240, 5451, 6535, 2099, 2626, 1555, 5687, 1177, 1702, 965, 6480, 2643, 2450, 2584, 1570, 2644, 2611, 1575, 4539, 2611, 1641, 2241, 1555, 1702, 3518, 2450, 1570, 3308, 1575, 2043, 3518, 2226, 2549, 2643, 6574, 3382, 5500, 2531, 7688, 2183, 2582, 2398, 2124, 2226, 2093, 3420, 6576, 2142, 4680, 6536, 5687, 1177, 2298, 965, 2608, 1855, 1880, 2175, 7270, 2128, 1500, 1500, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4680, 5431, 4582, 1575, 2627, 2094, 2038, 5438, 2549, 6574, 2310, 2496, 5451, 6535, 1575, 2240, 6480, 2643, 2099, 2626, 7688, 2183, 2584, 1555, 2644, 1702, 2611, 2611, 2450, 1570, 2241, 4539, 1641, 1555, 7270, 2128, 1712, 1702, 1154, 2450, 1570, 3308, 2043, 1500, 3518, 3518, 1500, 2582, 2398, 1220, 2226, 2226, 2643, 2093, 1624, 3420, 6576, 3382, 1153, 5500, 6535, 2531, 2124, 4768, 1624, 2142, 1676, 4552, 6480, 6536, 2643, 2584, 2644, 2611, 2298, 2611, 2608, 1855, 1880, 2241, 2175, 5687, 1177, 965, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4552, 1575, 1575, 6536, 6386, 2549, 6368, 6574, 1500, 2298, 1500, 7688, 2183, 2608, 11292, 1855, 1880, 2175, 2136, 5407, 2116, 2116, 5398, 4552, 5431, 7270, 4482, 2128, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 6386, 6368, 6535, 4539, 1641, 11292, 2136, 5407, 6480, 2643, 1575, 2584, 3518, 2644, 2611, 2611, 2116, 2116, 2241, 5398, 2226, 2643, 1555, 1702, 3382, 5500, 4580, 2450, 1570, 5431, 3308, 2043, 2531, 3518, 4482, 2124, 2627, 2094, 2038, 2310, 2496, 5438, 2582, 5451, 2240, 2398, 2226, 5687, 2093, 2099, 3420, 2626, 1177, 1555, 6576, 965, 1702, 2450, 1570, 2142, 4580, 4539, 6536, 1641, 1500, 2298, 1500, 2608, 1855, 1880, 2175, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 2549, 6574, 5687, 7688, 2183, 1177, 965, 7270, 2128, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4630, 1575, 5431, 1500, 1575, 4532, 1500, 2627, 2094, 2038, 5438, 2310, 2496, 2549, 6574, 6535, 2240, 7688, 2183, 2099, 2626, 5451, 6480, 2643, 1555, 2584, 2644, 1702, 2611, 2450, 1570, 2611, 7270, 2241, 2128, 1555, 1702, 4539, 1641, 2450, 1570, 3308, 2043, 3518, 3518, 6535, 6480, 2643, 2582, 2226, 2398, 2226, 2584, 2644, 2643, 2611, 2093, 2611, 3382, 3420, 2241, 5500, 6576, 1500, 1500, 2531, 1555, 2142, 4630, 6536, 2124, 1702, 2450, 1570, 2298, 5687, 2608, 1855, 1880, 2175, 3308, 2043, 1177, 965, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4630, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1575, 6386, 6368, 2549, 6574, 11292, 2136, 7688, 2183, 5407, 2116, 2116, 5398, 4630, 5431, 4532, 2627, 2094, 2038, 2310, 5438, 7270, 2496, 2128, 1500, 1500, 2240, 2099, 5451, 2626, 1555, 6386, 6368, 1702, 2450, 1570, 11292, 1575, 2136, 5407, 4539, 2116, 1641, 2116, 5398, 6535, 3518, 6480, 2643, 4630, 5431, 2226, 2643, 2584, 2644, 2611, 3382, 2611, 2241, 
5500, 1555, 4532, 2627, 2094, 2038, 2531, 1702, 2310, 2450, 1570, 2496, 2124, 3308, 5438, 2240, 2043, 3518, 2099, 5451, 2626, 1555, 1702, 2582, 2398, 5687, 2450, 2226, 1570, 1177, 965, 2093, 3420, 6576, 2142, 4630, 4539, 6536, 1641, 1500, 3518, 1500, 2298, 2608, 1855, 1880, 2175, 2226, 2643, 1220, 3382, 5500, 1575, 1676, 2531, 2549, 6574, 1624, 2124, 7688, 2183, 1153, 4741, 1590, 1611, 5687, 1102, 1701, 1177, 965, 2449, 1597, 1066, 7270, 2128, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4525, 5431, 4455, 2627, 2094, 2038, 5438, 2310, 2496, 1500, 2240, 5451, 1500, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 4539, 1641, 2549, 6574, 6535, 3518, 7688, 2183, 6480, 2643, 2584, 2644, 2226, 2611, 2643, 2611, 3382, 2241, 5500, 1555, 2531, 7270, 2124, 2128, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4553, 6536, 1500, 1500, 2298, 2608, 1855, 1880, 2175, 6535, 5687, 1177, 965, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 1575, 6574, 6386, 2549, 2142, 6368, 4553, 11292, 2136, 6536, 5407, 7688, 2183, 2116, 2298, 2116, 5398, 2608, 1855, 1880, 2175, 1500, 1500, 7270, 2128, 4553, 5431, 4455, 6386, 6368, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 6535, 5451, 11292, 2136, 6480, 2643, 5407, 2584, 2099, 2116, 2626, 2644, 1555, 2116, 2611, 5398, 1702, 2611, 1575, 2450, 2241, 4539, 4553, 1570, 1555, 1641, 5431, 1702, 2450, 4455, 1570, 2627, 2094, 2038, 3308, 5438, 2310, 2043, 2496, 3518, 2240, 3518, 5451, 2099, 2626, 2226, 2643, 2582, 2398, 3382, 1555, 5500, 2226, 1702, 2093, 2531, 2450, 3420, 1570, 6576, 2124, 4539, 1641, 2142, 4553, 6536, 2298, 3518, 1500, 2608, 1855, 1880, 2175, 1500, 2226, 2643, 3382, 5500, 5687, 2531, 1177, 965, 2124, 6386, 6368, 11292, 2136, 5407, 1575, 5687, 2549, 6574, 1177, 2116, 965, 2116, 7688, 2183, 5398, 4553, 5431, 1575, 4455, 2627, 2094, 2038, 5438, 2310, 2496, 1500, 7270, 1500, 2128, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 2549, 4539, 6574, 1641, 6535, 3518, 7688, 2183, 6480, 2643, 2584, 2226, 2644, 2643, 2611, 3382, 2611, 5500, 2241, 1555, 2531, 1702, 2450, 2124, 1570, 7270, 2128, 3308, 2043, 3518, 2582, 1500, 2398, 2226, 1500, 2093, 5687, 3420, 1177, 6576, 2142, 4553, 965, 6536, 6535, 2298, 2608, 6480, 1855, 2643, 1880, 2175, 2584, 2644, 2611, 1220, 2611, 2241, 1555, 1702, 2450, 1570, 1676, 3308, 2043, 3518, 1575, 2582, 2398, 1624, 2226, 2549, 6574, 2093, 3420, 1153, 6386, 6576, 7688, 6368, 2183, 1575, 4767, 1624, 11292, 2136, 5407, 2142, 4551, 1611, 7270, 2128, 1102, 1701, 1500, 2449, 1500, 6536, 2116, 2116, 5398, 2298, 2608, 1855, 1880, 2175, 4551, 5431, 4481, 2627, 2094, 2038, 5438, 2310, 2496, 5451, 6535, 2240, 2099, 6480, 2643, 2626, 1555, 2584, 2644, 1702, 4539, 2611, 6386, 1641, 2450, 2611, 6368, 1570, 2241, 1555, 1575, 1702, 11292, 2450, 1570, 2136, 5407, 3308, 2043, 3518, 2116, 3518, 2116, 5398, 4579, 2582, 2226, 5431, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 4481, 6576, 2627, 2094, 2038, 5438, 2531, 2310, 2496, 5451, 2142, 2124, 4579, 2240, 6536, 2099, 2626, 1555, 2298, 2608, 1702, 1855, 1880, 2175, 2450, 1570, 4539, 1641, 5687, 1500, 1177, 965, 1500, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 2549, 6574, 7688, 2183, 5687, 1177, 965, 6386, 6368, 11292, 2136, 1575, 5407, 2116, 2116, 5398, 1500, 1500, 4579, 7270, 2128, 5431, 4481, 1575, 2627, 2094, 2038, 5438, 2549, 2310, 6574, 2496, 6535, 5451, 2240, 7688, 2183, 2099, 6457, 2643, 2626, 1555, 2584, 4539, 2644, 2611, 1641, 1702, 7270, 2128, 2611, 2450, 2241, 1570, 1555, 1500, 
1500, 1702, 2450, 1570, 3308, 2043, 3518, 3518, 6535, 2582, 2398, 2226, 2643, 6480, 2643, 3382, 2226, 5500, 2584, 2644, 2093, 3420, 2611, 6553, 2531, 2611, 2124, 2241, 2142, 4579, 1555, 6513, 1702, 2298, 2450, 1570, 2608, 1855, 1880, 2175, 3308, 2043, 3518, 5687, 1177, 965, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4579, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1575, 2549, 6574, 6386, 6368, 7688, 2183, 11292, 2136, 5407, 1500, 2116, 1500, 2116, 5398, 4579, 5431, 4481, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 7270, 2128, 1555, 1575, 1702, 2450, 4539, 1570, 6386, 1641, 6368, 11292, 2136, 6535, 5407, 6480, 2643, 2116, 2116, 3518, 2584, 5398, 2644, 2611, 2226, 2643, 4629, 2611, 5431, 3382, 2241, 5500, 4531, 1555, 2531, 2627, 2094, 2038, 1702, 2310, 5438, 2450, 2496, 2124, 1570, 3308, 2240, 2043, 3518, 5451, 2099, 1500, 2626, 1500, 1555, 5687, 1702, 1177, 2450, 2582, 965, 1570, 2398, 2226, 2093, 3420, 6576, 4539, 1641, 2142, 4629, 6536, 3518, 2298, 2608, 1855, 1880, 2175, 2226, 2643, 3382, 5500, 1575, 1220, 2531, 1676, 2549, 6574, 2124, 1624, 7688, 2183, 1153, 4769, 1624, 1611, 1102, 1701, 5687, 2449, 1177, 1597, 965, 1066, 7270, 2128, 1500, 6386, 1500, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 1575, 4553, 5431, 4483, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 5451, 6535, 6574, 2549, 6480, 2643, 3518, 2584, 2644, 7688, 2183, 2226, 2611, 2643, 2611, 5710, 2241, 3382, 1641, 1555, 6671, 1702, 2450, 1570, 3308, 2531, 2043, 3518, 2124, 1500, 2582, 1500, 2398, 2226, 2093, 3420, 7270, 2128, 6576, 2142, 6858, 4581, 1177, 6536, 2298, 965, 2608, 6535, 1855, 1880, 2175, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 2142, 4581, 2549, 7745, 6536, 1575, 2298, 2608, 7688, 2183, 1855, 1880, 2175, 6386, 6368, 1500, 1500, 11292, 2136, 5407, 7270, 2128, 2116, 2116, 5398, 4631, 6386, 6368, 5431, 11292, 2136, 5407, 4533, 2627, 2094, 2038, 2310, 2496, 2116, 5438, 2116, 5398, 2240, 2099, 6535, 2626, 6480, 2643, 5451, 2584, 2644, 4631, 1555, 5431, 2611, 4533, 2627, 2094, 2038, 1702, 2310, 2496, 2611, 2241, 2450, 1570, 2240, 5438, 2099, 2626, 1555, 5451, 1555, 1702, 4539, 1641, 1702, 2450, 2450, 1570, 1570, 3518, 3308, 2043, 3518, 2226, 1575, 2643, 4539, 3382, 5500, 2582, 2398, 3518, 2226, 1641, 2226, 2093, 3420, 2643, 6576, 2531, 3382, 2124, 5500, 2142, 4631, 6536, 2531, 2298, 2608, 1855, 1880, 2175, 2124, 5687, 1177, 965, 1500, 1500, 1575, 5687, 1177, 2549, 965, 6574, 7688, 2183, 7270, 2128, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 1575, 4631, 1500, 2549, 1500, 5431, 6574, 6535, 4533, 2627, 2094, 2038, 7688, 2183, 2310, 6480, 2643, 2496, 5438, 2240, 2584, 2099, 2626, 2644, 2611, 5451, 1555, 2611, 1702, 2241, 2450, 1570, 1555, 1702, 2450, 1570, 7270, 3308, 2128, 4539, 2043, 3518, 1641, 3518, 2582, 2226, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 6576, 2531, 2142, 4631, 2124, 6536, 6535, 2298, 2608, 6480, 1855, 2643, 1880, 2175, 2584, 2644, 2611, 2611, 2241, 5687, 1177, 1555, 965, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4631, 6536, 2298, 1575, 2608, 1855, 1880, 2175, 6574, 1575, 1676, 7688, 2183, 1220, 2549, 1624, 1153, 4691, 6386, 6368, 1590, 1611, 7270, 2128, 1102, 1701, 11292, 2136, 2449, 5407, 1500, 1500, 2116, 2116, 5398, 4549, 5431, 6535, 6386, 6480, 6368, 2643, 4479, 2627, 2094, 2038, 2584, 2644, 5438, 1575, 2310, 5451, 2496, 2611, 2240, 2099, 2611, 2241, 2626, 11292, 2136, 1555, 5407, 1702, 2450, 1555, 1702, 
2116, 1570, 2116, 2450, 5398, 4539, 1570, 1641, 4577, 3308, 5431, 2043, 3518, 3518, 4479, 2226, 2627, 2094, 2038, 5438, 2643, 2310, 3382, 5500, 2496, 2582, 5451, 2240, 2398, 2099, 2531, 2626, 1555, 2226, 2093, 1702, 2124, 3420, 2450, 1570, 6576, 2142, 4577, 6536, 4539, 1641, 2298, 5687, 2608, 1855, 1880, 2175, 1177, 965, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1500, 1500, 1575, 5687, 2549, 1177, 6574, 965, 7688, 2183, 6386, 6368, 1575, 7270, 2128, 11292, 2136, 5407, 2116, 2116, 5398, 4577, 5431, 4479, 1575, 2627, 2094, 2038, 5438, 2549, 2310, 6574, 2496, 6535, 1500, 5451, 1500, 2240, 6480, 2643, 7688, 2183, 2584, 2099, 2644, 2626, 1555, 2611, 2611, 1702, 4539, 2450, 2241, 1570, 1641, 1555, 1702, 2450, 1570, 3308, 7270, 2043, 2128, 3518, 3518, 2582, 2398, 2226, 2226, 2643, 2093, 3382, 3420, 5500, 6576, 2142, 2531, 4577, 6536, 6535, 6480, 2643, 2124, 2584, 2644, 2298, 2608, 2611, 1855, 1880, 2175, 2611, 2241, 1555, 1702, 2450, 1570, 5687, 3308, 1177, 2043, 965, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1500, 2142, 1500, 4577, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 2549, 6574, 7688, 2183, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 7270, 2128, 4627, 5431, 4529, 2627, 2094, 2038, 5438, 2310, 2496, 6386, 6368, 6535, 11292, 2136, 5407, 2240, 2099, 5451, 2626, 6480, 2643, 1555, 2584, 2116, 2644, 1702, 2611, 2116, 2450, 5398, 2611, 1570, 2241, 4539, 4627, 1641, 1555, 1500, 5431, 1500, 1702, 2450, 4529, 1570, 2627, 2094, 3518, 2038, 5438, 3308, 2310, 2043, 3518, 2226, 2496, 2643, 3382, 5451, 1575, 2240, 5500, 2582, 2398, 2226, 2099, 2626, 2093, 3420, 1555, 2531, 6576, 2124, 1702, 4539, 2450, 2142, 1570, 1641, 4627, 6536, 2298, 2608, 1855, 1880, 2175, 5687, 1177, 965, 3518, 2226, 2643, 3382, 5500, 2531, 1575, 2124, 2549, 6574, 6386, 7688, 2183, 6368, 1568, 5687, 1177, 11292, 965, 2136, 5407, 1500, 1500, 2116, 2116, 5398, 7270, 2128, 1712, 1575, 4627, 1154, 5431, 4529, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 5451, 1676, 2099, 2626, 1555, 1220, 1702, 2450, 1575, 1570, 2549, 6574, 6535, 1624, 4539, 7688, 2183, 1641, 1500, 1500, 6480, 2643, 3518, 1153, 2584, 2644, 2226, 4817, 2611, 2643, 2611, 1590, 3382, 2241, 5500, 1624, 1555, 2559, 2561, 2559, 2531, 1702, 2124, 7270, 2579, 2579, 2450, 1611, 1570, 2128, 3308, 1102, 1701, 2449, 2043, 3518, 1597, 1106, 2582, 5687, 2398, 2226, 1177, 2093, 3420, 6576, 965, 6535, 2142, 4601, 6536, 6480, 2643, 2584, 2644, 2298, 1500, 2608, 1500, 2611, 1855, 1880, 2175, 2611, 2241, 1555, 1702, 2450, 1570, 1575, 3308, 2043, 3518, 1575, 2549, 6574, 2582, 2398, 2226, 7688, 2093, 2183, 3420, 6576, 2142, 4601, 6536, 2298, 6386, 2608, 6368, 1855, 1880, 2175, 7270, 2128, 11292, 2136, 5407, 2116, 2116, 5398, 4601, 5431, 4531, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 1500, 5451, 1500, 6535, 2099, 2626, 1555, 6480, 2643, 2584, 1702, 2644, 2450, 2611, 1570, 2611, 2241, 1555, 4539, 1641, 1702, 2450, 1570, 3308, 2043, 3518, 3518, 2582, 2226, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 6576, 2531, 2142, 4629, 2124, 6536, 2298, 2608, 1855, 1880, 2175, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4629, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 7291, 2128, 6534, 6479, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4629, 7291, 2128, 6536, 2298, 6534, 2608, 1855, 1880, 2175, 6479, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 
3308, 2043, 3518, 2582, 2398, 2226, 1500, 2093, 3420, 1500, 6576, 2142, 4629, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1500, 1500, 1220, 1624, 1153, 4412, 1676, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1676, 1220, 1624, 1153, 4412, 1597, 908, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1676, 1220, 1624, 1153, 1500, 4412, 1500, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1597, 908, 1500, 1500, 1676, 1220, 1624, 1153, 4412, 1590, 1500, 1500, 1500, 1500, 1500, 1500././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/tools/setup-scenario-env.sh0000775000175000017500000000230200000000000022003 0ustar00zuulzuul00000000000000#!/bin/bash set -e . tools/functions.sh SCENARIO=${SCENARIO:-"scenario01"} function _setup_kafka { SCALA_VERSION=${SCALA_VERSION:-"2.12"} KAFKA_VERSION=${KAFKA_VERSION:-"2.0.0"} if [[ -z "$(which kafka-server-start)" ]] && [[ -z $(which kafka-server-start.sh) ]]; then DATADIR=$(mktemp -d /tmp/OSLOMSG-KAFKA.XXXXX) trap "clean_exit $DATADIR" EXIT tarball=kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz wget http://archive.apache.org/dist/kafka/${KAFKA_VERSION}/$tarball -O $DATADIR/$tarball tar -xzf $DATADIR/$tarball -C $DATADIR export PATH=$DATADIR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}/bin:$PATH fi } case $SCENARIO in scenario01) export RPC_TRANSPORT_URL=rabbit://pifpaf:secret@127.0.0.1:5682/ export NOTIFY_TRANSPORT_URL=rabbit://pifpaf:secret@127.0.0.1:5682/ RUN="--env-prefix RABBITMQ run rabbitmq" ;; scenario02) _setup_kafka export RPC_TRANSPORT_URL=rabbit://pifpaf:secret@127.0.0.1:5682/ export NOTIFY_TRANSPORT_URL=kafka://127.0.0.1:9092/ RUN="--env-prefix RABBITMQ run rabbitmq -- pifpaf --env-prefix KAFKA run kafka" ;; *) ;; esac pifpaf $RUN -- $* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0 oslo.messaging-14.9.0/tools/simulator.py0000775000175000017500000007172200000000000020325 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. try: # Avoid https://github.com/PyCQA/pycodestyle/issues/472 import eventlet eventlet.monkey_patch() except ImportError: raise import argparse import bisect import collections import functools import itertools import json import logging import os import random import signal import socket import string import sys import threading import time import yaml from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging import notify # noqa from oslo_messaging import rpc # noqa from oslo_utils import timeutils LOG = logging.getLogger() CURRENT_PID = None CURRENT_HOST = None CLIENTS = [] MESSAGES = [] IS_RUNNING = True SERVERS = [] TRANSPORT = None USAGE = """ Usage: ./simulator.py [-h] [--url URL] [-d DEBUG]\ {notify-server,notify-client,rpc-server,rpc-client} ... 
Usage example:
 python tools/simulator.py\
 --url rabbit://stackrabbit:secretrabbit@localhost/ rpc-server
 python tools/simulator.py\
 --url rabbit://stackrabbit:secretrabbit@localhost/ rpc-client\
 --exit-wait 15000 -p 64 -m 64"""

MESSAGES_LIMIT = 1000
DISTRIBUTION_BUCKET_SIZE = 500


def init_random_generator():
    data = []
    file_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(file_dir, 'messages_length.yaml')) as m_file:
        content = yaml.safe_load(m_file)
        data += [int(n) for n in content[
            'test_data']['string_lengths'].split(', ')]

    ranges = collections.defaultdict(int)
    for msg_length in data:
        range_start = ((msg_length // DISTRIBUTION_BUCKET_SIZE) *
                       DISTRIBUTION_BUCKET_SIZE + 1)
        ranges[range_start] += 1

    ranges_start = sorted(ranges.keys())
    total_count = len(data)

    accumulated_distribution = []
    running_total = 0
    for range_start in ranges_start:
        norm = float(ranges[range_start]) / total_count
        running_total += norm
        accumulated_distribution.append(running_total)

    def weighted_random_choice():
        r = random.random() * running_total
        start = ranges_start[bisect.bisect_right(accumulated_distribution, r)]
        return random.randrange(start, start + DISTRIBUTION_BUCKET_SIZE)

    return weighted_random_choice


class LoggingNoParsingFilter(logging.Filter):
    def filter(self, record):
        msg = record.getMessage()
        for i in ['received {', 'MSG_ID is ']:
            if i in msg:
                return False
        return True


Message = collections.namedtuple(
    'Message', ['seq', 'cargo', 'client_ts', 'server_ts', 'return_ts'])


def make_message(seq, cargo, client_ts=0, server_ts=0, return_ts=0):
    return Message(seq, cargo, client_ts, server_ts, return_ts)


def update_message(message, **kwargs):
    return Message(*message)._replace(**kwargs)


class MessageStatsCollector(object):

    def __init__(self, label):
        self.label = label
        self.buffer = []  # buffer to store messages during report interval
        self.series = []  # stats for every report interval
        now = time.time()
        diff = int(now) - now + 1  # align start to whole seconds
        threading.Timer(diff, self.monitor).start()  # schedule in a second

    def monitor(self):
        global IS_RUNNING
        if IS_RUNNING:
            # NOTE(kbespalov): this does not work reliably, because the
            # monitor restarts in 1 sec +/- 150 ms due to high threading
            # contention between rpc clients
            threading.Timer(1.0, self.monitor).start()
        now = time.time()

        count = len(self.buffer)

        size = 0
        min_latency = sys.maxsize
        max_latency = 0
        sum_latencies = 0

        for i in range(count):
            p = self.buffer[i]
            size += len(p.cargo)

            latency = None
            if p.return_ts:
                latency = p.return_ts - p.client_ts  # round-trip
            elif p.server_ts:
                latency = p.server_ts - p.client_ts  # client -> server

            if latency:
                sum_latencies += latency
                min_latency = min(min_latency, latency)
                max_latency = max(max_latency, latency)

        del self.buffer[:count]  # trim processed items

        seq = len(self.series)
        stats = dict(seq=seq, timestamp=now, count=count, size=size)
        msg = ('%-14s: seq: %-4d count: %-6d bytes: %-10d' %
               (self.label, seq, count, size))

        if sum_latencies:
            latency = sum_latencies / count
            stats.update(dict(latency=latency,
                              min_latency=min_latency,
                              max_latency=max_latency))
            msg += (' latency: %-9.3f min: %-9.3f max: %-9.3f' %
                    (latency, min_latency, max_latency))

        self.series.append(stats)
        LOG.info(msg)

    def push(self, parsed_message):
        self.buffer.append(parsed_message)

    def get_series(self):
        return self.series
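    # NOTE: calc_stats() below merges the per-second series of several
    # collectors into whole-run totals. A worked example (illustrative
    # only, not produced by the tool): two one-second points with
    # (count=10, size=2000) and (count=30, size=6000) spanning two
    # seconds yield count_p_s = 40 / 2 = 20 msg/sec and
    # size_p_s = 8000 / 2 = 4000 bytes/sec, and the reported latency is
    # the count-weighted mean, sum(latency * count) / count.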
    @staticmethod
    def calc_stats(label, *collectors):
        count = 0
        size = 0
        min_latency = sys.maxsize
        max_latency = 0
        sum_latencies = 0
        start = sys.maxsize
        end = 0

        for point in itertools.chain(*(c.get_series() for c in collectors)):
            count += point['count']
            size += point['size']

            if point['count']:
                # NOTE(kbespalov):
                # we take the start and end times from the first and last
                # processed messages; there is no reason to widen the
                # boundaries if the server was idle before the clients
                # started and after they finished.
                start = min(start, point['timestamp'])
                end = max(end, point['timestamp'])

            if 'latency' in point:
                sum_latencies += point['latency'] * point['count']
                min_latency = min(min_latency, point['min_latency'])
                max_latency = max(max_latency, point['max_latency'])

        # start is the timestamp of the earliest block, which includes
        # samples for the prior second
        start -= 1
        duration = end - start if count else 0
        stats = dict(count=count, size=size, duration=duration,
                     count_p_s=0, size_p_s=0)
        if duration:
            stats.update(dict(start=start, end=end,
                              count_p_s=count / duration,
                              size_p_s=size / duration))

        msg = ('%s: duration: %.2f count: %d (%.1f msg/sec) '
               'bytes: %d (%.0f bps)' %
               (label, duration, count, stats['count_p_s'],
                size, stats['size_p_s']))

        if sum_latencies:
            latency = sum_latencies / count
            stats.update(dict(latency=latency,
                              min_latency=min_latency,
                              max_latency=max_latency))
            msg += (' latency: %.3f min: %.3f max: %.3f' %
                    (latency, min_latency, max_latency))

        LOG.info(msg)
        return stats


class NotifyEndpoint(object):

    def __init__(self, wait_before_answer, requeue):
        self.wait_before_answer = wait_before_answer
        self.requeue = requeue
        self.received_messages = MessageStatsCollector('server')
        self.cache = set()

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.debug("%s %s %s %s", ctxt, publisher_id, event_type, payload)

        server_ts = time.time()

        message = update_message(payload, server_ts=server_ts)
        self.received_messages.push(message)

        if self.requeue and message.seq not in self.cache:
            self.cache.add(message.seq)

            if self.wait_before_answer > 0:
                time.sleep(self.wait_before_answer)

            return messaging.NotificationResult.REQUEUE

        return messaging.NotificationResult.HANDLED


def notify_server(transport, topic, wait_before_answer, duration, requeue):
    endpoints = [NotifyEndpoint(wait_before_answer, requeue)]
    target = messaging.Target(topic=topic)
    server = notify.get_notification_listener(transport, [target],
                                              endpoints, executor='eventlet')
    run_server(server, duration=duration)
    return endpoints[0]


class BatchNotifyEndpoint(object):

    def __init__(self, wait_before_answer, requeue):
        self.wait_before_answer = wait_before_answer
        self.requeue = requeue
        self.received_messages = MessageStatsCollector('server')
        self.cache = set()

    def info(self, batch):
        LOG.debug('msg rcv')
        LOG.debug("%s", batch)

        server_ts = time.time()

        for item in batch:
            message = update_message(item['payload'], server_ts=server_ts)
            self.received_messages.push(message)

        return messaging.NotificationResult.HANDLED


def batch_notify_server(transport, topic, wait_before_answer, duration,
                        requeue):
    endpoints = [BatchNotifyEndpoint(wait_before_answer, requeue)]
    target = messaging.Target(topic=topic)
    server = notify.get_batch_notification_listener(
        transport, [target],
        endpoints, executor='eventlet',
        batch_size=1000, batch_timeout=5)
    run_server(server, duration=duration)
    return endpoints[0]


class RpcEndpoint(object):

    def __init__(self, wait_before_answer):
        self.wait_before_answer = wait_before_answer
        self.received_messages = MessageStatsCollector('server')

    def info(self, ctxt, message):
        server_ts = time.time()

        LOG.debug("######## RCV: %s", message)

        reply = update_message(message, server_ts=server_ts)
        self.received_messages.push(reply)

        if self.wait_before_answer > 0:
            time.sleep(self.wait_before_answer)

        return reply
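# NOTE: ServerControlEndpoint implements a simple client-driven shutdown
# handshake: every client reports 'sync_start' before sending and
# 'sync_done' when finished, and once the set of connected clients drains
# the server stops itself. A minimal sketch of the client side of the
# protocol (the id value is illustrative; see host_based_id() below):
#
#     client.call({}, 'sync_start', message={'id': '0 1a2b@myhost'})
#     # ...send the payload messages...
#     client.call({}, 'sync_done', message={'id': '0 1a2b@myhost'})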
class ServerControlEndpoint(object):

    def __init__(self, controlled_server):
        self.connected_clients = set()
        self.controlled_server = controlled_server

    def sync_start(self, ctx, message):
        """Handle start reports from clients"""
        client_id = message['id']
        LOG.info('The client %s started to send messages' % client_id)
        self.connected_clients.add(client_id)

    def sync_done(self, ctx, message):
        """Handle done reports from clients"""
        client_id = message['id']
        LOG.info('The client %s finished sending messages.' % client_id)
        if client_id in self.connected_clients:
            self.connected_clients.remove(client_id)
        if not self.connected_clients:
            LOG.info(
                'The clients sent all messages. Shutting down the server...')
            threading.Timer(1, self._stop_server_with_delay).start()

    def _stop_server_with_delay(self):
        self.controlled_server.stop()
        self.controlled_server.wait()


class Client(object):

    def __init__(self, client_id, client, method, has_result,
                 wait_after_msg):
        self.client_id = client_id
        self.client = client
        self.method = method
        self.wait_after_msg = wait_after_msg

        self.seq = 0
        self.messages_count = len(MESSAGES)
        # Start sending the messages from a random position to avoid
        # memory reuse and to generate a more realistic load on the
        # library and the message transport
        self.position = random.randint(0, self.messages_count - 1)
        self.sent_messages = MessageStatsCollector('client-%s' % client_id)
        self.errors = MessageStatsCollector('error-%s' % client_id)

        if has_result:
            self.round_trip_messages = MessageStatsCollector(
                'round-trip-%s' % client_id)

    def host_based_id(self):
        _id = "%(client_id)s %(salt)s@%(hostname)s"
        return _id % {'hostname': CURRENT_HOST,
                      'salt': hex(id(self))[2:],
                      'client_id': self.client_id}

    def send_msg(self):
        msg = make_message(self.seq, MESSAGES[self.position], time.time())
        self.sent_messages.push(msg)

        res = None
        try:
            res = self.method(self.client, msg)
        except Exception:
            self.errors.push(msg)
        else:
            LOG.debug("SENT: %s", msg)

        if res:
            return_ts = time.time()
            res = update_message(res, return_ts=return_ts)
            self.round_trip_messages.push(res)

        self.seq += 1
        self.position = (self.position + 1) % self.messages_count
        if self.wait_after_msg > 0:
            time.sleep(self.wait_after_msg)


class RPCClient(Client):

    def __init__(self, client_id, transport, target, timeout, is_cast,
                 wait_after_msg, sync_mode=False):
        client = rpc.get_rpc_client(transport, target)
        method = _rpc_cast if is_cast else _rpc_call

        super(RPCClient, self).__init__(client_id,
                                        client.prepare(timeout=timeout),
                                        method, not is_cast, wait_after_msg)
        self.sync_mode = sync_mode
        self.is_sync = False

        # prepare the sync client
        if sync_mode:
            if sync_mode == 'call':
                self.sync_client = self.client
            else:
                self.sync_client = client.prepare(fanout=True,
                                                  timeout=timeout)

    def send_msg(self):
        if self.sync_mode and not self.is_sync:
            self.is_sync = self.sync_start()
        super(RPCClient, self).send_msg()

    def sync_start(self):
        try:
            msg = {'id': self.host_based_id()}
            method = _rpc_call if self.sync_mode == 'call' else _rpc_cast
            method(self.sync_client, msg, 'sync_start')
        except Exception:
            LOG.error('The client: %s failed to sync with %s.' %
                      (self.client_id, self.client.target))
            return False

        LOG.info('The client: %s successfully synced with %s' % (
            self.client_id, self.client.target))
        return True

    def sync_done(self):
        try:
            msg = {'id': self.host_based_id()}
            method = _rpc_call if self.sync_mode == 'call' else _rpc_cast
            method(self.sync_client, msg, 'sync_done')
        except Exception:
            LOG.error('The client: %s failed to finish the sync with %s.'
                      % (self.client_id, self.client.target))
            return False

        LOG.info('The client: %s successfully finished the sync with %s' %
                 (self.client_id, self.client.target))
        return True


class NotifyClient(Client):

    def __init__(self, client_id, transport, topic, wait_after_msg):
        client = notify.Notifier(transport,
                                 driver='messaging',
                                 topics=topic)
        client = client.prepare(publisher_id='publisher-%d' % client_id)
        method = _notify
        super(NotifyClient, self).__init__(client_id, client, method,
                                           False, wait_after_msg)


def generate_messages(messages_count):
    # Limit the number of messages. Clients will iterate over the array
    # again if the number of messages to be sent is bigger than
    # MESSAGES_LIMIT
    if messages_count > MESSAGES_LIMIT:
        messages_count = MESSAGES_LIMIT
    LOG.info("Generating %d random messages", messages_count)
    generator = init_random_generator()
    for i in range(messages_count):
        length = generator()
        msg = ''.join(random.choice(
            string.ascii_lowercase) for x in range(length))
        MESSAGES.append(msg)

    LOG.info("Messages have been prepared")


def wrap_sigexit(f):
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except SignalExit as e:
            LOG.info('Signal %s caught. Interrupting the execution',
                     e.signo)
            for server in SERVERS:
                server.stop()
                server.wait()
        finally:
            if TRANSPORT:
                TRANSPORT.cleanup()
    return inner


@wrap_sigexit
def run_server(server, duration=None):
    global IS_RUNNING
    SERVERS.append(server)
    server.start()
    if duration:
        with timeutils.StopWatch(duration) as stop_watch:
            while not stop_watch.expired() and IS_RUNNING:
                time.sleep(1)
        server.stop()
        IS_RUNNING = False
    server.wait()
    LOG.info('The server is terminating')
    time.sleep(1)  # wait for stats collector to process the last second


def rpc_server(transport, target, wait_before_answer, executor, duration):
    endpoints = [RpcEndpoint(wait_before_answer)]
    server = rpc.get_rpc_server(transport, target, endpoints, executor)

    # make the rpc server controllable by rpc clients
    endpoints.append(ServerControlEndpoint(server))

    LOG.debug("starting RPC server for target %s", target)
    run_server(server, duration=duration)
    return server.dispatcher.endpoints[0]


@wrap_sigexit
def spawn_rpc_clients(threads, transport, targets, wait_after_msg, timeout,
                      is_cast, messages_count, duration, sync_mode):
    p = eventlet.GreenPool(size=threads)
    targets = itertools.cycle(targets)
    for i in range(threads):
        target = next(targets)
        LOG.debug("starting RPC client for target %s", target)
        client_builder = functools.partial(RPCClient, i, transport, target,
                                           timeout, is_cast, wait_after_msg,
                                           sync_mode)
        p.spawn_n(send_messages, i, client_builder,
                  messages_count, duration)
    p.waitall()


@wrap_sigexit
def spawn_notify_clients(threads, topic, transport, message_count,
                         wait_after_msg, timeout, duration):
    p = eventlet.GreenPool(size=threads)
    for i in range(threads):
        client_builder = functools.partial(NotifyClient, i, transport,
                                           [topic], wait_after_msg)
        p.spawn_n(send_messages, i, client_builder, message_count, duration)
    p.waitall()


def send_messages(client_id, client_builder, messages_count, duration):
    global IS_RUNNING
    client = client_builder()
    CLIENTS.append(client)

    # align message sending closer to whole seconds
    now = time.time()
    diff = int(now) - now + 1
    time.sleep(diff)

    if duration:
        with timeutils.StopWatch(duration) as stop_watch:
            while not stop_watch.expired() and IS_RUNNING:
                client.send_msg()
                eventlet.sleep()
        IS_RUNNING = False
    else:
        LOG.debug("Sending %d messages using client %d",
                  messages_count, client_id)
        for _ in range(messages_count):
            client.send_msg()
            eventlet.sleep()
            if not IS_RUNNING:
                break
        LOG.debug("Client %d has sent %d messages", client_id,
                  messages_count)

    # wait for replies to be collected
    time.sleep(1)

    # send stop request to the rpc server
    if isinstance(client, RPCClient) and client.is_sync:
        client.sync_done()
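# NOTE: the two helpers below map onto oslo.messaging's RPC semantics:
# _rpc_call() blocks until the server replies (so round-trip latency can
# be measured), while _rpc_cast() is fire-and-forget and returns nothing.
# A minimal sketch (illustrative only, using a client prepared as in
# RPCClient above):
#
#     res = _rpc_call(client, msg)  # waits for and returns the reply
#     _rpc_cast(client, msg)        # returns immediately, no reply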
def _rpc_call(client, msg, remote_method='info'):
    try:
        res = client.call({}, remote_method, message=msg)
    except Exception as e:
        LOG.exception('Error %s on CALL for message %s', str(e), msg)
        raise
    else:
        LOG.debug("SENT: %s, RCV: %s", msg, res)
        return res


def _rpc_cast(client, msg, remote_method='info'):
    try:
        client.cast({}, remote_method, message=msg)
    except Exception as e:
        LOG.exception('Error %s on CAST for message %s', str(e), msg)
        raise
    else:
        LOG.debug("SENT: %s", msg)


def _notify(notification_client, msg):
    notification_client.info({}, 'compute.start', msg)


def show_server_stats(endpoint, json_filename):
    LOG.info('=' * 35 + ' summary ' + '=' * 35)
    output = dict(series={}, summary={})
    output['series']['server'] = endpoint.received_messages.get_series()
    stats = MessageStatsCollector.calc_stats(
        'server', endpoint.received_messages)
    output['summary'] = stats

    if json_filename:
        write_json_file(json_filename, output)


def show_client_stats(clients, json_filename, has_reply=False):
    LOG.info('=' * 35 + ' summary ' + '=' * 35)
    output = dict(series={}, summary={})

    for cl in clients:
        cl_id = cl.client_id
        output['series']['client_%s' % cl_id] = cl.sent_messages.get_series()
        output['series']['error_%s' % cl_id] = cl.errors.get_series()

        if has_reply:
            output['series']['round_trip_%s' % cl_id] = (
                cl.round_trip_messages.get_series())

    sent_stats = MessageStatsCollector.calc_stats(
        'client', *(cl.sent_messages for cl in clients))
    output['summary']['client'] = sent_stats

    error_stats = MessageStatsCollector.calc_stats(
        'error', *(cl.errors for cl in clients))
    output['summary']['error'] = error_stats

    if has_reply:
        round_trip_stats = MessageStatsCollector.calc_stats(
            'round-trip', *(cl.round_trip_messages for cl in clients))
        output['summary']['round_trip'] = round_trip_stats

    if json_filename:
        write_json_file(json_filename, output)


def write_json_file(filename, output):
    with open(filename, 'w') as f:
        f.write(json.dumps(output))
        LOG.info('Stats are written to %s', filename)


class SignalExit(SystemExit):
    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo


def signal_handler(signum, frame):
    global IS_RUNNING
    IS_RUNNING = False

    raise SignalExit(signum)


def _setup_logging(is_debug):
    log_level = logging.DEBUG if is_debug else logging.INFO

    logging.basicConfig(
        stream=sys.stdout, level=log_level,
        format="%(asctime)-15s %(levelname)s %(name)s %(message)s")
    logging.getLogger().handlers[0].addFilter(LoggingNoParsingFilter())
    for i in ['kombu', 'amqp', 'stevedore', 'qpid.messaging',
              'oslo.messaging._drivers.amqp', ]:
        logging.getLogger(i).setLevel(logging.WARN)
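# For reference, a sketch of a duration-limited benchmark run using the
# flags defined in main() below (the transport URL is a placeholder;
# note that parser-level options such as --url and -l must precede the
# subcommand):
#
#     python tools/simulator.py --url rabbit://user:pass@localhost/ -l 60 \
#         rpc-server
#     python tools/simulator.py --url rabbit://user:pass@localhost/ -l 60 \
#         rpc-client -p 10 --sync call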
def main():
    parser = argparse.ArgumentParser(
        description='Tools to play with oslo.messaging\'s RPC',
        usage=USAGE,
    )
    parser.add_argument('--url', dest='url',
                        help="oslo.messaging transport url")
    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
                        help="Turn on DEBUG logging level instead of WARN")
    parser.add_argument('-tp', '--topic', dest='topic',
                        default="profiler_topic",
                        help="Topics to publish/receive messages to/from.")
    parser.add_argument('-s', '--server', dest='server',
                        default="profiler_server",
                        help="Servers to publish/receive messages to/from.")
    parser.add_argument('-tg', '--targets', dest='targets', nargs="+",
                        default=["profiler_topic.profiler_server"],
                        help="Targets to publish/receive messages to/from.")
    parser.add_argument('-l', dest='duration', type=int,
                        help='send messages for the given number of seconds')
    parser.add_argument('-j', '--json', dest='json_filename',
                        help='File name to store results in JSON format')
    parser.add_argument('--config-file', dest='config_file', type=str,
                        help="Oslo messaging config file")

    subparsers = parser.add_subparsers(dest='mode',
                                       help='notify/rpc server/client mode')

    server = subparsers.add_parser('notify-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('--requeue', dest='requeue', action='store_true')

    server = subparsers.add_parser('batch-notify-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('--requeue', dest='requeue', action='store_true')

    client = subparsers.add_parser('notify-client')
    client.add_argument('-p', dest='threads', type=int, default=1,
                        help='number of client threads')
    client.add_argument('-m', dest='messages', type=int, default=1,
                        help='number of calls per thread')
    client.add_argument('-w', dest='wait_after_msg', type=float, default=-1,
                        help='sleep time between two messages')
    client.add_argument('--timeout', dest='timeout', type=int, default=3,
                        help='client timeout')

    server = subparsers.add_parser('rpc-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('-e', '--executor', dest='executor',
                        type=str, default='eventlet',
                        help='name of a message executor')

    client = subparsers.add_parser('rpc-client')
    client.add_argument('-p', dest='threads', type=int, default=1,
                        help='number of client threads')
    client.add_argument('-m', dest='messages', type=int, default=1,
                        help='number of calls per thread')
    client.add_argument('-w', dest='wait_after_msg', type=float, default=-1,
                        help='sleep time between two messages')
    client.add_argument('--timeout', dest='timeout', type=int, default=3,
                        help='client timeout')
    client.add_argument('--exit-wait', dest='exit_wait', type=int, default=0,
                        help='Keep connections open N seconds after calls '
                        'have completed')
    client.add_argument('--is-cast', dest='is_cast', action='store_true',
                        help='Use `cast` instead of `call` RPC methods')
    client.add_argument('--is-fanout', dest='is_fanout', action='store_true',
                        help='fanout=True for CAST messages')
    client.add_argument('--sync', dest='sync', choices=('call', 'fanout'),
                        help="stop the server when all messages have been "
                             "sent by the clients")

    args = parser.parse_args()

    _setup_logging(is_debug=args.debug)

    if args.config_file:
        cfg.CONF(["--config-file", args.config_file])

    global TRANSPORT
    if args.mode in ['rpc-server', 'rpc-client']:
        TRANSPORT = messaging.get_transport(cfg.CONF, url=args.url)
    else:
        TRANSPORT = messaging.get_notification_transport(cfg.CONF,
                                                         url=args.url)

    if args.mode in ['rpc-client', 'notify-client']:
        # always generate the maximum number of messages for
        # duration-limited tests
        generate_messages(MESSAGES_LIMIT if args.duration else args.messages)

    # oslo.config defaults
    cfg.CONF.heartbeat_interval = 5
    cfg.CONF.prog = os.path.basename(__file__)
    cfg.CONF.project = 'oslo.messaging'

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    if args.mode == 'rpc-server':
        target = messaging.Target(topic=args.topic, server=args.server)
        endpoint = rpc_server(TRANSPORT, target, args.wait_before_answer,
                              args.executor, args.duration)
        show_server_stats(endpoint, args.json_filename)

    elif args.mode == 'notify-server':
        endpoint = notify_server(TRANSPORT, args.topic,
                                 args.wait_before_answer,
                                 args.duration, args.requeue)
        show_server_stats(endpoint, args.json_filename)

    elif args.mode == 'batch-notify-server':
        endpoint = batch_notify_server(TRANSPORT, args.topic,
                                       args.wait_before_answer,
                                       args.duration, args.requeue)
        show_server_stats(endpoint, args.json_filename)

    elif args.mode == 'notify-client':
        spawn_notify_clients(args.threads, args.topic, TRANSPORT,
                             args.messages, args.wait_after_msg,
                             args.timeout, args.duration)
        show_client_stats(CLIENTS, args.json_filename)

    elif args.mode == 'rpc-client':
        targets = []
        for target in args.targets:
            tp, srv = target.partition('.')[::2]
            t = messaging.Target(topic=tp, server=srv,
                                 fanout=args.is_fanout)
            targets.append(t)
        spawn_rpc_clients(args.threads, TRANSPORT, targets,
                          args.wait_after_msg, args.timeout, args.is_cast,
                          args.messages, args.duration, args.sync)

        show_client_stats(CLIENTS, args.json_filename, not args.is_cast)

        if args.exit_wait:
            LOG.info("Finished. Waiting for %d seconds", args.exit_wait)
            time.sleep(args.exit_wait)


if __name__ == '__main__':
    CURRENT_PID = os.getpid()
    CURRENT_HOST = socket.gethostname()
    main()
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0
oslo.messaging-14.9.0/tools/test-setup.sh0000775000175000017500000000140100000000000020370 0ustar00zuulzuul00000000000000
#!/bin/bash -xe

# This script is run by OpenStack CI before unit tests are run; it sets
# up the test system as needed.
# Developers should set up their test systems in a similar way.

# This setup for amqp1 needs to be run by a user that can run sudo.

# qdrouterd needs to be installed from the qpid/testing repo in Ubuntu.
# bindep does not allow setting up another repo, so we just install
# this package here.

# inspired by project-config install-distro-packages.sh
#if apt-get -v >/dev/null 2>&1 ; then
#    sudo add-apt-repository -y ppa:qpid/testing
#    sudo apt-get -qq update
#    sudo PATH=/usr/sbin:/sbin:$PATH DEBIAN_FRONTEND=noninteractive \
#        apt-get -q --option "Dpkg::Options::=--force-confold" \
#        --assume-yes install qdrouterd
#fi
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724686509.0
oslo.messaging-14.9.0/tox.ini0000664000175000017500000000530000000000000016071 0ustar00zuulzuul00000000000000
[tox]
minversion = 3.18.0
envlist = py3, pep8

[testenv]
passenv =
  OS_*
  ZUUL_CACHE_DIR
  REQUIREMENTS_PIP_LOCATION
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
  -r{toxinidir}/test-requirements.txt
  -r{toxinidir}/requirements.txt
commands = stestr run --slowest {posargs}

[testenv:pep8]
commands =
  pre-commit run -a
  # run security linter
  bandit -r oslo_messaging -x tests -n5

[testenv:cover]
setenv =
  PYTHON=coverage run --source oslo_messaging --parallel-mode
commands =
  coverage erase
  stestr run --slowest {posargs}
  coverage combine
  coverage html -d cover
  coverage report
  coverage report --show-missing

[testenv:venv]
commands = {posargs}

[testenv:docs]
allowlist_externals = rm
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
  -r{toxinidir}/doc/requirements.txt
commands =
  rm -fr doc/build
  sphinx-build -W --keep-going -b html doc/source doc/build/html

# The following functional test scenarios are defined for the
# testing of the messaging backends and to demonstrate the functional
# correctness across driver combinations (e.g. RPC and Notify)
#
#                 RPC            Notify
#              --------        --------
# scenario01    rabbit          rabbit
# scenario02    rabbit          kafka
#
[testenv:py310-func-scenario01]
setenv =
  SCENARIO=scenario01
allowlist_externals =
  {toxinidir}/tools/setup-scenario-env.sh
commands =
  {toxinidir}/tools/setup-scenario-env.sh stestr run --slowest {posargs:oslo_messaging.tests.functional}

[testenv:py310-func-scenario02]
setenv =
  SCENARIO=scenario02
allowlist_externals =
  {toxinidir}/tools/setup-scenario-env.sh
commands =
  {toxinidir}/tools/setup-scenario-env.sh stestr run --slowest {posargs:oslo_messaging.tests.functional}

[testenv:bandit]
# NOTE(kgiusti): This is required for the integration test job of the
# bandit project. Please do not remove.
commands = bandit -r oslo_messaging -x tests -n5

[flake8]
show-source = True
enable-extensions = H203,H106
# E731 skipped as it disallows assigning a lambda expression
ignore = E731,H405,W504
exclude = .tox,dist,doc,*.egg,build,__init__.py

[hacking]
import_exceptions =

[flake8:local-plugins]
extension =
  O321 = checks:check_oslo_namespace_imports
  O324 = checks:CheckForLoggingIssues
paths = ./oslo_messaging/hacking

[testenv:releasenotes]
allowlist_externals = rm
commands =
  rm -rf releasenotes/build
  sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
deps =
  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
  -r{toxinidir}/doc/requirements.txt

[testenv:bindep]
deps = bindep
commands = bindep {posargs}
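# As an illustrative sketch (not part of the upstream file), the scenario
# environments above can be exercised locally with:
#
#   tox -e py310-func-scenario01   # RabbitMQ for both RPC and Notify
#   tox -e py310-func-scenario02   # RabbitMQ for RPC, Kafka for Notify
#
# Both assume pifpaf can spawn the required backends; see
# tools/setup-scenario-env.sh for the per-scenario transport URLs.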