oslo.service-3.4.0/.coveragerc:

[run]
branch = True
source = oslo_service
omit = oslo_service/tests/*

[report]
ignore_errors = True

oslo.service-3.4.0/.mailmap:

# Format is:
#
#

oslo.service-3.4.0/.pre-commit-config.yaml:

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace
      # Replaces or checks mixed line ending
      - id: mixed-line-ending
        args: ['--fix', 'lf']
        exclude: '.*\.(svg)$'
      # Forbid files which have a UTF-8 byte-order marker
      - id: check-byte-order-marker
      # Checks that non-binary executables have a proper shebang
      - id: check-executables-have-shebangs
      # Check for files that contain merge conflict strings.
      - id: check-merge-conflict
      # Check for debugger imports and py37+ breakpoint() calls in python source
      - id: debug-statements
      - id: check-yaml
        files: .*\.(yaml|yml)$
  - repo: https://opendev.org/openstack/hacking
    rev: 6.1.0
    hooks:
      - id: hacking
        additional_dependencies: []
  - repo: https://github.com/PyCQA/bandit
    rev: 1.7.6
    hooks:
      - id: bandit
        args: ['-x', 'tests']
  - repo: https://github.com/PyCQA/doc8
    rev: v1.1.1
    hooks:
      - id: doc8

oslo.service-3.4.0/.stestr.conf:

[DEFAULT]
test_path=./oslo_service/tests
top_path=./

oslo.service-3.4.0/.zuul.yaml:

- project:
    templates:
      - check-requirements
      - lib-forward-testing-python3
      - openstack-python3-jobs
      - periodic-stable-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3

oslo.service-3.4.0/AUTHORS:

Accela Zhao Akihiro Motoki Alan Pevec Alberto Murillo Alessandro Pilotti
Alex Gaynor Alex Holden Alexander Gorodnev Alexis Lee Allain Legacy
Andreas Jaeger Andreas Jaeger Angus Salkeld Ann Kamyshnikova Ben Nemec
Bence Romsics Bernhard M. Wiedemann Brant Knudson Brian Elliott Carl Baldwin
Cedric Brandily Chang Bo Guo ChangBo Guo(gcb) ChangBo Guo(gcb) Charles Short
Christian Berendt Christopher Lefelhocz Chuck Short Claudiu Belu Clif Houck
Corey Bryant Dan Prince Daniel Bengtsson Daniel P. Berrange
Davanum Srinivas (dims) Davanum Srinivas Davanum Srinivas David Ripton
DennyZhang Dina Belova Dirk Mueller Dmitry Tantsur Doug Hellmann Doug Hellmann
Duan Jiong Duc Truong Elena Ezhova Eoghan Glynn Eric Brown Eric Fried
Eric Fried Eric Guo Eric Windisch Fengqian.Gao Flavio Percoco Gary Kotton
Ghanshyam Ghanshyam Mann Gorka Eguileor Hengqing Hu Hervé Beraud Ian Wienand
Ihar Hrachyshka Ilya Shakhat Iswarya_Vakati James Carey Jason Dunsmore
Jason Kölker Javeme Javier Pena Jay Pipes Jeff Peeler Joe Gordon Joe Heck
John L. Villalovos Joshua Harlow Joshua Harlow Joshua Harlow Julia Kreger
Julien Danjou Kenneth Giusti Kevin L. Mitchell Kiall Mac Innes Kirill Bespalov
Kurt Taylor Marian Horban Mark McClain Mark McLoughlin Maru Newby
Matt Riedemann Matthew Treinish Michael Johnson Michael Still
Mitsuru Kanabuchi Mohammed Naser Moisés Guimarães de Medeiros Monty Taylor
OpenStack Release Bot Pavlo Shchelokovskyy Qin Zhao Raymond Pekowski
Rodolfo Alonso Hernandez Rohit Jaiswal Roman Podoliaka Ronald Bradford
Russell Bryant Sean Dague Sean McGinnis Sean McGinnis Sebastian Lohff
Sergey Kraynev Sergey Lukjanov Sergey Vilgelm Slawek Kaplonski Soren Hansen
Stephen Finucane Steve Martinelli Steven Hardy Surojit Pathak Takashi Kajinami
Takashi Natsume Thomas Herve Thomas Herve Tianhua Huang Tom Cammann TommyLike
Tony Breeds Victor Sergeyev Victor Stinner Vu Cong Tuan Wenzhi Yu YuehuiLei
Zane Bitter ZhiQiang Fan ZhijunWei ZhongShengping Zhongyue Luo apporc caoyuan
chenke dengzhaosen fujioka yuuichi gecong1973 gongysh jacky06 jun923.gu
lei zhang likui lin-hua-cheng liu-sheng liyingjun melanie witt melissaml
pengyuesheng ravikumar-venkatesan ricolin sonu.kumar stanzgy venkata anil
venkatamahesh wu.shiming xhzhf xuanyandong yan.haifeng zhangboye zwei

oslo.service-3.4.0/CONTRIBUTING.rst:

If you would like to contribute to the development of oslo's libraries, first
you must take a look at this page:
https://specs.openstack.org/openstack/oslo-specs/specs/policy/contributing.html

If you would like to contribute to the development of OpenStack, you must
follow the steps in this page:
http://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack should be submitted
for review via the Gerrit tool, following the workflow documented at:
http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/oslo.service

oslo.service-3.4.0/ChangeLog:

CHANGES
=======

3.4.0 ----- * Switch to coverage command * reno: Update master for unmaintained/yoga
* pre-commit: Integrate doc8 and bandit * pre-commit: Bump versions * Bump hacking
* Update python classifier in setup.cfg
3.3.0 ----- * Update master for stable/2023.2
3.2.0 ----- * Imported Translations from Zanata * Bump bandit * Revert "Moves supported python
runtimes from version 3.8 to 3.10" * Moves supported python runtimes from version 3.8 to 3.10
* Update master for stable/2023.1
3.1.1 ----- * Fix issues related to tox4
3.1.0 ----- * Fix misuse of assertTrue * Imported Translations from Zanata * Add Python3 antelope
unit tests * Update master for stable/zed * Fix native threads on child process
3.0.0 ----- * Imported Translations from Zanata * Drop python3.6/3.7 support in testing runtime
* Add Python3 zed unit tests * Update master for stable/yoga
2.8.0 ----- * Make debug option of wsgi server configurable
2.7.0 ----- * Fix fo() backdoor command for non-class objects * Fix BackOffLoopingCall error so it
is not misleading * Add Python3 yoga unit tests * Update master for stable/xena
2.6.0 ----- * setup.cfg: Replace dashes with underscores * Remove references to 'sys.version\_info'
* Move flake8 as a pre-commit local target * Add Python3 xena unit tests * Update master for
stable/wallaby * Remove lower-constraints remnants
2.5.0 ----- * remove unicode from code * Use TOX\_CONSTRAINTS\_FILE * Dropping lower constraints
testing * Drop custom implementation of EVENTLET\_HUB * Use TOX\_CONSTRAINTS\_FILE * Use py3 as the
default runtime for tox * Use TOX\_CONSTRAINTS\_FILE * Add Python3 wallaby unit tests * Update
master for stable/victoria * Adding pre-commit
2.4.0 ----- * [goal] Migrate testing to ubuntu focal * Bump bandit version
2.3.2 ----- * Do not import "oslo.log" in the main module
2.3.1 -----
2.3.0 ----- * Fix wsgi SSL tests for wsgi module under python 3 * Reactivate wsgi test related to
socket option under python 3 * Fix wsgi/SSL/ipv6 tests for wsgi module under python 3 * Fix some
SSL tests for wsgi module under python 3 * Raise minimum version of eventlet to 0.25.2 * Fix
pygments style * Stop to use the \_\_future\_\_ module
2.2.0 ----- * Drop six usages * Fix hacking min version to 3.0.1 * Switch to newer
openstackdocstheme and reno versions * Remove the unused coding style modules * Remove translation
sections from setup.cfg * Align tests with monkey patch original current\_thread \_active * Remove
monotonic usage * Align contributing doc with oslo's policy * Monkey patch original current\_thread
\_active * Bump default tox env from py37 to py38 * Add py38 package metadata * Use unittest.mock
instead of third party mock * Add release notes links to doc index * Add Python3 victoria unit
tests * Update master for stable/ussuri * Cleanup py27 support
2.1.1 ----- * Update hacking for Python3
2.1.0 ----- * Update eventlet * Update the minversion parameter * remove outdated header * reword
releasenote for py27 support dropping
2.0.0 ----- * [ussuri][goal] Drop python 2.7 support and testing * tox: Trivial cleanup
1.41.1 ------ * Add 'is\_available' function * tox: Keeping going with docs * Switch to official
Ussuri jobs * Extend test cert validity to 2049 * Update the constraints
url 1.41.0 ------ * Update master for stable/train 1.40.2 ------ * Reno for SIGHUP fix 1.40.1 ------ * Polish usage.rst * restart: don't stop process on sighup when mutating * Move doc related modules to doc/requirements.txt * Add Python 3 Train unit tests 1.40.0 ------ * Stop using pbr to build docs * Make PID availabe as formatstring in backdoor path 1.39.0 ------ * Cap Bandit below 1.6.0 and update Sphinx requirement * Add workers' type check before launching the services * Replace git.openstack.org URLs with opendev.org URLs * OpenDev Migration Patch * Dropping the py35 testing * Update master for stable/stein 1.38.0 ------ * Update oslo.service to require yappi 1.0 or newer * add python 3.7 unit test job * Update hacking version 1.37.0 ------ * Bump oslo.utils lower constraint to 3.40.2 1.36.0 ------ * Profile Oslo Service processes * Use eventletutils Event class * Avoid eventlet\_backdoor listing on same port 1.35.0 ------ * Use template for lower-constraints * Deprecate the ThreadGroup.cancel() API * Document the threadgroup module * Actually test child SIGHUP signal * Restore correct signal handling in Python3 * Add stop\_on\_exception to TG timers * Add better timer APIs to ThreadGroup * Update mailinglist from dev to discuss * Use SleepFixture in looping call test suite 1.33.0 ------ * Fixture to mock loopingcall wait() * Limit monotonic to py2 1.32.1 ------ * Fix stop of loopingcall * Use eventlet Event for loopingcall events * Clean up .gitignore references to personal tools * Always build universal wheels 1.32.0 ------ * Ensure connection is active in graceful shutdown tests * Stop asserting on Eventlet internals * Skips signal handling on Windows * add lib-forward-testing-python3 test job * add python 3.6 unit test job * import zuul job settings from project-config * Update reno for stable/rocky 1.31.3 ------ * Remove unnecessary pyNN testenv * Convert oslo.service to using stestr * Add release notes link to README * Fix oslo.service ProcessLauncher fails to call stop * fix tox python3 overrides * Add test dependency on requests * Remove moxstubout 1.31.2 ------ * [ThreadGroup] Don't remove timer when stop timers * Make lower-constraints job voting * tox.ini: Use python3.5 in py35 environment * Python 3: Fix eventlet wakeup after signal * Python 3: Fix non-deterministic test * Remove stale pip-missing-reqs tox test * Trivial: Update pypi url to new url * add lower-constraints job * move doc8 test to pep8 job * set default python to python3 1.31.1 ------ * Revert "Revert "Permit aborting loopingcall while sleeping"" 1.31.0 ------ * Remove eventlet cap * Fixup certificates and skip failing test 1.30.0 ------ * Imported Translations from Zanata * Imported Translations from Zanata * Update links in README * Imported Translations from Zanata * Updated from global requirements * Update reno for stable/queens * Updated from global requirements * Updated from global requirements * Updated from global requirements 1.29.0 ------ * Maintain shared memory after fork in Python >=3.7 * Updated from global requirements * Revert "Permit aborting loopingcall while sleeping" 1.28.1 ------ 1.28.0 ------ * Remove -U from pip install * Avoid tox\_install.sh for constraints support * Updated from global requirements * Remove setting of version/release from releasenotes * Updated from global requirements 1.27.0 ------ * Updated from global requirements * change periodic\_task to catch all exceptions including BaseException * Fix bandit scan and make it voting * Imported Translations from Zanata 
1.26.0 ------ * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Imported Translations from Zanata * Updated from global requirements * Updated from global requirements * Update reno for stable/pike * Updated from global requirements 1.25.0 ------ * Update URLs in documents according to document migration 1.24.1 ------ * rearrange existing documentation to fit the new standard layout * switch from oslosphinx to openstackdocstheme 1.24.0 ------ * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Permit aborting loopingcall while sleeping * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements 1.23.0 ------ * Add min\_interval to BackOffLoopingCall 1.22.0 ------ * Updated from global requirements * Updated from global requirements 1.21.0 ------ * Remove log translations * Use Sphinx 1.5 warning-is-error * Fix some reST field lists in docstrings * Updated from global requirements 1.20.0 ------ * Updated from global requirements * [Fix gate]Update test requirement * Updated from global requirements * Updated from global requirements * Fix race condition with fast threads * pbr.version.VersionInfo needs package name (oslo.xyz and not oslo\_xyz) * Remove duplicated register\_opts call * Update reno for stable/ocata * Remove references to Python 3.4 1.19.0 ------ * Add FixedIntervalWithTimeoutLoopingCall * Add Constraints support * Show team and repo badges on README 1.18.0 ------ * Updated from global requirements * Updated from global requirements * Updated from global requirements * Imported Translations from Zanata * Update .coveragerc after the removal of respective directory * Delete python bytecode file 1.17.0 ------ * Changed the home-page link * Updated from global requirements * Replace 'MagicMock' with 'Mock' * Enable release notes translation * Updated from global requirements * Updated from global requirements * Updated from global requirements 1.16.0 ------ * Updated from global requirements * Stay alive on double SIGHUP 1.15.0 ------ * Updated from global requirements 1.14.0 ------ * Updated from global requirements * Fix parameters of assertEqual are misplaced 1.13.0 ------ * Updated from global requirements * Updated from global requirements * Updated from global requirements * Add reno for release notes management * Updated from global requirements 1.12.0 ------ * Imported Translations from Zanata * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements 1.11.0 ------ * Trivial: ignore openstack/common in flake8 exclude list 1.10.0 ------ * [Trivial] Remove executable privilege of doc/source/conf.py 1.9.0 ----- * Updated from global requirements * Offer mutate\_config\_files * Make \_spawn\_service more flexible * Remove direct dependency on babel * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Fix argument type for \_sd\_notify() on python3 * Use a timeutils.StopWatch for cancel timing * Add ability to cancel Threads and ThreadGroups * Exception: message need '\_' function * Fix Heartbeats stop when time is changed * Updated from global requirements 1.7.0 ----- * Updated from 
global requirements * Correct some help text * Fix typo in help text * wsgi: decrease the default number of greenthreads in pool * Updated from global requirements 1.6.0 ----- * Updated from global requirements * Allow the backdoor to serve from a local unix domain socket * Updated from global requirements 1.5.0 ----- * Use requests in TestWSGIServerWithSSL instead of raw socket client 1.4.0 ----- * Updated from global requirements * Updated from global requirements * Fix misspelling and rewrite sentence * Add a more useful/detailed frame dumping function * Updated from global requirements * Update translation setup * Fix race condition on handling signals * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Fix artificial service.wait() 1.3.0 ----- * Graceful shutdown added to ServiceLauncher * Fix test execution on CentOS 7 * Updated from global requirements * Fix some inconsistency in docstrings * Refactoring of tests/eventlet\_service.py * Updated from global requirements * Remove argument ServiceLauncher.wait() method * fix a couple of assert issues * Run sslutils and wsgi tests for python3 * Updated from global requirements 1.2.0 ----- * Updated from global requirements * Fix a race condition in signal handlers * Enable py3 mock.patch of RuntimeError * Delete python bytecode before every test run * Trival: Remove 'MANIFEST.in' 1.1.0 ----- * Avoid warning when time taken is close to zero * Update the \_i18n.py file and fix the domain value * Add Bandit to tox for security static analysis * Code refactoring of ThreadGroup::stop\_timers() 1.0.0 ----- * Updated from global requirements * Updated from global requirements * Add functionality for creating Unix domain WSGI servers * Use reflection.get\_class\_name() from oslo.utils * Remove Python 2.6 classifier * Remove openstack-common.conf * cleanup tox.ini * Change "started child" messages to DEBUG * Support for SSL protocol and cipher controls 0.13.0 ------ * Default value of graceful\_shutdown\_timeout is set to 60sec * Updated from global requirements * Logger name argument was added into wsgi.Server constructor * Avoid the dual-naming confusion * Forbid launching services with 0 or negative number of workers 0.12.0 ------ * Document graceful\_shutdown\_timeout config option * Remove py26 env from test list * Added config option graceful\_shutdown\_timeout * Updated from global requirements * Add docstring for LoopingCallBase.\_start() * Updated from global requirements 0.11.0 ------ * Updated from global requirements * Add doc8 to py27 tox env and fix raised issues * Document termination of children on SIGHUP * Updated from global requirements * Updated from global requirements 0.10.0 ------ * RetryDecorator should not log warnings/errors for expected exceptions * Termination children on SIGHUP added * Fix coverage configuration and execution * Add register\_opts function to sslutils * Move the common thread manipulating routine to a shared routine * Update log string to correctly denote what it waits on * Avoid removing entries for timers that didn't stop * Cleanup thread on thread done callback * Move 'history' -> release notes section * Add unit tests for sslutils * Expand README and clean up intro to sphinx docs * Add shields.io version/downloads links/badges into README.rst * add auto-generated docs for config options * Move backoff looping call from IPA to oslo.service * Change ignore-errors to ignore\_errors * Fix the home-page value in 
setup.cfg * WSGI module was corrected * Updated from global requirements * ThreadGroup's stop didn't recognise the current thread correctly * doing monkey\_patch for unittest 0.9.0 ----- * Handling corner cases in dynamic looping call * Change DEBUG log in loopingcall to TRACE level log * Updated from global requirements 0.8.0 ----- * Added wsgi functionality 0.7.0 ----- * Updated from global requirements * Update "Signal handling" section of usage docs * Use oslo\_utils reflection to get 'f' callable name * Updated from global requirements * Prefix the 'safe\_wrapper' function to be '\_safe\_wrapper' * Setup translations * Check that sighup is supported before accessing signal.SIGHUP * Use contextlib.closing instead of try ... finally: sock.close * Avoid using the global lockutils semaphore collection * Updated from global requirements 0.6.0 ----- * Added newline at end of file * Added class SignalHandler * Updated from global requirements * Activate pep8 check that \_ is imported * Denote what happens when no exceptions are passed in * Allow LoopingCall to continue on exception in callee 0.5.0 ----- * Updated from global requirements * Updated from global requirements * Updated from global requirements * Add oslo\_debug\_helper to tox.ini * Add usage documentation for oslo\_service.service module 0.4.0 ----- * Updated from global requirements * save docstring, name etc using six.wraps * Move backdoor-related tests from test\_service * Add mock to test\_requirements * Remove usage of mox in test\_eventlet\_backdoor 0.3.0 ----- * Copy RetryDecorator from oslo.vmware * Increase test coverage of systemd * Ensure we set the event and wait on the timer in the test * Make it easier to use the eventlet backdoor locally * Track created thread and disallow more than one start being active 0.2.0 ----- * Documentation on the use of the oslo-config-generator * Add greenlet to requirements * Add tox target to find missing requirements * Enforce H405 check * Enforce H301 check * Return timer after adding it to internal list * Updated from global requirements * Have all the looping calls share a common run loop * Move service abstract base class check to launch\_service methods * Fix a typo in a comment * Updated from global requirements * Use a signal name->sigval and sigval->signal name mapping 0.1.0 ----- * Test for instantaneous shutdown fixed * Graceful shutdown WSGI/RPC server * Use monotonic.monotonic and stopwatches instead of time.time * Updated from global requirements * Eventlet service fixed * Add documentation for the service module * Improve test coverage for loopingcall module * Add oslo.service documentation * Remove usage of global CONF * Make logging option values configurable * Introduce abstract base class for services * Add entrypoints for option discovery * Updated from global requirements * Move the option definitions into a private file * Fix unit tests * Fix pep8 * exported from oslo-incubator by graduate.sh * Clean up logging to conform to guidelines * Port service to Python 3 * Test for shutting down eventlet server on signal * service child process normal SIGTERM exit * Revert "Revert "Revert "Optimization of waiting subprocesses in ProcessLauncher""" * Revert "Revert "Optimization of waiting subprocesses in ProcessLauncher"" * Revert "Optimization of waiting subprocesses in ProcessLauncher" * ProcessLauncher: reload config file in parent process on SIGHUP * Add check to test\_\_signal\_handlers\_set * Store ProcessLauncher signal handlers on class level * Remove unused 
validate\_ssl\_version * Update tests for optional sslv3 * Fixed ssl.PROTOCOL\_SSLv3 not supported by Python 2.7.9 * Optimization of waiting subprocesses in ProcessLauncher * Switch from oslo.config to oslo\_config * Change oslo.config to oslo\_config * Remove oslo.log code and clean up versionutils API * Replace mox by mox3 * Allow overriding name for periodic tasks * Separate add\_periodic\_task from the metaclass \_\_init\_\_ * Upgrade to hacking 0.10 * Remove unnecessary import of eventlet * Added graceful argument on Service.stop method * Remove extra white space in log message * Prefer delayed %r formatting over explicit repr use * ServiceRestartTest: make it more resilient * threadgroup: don't log GreenletExit * add list\_opts to all modules with configuration options * Remove code that moved to oslo.i18n * Remove graduated test and fixtures libraries * rpc, notifier: remove deprecated modules * Let oslotest manage the six.move setting for mox * Remove usage of readlines() * Allow test\_service to run in isolation * Changes calcuation of variable delay * Use timestamp in loopingcall * Remove unnecessary setUp function * Log the function name of looping call * pep8: fixed multiple violations * Make periodic tasks run on regular spacing interval * Use moxstubout and mockpatch from oslotest * Implement stop method in ProcessLauncher * Fix parenthesis typo misunderstanding in periodic\_task * Fix docstring indentation in systemd * Remove redundant default=None for config options * Make unspecified periodic spaced tasks run on default interval * Make stop\_timers() method public * Remove deprecated LoopingCall * Fixed several typos * Add graceful stop function to ThreadGroup.stop * Use oslotest instead of common test module * Remove duplicated "caught" message * Move notification point to a better place * Remove rendundant parentheses of cfg help strings * Adds test condition in test\_periodic * Fixed spelling error - occured to occurred * Add missing \_LI for LOG.info in service module * notify calling process we are ready to serve * Reap child processes gracefully if greenlet thread gets killed * Improve help strings for sslutils module * Remove unnecessary usage of noqa * Removes use of timeutils.set\_time\_override * Update oslo log messages with translation domains * Refactor unnecessary arithmetic ops in periodic\_task * Refactor if logic in periodic\_task * Use timestamp in periodic tasks * Add basic Python 3 tests * Clear time override in test\_periodic * Don't share periodic\_task instance data in a class attr * Revert "service: replace eventlet event by threading" * Simplify launch method * Simple typo correction * Cleanup unused log related code * Utilizes assertIsNone and assertIsNotNone * Fix filter() usage due to python 3 compability * Use hacking import\_exceptions for gettextutils.\_ * threadgroup: use threading rather than greenthread * disable SIGHUP restart behavior in foreground * service: replace eventlet event by threading * Allow configurable ProcessLauncher liveness check * Make wait & stop methods work on all threads * Typos fix in db and periodic\_task module * Remove vim header * os.\_exit in \_start\_child may cause unexpected exception * Adjust import order according to PEP8 imports rule * Add a link method to Thread * Use multiprocessing.Event to ensure services have started * Apply six for metaclass * Removed calls to locals() * Move comment in service.py to correct location * Fixes issue with SUGHUP in services on Windows * Replace using tests.utils part2 
* Bump hacking to 0.7.0 * Replace using tests.utils with openstack.common.test * Refactors boolean
returns * Add service restart function in oslo-incubator * Fix stylistic problems with help text
* Enable H302 hacking check * Convert kombu SSL version string into integer * Allow launchers to be
stopped multiple times * Ignore any exceptions from rpc.cleanup() * Add graceful service shutdown
support to Launcher * Improve usability when backdoor\_port is nonzero * Enable hacking H404 test
* Enable hacking H402 test * Enable hacking H401 test * Fixes import order nits * Add
DynamicLoopCall timers to ThreadGroups * Pass backdoor\_port to services being launched * Improve
python3 compatibility * Use print\_function \_\_future\_\_ import * Improve Python 3.x
compatibility * Import nova's looping call * Copy recent changes in periodic tasks from nova * Fix
IBM copyright strings * Removes unused imports in the tests module * update OpenStack, LLC to
OpenStack Foundation * Add function for listing native threads to eventlet backdoor * Use
oslo-config-2013.1b3 * Support for SSL in wsgi.Service * Replace direct use of testtools
BaseTestCase * Use testtools as test base class * ThreadGroup remove unused name parameters
* Implement importutils.try\_import * Fix test cases in tests.unit.test\_service * Don't rely on
os.wait() blocking * Use Service thread group for WSGI request handling * Make project pyflakes
clean * Replace try: import with extras.try\_import * raise\_on\_error parameter shouldn't be
passed to task function * Account for tasks duration in LoopingCall delay * updating sphinx
documentation * Enable eventlet\_backdoor to return port * Use the ThreadGroup for the Launcher
* Change RPC cleanup ordering * threadgroup : greethread.cancel() should be kill() * Use spawn\_n
when not capturing return value * Make ThreadGroup derived from object to make mocking possible
* Don't log exceptions for GreenletExit and thread\_done * Log CONF from ProcessLauncher.wait, like
ServiceLauncher * Import order clean-up * Added a missing \`cfg\` import in service.py * Log config
on startup * Integrate eventlet backdoor * Add the rpc service and delete manager * Use pep8 v1.3.3
* Add threadgroup to manage timers and greenthreads * Add basic periodic task infrastructure * Add
multiprocess service launcher * Add signal handling to service launcher * Basic service launching
infrastructure * Move manager.py and service.py into common * Copy eventlet\_backdoor into common
from nova * Copy LoopingCall from nova for service.py * initial commit * Initial skeleton project

oslo.service-3.4.0/HACKING.rst:

oslo.service Style Commandments
===============================

Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/

oslo.service-3.4.0/LICENSE:

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or
product names of the Licensor, except as required for reasonable and customary use in describing
the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor
provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation,
any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of permissions under
this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including
negligence), contract, or otherwise, unless required by applicable law (such as deliberate and
grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for
damages, including any direct, indirect, special, incidental, or consequential damages of any
character arising as a result of this License or out of the use or inability to use the Work
(including but not limited to damages for loss of goodwill, work stoppage, computer failure or
malfunction, or any and all other commercial damages or losses), even if such Contributor has been
advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works
thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this License. However, in accepting
such obligations, You may act only on Your own behalf and on Your sole responsibility, not on
behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each
Contributor harmless for any liability incurred by, or claims asserted against, such Contributor
by reason of your accepting any such warranty or additional liability.

oslo.service-3.4.0/PKG-INFO:

Metadata-Version: 1.2
Name: oslo.service
Version: 3.4.0
Summary: oslo.service library
Home-page: https://docs.openstack.org/oslo.service/latest/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
License: UNKNOWN
Description: ======================== Team and repository tags ========================
  .. image:: https://governance.openstack.org/tc/badges/oslo.service.svg
  :target: https://governance.openstack.org/tc/reference/tags/index.html
  .. Change things from this point on
  ======================================================== oslo.service -- Library for running
  OpenStack services ========================================================
  .. image:: https://img.shields.io/pypi/v/oslo.service.svg
  :target: https://pypi.org/project/oslo.service/ :alt: Latest Version
  .. image:: https://img.shields.io/pypi/dm/oslo.service.svg
  :target: https://pypi.org/project/oslo.service/ :alt: Downloads
  oslo.service provides a framework for defining new long-running services using the patterns
  established by other OpenStack applications. It also includes utilities long-running
  applications might need for working with SSL or WSGI, performing periodic operations,
  interacting with systemd, etc.
  * Free software: Apache license
  * Documentation: https://docs.openstack.org/oslo.service/latest/
  * Source: https://opendev.org/openstack/oslo.service
  * Bugs: https://bugs.launchpad.net/oslo.service
  * Release notes: https://docs.openstack.org/releasenotes/oslo.service/
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: Implementation :: CPython
Requires-Python: >=3.8
oslo.service-3.4.0/README.rst:

========================
Team and repository tags
========================

.. image:: https://governance.openstack.org/tc/badges/oslo.service.svg
    :target: https://governance.openstack.org/tc/reference/tags/index.html

.. Change things from this point on

========================================================
oslo.service -- Library for running OpenStack services
========================================================

.. image:: https://img.shields.io/pypi/v/oslo.service.svg
    :target: https://pypi.org/project/oslo.service/
    :alt: Latest Version

.. image:: https://img.shields.io/pypi/dm/oslo.service.svg
    :target: https://pypi.org/project/oslo.service/
    :alt: Downloads

oslo.service provides a framework for defining new long-running services using
the patterns established by other OpenStack applications. It also includes
utilities long-running applications might need for working with SSL or WSGI,
performing periodic operations, interacting with systemd, etc.

* Free software: Apache license
* Documentation: https://docs.openstack.org/oslo.service/latest/
* Source: https://opendev.org/openstack/oslo.service
* Bugs: https://bugs.launchpad.net/oslo.service
* Release notes: https://docs.openstack.org/releasenotes/oslo.service/
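The long-running-service pattern the library manages looks roughly like the
following sketch; the ``HelloService`` class and its heartbeat callback are
purely illustrative and are not shipped with the package.

.. code-block:: python

    from oslo_config import cfg
    from oslo_service import service


    class HelloService(service.Service):
        """Illustrative long-running service."""

        def start(self):
            super().start()
            # Every Service carries a ThreadGroup (self.tg); run a small
            # heartbeat every 10 seconds until the service is stopped.
            self.tg.add_timer(10, lambda: print('still alive'))


    if __name__ == '__main__':
        # launch() picks ServiceLauncher or ProcessLauncher based on `workers`.
        launcher = service.launch(cfg.CONF, HelloService(), workers=1)
        launcher.wait()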
oslo.service-3.4.0/doc/requirements.txt:

# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
openstackdocstheme>=2.2.0 # Apache-2.0
sphinx>=2.0.0,!=2.1.0 # BSD
reno>=3.1.0 # Apache-2.0

oslo.service-3.4.0/doc/source/conf.py:

# -*- coding: utf-8 -*-
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'openstackdocstheme',
    'oslo_config.sphinxext',
]

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/oslo.service'
openstackdocs_bug_project = 'oslo.service'
openstackdocs_bug_tag = ''

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'oslo.service'
copyright = '2014, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'openstackdocs'

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', '%s.tex' % project,
     '%s Documentation' % project,
     'OpenStack Foundation', 'manual'),
]

oslo.service-3.4.0/doc/source/configuration/index.rst:

=====================
Configuration Options
=====================

oslo.service uses oslo.config to define and manage configuration options to
allow the deployer to control how an application uses this library.

periodic_task
=============

These options apply to services using the periodic task features of
oslo.service.

.. show-options:: oslo.service.periodic_task

service
=======

These options apply to services using the basic service framework.

.. show-options:: oslo.service.service

sslutils
========

These options apply to services using the SSL utilities module.

.. show-options:: oslo.service.sslutils

wsgi
====

These options apply to services using the WSGI (Web Service Gateway Interface)
module.

.. show-options:: oslo.service.wsgi
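A short sketch of how a consuming application typically registers and then
consumes the ``[ssl]`` options shown above, using the ``register_opts``,
``is_enabled`` and ``wrap`` helpers referenced elsewhere in these docs; the
socket setup below is illustrative only.

.. code-block:: python

    import socket

    from oslo_config import cfg
    from oslo_service import sslutils

    CONF = cfg.CONF
    sslutils.register_opts(CONF)   # registers the [ssl] option group

    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('127.0.0.1', 0))
    server_sock.listen(1)

    # Only wrap the listening socket when SSL has been configured.
    if sslutils.is_enabled(CONF):
        server_sock = sslutils.wrap(CONF, server_sock)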
oslo.service-3.4.0/doc/source/contributor/index.rst:

============
Contributing
============

.. include:: ../../../CONTRIBUTING.rst

oslo.service-3.4.0/doc/source/index.rst:

======================================================
oslo.service -- Library for running OpenStack services
======================================================

oslo.service provides a framework for defining new long-running services using
the patterns established by other OpenStack applications. It also includes
utilities long-running applications might need for working with SSL or WSGI,
performing periodic operations, interacting with systemd, etc.

Contents
========

.. toctree::
   :maxdepth: 2

   install/index
   user/index
   configuration/index
   reference/index
   contributor/index

Release Notes
=============

Read also the `oslo.service Release Notes
<https://docs.openstack.org/releasenotes/oslo.service/>`_.

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

oslo.service-3.4.0/doc/source/install/index.rst:

============
Installation
============

At the command line::

    $ pip install oslo.service

oslo.service-3.4.0/doc/source/reference/eventlet_backdoor.rst:

=================
eventlet_backdoor
=================

.. automodule:: oslo_service.eventlet_backdoor
   :members:
   :undoc-members:
   :show-inheritance:

oslo.service-3.4.0/doc/source/reference/fixture.rst:

=========
fixture
=========

.. automodule:: oslo_service.fixture
   :members:
   :undoc-members:
   :show-inheritance:

oslo.service-3.4.0/doc/source/reference/index.rst:

=============
API Reference
=============

.. toctree::
   :maxdepth: 1

   eventlet_backdoor
   fixture
   loopingcall
   periodic_task
   service
   sslutils
   systemd
   threadgroup

oslo.service-3.4.0/doc/source/reference/loopingcall.rst:

=============
loopingcall
=============

.. automodule:: oslo_service.loopingcall
   :members:
   :undoc-members:
   :show-inheritance:
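A minimal sketch of driving the fixed-interval looping call this module
exports; the heartbeat callback is illustrative only.

.. code-block:: python

    from oslo_service import loopingcall


    def _heartbeat():
        print('tick')


    timer = loopingcall.FixedIntervalLoopingCall(_heartbeat)
    timer.start(interval=2.0)
    # ... later, stop the loop and wait for it to finish.
    timer.stop()
    timer.wait()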
oslo.service-3.4.0/doc/source/reference/periodic_task.rst:

==============
periodic_task
==============

.. automodule:: oslo_service.periodic_task
   :members:
   :undoc-members:
   :show-inheritance:

oslo.service-3.4.0/doc/source/reference/service.rst:

=========
service
=========

.. automodule:: oslo_service.service
   :members:
   :show-inheritance:

oslo.service-3.4.0/doc/source/reference/sslutils.rst:

==========
sslutils
==========

.. automodule:: oslo_service.sslutils
   :members:
   :undoc-members:
   :show-inheritance:

oslo.service-3.4.0/doc/source/reference/systemd.rst:

=========
systemd
=========

.. automodule:: oslo_service.systemd
   :members:
   :undoc-members:
   :show-inheritance:

oslo.service-3.4.0/doc/source/reference/threadgroup.rst:

=============
threadgroup
=============

.. automodule:: oslo_service.threadgroup
   :members:
   :undoc-members:
   :show-inheritance:

oslo.service-3.4.0/doc/source/user/history.rst:

.. include:: ../../../ChangeLog

oslo.service-3.4.0/doc/source/user/index.rst:

==================
Using oslo.service
==================

.. toctree::
   :maxdepth: 2

   usage

.. history contains a lot of sections, toctree with maxdepth 1 is used.

.. toctree::
   :maxdepth: 1

   history

oslo.service-3.4.0/doc/source/user/usage.rst:

===== Usage ===== To use oslo.service in a project:: import oslo_service
Migrating to oslo.service ========================= The ``oslo.service`` library no longer assumes
a global configuration object is available. Instead the following functions and classes have been
changed to expect the consuming application to pass in an ``oslo.config`` configuration object:
* :func:`~oslo_service.eventlet_backdoor.initialize_if_enabled`
* :py:class:`oslo_service.periodic_task.PeriodicTasks`
* :func:`~oslo_service.service.launch`
* :py:class:`oslo_service.service.ProcessLauncher`
* :py:class:`oslo_service.service.ServiceLauncher`
* :func:`~oslo_service.sslutils.is_enabled`
* :func:`~oslo_service.sslutils.wrap`
When using service from oslo-incubator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: from
foo.openstack.common import service launcher = service.launch(service, workers=2)
When using oslo.service ~~~~~~~~~~~~~~~~~~~~~~~ :: from oslo_config import cfg from oslo_service
import service CONF = cfg.CONF launcher = service.launch(CONF, service, workers=2)
Using oslo.service with oslo-config-generator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The
``oslo.service`` provides several entry points to generate configuration files.
* :func:`oslo.service.service ` The options from the :mod:`~oslo_service.service` and
:mod:`~oslo_service.eventlet_backdoor` modules for the ``[DEFAULT]`` section.
* :func:`oslo.service.periodic_task ` The options from the :mod:`~oslo_service.periodic_task`
module for the ``[DEFAULT]`` section.
* :func:`oslo.service.sslutils ` The options from the :mod:`~oslo_service.sslutils` module for the :oslo.config:group:`ssl` section. * :func:`oslo.service.wsgi ` The options from the :mod:`~oslo_service.wsgi` module for the ``[DEFAULT]`` section. .. todo:: The ref page for oslo_service.wsgi doesn't seem to be rendering, so the above doesn't link. .. todo:: Attempting to use :oslo.config:group:`DEFAULT` above only links to the first DEFAULT section in the configuration/index doc because the #DEFAULT anchor is duplicated for each of the show-options sections. **ATTENTION:** The library doesn't provide an oslo.service entry point. .. code-block:: bash $ oslo-config-generator --namespace oslo.service.service \ --namespace oslo.service.periodic_task \ --namespace oslo.service.sslutils Launching and controlling services ================================== The :mod:`oslo_service.service` module provides tools for launching OpenStack services and controlling their lifecycles. A service is an instance of any class that subclasses :py:class:`oslo_service.service.ServiceBase`. :py:class:`ServiceBase ` is an abstract class that defines an interface every service should implement. :py:class:`oslo_service.service.Service` can serve as a base for constructing new services. Launchers ~~~~~~~~~ The :mod:`oslo_service.service` module provides two launchers for running services: * :py:class:`oslo_service.service.ServiceLauncher` - used for running one or more service in a parent process. * :py:class:`oslo_service.service.ProcessLauncher` - forks a given number of workers in which service(s) are then started. It is possible to initialize whatever launcher is needed and then launch a service using it. .. code-block:: python from oslo_config import cfg from oslo_service import service CONF = cfg.CONF service_launcher = service.ServiceLauncher(CONF) service_launcher.launch_service(service.Service()) process_launcher = service.ProcessLauncher(CONF, wait_interval=1.0) process_launcher.launch_service(service.Service(), workers=2) Or one can simply call :func:`oslo_service.service.launch` which will automatically pick an appropriate launcher based on a number of workers that are passed to it (:py:class:`~oslo_service.service.ServiceLauncher` if ``workers=1`` or ``None`` and :py:class:`~oslo_service.service.ProcessLauncher` in other case). .. code-block:: python from oslo_config import cfg from oslo_service import service CONF = cfg.CONF launcher = service.launch(CONF, service.Service(), workers=3) .. note:: It is highly recommended to use no more than one instance of the :py:class:`~oslo_service.service.ServiceLauncher` or :py:class:`~oslo_service.service.ProcessLauncher` class per process. Signal handling ~~~~~~~~~~~~~~~ :mod:`oslo_service.service` provides handlers for such signals as ``SIGTERM``, ``SIGINT``, and ``SIGHUP``. ``SIGTERM`` is used for graceful termination of services. This can allow a server to wait for all clients to close connections while rejecting new incoming requests. Config option :oslo.config:option:`graceful_shutdown_timeout` specifies how many seconds after receiving a ``SIGTERM`` signal a server should continue to run, handling the existing connections. Setting :oslo.config:option:`graceful_shutdown_timeout` to zero means that the server will wait indefinitely until all remaining requests have been fully served. To force instantaneous termination the ``SIGINT`` signal must be sent. The behavior on receiving ``SIGHUP`` varies based on how the service is configured. 
.. note:: ``SIGHUP`` is not supported on Windows. .. note:: Config option :oslo.config:option:`graceful_shutdown_timeout` is not supported on Windows. Below is an example of a service with a reset method that allows reloading logging options by sending a ``SIGHUP``. .. code-block:: python from oslo_config import cfg from oslo_log import log as logging from oslo_service import service CONF = cfg.CONF LOG = logging.getLogger(__name__) class FooService(service.ServiceBase): def start(self): pass def wait(self): pass def stop(self): pass def reset(self): logging.setup(cfg.CONF, 'foo') Profiling ~~~~~~~~~ Processes spawned through :mod:`oslo_service.service` can be profiled (function call trace) through the :mod:`~oslo_service.eventlet_backdoor` module. The service must be configured with the :oslo.config:option:`backdoor_port` option to enable its workers to listen on TCP ports. The user can then send the ``prof()`` command to capture the worker process's function call trace. 1) To start profiling, send the ``prof()`` command to the process's listening port. 2) To stop profiling and save the pstat call trace to a file, send the ``prof()`` command with a file basename as an argument (``prof(basename)``) to the worker process's listening port. A stats file (in pstat format) will be generated in the temp directory, named with the user-provided basename and a ``.prof`` suffix. For example, to profile a neutron server process (which is listening on port 8002 configured through the :oslo.config:option:`backdoor_port` option): .. code-block:: bash $ echo "prof()" | nc localhost 8002 $ neutron net-create n1; neutron port-create --name p1 n1; $ neutron port-delete p1; neutron port-delete p1 $ echo "prof('neutron')" | nc localhost 8002 This will generate a stats file in ``/tmp/neutron.prof``. Stats can be printed from the trace file as follows: .. code-block:: python import pstats stats = pstats.Stats('/tmp/neutron.prof') stats.print_stats() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.272123 oslo.service-3.4.0/oslo.service.egg-info/0000775000175000017500000000000000000000000020261 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611940.0 oslo.service-3.4.0/oslo.service.egg-info/PKG-INFO0000664000175000017500000000467400000000000021365 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: oslo.service Version: 3.4.0 Summary: oslo.service library Home-page: https://docs.openstack.org/oslo.service/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.service.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ======================================================== oslo.service -- Library for running OpenStack services ======================================================== .. 
image:: https://img.shields.io/pypi/v/oslo.service.svg :target: https://pypi.org/project/oslo.service/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.service.svg :target: https://pypi.org/project/oslo.service/ :alt: Downloads oslo.service provides a framework for defining new long-running services using the patterns established by other OpenStack applications. It also includes utilities long-running applications might need for working with SSL or WSGI, performing periodic operations, interacting with systemd, etc. * Free software: Apache license * Documentation: https://docs.openstack.org/oslo.service/latest/ * Source: https://opendev.org/openstack/oslo.service * Bugs: https://bugs.launchpad.net/oslo.service * Release notes: https://docs.openstack.org/releasenotes/oslo.service/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611940.0 oslo.service-3.4.0/oslo.service.egg-info/SOURCES.txt0000664000175000017500000000665500000000000022161 0ustar00zuulzuul00000000000000.coveragerc .mailmap .pre-commit-config.yaml .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/configuration/index.rst doc/source/contributor/index.rst doc/source/install/index.rst doc/source/reference/eventlet_backdoor.rst doc/source/reference/fixture.rst doc/source/reference/index.rst doc/source/reference/loopingcall.rst doc/source/reference/periodic_task.rst doc/source/reference/service.rst doc/source/reference/sslutils.rst doc/source/reference/systemd.rst doc/source/reference/threadgroup.rst doc/source/user/history.rst doc/source/user/index.rst doc/source/user/usage.rst oslo.service.egg-info/PKG-INFO oslo.service.egg-info/SOURCES.txt oslo.service.egg-info/dependency_links.txt oslo.service.egg-info/entry_points.txt oslo.service.egg-info/not-zip-safe oslo.service.egg-info/pbr.json oslo.service.egg-info/requires.txt oslo.service.egg-info/top_level.txt oslo_service/__init__.py oslo_service/_i18n.py oslo_service/_options.py oslo_service/eventlet_backdoor.py oslo_service/fixture.py oslo_service/loopingcall.py oslo_service/periodic_task.py oslo_service/service.py oslo_service/sslutils.py oslo_service/systemd.py oslo_service/threadgroup.py oslo_service/version.py oslo_service/wsgi.py oslo_service/locale/en_GB/LC_MESSAGES/oslo_service.po oslo_service/tests/__init__.py oslo_service/tests/base.py oslo_service/tests/eventlet_service.py oslo_service/tests/test_eventlet_backdoor.py oslo_service/tests/test_fixture.py oslo_service/tests/test_loopingcall.py oslo_service/tests/test_periodic.py oslo_service/tests/test_service.py oslo_service/tests/test_sslutils.py 
oslo_service/tests/test_systemd.py oslo_service/tests/test_threadgroup.py oslo_service/tests/test_wsgi.py oslo_service/tests/ssl_cert/ca.crt oslo_service/tests/ssl_cert/ca.key oslo_service/tests/ssl_cert/certificate.crt oslo_service/tests/ssl_cert/privatekey.key releasenotes/notes/add-timeout-looping-call-5cc396b75597c3c2.yaml releasenotes/notes/add-wsgi_server_debug-opt-70d818b5b78bfc7c.yaml releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml releasenotes/notes/drop-python27-support-1cfdf65193a03f3a.yaml releasenotes/notes/fix-find-object-in-backdoor-487bf78c4c502594.yaml releasenotes/notes/native-threads-on-child-7150690c7caa1013.yaml releasenotes/notes/profile-worker-5d3fd0f0251d62b8.yaml releasenotes/notes/service-children-SIGHUP-15d0cf6d2a1bdbf9.yaml releasenotes/notes/support-pid-in-eventlet-backdoor-socket-path-1863eaad1dd08556.yaml releasenotes/notes/threadgroup-cancel-bd89d72f383a3d9b.yaml releasenotes/notes/timer-args-f578c8f9d08b217d.yaml releasenotes/notes/timer-stop_on_exception-9f21d7c4d6d1b0d9.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611940.0 oslo.service-3.4.0/oslo.service.egg-info/dependency_links.txt0000664000175000017500000000000100000000000024327 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611940.0 oslo.service-3.4.0/oslo.service.egg-info/entry_points.txt0000664000175000017500000000036400000000000023562 0ustar00zuulzuul00000000000000[oslo.config.opts] oslo.service.periodic_task = oslo_service.periodic_task:list_opts oslo.service.service = oslo_service.service:list_opts oslo.service.sslutils = oslo_service.sslutils:list_opts oslo.service.wsgi = oslo_service.wsgi:list_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611940.0 oslo.service-3.4.0/oslo.service.egg-info/not-zip-safe0000664000175000017500000000000100000000000022507 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611940.0 oslo.service-3.4.0/oslo.service.egg-info/pbr.json0000664000175000017500000000005600000000000021740 0ustar00zuulzuul00000000000000{"git_version": "f425dd1", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611940.0 oslo.service-3.4.0/oslo.service.egg-info/requires.txt0000664000175000017500000000033700000000000022664 0ustar00zuulzuul00000000000000Paste>=2.0.2 PasteDeploy>=1.5.0 Routes>=2.3.1 WebOb>=1.7.1 Yappi>=1.0 debtcollector>=1.2.0 eventlet>=0.25.2 greenlet>=0.4.15 oslo.concurrency>=3.25.0 oslo.config>=5.1.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.utils>=3.40.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1708611940.0 oslo.service-3.4.0/oslo.service.egg-info/top_level.txt0000664000175000017500000000001500000000000023007 0ustar00zuulzuul00000000000000oslo_service ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.276123 oslo.service-3.4.0/oslo_service/0000775000175000017500000000000000000000000016650 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/__init__.py0000664000175000017500000000000000000000000020747 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/_i18n.py0000664000175000017500000000212300000000000020136 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html . """ import oslo_i18n DOMAIN = "oslo_service" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/_options.py0000664000175000017500000001325200000000000021057 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg help_for_backdoor_port = ( "Acceptable values are 0, <port>, and <start>:<end>, where 0 results " "in listening on a random tcp port number; <port> results in listening " "on the specified port number (and not enabling backdoor if that port " "is in use); and <start>:<end> results in listening on the smallest " "unused port number within the specified range of port numbers. The " "chosen port is displayed in the service's log file.") eventlet_backdoor_opts = [ cfg.StrOpt('backdoor_port', help="Enable eventlet backdoor. %s" % help_for_backdoor_port), cfg.StrOpt('backdoor_socket', help="Enable eventlet backdoor, using the provided path" " as a unix socket that can receive connections. This" " option is mutually exclusive with 'backdoor_port' in" " that only one should be provided. 
If both are provided" " then the existence of this option overrides the usage of" " that option. Inside the path {pid} will be replaced with" " the PID of the current process.") ] periodic_opts = [ cfg.BoolOpt('run_external_periodic_tasks', default=True, help='Some periodic tasks can be run in a separate process. ' 'Should we run them here?'), ] service_opts = [ cfg.BoolOpt('log_options', default=True, help='Enables or disables logging values of all registered ' 'options when starting a service (at DEBUG level).'), cfg.IntOpt('graceful_shutdown_timeout', default=60, help='Specify a timeout after which a gracefully shutdown ' 'server will exit. Zero value means endless wait.'), ] wsgi_opts = [ cfg.StrOpt('api_paste_config', default="api-paste.ini", help='File name for the paste.deploy config for api service'), cfg.StrOpt('wsgi_log_format', default='%(client_ip)s "%(request_line)s" status: ' '%(status_code)s len: %(body_length)s time:' ' %(wall_seconds).7f', help='A python format string that is used as the template to ' 'generate log lines. The following values can be' 'formatted into it: client_ip, date_time, request_line, ' 'status_code, body_length, wall_seconds.'), cfg.IntOpt('tcp_keepidle', default=600, help="Sets the value of TCP_KEEPIDLE in seconds for each " "server socket. Not supported on OS X."), cfg.IntOpt('wsgi_default_pool_size', default=100, help="Size of the pool of greenthreads used by wsgi"), cfg.IntOpt('max_header_line', default=16384, help="Maximum line size of message headers to be accepted. " "max_header_line may need to be increased when using " "large tokens (typically those generated when keystone " "is configured to use PKI tokens with big service " "catalogs)."), cfg.BoolOpt('wsgi_keep_alive', default=True, help="If False, closes the client socket connection " "explicitly."), cfg.IntOpt('client_socket_timeout', default=900, help="Timeout for client connections' socket operations. " "If an incoming connection is idle for this number of " "seconds it will be closed. A value of '0' means " "wait forever."), cfg.BoolOpt('wsgi_server_debug', default=False, help="True if the server should send exception tracebacks to " "the clients on 500 errors. If False, the server will " "respond with empty bodies."), ] ssl_opts = [ cfg.StrOpt('ca_file', help="CA certificate file to use to verify " "connecting clients.", deprecated_group='DEFAULT', deprecated_name='ssl_ca_file'), cfg.StrOpt('cert_file', help="Certificate file to use when starting " "the server securely.", deprecated_group='DEFAULT', deprecated_name='ssl_cert_file'), cfg.StrOpt('key_file', help="Private key file to use when starting " "the server securely.", deprecated_group='DEFAULT', deprecated_name='ssl_key_file'), cfg.StrOpt('version', help='SSL version to use (valid only if SSL enabled). ' 'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, ' 'TLSv1_1, and TLSv1_2 may be available on some ' 'distributions.' ), cfg.StrOpt('ciphers', help='Sets the list of available ciphers. value should be a ' 'string in the OpenSSL cipher list format.' ), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/eventlet_backdoor.py0000664000175000017500000002243000000000000022715 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation. # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import gc import logging import os import pprint import sys import tempfile import traceback import eventlet.backdoor import greenlet import yappi from eventlet.green import socket from oslo_service._i18n import _ from oslo_service import _options LOG = logging.getLogger(__name__) class EventletBackdoorConfigValueError(Exception): def __init__(self, port_range, help_msg, ex): msg = (_('Invalid backdoor_port configuration %(range)s: %(ex)s. ' '%(help)s') % {'range': port_range, 'ex': ex, 'help': help_msg}) super(EventletBackdoorConfigValueError, self).__init__(msg) self.port_range = port_range def _dont_use_this(): print("Don't use this, just disconnect instead") def _dump_frame(f, frame_chapter): co = f.f_code print(" %s Frame: %s" % (frame_chapter, co.co_name)) print(" File: %s" % (co.co_filename)) print(" Captured at line number: %s" % (f.f_lineno)) co_locals = set(co.co_varnames) if len(co_locals): not_set = co_locals.copy() set_locals = {} for var_name in f.f_locals.keys(): if var_name in co_locals: set_locals[var_name] = f.f_locals[var_name] not_set.discard(var_name) if set_locals: print(" %s set local variables:" % (len(set_locals))) for var_name in sorted(set_locals.keys()): print(" %s => %r" % (var_name, f.f_locals[var_name])) else: print(" 0 set local variables.") if not_set: print(" %s not set local variables:" % (len(not_set))) for var_name in sorted(not_set): print(" %s" % (var_name)) else: print(" 0 not set local variables.") else: print(" 0 Local variables.") def _detailed_dump_frames(f, thread_index): i = 0 while f is not None: _dump_frame(f, "%s.%s" % (thread_index, i + 1)) f = f.f_back i += 1 def _find_objects(t): return [o for o in gc.get_objects() if hasattr(o, "__class__") and isinstance(o, t)] def _capture_profile(fname=''): if not fname: yappi.set_clock_type('cpu') # We need to set context to greenlet to profile greenlets # https://bitbucket.org/sumerc/yappi/pull-requests/3 yappi.set_context_id_callback( lambda: id(greenlet.getcurrent())) yappi.set_context_name_callback( lambda: greenlet.getcurrent().__class__.__name__) yappi.start() else: yappi.stop() stats = yappi.get_func_stats() # User should provide filename. This file with a suffix .prof # will be created in temp directory. 
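# For example, calling prof('neutron') from the backdoor shell produces
# /tmp/neutron.prof on hosts where tempfile.gettempdir() returns /tmp.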
try: stats_file = os.path.join(tempfile.gettempdir(), fname + '.prof') stats.save(stats_file, "pstat") except Exception as e: print("Error while saving the trace stats ", str(e)) finally: yappi.clear_stats() def _print_greenthreads(simple=True): for i, gt in enumerate(_find_objects(greenlet.greenlet)): print(i, gt) if simple: traceback.print_stack(gt.gr_frame) else: _detailed_dump_frames(gt.gr_frame, i) print() def _print_nativethreads(): for threadId, stack in sys._current_frames().items(): print(threadId) traceback.print_stack(stack) print() def _parse_port_range(port_range): if ':' not in port_range: start, end = port_range, port_range else: start, end = port_range.split(':', 1) try: start, end = int(start), int(end) if end < start: raise ValueError return start, end except ValueError as ex: raise EventletBackdoorConfigValueError( port_range, ex, _options.help_for_backdoor_port) def _listen_func(host, port): # eventlet is setting SO_REUSEPORT by default from v0.20. # But we can configure it by passing reuse_port argument # from v0.22 try: return eventlet.listen((host, port), reuse_port=False) except TypeError: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind((host, port)) sock.listen(50) return sock def _listen(host, start_port, end_port): try_port = start_port while True: try: return _listen_func(host, try_port) except socket.error as exc: if (exc.errno != errno.EADDRINUSE or try_port >= end_port): raise try_port += 1 def _try_open_unix_domain_socket(socket_path): try: return eventlet.listen(socket_path, socket.AF_UNIX) except socket.error as e: if e.errno != errno.EADDRINUSE: # NOTE(harlowja): Some other non-address in use error # occurred, since we aren't handling those, re-raise # and give up... raise else: # Attempt to remove the file before opening it again. try: os.unlink(socket_path) except OSError as e: if e.errno != errno.ENOENT: # NOTE(harlowja): File existed, but we couldn't # delete it, give up... raise return eventlet.listen(socket_path, socket.AF_UNIX) def _initialize_if_enabled(conf): conf.register_opts(_options.eventlet_backdoor_opts) backdoor_locals = { 'exit': _dont_use_this, # So we don't exit the entire process 'quit': _dont_use_this, # So we don't exit the entire process 'fo': _find_objects, 'pgt': _print_greenthreads, 'pnt': _print_nativethreads, 'prof': _capture_profile, } if conf.backdoor_port is None and conf.backdoor_socket is None: return None if conf.backdoor_socket is None: start_port, end_port = _parse_port_range(str(conf.backdoor_port)) sock = _listen('localhost', start_port, end_port) # In the case of backdoor port being zero, a port number is assigned by # listen(). In any case, pull the port number out here. where_running = sock.getsockname()[1] else: try: backdoor_socket_path = conf.backdoor_socket.format(pid=os.getpid()) except (KeyError, IndexError, ValueError) as e: backdoor_socket_path = conf.backdoor_socket LOG.warning("Could not apply format string to eventlet " "backdoor socket path ({}) - continuing with " "unformatted path" "".format(e)) sock = _try_open_unix_domain_socket(backdoor_socket_path) where_running = backdoor_socket_path # NOTE(johannes): The standard sys.displayhook will print the value of # the last expression and set it to __builtin__._, which overwrites # the __builtin__._ that gettext sets. Let's switch to using pprint # since it won't interact poorly with gettext, and it's easier to # read the output too. 
def displayhook(val): if val is not None: pprint.pprint(val) sys.displayhook = displayhook LOG.info( 'Eventlet backdoor listening on %(where_running)s for' ' process %(pid)d', {'where_running': where_running, 'pid': os.getpid()} ) thread = eventlet.spawn(eventlet.backdoor.backdoor_server, sock, locals=backdoor_locals) return (where_running, thread) def initialize_if_enabled(conf): where_running_thread = _initialize_if_enabled(conf) if not where_running_thread: return None else: where_running, _thread = where_running_thread return where_running def _main(): import eventlet eventlet.monkey_patch(all=True) # Monkey patch the original current_thread to use the up-to-date _active # global variable. See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading import threading # noqa orig_threading.current_thread.__globals__['_active'] = threading._active from oslo_config import cfg logging.basicConfig(level=logging.DEBUG) conf = cfg.ConfigOpts() conf.register_cli_opts(_options.eventlet_backdoor_opts) conf(sys.argv[1:]) where_running_thread = _initialize_if_enabled(conf) if not where_running_thread: raise RuntimeError(_("Did not create backdoor at requested location")) else: _where_running, thread = where_running_thread thread.wait() if __name__ == '__main__': # simple CLI for testing _main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/fixture.py0000664000175000017500000000362400000000000020715 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures class SleepFixture(fixtures.Fixture): """A fixture for mocking the ``wait()`` within :doc:`loopingcall` events. This exists so test cases can exercise code that uses :doc:`loopingcall` without actually incurring wall clock time for sleeping. The mock for the ``wait()`` is accessible via the fixture's ``mock_wait`` attribute. .. note:: It is not recommended to assert specific arguments (i.e. timeout values) to the mock, as this relies on the internals of :doc:`loopingcall` not changing. .. todo:: Figure out a way to make an enforceable contract allowing verification of timeout values. Example usage:: from oslo.service import fixture ... class MyTest(...): def setUp(self): ... self.sleepfx = self.useFixture(fixture.SleepFixture()) ... def test_this(self): ... thing_that_hits_a_loopingcall() ... self.assertEqual(5, self.sleepfx.mock_wait.call_count) ... 
""" def _setUp(self): # Provide access to the mock so that calls to it can be asserted self.mock_wait = self.useFixture(fixtures.MockPatch( 'oslo_utils.eventletutils.EventletEvent.wait')).mock ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.272123 oslo.service-3.4.0/oslo_service/locale/0000775000175000017500000000000000000000000020107 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.272123 oslo.service-3.4.0/oslo_service/locale/en_GB/0000775000175000017500000000000000000000000021061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.276123 oslo.service-3.4.0/oslo_service/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000022646 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/locale/en_GB/LC_MESSAGES/oslo_service.po0000664000175000017500000001057600000000000025713 0ustar00zuulzuul00000000000000# Andi Chandler , 2016. #zanata # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2017. #zanata # Andi Chandler , 2022. #zanata msgid "" msgstr "" "Project-Id-Version: oslo.service VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-11 15:55+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2022-06-13 07:39+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "" "A dynamic backoff interval looping call can only run one function at a time" msgstr "" "A dynamic backoff interval looping call can only run one function at a time" msgid "A dynamic interval looping call can only run one function at a time" msgstr "A dynamic interval looping call can only run one function at a time" msgid "" "A dynamic interval looping call should supply either an interval or " "periodic_interval_max" msgstr "" "A dynamic interval looping call should supply either an interval or " "periodic_interval_max" msgid "A fixed interval looping call can only run one function at a time" msgstr "A fixed interval looping call can only run one function at a time" msgid "" "A fixed interval looping call with timeout checking and can only run one " "function at at a time" msgstr "" "A fixed interval looping call with timeout checking and can only run one " "function at at a time" msgid "A looping call can only run one function at a time" msgstr "A looping call can only run one function at a time" #, python-format msgid "Could not find config at %(path)s" msgstr "Could not find config at %(path)s" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Could not load paste app '%(name)s' from %(path)s" msgid "Did not create backdoor at requested location" msgstr "Did not create backdoor at requested location" msgid "Dynamic backoff interval looping call" msgstr "Dynamic backoff interval looping call" msgid "Dynamic interval looping call" msgstr "Dynamic interval looping call" msgid "Fixed interval looping call" msgstr "Fixed interval looping call" msgid "Fixed interval looping call with timeout checking." msgstr "Fixed interval looping call with timeout checking." 
#, python-format msgid "Invalid SSL version : %s" msgstr "Invalid SSL version : %s" #, python-format msgid "Invalid backdoor_port configuration %(range)s: %(ex)s. %(help)s" msgstr "Invalid backdoor_port configuration %(range)s: %(ex)s. %(help)s" #, python-format msgid "" "Invalid input received: Unexpected argument for periodic task creation: " "%(arg)s." msgstr "" "Invalid input received: Unexpected argument for periodic task creation: " "%(arg)s." #, python-format msgid "Invalid restart_method: %s" msgstr "Invalid restart_method: %s" msgid "Launcher asked to start multiple workers" msgstr "Launcher asked to start multiple workers" #, python-format msgid "Looping call timed out after %.02f seconds" msgstr "Looping call timed out after %.02f seconds" msgid "Number of workers should be positive!" msgstr "Number of workers should be positive!" #, python-format msgid "Service %(service)s must an instance of %(base)s!" msgstr "Service %(service)s must an instance of %(base)s!" msgid "The backlog must be more than 0" msgstr "The backlog must be more than 0" msgid "Type of workers should be int!" msgstr "Type of workers should be int!" #, python-format msgid "Unable to find ca_file : %s" msgstr "Unable to find ca_file : %s" #, python-format msgid "Unable to find cert_file : %s" msgstr "Unable to find cert_file : %s" #, python-format msgid "Unable to find key_file : %s" msgstr "Unable to find key_file : %s" #, python-format msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "Unexpected argument for periodic task creation: %(arg)s." msgid "Unknown looping call" msgstr "Unknown looping call" #, python-format msgid "Unsupported socket family: %s" msgstr "Unsupported socket family: %s" msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/loopingcall.py0000664000175000017500000004333400000000000021534 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import random import sys import time from eventlet import event from eventlet import greenthread from oslo_log import log as logging from oslo_utils import eventletutils from oslo_utils import excutils from oslo_utils import reflection from oslo_utils import timeutils from oslo_service._i18n import _ LOG = logging.getLogger(__name__) class LoopingCallDone(Exception): """Exception to break out and stop a LoopingCallBase. The poll-function passed to LoopingCallBase can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. 
An optional return-value can be included as the argument to the exception; this return-value will be returned by LoopingCallBase.wait() """ def __init__(self, retvalue=True): """:param retvalue: Value that LoopingCallBase.wait() should return.""" self.retvalue = retvalue class LoopingCallTimeOut(Exception): """Exception for a timed out LoopingCall. The LoopingCall will raise this exception when a timeout is provided and it is exceeded. """ pass def _safe_wrapper(f, kind, func_name): """Wrapper that calls into wrapped function and logs errors as needed.""" def func(*args, **kwargs): try: return f(*args, **kwargs) except LoopingCallDone: raise # let the outer handler process this except Exception: LOG.error('%(kind)s %(func_name)r failed', {'kind': kind, 'func_name': func_name}, exc_info=True) return 0 return func class LoopingCallBase(object): _KIND = _("Unknown looping call") _RUN_ONLY_ONE_MESSAGE = _("A looping call can only run one function" " at a time") def __init__(self, f=None, *args, **kw): self.args = args self.kw = kw self.f = f self._thread = None self.done = None self._abort = eventletutils.EventletEvent() @property def _running(self): return not self._abort.is_set() def stop(self): if self._running: self._abort.set() def wait(self): return self.done.wait() def _on_done(self, gt, *args, **kwargs): self._thread = None def _sleep(self, timeout): self._abort.wait(timeout) def _start(self, idle_for, initial_delay=None, stop_on_exception=True): """Start the looping :param idle_for: Callable that takes two positional arguments, returns how long to idle for. The first positional argument is the last result from the function being looped and the second positional argument is the time it took to calculate that result. :param initial_delay: How long to delay before starting the looping. Value is in seconds. :param stop_on_exception: Whether to stop if an exception occurs. :returns: eventlet event instance """ if self._thread is not None: raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE) self.done = event.Event() self._abort.clear() self._thread = greenthread.spawn( self._run_loop, idle_for, initial_delay=initial_delay, stop_on_exception=stop_on_exception) self._thread.link(self._on_done) return self.done # NOTE(bnemec): This is just a wrapper function we can mock so we aren't # affected by other users of the StopWatch class. 
def _elapsed(self, watch): return watch.elapsed() def _run_loop(self, idle_for_func, initial_delay=None, stop_on_exception=True): kind = self._KIND func_name = reflection.get_callable_name(self.f) func = self.f if stop_on_exception else _safe_wrapper(self.f, kind, func_name) if initial_delay: self._sleep(initial_delay) try: watch = timeutils.StopWatch() while self._running: watch.restart() result = func(*self.args, **self.kw) watch.stop() if not self._running: break idle = idle_for_func(result, self._elapsed(watch)) LOG.trace('%(kind)s %(func_name)r sleeping ' 'for %(idle).02f seconds', {'func_name': func_name, 'idle': idle, 'kind': kind}) self._sleep(idle) except LoopingCallDone as e: self.done.send(e.retvalue) except Exception: exc_info = sys.exc_info() try: LOG.error('%(kind)s %(func_name)r failed', {'kind': kind, 'func_name': func_name}, exc_info=exc_info) self.done.send_exception(*exc_info) finally: del exc_info return else: self.done.send(True) class FixedIntervalLoopingCall(LoopingCallBase): """A fixed interval looping call.""" _RUN_ONLY_ONE_MESSAGE = _("A fixed interval looping call can only run" " one function at a time") _KIND = _('Fixed interval looping call') def start(self, interval, initial_delay=None, stop_on_exception=True): def _idle_for(result, elapsed): delay = round(elapsed - interval, 2) if delay > 0: func_name = reflection.get_callable_name(self.f) LOG.warning('Function %(func_name)r run outlasted ' 'interval by %(delay).2f sec', {'func_name': func_name, 'delay': delay}) return -delay if delay < 0 else 0 return self._start(_idle_for, initial_delay=initial_delay, stop_on_exception=stop_on_exception) class FixedIntervalWithTimeoutLoopingCall(LoopingCallBase): """A fixed interval looping call with timeout checking mechanism.""" _RUN_ONLY_ONE_MESSAGE = _("A fixed interval looping call with timeout" " checking and can only run one function at" " at a time") _KIND = _('Fixed interval looping call with timeout checking.') def start(self, interval, initial_delay=None, stop_on_exception=True, timeout=0): start_time = time.time() def _idle_for(result, elapsed): delay = round(elapsed - interval, 2) if delay > 0: func_name = reflection.get_callable_name(self.f) LOG.warning('Function %(func_name)r run outlasted ' 'interval by %(delay).2f sec', {'func_name': func_name, 'delay': delay}) elapsed_time = time.time() - start_time if timeout > 0 and elapsed_time > timeout: raise LoopingCallTimeOut( _('Looping call timed out after %.02f seconds') % elapsed_time) return -delay if delay < 0 else 0 return self._start(_idle_for, initial_delay=initial_delay, stop_on_exception=stop_on_exception) class DynamicLoopingCall(LoopingCallBase): """A looping call which sleeps until the next known event. The function called should return how long to sleep for before being called again. """ _RUN_ONLY_ONE_MESSAGE = _("A dynamic interval looping call can only run" " one function at a time") _TASK_MISSING_SLEEP_VALUE_MESSAGE = _( "A dynamic interval looping call should supply either an" " interval or periodic_interval_max" ) _KIND = _('Dynamic interval looping call') def start(self, initial_delay=None, periodic_interval_max=None, stop_on_exception=True): def _idle_for(suggested_delay, elapsed): delay = suggested_delay if delay is None: if periodic_interval_max is not None: delay = periodic_interval_max else: # Note(suro-patz): An application used to receive a # TypeError thrown from eventlet layer, before # this RuntimeError was introduced. 
raise RuntimeError( self._TASK_MISSING_SLEEP_VALUE_MESSAGE) else: if periodic_interval_max is not None: delay = min(delay, periodic_interval_max) return delay return self._start(_idle_for, initial_delay=initial_delay, stop_on_exception=stop_on_exception) class BackOffLoopingCall(LoopingCallBase): """Run a method in a loop with backoff on error. The passed in function should return True (no error, return to initial_interval), False (error, start backing off), or raise LoopingCallDone(retvalue=None) (quit looping, return retvalue if set). When there is an error, the call will backoff on each failure. The backoff will be equal to double the previous base interval times some jitter. If a backoff would put it over the timeout, it halts immediately, so the call will never take more than timeout, but may and likely will take less time. When the function return value is True or False, the interval will be multiplied by a random jitter. If min_jitter or max_jitter is None, there will be no jitter (jitter=1). If min_jitter is below 0.5, the code may not backoff and may increase its retry rate. If func constantly returns True, this function will not return. To run a func and wait for a call to finish (by raising a LoopingCallDone): timer = BackOffLoopingCall(func) response = timer.start().wait() :param initial_delay: delay before first running of function :param starting_interval: initial interval in seconds between calls to function. When an error occurs and then a success, the interval is returned to starting_interval :param timeout: time in seconds before a LoopingCallTimeout is raised. The call will never take longer than timeout, but may quit before timeout. :param max_interval: The maximum interval between calls during errors :param jitter: Used to vary when calls are actually run to avoid group of calls all coming at the exact same time. Uses random.gauss(jitter, 0.1), with jitter as the mean for the distribution. If set below .5, it can cause the calls to come more rapidly after each failure. :param min_interval: The minimum interval in seconds between calls to function. :raises: LoopingCallTimeout if time spent doing error retries would exceed timeout. """ _RNG = random.SystemRandom() _KIND = _('Dynamic backoff interval looping call') _RUN_ONLY_ONE_MESSAGE = _("A dynamic backoff interval looping call can" " only run one function at a time") def __init__(self, f=None, *args, **kw): super(BackOffLoopingCall, self).__init__(f=f, *args, **kw) self._error_time = 0 self._interval = 1 def start(self, initial_delay=None, starting_interval=1, timeout=300, max_interval=300, jitter=0.75, min_interval=0.001): if self._thread is not None: raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE) # Reset any prior state. self._error_time = 0 self._interval = starting_interval def _idle_for(success, _elapsed): random_jitter = abs(self._RNG.gauss(jitter, 0.1)) if success: # Reset error state now that it didn't error... self._interval = starting_interval self._error_time = 0 return self._interval * random_jitter else: # Perform backoff, random jitter around the next interval # bounded by min_interval and max_interval. idle = max(self._interval * 2 * random_jitter, min_interval) idle = min(idle, max_interval) # Calculate the next interval based on the mean, so that the # backoff grows at the desired rate. self._interval = max(self._interval * 2 * jitter, min_interval) # Don't go over timeout, end early if necessary. If # timeout is 0, keep going. 
if timeout > 0 and self._error_time + idle > timeout: raise LoopingCallTimeOut( _('Looping call timed out after %.02f seconds') % (self._error_time + idle)) self._error_time += idle return idle return self._start(_idle_for, initial_delay=initial_delay) class RetryDecorator(object): """Decorator for retrying a function upon suggested exceptions. The decorated function is retried for the given number of times, and the sleep time between the retries is incremented until max sleep time is reached. If the max retry count is set to -1, then the decorated function is invoked indefinitely until an exception is thrown, and the caught exception is not in the list of suggested exceptions. """ def __init__(self, max_retry_count=-1, inc_sleep_time=10, max_sleep_time=60, exceptions=()): """Configure the retry object using the input params. :param max_retry_count: maximum number of times the given function must be retried when one of the input 'exceptions' is caught. When set to -1, it will be retried indefinitely until an exception is thrown and the caught exception is not in param exceptions. :param inc_sleep_time: incremental time in seconds for sleep time between retries :param max_sleep_time: max sleep time in seconds beyond which the sleep time will not be incremented using param inc_sleep_time. On reaching this threshold, max_sleep_time will be used as the sleep time. :param exceptions: suggested exceptions for which the function must be retried, if no exceptions are provided (the default) then all exceptions will be reraised, and no retrying will be triggered. """ self._max_retry_count = max_retry_count self._inc_sleep_time = inc_sleep_time self._max_sleep_time = max_sleep_time self._exceptions = exceptions self._retry_count = 0 self._sleep_time = 0 def __call__(self, f): func_name = reflection.get_callable_name(f) def _func(*args, **kwargs): result = None try: if self._retry_count: LOG.debug("Invoking %(func_name)s; retry count is " "%(retry_count)d.", {'func_name': func_name, 'retry_count': self._retry_count}) result = f(*args, **kwargs) except self._exceptions: with excutils.save_and_reraise_exception() as ctxt: LOG.debug("Exception which is in the suggested list of " "exceptions occurred while invoking function:" " %s.", func_name) if (self._max_retry_count != -1 and self._retry_count >= self._max_retry_count): LOG.debug("Cannot retry %(func_name)s upon " "suggested exception " "since retry count (%(retry_count)d) " "reached max retry count " "(%(max_retry_count)d).", {'retry_count': self._retry_count, 'max_retry_count': self._max_retry_count, 'func_name': func_name}) else: ctxt.reraise = False self._retry_count += 1 self._sleep_time += self._inc_sleep_time return self._sleep_time raise LoopingCallDone(result) @functools.wraps(f) def func(*args, **kwargs): loop = DynamicLoopingCall(_func, *args, **kwargs) evt = loop.start(periodic_interval_max=self._max_sleep_time) LOG.debug("Waiting for function %s to return.", func_name) return evt.wait() return func ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/periodic_task.py0000664000175000017500000001775700000000000022063 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import logging import random import time from time import monotonic as now from oslo_service._i18n import _ from oslo_service import _options from oslo_utils import reflection LOG = logging.getLogger(__name__) DEFAULT_INTERVAL = 60.0 def list_opts(): """Entry point for oslo-config-generator.""" return [(None, copy.deepcopy(_options.periodic_opts))] class InvalidPeriodicTaskArg(Exception): message = _("Unexpected argument for periodic task creation: %(arg)s.") def periodic_task(*args, **kwargs): """Decorator to indicate that a method is a periodic task. This decorator can be used in two ways: 1. Without arguments '@periodic_task', this will be run on the default interval of 60 seconds. 2. With arguments: @periodic_task(spacing=N [, run_immediately=[True|False]] [, name=[None|"string"]) this will be run on approximately every N seconds. If this number is negative the periodic task will be disabled. If the run_immediately argument is provided and has a value of 'True', the first run of the task will be shortly after task scheduler starts. If run_immediately is omitted or set to 'False', the first time the task runs will be approximately N seconds after the task scheduler starts. If name is not provided, __name__ of function is used. """ def decorator(f): # Test for old style invocation if 'ticks_between_runs' in kwargs: raise InvalidPeriodicTaskArg(arg='ticks_between_runs') # Control if run at all f._periodic_task = True f._periodic_external_ok = kwargs.pop('external_process_ok', False) f._periodic_enabled = kwargs.pop('enabled', True) f._periodic_name = kwargs.pop('name', f.__name__) # Control frequency f._periodic_spacing = kwargs.pop('spacing', 0) f._periodic_immediate = kwargs.pop('run_immediately', False) if f._periodic_immediate: f._periodic_last_run = None else: f._periodic_last_run = now() return f # NOTE(sirp): The `if` is necessary to allow the decorator to be used with # and without parenthesis. # # In the 'with-parenthesis' case (with kwargs present), this function needs # to return a decorator function since the interpreter will invoke it like: # # periodic_task(*args, **kwargs)(f) # # In the 'without-parenthesis' case, the original function will be passed # in as the first argument, like: # # periodic_task(f) if kwargs: return decorator else: return decorator(args[0]) class _PeriodicTasksMeta(type): def _add_periodic_task(cls, task): """Add a periodic task to the list of periodic tasks. The task should already be decorated by @periodic_task. :return: whether task was actually enabled """ name = task._periodic_name if task._periodic_spacing < 0: LOG.info('Skipping periodic task %(task)s because ' 'its interval is negative', {'task': name}) return False if not task._periodic_enabled: LOG.info('Skipping periodic task %(task)s because ' 'it is disabled', {'task': name}) return False # A periodic spacing of zero indicates that this task should # be run on the default interval to avoid running too # frequently. 
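# DEFAULT_INTERVAL is 60 seconds (see the module-level constant above), so
# such tasks end up running roughly once a minute.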
if task._periodic_spacing == 0: task._periodic_spacing = DEFAULT_INTERVAL cls._periodic_tasks.append((name, task)) cls._periodic_spacing[name] = task._periodic_spacing return True def __init__(cls, names, bases, dict_): """Metaclass that allows us to collect decorated periodic tasks.""" super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) # NOTE(sirp): if the attribute is not present then we must be the base # class, so, go ahead an initialize it. If the attribute is present, # then we're a subclass so make a copy of it so we don't step on our # parent's toes. try: cls._periodic_tasks = cls._periodic_tasks[:] except AttributeError: cls._periodic_tasks = [] try: cls._periodic_spacing = cls._periodic_spacing.copy() except AttributeError: cls._periodic_spacing = {} for value in cls.__dict__.values(): if getattr(value, '_periodic_task', False): cls._add_periodic_task(value) def _nearest_boundary(last_run, spacing): """Find the nearest boundary in the past. The boundary is a multiple of the spacing with the last run as an offset. Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24, 31, 38... 0% to 5% of the spacing value will be added to this value to ensure tasks do not synchronize. This jitter is rounded to the nearest second, this means that spacings smaller than 20 seconds will not have jitter. """ current_time = now() if last_run is None: return current_time delta = current_time - last_run offset = delta % spacing # Add up to 5% jitter jitter = int(spacing * (random.random() / 20)) # nosec return current_time - offset + jitter class PeriodicTasks(metaclass=_PeriodicTasksMeta): def __init__(self, conf): super(PeriodicTasks, self).__init__() self.conf = conf self.conf.register_opts(_options.periodic_opts) self._periodic_last_run = {} for name, task in self._periodic_tasks: self._periodic_last_run[name] = task._periodic_last_run def add_periodic_task(self, task): """Add a periodic task to the list of periodic tasks. The task should already be decorated by @periodic_task. """ if self.__class__._add_periodic_task(task): self._periodic_last_run[task._periodic_name] = ( task._periodic_last_run) def run_periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" idle_for = DEFAULT_INTERVAL for task_name, task in self._periodic_tasks: if (task._periodic_external_ok and not self.conf.run_external_periodic_tasks): continue cls_name = reflection.get_class_name(self, fully_qualified=False) full_task_name = '.'.join([cls_name, task_name]) spacing = self._periodic_spacing[task_name] last_run = self._periodic_last_run[task_name] # Check if due, if not skip idle_for = min(idle_for, spacing) if last_run is not None: delta = last_run + spacing - now() if delta > 0: idle_for = min(idle_for, delta) continue LOG.debug("Running periodic task %(full_task_name)s", {"full_task_name": full_task_name}) self._periodic_last_run[task_name] = _nearest_boundary( last_run, spacing) try: task(self, context) except BaseException: if raise_on_error: raise LOG.exception("Error during %(full_task_name)s", {"full_task_name": full_task_name}) time.sleep(0) return idle_for ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/service.py0000664000175000017500000007143400000000000020673 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic Node base class for all workers that run on hosts.""" import abc import collections import copy import errno import functools import gc import inspect import io import logging import os import random import signal import sys import time import eventlet from eventlet import event from eventlet import tpool from oslo_concurrency import lockutils from oslo_service._i18n import _ from oslo_service import _options from oslo_service import eventlet_backdoor from oslo_service import systemd from oslo_service import threadgroup LOG = logging.getLogger(__name__) _LAUNCHER_RESTART_METHODS = ['reload', 'mutate'] def list_opts(): """Entry point for oslo-config-generator.""" return [(None, copy.deepcopy(_options.eventlet_backdoor_opts + _options.service_opts))] def _is_daemon(): # The process group for a foreground process will match the # process group of the controlling terminal. If those values do # not match, or ioctl() fails on the stdout file handle, we assume # the process is running in the background as a daemon. # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics try: is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) except io.UnsupportedOperation: # Could not get the fileno for stdout, so we must be a daemon. is_daemon = True except OSError as err: if err.errno == errno.ENOTTY: # Assume we are a daemon because there is no terminal. is_daemon = True else: raise return is_daemon def _is_sighup_and_daemon(signo): if not (SignalHandler().is_signal_supported('SIGHUP') and signo == signal.SIGHUP): # Avoid checking if we are a daemon, because the signal isn't # SIGHUP. return False return _is_daemon() def _check_service_base(service): if not isinstance(service, ServiceBase): raise TypeError(_("Service %(service)s must an instance of %(base)s!") % {'service': service, 'base': ServiceBase}) class ServiceBase(metaclass=abc.ABCMeta): """Base class for all services.""" @abc.abstractmethod def start(self): """Start service.""" @abc.abstractmethod def stop(self): """Stop service.""" @abc.abstractmethod def wait(self): """Wait for service to complete.""" @abc.abstractmethod def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. """ class Singleton(type): _instances = {} _semaphores = lockutils.Semaphores() def __call__(cls, *args, **kwargs): with lockutils.lock('singleton_lock', semaphores=cls._semaphores): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__( *args, **kwargs) return cls._instances[cls] class SignalHandler(metaclass=Singleton): def __init__(self, *args, **kwargs): super(SignalHandler, self).__init__(*args, **kwargs) self.__setup_signal_interruption() # Map all signal names to signal integer values and create a # reverse mapping (for easier + quick lookup). 
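# For example, _signals_by_name['SIGTERM'] is signal.SIGTERM, and
# signals_to_name maps signal.SIGTERM back to 'SIGTERM'.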
self._ignore_signals = ('SIG_DFL', 'SIG_IGN') self._signals_by_name = dict((name, getattr(signal, name)) for name in dir(signal) if name.startswith("SIG") and name not in self._ignore_signals) self.signals_to_name = dict( (sigval, name) for (name, sigval) in self._signals_by_name.items()) self._signal_handlers = collections.defaultdict(set) self.clear() def clear(self): for sig in self._signal_handlers: signal.signal(sig, signal.SIG_DFL) self._signal_handlers.clear() def add_handlers(self, signals, handler): for sig in signals: self.add_handler(sig, handler) def add_handler(self, sig, handler): if not self.is_signal_supported(sig): return signo = self._signals_by_name[sig] self._signal_handlers[signo].add(handler) signal.signal(signo, self._handle_signal) def _handle_signal(self, signo, frame): # This method can be called anytime, even between two Python # instructions. It's scheduled by the C signal handler of Python using # Py_AddPendingCall(). # # We only do one thing: schedule a call to _handle_signal_cb() later. # eventlet.spawn() is not signal-safe: _handle_signal() can be called # during a call to eventlet.spawn(). This case is supported, it is # ok to schedule multiple calls to _handle_signal() with the same # signal number. # # To call to _handle_signal_cb() is delayed to avoid reentrant calls to # _handle_signal_cb(). It avoids race conditions like reentrant call to # clear(): clear() is not reentrant (bug #1538204). eventlet.spawn(self._handle_signal_cb, signo, frame) # On Python >= 3.5, ensure that eventlet's poll() or sleep() call is # interrupted by raising an exception. If the signal handler does not # raise an exception then due to PEP 475 the call will not return until # an event is detected on a file descriptor or the timeout is reached, # and thus eventlet will not wake up and notice that there has been a # new thread spawned. if self.__force_interrupt_on_signal: try: interrupted_frame = inspect.stack(context=0)[1] except IndexError: pass else: if ((interrupted_frame.function == 'do_poll' and interrupted_frame.filename == self.__hub_module_file) or (interrupted_frame.function == 'do_sleep' and interrupted_frame.filename == __file__)): raise IOError(errno.EINTR, 'Interrupted') def __setup_signal_interruption(self): """Set up to do the Right Thing with signals during poll() and sleep(). Deal with the changes introduced in PEP 475 that prevent a signal from interrupting eventlet's call to poll() or sleep(). """ select_module = eventlet.patcher.original('select') self.__force_interrupt_on_signal = hasattr(select_module, 'poll') if self.__force_interrupt_on_signal: try: from eventlet.hubs import poll as poll_hub except ImportError: pass else: # This is a function we can test for in the stack when handling # a signal - it's safe to raise an IOError with EINTR anywhere # in this function. def do_sleep(time_sleep_func, seconds): return time_sleep_func(seconds) time_sleep = eventlet.patcher.original('time').sleep # Wrap time.sleep to ignore the interruption error we're # injecting from the signal handler. This makes the behaviour # the same as sleep() in Python 2, where EINTR causes the # sleep to be interrupted (and not resumed), but no exception # is raised. 
@functools.wraps(time_sleep) def sleep_wrapper(seconds): try: return do_sleep(time_sleep, seconds) except (IOError, InterruptedError) as err: if err.errno != errno.EINTR: raise poll_hub.sleep = sleep_wrapper hub = eventlet.hubs.get_hub() self.__hub_module_file = sys.modules[hub.__module__].__file__ def _handle_signal_cb(self, signo, frame): for handler in self._signal_handlers[signo]: handler(signo, frame) def is_signal_supported(self, sig_name): return sig_name in self._signals_by_name class Launcher(object): """Launch one or more services and wait for them to complete.""" def __init__(self, conf, restart_method='reload'): """Initialize the service launcher. :param restart_method: If 'reload', calls reload_config_files on SIGHUP. If 'mutate', calls mutate_config_files on SIGHUP. Other values produce a ValueError. :returns: None """ self.conf = conf conf.register_opts(_options.service_opts) self.services = Services(restart_method=restart_method) self.backdoor_port = ( eventlet_backdoor.initialize_if_enabled(self.conf)) self.restart_method = restart_method def launch_service(self, service, workers=1): """Load and start the given service. :param service: The service you would like to start, must be an instance of :class:`oslo_service.service.ServiceBase` :param workers: This param makes this method compatible with ProcessLauncher.launch_service. It must be None, 1 or omitted. :returns: None """ if workers is not None and workers != 1: raise ValueError(_("Launcher asked to start multiple workers")) _check_service_base(service) service.backdoor_port = self.backdoor_port self.services.add(service) def stop(self): """Stop all services which are currently running. :returns: None """ self.services.stop() def wait(self): """Wait until all services have been stopped, and then return. :returns: None """ self.services.wait() def restart(self): """Reload config files and restart service. :returns: The return value from reload_config_files or mutate_config_files, according to the restart_method. """ if self.restart_method == 'reload': self.conf.reload_config_files() else: # self.restart_method == 'mutate' self.conf.mutate_config_files() self.services.restart() class SignalExit(SystemExit): def __init__(self, signo, exccode=1): super(SignalExit, self).__init__(exccode) self.signo = signo class ServiceLauncher(Launcher): """Runs one or more service in a parent process.""" def __init__(self, conf, restart_method='reload'): """Constructor. 
:param conf: an instance of ConfigOpts :param restart_method: passed to super """ super(ServiceLauncher, self).__init__( conf, restart_method=restart_method) self.signal_handler = SignalHandler() def _graceful_shutdown(self, *args): self.signal_handler.clear() if (self.conf.graceful_shutdown_timeout and self.signal_handler.is_signal_supported('SIGALRM')): signal.alarm(self.conf.graceful_shutdown_timeout) self.stop() def _reload_service(self, *args): self.signal_handler.clear() raise SignalExit(signal.SIGHUP) def _fast_exit(self, *args): LOG.info('Caught SIGINT signal, instantaneous exiting') os._exit(1) def _on_timeout_exit(self, *args): LOG.info('Graceful shutdown timeout exceeded, ' 'instantaneous exiting') os._exit(1) def handle_signal(self): """Set self._handle_signal as a signal handler.""" self.signal_handler.clear() self.signal_handler.add_handler('SIGTERM', self._graceful_shutdown) self.signal_handler.add_handler('SIGINT', self._fast_exit) self.signal_handler.add_handler('SIGHUP', self._reload_service) self.signal_handler.add_handler('SIGALRM', self._on_timeout_exit) def _wait_for_exit_or_signal(self): status = None signo = 0 if self.conf.log_options: LOG.debug('Full set of CONF:') self.conf.log_opt_values(LOG, logging.DEBUG) try: super(ServiceLauncher, self).wait() except SignalExit as exc: signame = self.signal_handler.signals_to_name[exc.signo] LOG.info('Caught %s, handling', signame) status = exc.code signo = exc.signo except SystemExit as exc: self.stop() status = exc.code except Exception: self.stop() return status, signo def wait(self): """Wait for a service to terminate and restart it on SIGHUP. :returns: termination status """ systemd.notify_once() self.signal_handler.clear() while True: self.handle_signal() status, signo = self._wait_for_exit_or_signal() if not _is_sighup_and_daemon(signo): break self.restart() super(ServiceLauncher, self).wait() return status class ServiceWrapper(object): def __init__(self, service, workers): self.service = service self.workers = workers self.children = set() self.forktimes = [] class ProcessLauncher(object): """Launch a service with a given number of workers.""" def __init__(self, conf, wait_interval=0.01, restart_method='reload'): """Constructor. :param conf: an instance of ConfigOpts :param wait_interval: The interval to sleep for between checks of child process exit. :param restart_method: If 'reload', calls reload_config_files on SIGHUP. If 'mutate', calls mutate_config_files on SIGHUP. Other values produce a ValueError. """ self.conf = conf conf.register_opts(_options.service_opts) self.children = {} self.sigcaught = None self.running = True self.wait_interval = wait_interval self.launcher = None rfd, self.writepipe = os.pipe() self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') self.signal_handler = SignalHandler() self.handle_signal() self.restart_method = restart_method if restart_method not in _LAUNCHER_RESTART_METHODS: raise ValueError(_("Invalid restart_method: %s") % restart_method) def handle_signal(self): """Add instance's signal handlers to class handlers.""" self.signal_handler.add_handler('SIGTERM', self._handle_term) self.signal_handler.add_handler('SIGHUP', self._handle_hup) self.signal_handler.add_handler('SIGINT', self._fast_exit) self.signal_handler.add_handler('SIGALRM', self._on_alarm_exit) def _handle_term(self, signo, frame): """Handle a TERM event. 
:param signo: signal number :param frame: current stack frame """ self.sigcaught = signo self.running = False # Allow the process to be killed again and die from natural causes self.signal_handler.clear() def _handle_hup(self, signo, frame): """Handle a HUP event. :param signo: signal number :param frame: current stack frame """ self.sigcaught = signo self.running = False # Do NOT clear the signal_handler, allowing multiple SIGHUPs to be # received swiftly. If a non-HUP is received before #wait loops, the # second event will "overwrite" the HUP. This is fine. def _fast_exit(self, signo, frame): LOG.info('Caught SIGINT signal, instantaneous exiting') os._exit(1) def _on_alarm_exit(self, signo, frame): LOG.info('Graceful shutdown timeout exceeded, ' 'instantaneous exiting') os._exit(1) def _pipe_watcher(self): # This will block until the write end is closed when the parent # dies unexpectedly self.readpipe.read(1) LOG.info('Parent process has died unexpectedly, exiting') if self.launcher: self.launcher.stop() sys.exit(1) def _child_process_handle_signal(self): # Setup child signal handlers differently def _sigterm(*args): self.signal_handler.clear() self.launcher.stop() def _sighup(*args): self.signal_handler.clear() raise SignalExit(signal.SIGHUP) self.signal_handler.clear() # Parent signals with SIGTERM when it wants us to go away. self.signal_handler.add_handler('SIGTERM', _sigterm) self.signal_handler.add_handler('SIGHUP', _sighup) self.signal_handler.add_handler('SIGINT', self._fast_exit) def _child_wait_for_exit_or_signal(self, launcher): status = 0 signo = 0 # NOTE(johannes): All exceptions are caught to ensure this # doesn't fallback into the loop spawning children. It would # be bad for a child to spawn more children. try: launcher.wait() except SignalExit as exc: signame = self.signal_handler.signals_to_name[exc.signo] LOG.info('Child caught %s, handling', signame) status = exc.code signo = exc.signo except SystemExit as exc: launcher.stop() status = exc.code except BaseException: launcher.stop() LOG.exception('Unhandled exception') status = 2 return status, signo def _child_process(self, service): self._child_process_handle_signal() # Reopen the eventlet hub to make sure we don't share an epoll # fd with parent and/or siblings, which would be bad eventlet.hubs.use_hub() # Close write to ensure only parent has it open os.close(self.writepipe) # Create greenthread to watch for parent to close pipe eventlet.spawn_n(self._pipe_watcher) # Reseed random number generator random.seed() launcher = Launcher(self.conf, restart_method=self.restart_method) launcher.launch_service(service) return launcher def _start_child(self, wrap): if len(wrap.forktimes) > wrap.workers: # Limit ourselves to one process a second (over the period of # number of workers * 1 second). This will allow workers to # start up quickly but ensure we don't fork off children that # die instantly too quickly. if time.time() - wrap.forktimes[0] < wrap.workers: LOG.info('Forking too fast, sleeping') time.sleep(1) wrap.forktimes.pop(0) wrap.forktimes.append(time.time()) pid = os.fork() if pid == 0: # When parent used native threads the library on child needs to be # "reset", otherwise native threads won't work on the child. 
tpool.killall() self.launcher = self._child_process(wrap.service) while True: self._child_process_handle_signal() status, signo = self._child_wait_for_exit_or_signal( self.launcher) if not _is_sighup_and_daemon(signo): self.launcher.wait() break self.launcher.restart() os._exit(status) LOG.debug('Started child %d', pid) wrap.children.add(pid) self.children[pid] = wrap return pid def launch_service(self, service, workers=1): """Launch a service with a given number of workers. :param service: a service to launch, must be an instance of :class:`oslo_service.service.ServiceBase` :param workers: a number of processes in which a service will be running """ _check_service_base(service) wrap = ServiceWrapper(service, workers) # Hide existing objects from the garbage collector, so that most # existing pages will remain in shared memory rather than being # duplicated between subprocesses in the GC mark-and-sweep. (Requires # Python 3.7 or later.) if hasattr(gc, 'freeze'): gc.freeze() LOG.info('Starting %d workers', wrap.workers) while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) def _wait_child(self): try: # Don't block if no child processes have exited pid, status = os.waitpid(0, os.WNOHANG) if not pid: return None except OSError as exc: if exc.errno not in (errno.EINTR, errno.ECHILD): raise return None if os.WIFSIGNALED(status): sig = os.WTERMSIG(status) LOG.info('Child %(pid)d killed by signal %(sig)d', dict(pid=pid, sig=sig)) else: code = os.WEXITSTATUS(status) LOG.info('Child %(pid)s exited with status %(code)d', dict(pid=pid, code=code)) if pid not in self.children: LOG.warning('pid %d not in child list', pid) return None wrap = self.children.pop(pid) wrap.children.remove(pid) return wrap def _respawn_children(self): while self.running: wrap = self._wait_child() if not wrap: # Yield to other threads if no children have exited # Sleep for a short time to avoid excessive CPU usage # (see bug #1095346) eventlet.greenthread.sleep(self.wait_interval) continue while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) def wait(self): """Loop waiting on children to die and respawning as necessary.""" systemd.notify_once() if self.conf.log_options: LOG.debug('Full set of CONF:') self.conf.log_opt_values(LOG, logging.DEBUG) try: while True: self.handle_signal() self._respawn_children() # No signal means that stop was called. Don't clean up here. if not self.sigcaught: return signame = self.signal_handler.signals_to_name[self.sigcaught] LOG.info('Caught %s, stopping children', signame) if not _is_sighup_and_daemon(self.sigcaught): break child_signal = signal.SIGTERM if self.restart_method == 'reload': self.conf.reload_config_files() elif self.restart_method == 'mutate': self.conf.mutate_config_files() child_signal = signal.SIGHUP for service in set( [wrap.service for wrap in self.children.values()]): service.reset() for pid in self.children: os.kill(pid, child_signal) self.running = True self.sigcaught = None except eventlet.greenlet.GreenletExit: LOG.info("Wait called after thread killed. Cleaning up.") # if we are here it means that we are trying to do graceful shutdown. 
# add alarm watching that graceful_shutdown_timeout is not exceeded if (self.conf.graceful_shutdown_timeout and self.signal_handler.is_signal_supported('SIGALRM')): signal.alarm(self.conf.graceful_shutdown_timeout) self.stop() def stop(self): """Terminate child processes and wait on each.""" self.running = False LOG.debug("Stop services.") for service in set( [wrap.service for wrap in self.children.values()]): service.stop() LOG.debug("Killing children.") for pid in self.children: try: os.kill(pid, signal.SIGTERM) except OSError as exc: if exc.errno != errno.ESRCH: raise # Wait for children to die if self.children: LOG.info('Waiting on %d children to exit', len(self.children)) while self.children: self._wait_child() class Service(ServiceBase): """Service object for binaries running on hosts.""" def __init__(self, threads=1000): self.tg = threadgroup.ThreadGroup(threads) def reset(self): """Reset a service in case it received a SIGHUP.""" def start(self): """Start a service.""" def stop(self, graceful=False): """Stop a service. :param graceful: indicates whether to wait for all threads to finish or terminate them instantly """ self.tg.stop(graceful) def wait(self): """Wait for a service to shut down.""" self.tg.wait() class Services(object): def __init__(self, restart_method='reload'): if restart_method not in _LAUNCHER_RESTART_METHODS: raise ValueError(_("Invalid restart_method: %s") % restart_method) self.restart_method = restart_method self.services = [] self.tg = threadgroup.ThreadGroup() self.done = event.Event() def add(self, service): """Add a service to a list and create a thread to run it. :param service: service to run """ self.services.append(service) self.tg.add_thread(self.run_service, service, self.done) def stop(self): """Wait for graceful shutdown of services and kill the threads.""" for service in self.services: service.stop() # Each service has performed cleanup, now signal that the run_service # wrapper threads can now die: if not self.done.ready(): self.done.send() # reap threads: self.tg.stop() def wait(self): """Wait for services to shut down.""" for service in self.services: service.wait() self.tg.wait() def restart(self): """Reset services. The behavior of this function varies depending on the value of the restart_method member. If the restart_method is `reload`, then it will stop the services, reset them, and start them in new threads. If the restart_method is `mutate`, then it will just reset the services without restarting them. """ if self.restart_method == 'reload': self.stop() self.done = event.Event() for restart_service in self.services: restart_service.reset() if self.restart_method == 'reload': self.tg.add_thread(self.run_service, restart_service, self.done) @staticmethod def run_service(service, done): """Service start wrapper. :param service: service to run :param done: event to wait on until a shutdown is triggered :returns: None """ try: service.start() except Exception: LOG.exception('Error starting thread.') raise SystemExit(1) else: done.wait() def launch(conf, service, workers=1, restart_method='reload'): """Launch a service with a given number of workers. :param conf: an instance of ConfigOpts :param service: a service to launch, must be an instance of :class:`oslo_service.service.ServiceBase` :param workers: a number of processes in which a service will be running, type should be int. :param restart_method: Passed to the constructed launcher. If 'reload', the launcher will call reload_config_files on SIGHUP. 
If 'mutate', it will call mutate_config_files on SIGHUP. Other values produce a ValueError. :returns: instance of a launcher that was used to launch the service """ if workers is not None and not isinstance(workers, int): raise TypeError(_("Type of workers should be int!")) if workers is not None and workers <= 0: raise ValueError(_("Number of workers should be positive!")) if workers is None or workers == 1: launcher = ServiceLauncher(conf, restart_method=restart_method) else: launcher = ProcessLauncher(conf, restart_method=restart_method) launcher.launch_service(service, workers=workers) return launcher ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/sslutils.py0000664000175000017500000000627200000000000021113 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import ssl from oslo_service._i18n import _ from oslo_service import _options config_section = 'ssl' _SSL_PROTOCOLS = { "tlsv1": ssl.PROTOCOL_TLSv1, "sslv23": ssl.PROTOCOL_SSLv23 } _OPTIONAL_PROTOCOLS = { 'sslv2': 'PROTOCOL_SSLv2', 'sslv3': 'PROTOCOL_SSLv3', 'tlsv1_1': 'PROTOCOL_TLSv1_1', 'tlsv1_2': 'PROTOCOL_TLSv1_2', } for protocol in _OPTIONAL_PROTOCOLS: try: _SSL_PROTOCOLS[protocol] = getattr(ssl, _OPTIONAL_PROTOCOLS[protocol]) except AttributeError: # nosec pass def list_opts(): """Entry point for oslo-config-generator.""" return [(config_section, copy.deepcopy(_options.ssl_opts))] def register_opts(conf): """Registers sslutils config options.""" return conf.register_opts(_options.ssl_opts, config_section) def is_enabled(conf): conf.register_opts(_options.ssl_opts, config_section) cert_file = conf.ssl.cert_file key_file = conf.ssl.key_file ca_file = conf.ssl.ca_file use_ssl = cert_file or key_file if cert_file and not os.path.exists(cert_file): raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) if ca_file and not os.path.exists(ca_file): raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) if key_file and not os.path.exists(key_file): raise RuntimeError(_("Unable to find key_file : %s") % key_file) if use_ssl and (not cert_file or not key_file): raise RuntimeError(_("When running server in SSL mode, you must " "specify both a cert_file and key_file " "option value in your configuration file")) return use_ssl def wrap(conf, sock): conf.register_opts(_options.ssl_opts, config_section) ssl_kwargs = { 'server_side': True, 'certfile': conf.ssl.cert_file, 'keyfile': conf.ssl.key_file, 'cert_reqs': ssl.CERT_NONE, } if conf.ssl.ca_file: ssl_kwargs['ca_certs'] = conf.ssl.ca_file ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED if conf.ssl.version: key = conf.ssl.version.lower() try: ssl_kwargs['ssl_version'] = _SSL_PROTOCOLS[key] except KeyError: raise RuntimeError( _("Invalid SSL version : %s") % conf.ssl.version) if conf.ssl.ciphers: ssl_kwargs['ciphers'] = conf.ssl.ciphers # NOTE(eezhova): SSL/TLS protocol version is injected in ssl_kwargs above, # so skipping 
bandit check return ssl.wrap_socket(sock, **ssl_kwargs) # nosec ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/systemd.py0000664000175000017500000000603500000000000020716 0ustar00zuulzuul00000000000000# Copyright 2012-2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helper module for systemd service readiness notification. """ import contextlib import logging import os import socket import sys LOG = logging.getLogger(__name__) def _abstractify(socket_name): if socket_name.startswith('@'): # abstract namespace socket socket_name = '\0%s' % socket_name[1:] return socket_name def _sd_notify(unset_env, msg): notify_socket = os.getenv('NOTIFY_SOCKET') if notify_socket: sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) with contextlib.closing(sock): try: sock.connect(_abstractify(notify_socket)) sock.sendall(msg) if unset_env: del os.environ['NOTIFY_SOCKET'] except EnvironmentError: LOG.debug("Systemd notification failed", exc_info=True) def notify(): """Send notification to Systemd that service is ready. For details see http://www.freedesktop.org/software/systemd/man/sd_notify.html """ _sd_notify(False, b'READY=1') def notify_once(): """Send notification once to Systemd that service is ready. Systemd sets NOTIFY_SOCKET environment variable with the name of the socket listening for notifications from services. This method removes the NOTIFY_SOCKET environment variable to ensure notification is sent only once. """ _sd_notify(True, b'READY=1') def onready(notify_socket, timeout): """Wait for systemd style notification on the socket. :param notify_socket: local socket address :type notify_socket: string :param timeout: socket timeout :type timeout: float :returns: 0 service ready 1 service not ready 2 timeout occurred """ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) sock.settimeout(timeout) sock.bind(_abstractify(notify_socket)) with contextlib.closing(sock): try: msg = sock.recv(512) except socket.timeout: return 2 if b'READY=1' == msg: return 0 else: return 1 if __name__ == '__main__': # simple CLI for testing if len(sys.argv) == 1: notify() elif len(sys.argv) >= 2: timeout = float(sys.argv[1]) notify_socket = os.getenv('NOTIFY_SOCKET') if notify_socket: retval = onready(notify_socket, timeout) sys.exit(retval) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.276123 oslo.service-3.4.0/oslo_service/tests/0000775000175000017500000000000000000000000020012 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/__init__.py0000664000175000017500000000247300000000000022131 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import eventlet if os.name == 'nt': # eventlet monkey patching the os and thread modules causes # subprocess.Popen to fail on Windows when using pipes due # to missing non-blocking IO support. # # bug report on eventlet: # https://bitbucket.org/eventlet/eventlet/issue/132/ # eventletmonkey_patch-breaks eventlet.monkey_patch(os=False, thread=False) else: eventlet.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading # noqa import threading # noqa orig_threading.current_thread.__globals__['_active'] = threading._active ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/base.py0000664000175000017500000000514300000000000021301 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from oslo_config import fixture as config from oslotest import base as test_base from oslo_service import _options from oslo_service import sslutils class ServiceBaseTestCase(test_base.BaseTestCase): def setUp(self): super(ServiceBaseTestCase, self).setUp() self.conf_fixture = self.useFixture(config.Config()) self.conf_fixture.register_opts(_options.eventlet_backdoor_opts) self.conf_fixture.register_opts(_options.service_opts) self.conf_fixture.register_opts(_options.ssl_opts, sslutils.config_section) self.conf_fixture.register_opts(_options.periodic_opts) self.conf_fixture.register_opts(_options.wsgi_opts) self.conf = self.conf_fixture.conf self.config = self.conf_fixture.config self.conf(args=[], default_config_files=[]) def get_new_temp_dir(self): """Create a new temporary directory. :returns: fixtures.TempDir """ return self.useFixture(fixtures.TempDir()) def get_default_temp_dir(self): """Create a default temporary directory. Returns the same directory during the whole test case. :returns: fixtures.TempDir """ if not hasattr(self, '_temp_dir'): self._temp_dir = self.get_new_temp_dir() return self._temp_dir def get_temp_file_path(self, filename, root=None): """Returns an absolute path for a temporary file. If root is None, the file is created in default temporary directory. It also creates the directory if it's not initialized yet. If root is not None, the file is created inside the directory passed as root= argument. 
:param filename: filename :type filename: string :param root: temporary directory to create a new file in :type root: fixtures.TempDir :returns: absolute file path string """ root = root or self.get_default_temp_dir() return root.join(filename) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/eventlet_service.py0000664000175000017500000001176500000000000023744 0ustar00zuulzuul00000000000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # An eventlet server that runs a service.py pool. # Opens listens on a random port. The port # is printed to stdout. import socket import sys import time import eventlet.wsgi import greenlet from oslo_config import cfg from oslo_service import service POOL_SIZE = 1 class Server(service.ServiceBase): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, application, host=None, port=None, keepalive=False, keepidle=None): self.application = application self.host = host or '0.0.0.0' self.port = port or 0 # Pool for a green thread in which wsgi server will be running self.pool = eventlet.GreenPool(POOL_SIZE) self.socket_info = {} self.greenthread = None self.keepalive = keepalive self.keepidle = keepidle self.socket = None def listen(self, key=None, backlog=128): """Create and start listening on socket. Call before forking worker processes. Raises Exception if this has already been called. """ # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). We need to get around this in the # future or monitor upstream for a fix. # Please refer below link # (https://bitbucket.org/eventlet/eventlet/ # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/ # greendns.py?at=0.12#cl-163) info = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0] self.socket = eventlet.listen(info[-1], family=info[0], backlog=backlog) def start(self, key=None, backlog=128): """Run a WSGI server with the given application.""" if self.socket is None: self.listen(key=key, backlog=backlog) dup_socket = self.socket.dup() if key: self.socket_info[key] = self.socket.getsockname() # Optionally enable keepalive on the wsgi socket. if self.keepalive: dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) if self.keepidle is not None: dup_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, self.keepidle) self.greenthread = self.pool.spawn(self._run, self.application, dup_socket) def stop(self): if self.greenthread is not None: self.greenthread.kill() def wait(self): """Wait until all servers have completed running.""" try: self.pool.waitall() except KeyboardInterrupt: pass except greenlet.GreenletExit: pass def reset(self): """Required by the service interface. The service interface is used by the launcher when receiving a SIGHUP. The service interface is defined in oslo_service.Service. Test server does not need to do anything here. 
""" pass def _run(self, application, socket): """Start a WSGI server with a new green thread pool.""" try: eventlet.wsgi.server(socket, application, debug=False) except greenlet.GreenletExit: # Wait until all servers have completed running pass def run(port_queue, workers=3, process_time=0): eventlet.patcher.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading # noqa import threading # noqa orig_threading.current_thread.__globals__['_active'] = threading._active def hi_app(environ, start_response): # Some requests need to take time to process so the connection # remains active. time.sleep(process_time) start_response('200 OK', [('Content-Type', 'application/json')]) yield 'hi' server = Server(hi_app) server.listen() launcher = service.launch(cfg.CONF, server, workers) port = server.socket.getsockname()[1] port_queue.put(port) sys.stdout.flush() launcher.wait() if __name__ == '__main__': run() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.276123 oslo.service-3.4.0/oslo_service/tests/ssl_cert/0000775000175000017500000000000000000000000021630 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/ssl_cert/ca.crt0000664000175000017500000000205600000000000022730 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIC6jCCAdKgAwIBAgIUZdS0qsU+lp2Vjxqir0wtWawoGTAwDQYJKoZIhvcNAQEL BQAwDTELMAkGA1UEAxMCQ0EwHhcNMTkxMDA5MDk1ODU3WhcNNDkxMjMxMDk1OTAw WjANMQswCQYDVQQDEwJDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB ALg+95eNXP1S3KQMmhuABAbL6WIyumwuKej/WWCke33TXO0g6vkKuhsVOIjqkDow 73iWrcEhuGCOkmAkLOASz6BP7m+QBreS8XQ57BpIz1x3hFZ/UKYStWCuTLgwHYzO QgrA2Neu41zfJ6ia8p+fPFqhX05sVl0TJeh3zktN9KfwliFznIy/j3XR/x/tKX0O rGDwz0kB7QyR/N2aQGxokFQZhwLnlFHinY4jvPQpsINpxrdwUlBI0ajIAd7aIo3f uiQ9a4n71Ngf/3Dk44NIAMuAqzTQFRIndXWfTsyxfW6qiZMJn4lqr0p3nVEP8Bxb +UROl/R6YHapi4HgoCLbCOUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud EwEB/wQFMAMBAf8wHQYDVR0OBBYEFIf1eOkulZ9jqaJIIgvnvZfQwS2aMA0GCSqG SIb3DQEBCwUAA4IBAQB2bjUrfYcKR/uYtMD3PhVwXVpwLsY87poiTf5AQycZix1T SysNysq4XV6V0pnjGyNVEu41xPGUJKLV0Jap3bSldfEq4Fi0NuK4LN6l/4u+OIS4 xw1fGD/mXs/bNpzBeRXJQjjDZpgHbO9WrzdWiQbDl6uTnhWaCB8SELrbCL9iToBr rvOVoXWOd7gf7vhnGGrV/IAQPvYIJWdVnLQVrP9+n53RniJ6ofR7qTP1TxbHCRup 1LxtESrk45GT4zTTnbsSxZ6U9RkeTf1+bQ5TML3TdesPbgxx8LOZ1DeIIriIL9No jb+jXd8r6b/S4uzfy2Ml6bLl+hCG6tSAGLJtsreT -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/ssl_cert/ca.key0000664000175000017500000000321700000000000022730 0ustar00zuulzuul00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAuD73l41c/VLcpAyaG4AEBsvpYjK6bC4p6P9ZYKR7fdNc7SDq +Qq6GxU4iOqQOjDveJatwSG4YI6SYCQs4BLPoE/ub5AGt5LxdDnsGkjPXHeEVn9Q phK1YK5MuDAdjM5CCsDY167jXN8nqJryn588WqFfTmxWXRMl6HfOS030p/CWIXOc jL+PddH/H+0pfQ6sYPDPSQHtDJH83ZpAbGiQVBmHAueUUeKdjiO89Cmwg2nGt3BS UEjRqMgB3toijd+6JD1rifvU2B//cOTjg0gAy4CrNNAVEid1dZ9OzLF9bqqJkwmf iWqvSnedUQ/wHFv5RE6X9HpgdqmLgeCgItsI5QIDAQABAoIBAQCRvq2YouiGM3/5 /UYCrveAcNlDJqG1ZacderC29CYD2KjPo5R8QHILbnP0Sqigf4lPONJlzkjNiLpd dwBQmFDJnbEday8FFCYQLKmJ+UKfAKQc0EV5uZ7kxbxGjzkw4w/29aoSo3OTSatT go6emhXiE3kSzjxbEASSqgL0Ksx3806J59Mv5FMVp7EOiKVcux6Rg7tc24tpg/Aa 
6TR3oAGc4DGVQHko25eJY6iRRgFBt3n+M5oLwFrEz8ec4RQML1ov2ybBS25gzzyk aDBW9HRKucqQWMDHcg/YfTRBruW971oUuVXmMtLei6ZKYFTVA7+ZhomKGhnj+Wqz dTmH3XgBAoGBAM1BMTXhmZXRALlwTr5Iw+pn2OrOh7I8Z3ZOqtb0iXmCqbFfSo1I KvIEEfF+ATzOVdLrqo4iVwtQW7VAvJt9NJWNaIXIoY6HhGAVeDvq4zJ4ng/MkjPc KKjKy9TmwU8pDhprFCNFY5NmHxgKHUpPybvSfFBdIBPqjkk331/0PgXlAoGBAOXM HDbjmldWzUTTXztJx7loQgedajMLlZXIfzO84jvHvUp/IWzf1r0QjqO5o5XZwR9U 6jfQEcz06Hrk4m8W5LqfMzOyHOBNhMCDXpFMlJct6I5XITQj28g4ZUtEuVJOUQKH Hl6hMidkFN1h23jis2u/ZeIt/FjczFNIGWIXaMcBAoGBALylcF8/S/OgNnQ5PoYx Rq+IZpvY2mr8jXL09pu+ASRG7UPyewjDDYWGLB+yWdzTH5pNs2ITOcFSSBecujyG pO/XGfvr3GCtOQM+eFVJmytmJJAMy0sSy+PnD9RLidwBtc6eH9ITEmQE9gsYMLd+ AcG4wTzzemoTqFavHYSJbP/pAoGAB1B13XFlKj6DDhvEPjjc+JPO9jsWdEV8H2Zg Vh8+/DQhXEurnrGJPj9WqvNvt4dU1FB5nAktZ96rv62lX2/VG/ORR6X1sVYGUbJc wjczy5QUgONZFTXG+xqAbuLjGzGO9ouiddSfqg1PdLR2reRZXaab8ZURISr1/fif hjXEigECgYEAgBZDchm/FaOLzIzi3wAHBeG/jnC0NTcLxzDzqh7NNs6ADSEvXOn+ O5HyD4iOkicBUotl7J9+4I5ivQjA5QbEjq+CWtHMaHd4xDv/jxvo6cuYJV5SSno3 ev3hxhD5cCGuw0u9LN6M5ERAelPMTeGHGhTLsW7ZMzdj+g/gddBnqKA= -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/ssl_cert/certificate.crt0000664000175000017500000000224000000000000024622 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIDPjCCAiagAwIBAgIUPYihitTTb7gWtqs/i6AAYFW6DNIwDQYJKoZIhvcNAQEL BQAwDTELMAkGA1UEAxMCQ0EwHhcNMTkxMDA5MDk1OTMwWhcNNDkxMjMxMDk1OTMy WjARMQ8wDQYDVQQDEwZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK AoIBAQCqaPl2WX6j50gkQldm+VsP1m7ZtPfNjMmn3GSw++a1aaZo9wz7odmrTX/r WcC9wulwEwOMtquVAk5gp5UDOXAXgqszN9goNvOrk9rdpe8Y5p1ZqniI74sl6GFT l6q/vxIjG5A72tbjTEGcbeYUwhCF/p74QjXgPCxKbWwCDGRl+32IykWGYWjxhTBp 72aCCu1BAixQ0zj8QY747DF6nYMDLhsmN24vJ8kFBpsQ+Fgk0/VYbwt2yLIWyAhG 1Pe/TkLA1A1aBh6qCEogxRkNhYkYnOGDt7GABcnXRhrQg+3oVPrDO1Ypd8H6qAUj s8USD6XWlpZpet3OGx3bTubk6LXNAgMBAAGjgZEwgY4wDgYDVR0PAQH/BAQDAgWg MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G A1UdDgQWBBQC5/cd1gqoU4rItbUuFczSEza0qjAfBgNVHSMEGDAWgBSH9XjpLpWf Y6miSCIL572X0MEtmjAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB AQCIY6IF4S1wSsVV+73KdA006XHtZh0LKAjR+yJyXtvzHgg4g2DqUbUyozEZHtJS gZNpVXjgFOnmYsoUE8gFSQ2duo8mKD9EMI5vYskJ/Qai1/klrIZsWrLROrbKTh2b VSZR1QP2PGVenOp1WugEgFl6IqGE4LeLBlC0O8LS0wa/1L7Kb94b+Kht9Qb3mpo9 61uO13w8xSBPrKA7nc+WD45qAqMOGKoXxuYkOWu37I0Wo0iLZZ79hGI/dwpWCbRY pJuy6NrJJC0IFzSgBahq0UjkSjBAYRtClbgDlqd1NBIbHsU//8jRHtrfrm13lTHp ih0VLmjgmG1o8Ykw0iW2qr2x -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/ssl_cert/privatekey.key0000664000175000017500000000321700000000000024530 0ustar00zuulzuul00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAqmj5dll+o+dIJEJXZvlbD9Zu2bT3zYzJp9xksPvmtWmmaPcM +6HZq01/61nAvcLpcBMDjLarlQJOYKeVAzlwF4KrMzfYKDbzq5Pa3aXvGOadWap4 iO+LJehhU5eqv78SIxuQO9rW40xBnG3mFMIQhf6e+EI14DwsSm1sAgxkZft9iMpF hmFo8YUwae9mggrtQQIsUNM4/EGO+Owxep2DAy4bJjduLyfJBQabEPhYJNP1WG8L dsiyFsgIRtT3v05CwNQNWgYeqghKIMUZDYWJGJzhg7exgAXJ10Ya0IPt6FT6wztW KXfB+qgFI7PFEg+l1paWaXrdzhsd207m5Oi1zQIDAQABAoIBAAbNWefXeq6gNMj9 vO9nVLM1JAecqaeSzxncOki2RWdIaFQYSHRi6YVk+o4ybvckKJ1IpQOg5whzpx1E kzYlvMuzc5H/0KkKSH+4zyu5y+l6ix1hKJ6OsUc6F2h2zWIKBndQSlmZSqqYCKZt 3JlHhdFTcJUIT4472Ki/5WfPhtJrPsnzydFrD0YhRyPUdxykri3K/dctINvhaLh6 N1XQEp6sRXTphMF/6Xx2VnjbQIIhrOYnQK+0N7a0AW3Dban+qkbQJr58OUqzYjcJ d/8DwozEgS4tl/l37tAGj70UWeLGUBtoH9lPuoKKbwyoma14w8EsJ2ciQqK8fzlh 
e85TySECgYEA3s2gUg2gpyONcAWgQ+9j0L8Jw7vzcp+L5Z82cfmK3ucdVlSOlbWN A66I79EVrKvABZXUVG4bnizsdJyu6s2n/cRlOGZdJPImUR7aUyzx7sgns6VKlouh bHetY64DFKegKa7ESjDUscxQfyZD/6ez00auLcGTUjwrf2akf8C0CisCgYEAw8zq 8767/fgwYrra+1IWAN1zv0S6+e7injSgoPggsMIkuC1bLYNTPLCJ6GSDBorey8PD ibqxDJTxfAFa81x6yxq6OhN3rRoRPzq02WbdCja3JDY6ZjPQX7niVCLizykzXENq M8HgiU30ub+fsHdttWf9fXjE7PC/q/jm9aSjG+cCgYBcDyyKGtVhmiALxFdXGqfs jbL0LEnH98JvK1zstQY7WdVWYXPUygItHkW4iXTiNskNxb3I8QH6VmeRO1hO7pDW GvsT1b/wzxkj9ZKSQYTpe8xjvw1VrPYTAlQjVrgxJZ3Y+zxhx1Fq8ZflIaOBFAMK bi7z+wLrPK9gYcHgg3ggpwKBgQC04hosE4iYlzAcBYNHqg2pvuuvR090lbvFa9j4 3EFXCgPiIgSZhzjeWDkO98wTvzcUB0YddtsZRH/Sj232ZPkLqSVhgZqN0Ace4p0w Olb5mJKYYYCR6kwWIyNzlsbxmp5YHe9+4R0Nu3baIqzj2XEApZEI2QkT21U3CjpT tET7AQKBgQCbR0pIMAS1T/ttubtmFSQuGIsuIfetq1ikFnJh85lS0Wk87wKtnNBb KFFx+r67ErWKwBUKeSwxb6958LS1SuNqcfSBnYTaou71DwqzH80kQ/MnseG/MPfZ f70gESDkqjH878evPL9FFpr3GCsx4cSmnF8PVu1u0l1cjNCqDc3/PQ== -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_eventlet_backdoor.py0000664000175000017500000001740400000000000025123 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
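# NOTE: descriptive comment added for clarity; not part of the original file.
# The tests below exercise oslo_service.eventlet_backdoor.initialize_if_enabled(),
# which reads the ``backdoor_socket`` / ``backdoor_port`` options registered
# with the service options. As the cases below show, ``backdoor_socket`` may
# contain a ``{pid}`` placeholder that is expanded to the process id, and
# ``backdoor_port`` accepts either a single port or a ``start:end`` range, in
# which case the first free port in the range is bound and returned.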
"""Unit Tests for eventlet backdoor.""" import errno import os import socket from unittest import mock import eventlet from oslo_service import eventlet_backdoor from oslo_service.tests import base class BackdoorSocketPathTest(base.ServiceBaseTestCase): @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_path(self, listen_mock, spawn_mock): self.config(backdoor_socket="/tmp/my_special_socket") listen_mock.side_effect = mock.Mock() path = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual("/tmp/my_special_socket", path) @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_path_with_format_string(self, listen_mock, spawn_mock): self.config(backdoor_socket="/tmp/my_special_socket-{pid}") listen_mock.side_effect = mock.Mock() path = eventlet_backdoor.initialize_if_enabled(self.conf) expected_path = "/tmp/my_special_socket-{}".format(os.getpid()) self.assertEqual(expected_path, path) @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_path_with_broken_format_string(self, listen_mock, spawn_mock): broken_socket_paths = [ "/tmp/my_special_socket-{}", "/tmp/my_special_socket-{broken", "/tmp/my_special_socket-{broken}", ] for socket_path in broken_socket_paths: self.config(backdoor_socket=socket_path) listen_mock.side_effect = mock.Mock() path = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual(socket_path, path) @mock.patch.object(os, 'unlink') @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_path_already_exists(self, listen_mock, spawn_mock, unlink_mock): self.config(backdoor_socket="/tmp/my_special_socket") sock = mock.Mock() listen_mock.side_effect = [socket.error(errno.EADDRINUSE, ''), sock] path = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual("/tmp/my_special_socket", path) unlink_mock.assert_called_with("/tmp/my_special_socket") @mock.patch.object(os, 'unlink') @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_path_already_exists_and_gone(self, listen_mock, spawn_mock, unlink_mock): self.config(backdoor_socket="/tmp/my_special_socket") sock = mock.Mock() listen_mock.side_effect = [socket.error(errno.EADDRINUSE, ''), sock] unlink_mock.side_effect = OSError(errno.ENOENT, '') path = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual("/tmp/my_special_socket", path) unlink_mock.assert_called_with("/tmp/my_special_socket") @mock.patch.object(os, 'unlink') @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_path_already_exists_and_not_gone(self, listen_mock, spawn_mock, unlink_mock): self.config(backdoor_socket="/tmp/my_special_socket") listen_mock.side_effect = socket.error(errno.EADDRINUSE, '') unlink_mock.side_effect = OSError(errno.EPERM, '') self.assertRaises(OSError, eventlet_backdoor.initialize_if_enabled, self.conf) @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_path_no_perms(self, listen_mock, spawn_mock): self.config(backdoor_socket="/tmp/my_special_socket") listen_mock.side_effect = socket.error(errno.EPERM, '') self.assertRaises(socket.error, eventlet_backdoor.initialize_if_enabled, self.conf) class BackdoorPortTest(base.ServiceBaseTestCase): @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_port(self, listen_mock, spawn_mock): self.config(backdoor_port=1234) sock = 
mock.Mock() sock.getsockname.return_value = ('127.0.0.1', 1234) listen_mock.return_value = sock port = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual(1234, port) @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_port_inuse(self, listen_mock, spawn_mock): self.config(backdoor_port=2345) listen_mock.side_effect = socket.error(errno.EADDRINUSE, '') self.assertRaises(socket.error, eventlet_backdoor.initialize_if_enabled, self.conf) @mock.patch.object(eventlet, 'spawn') def test_backdoor_port_range_inuse(self, spawn_mock): self.config(backdoor_port='8800:8801') port = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual(8800, port) port = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual(8801, port) @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_port_range(self, listen_mock, spawn_mock): self.config(backdoor_port='8800:8899') sock = mock.Mock() sock.getsockname.return_value = ('127.0.0.1', 8800) listen_mock.return_value = sock port = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual(8800, port) @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_port_range_one_inuse(self, listen_mock, spawn_mock): self.config(backdoor_port='8800:8900') sock = mock.Mock() sock.getsockname.return_value = ('127.0.0.1', 8801) listen_mock.side_effect = [socket.error(errno.EADDRINUSE, ''), sock] port = eventlet_backdoor.initialize_if_enabled(self.conf) self.assertEqual(8801, port) @mock.patch.object(eventlet, 'spawn') @mock.patch.object(eventlet, 'listen') def test_backdoor_port_range_all_inuse(self, listen_mock, spawn_mock): self.config(backdoor_port='8800:8899') side_effects = [] for i in range(8800, 8900): side_effects.append(socket.error(errno.EADDRINUSE, '')) listen_mock.side_effect = side_effects self.assertRaises(socket.error, eventlet_backdoor.initialize_if_enabled, self.conf) def test_backdoor_port_reverse_range(self): self.config(backdoor_port='8888:7777') self.assertRaises(eventlet_backdoor.EventletBackdoorConfigValueError, eventlet_backdoor.initialize_if_enabled, self.conf) def test_backdoor_port_bad(self): self.config(backdoor_port='abc') self.assertRaises(eventlet_backdoor.EventletBackdoorConfigValueError, eventlet_backdoor.initialize_if_enabled, self.conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_fixture.py0000664000175000017500000000266200000000000023117 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
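# NOTE: descriptive comment added for clarity; not part of the original file.
# The test below exercises oslo_service.fixture.SleepFixture, which stubs out
# the looping-call sleep/wait so the RetryDecorator-wrapped method runs all of
# its retries without real delays; the fixture's ``mock_wait`` records each
# requested interval (2, 4, 6 seconds here, because ``inc_sleep_time=2``
# grows the sleep on every retry).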
from unittest import mock from oslotest import base as test_base from oslo_service import fixture from oslo_service import loopingcall class FixtureTestCase(test_base.BaseTestCase): def setUp(self): super(FixtureTestCase, self).setUp() self.sleepfx = self.useFixture(fixture.SleepFixture()) def test_sleep_fixture(self): @loopingcall.RetryDecorator(max_retry_count=3, inc_sleep_time=2, exceptions=(ValueError,)) def retried_method(): raise ValueError("!") self.assertRaises(ValueError, retried_method) self.assertEqual(3, self.sleepfx.mock_wait.call_count) # TODO(efried): This is cheating, and shouldn't be done by real callers # yet - see todo in SleepFixture. self.sleepfx.mock_wait.assert_has_calls( [mock.call(x) for x in (2, 4, 6)]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_loopingcall.py0000664000175000017500000003745700000000000023746 0ustar00zuulzuul00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from unittest import mock import eventlet from eventlet.green import threading as greenthreading from oslotest import base as test_base from oslo_service import fixture from oslo_service import loopingcall class LoopingCallTestCase(test_base.BaseTestCase): def setUp(self): super(LoopingCallTestCase, self).setUp() self.num_runs = 0 def test_return_true(self): def _raise_it(): raise loopingcall.LoopingCallDone(True) timer = loopingcall.FixedIntervalLoopingCall(_raise_it) self.assertTrue(timer.start(interval=0.5).wait()) def test_monotonic_timer(self): def _raise_it(): clock = eventlet.hubs.get_hub().clock ok = (clock == time.monotonic) raise loopingcall.LoopingCallDone(ok) timer = loopingcall.FixedIntervalLoopingCall(_raise_it) self.assertTrue(timer.start(interval=0.5).wait()) def test_eventlet_clock(self): # Make sure that by default the oslo_service.service_hub() kicks in, # test in the main thread hub = eventlet.hubs.get_hub() self.assertEqual(time.monotonic, hub.clock) def test_return_false(self): def _raise_it(): raise loopingcall.LoopingCallDone(False) timer = loopingcall.FixedIntervalLoopingCall(_raise_it) self.assertFalse(timer.start(interval=0.5).wait()) def test_terminate_on_exception(self): def _raise_it(): raise RuntimeError() timer = loopingcall.FixedIntervalLoopingCall(_raise_it) self.assertRaises(RuntimeError, timer.start(interval=0.5).wait) def _raise_and_then_done(self): if self.num_runs == 0: raise loopingcall.LoopingCallDone(False) else: self.num_runs = self.num_runs - 1 raise RuntimeError() def test_do_not_stop_on_exception(self): self.useFixture(fixture.SleepFixture()) self.num_runs = 2 timer = loopingcall.FixedIntervalLoopingCall(self._raise_and_then_done) res = timer.start(interval=0.5, stop_on_exception=False).wait() self.assertFalse(res) def _wait_for_zero(self): """Called at an interval until num_runs == 0.""" if self.num_runs == 0: raise loopingcall.LoopingCallDone(False) else: self.num_runs = self.num_runs - 1 def 
test_no_double_start(self): wait_ev = greenthreading.Event() def _run_forever_until_set(): if wait_ev.is_set(): raise loopingcall.LoopingCallDone(True) timer = loopingcall.FixedIntervalLoopingCall(_run_forever_until_set) timer.start(interval=0.01) self.assertRaises(RuntimeError, timer.start, interval=0.01) wait_ev.set() timer.wait() def test_no_double_stop(self): def _raise_it(): raise loopingcall.LoopingCallDone(False) timer = loopingcall.FixedIntervalLoopingCall(_raise_it) timer.start(interval=0.5) timer.stop() timer.stop() def test_repeat(self): self.useFixture(fixture.SleepFixture()) self.num_runs = 2 timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_zero) self.assertFalse(timer.start(interval=0.5).wait()) def assertAlmostEqual(self, expected, actual, precision=7, message=None): self.assertEqual(0, round(actual - expected, precision), message) @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') @mock.patch('oslo_service.loopingcall.LoopingCallBase._elapsed') def test_interval_adjustment(self, elapsed_mock, sleep_mock): """Ensure the interval is adjusted to account for task duration.""" self.num_runs = 3 second = 1 smidgen = 0.01 elapsed_mock.side_effect = [second - smidgen, second + second, second + smidgen, ] timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_zero) timer.start(interval=1.01).wait() expected_calls = [0.02, 0.00, 0.00] for i, call in enumerate(sleep_mock.call_args_list): expected = expected_calls[i] args, kwargs = call actual = args[0] message = ('Call #%d, expected: %s, actual: %s' % (i, expected, actual)) self.assertAlmostEqual(expected, actual, message=message) def test_looping_call_timed_out(self): def _fake_task(): pass timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_fake_task) self.assertRaises(loopingcall.LoopingCallTimeOut, timer.start(interval=0.1, timeout=0.3).wait) class DynamicLoopingCallTestCase(test_base.BaseTestCase): def setUp(self): super(DynamicLoopingCallTestCase, self).setUp() self.num_runs = 0 def test_return_true(self): def _raise_it(): raise loopingcall.LoopingCallDone(True) timer = loopingcall.DynamicLoopingCall(_raise_it) self.assertTrue(timer.start().wait()) def test_monotonic_timer(self): def _raise_it(): clock = eventlet.hubs.get_hub().clock ok = (clock == time.monotonic) raise loopingcall.LoopingCallDone(ok) timer = loopingcall.DynamicLoopingCall(_raise_it) self.assertTrue(timer.start().wait()) def test_no_double_start(self): wait_ev = greenthreading.Event() def _run_forever_until_set(): if wait_ev.is_set(): raise loopingcall.LoopingCallDone(True) else: return 0.01 timer = loopingcall.DynamicLoopingCall(_run_forever_until_set) timer.start() self.assertRaises(RuntimeError, timer.start) wait_ev.set() timer.wait() def test_return_false(self): def _raise_it(): raise loopingcall.LoopingCallDone(False) timer = loopingcall.DynamicLoopingCall(_raise_it) self.assertFalse(timer.start().wait()) def test_terminate_on_exception(self): def _raise_it(): raise RuntimeError() timer = loopingcall.DynamicLoopingCall(_raise_it) self.assertRaises(RuntimeError, timer.start().wait) def _raise_and_then_done(self): if self.num_runs == 0: raise loopingcall.LoopingCallDone(False) else: self.num_runs = self.num_runs - 1 raise RuntimeError() def test_do_not_stop_on_exception(self): self.useFixture(fixture.SleepFixture()) self.num_runs = 2 timer = loopingcall.DynamicLoopingCall(self._raise_and_then_done) timer.start(stop_on_exception=False).wait() def _wait_for_zero(self): """Called at an interval until num_runs == 0.""" if 
self.num_runs == 0: raise loopingcall.LoopingCallDone(False) else: self.num_runs = self.num_runs - 1 sleep_for = self.num_runs * 10 + 1 # dynamic duration return sleep_for def test_repeat(self): self.useFixture(fixture.SleepFixture()) self.num_runs = 2 timer = loopingcall.DynamicLoopingCall(self._wait_for_zero) self.assertFalse(timer.start().wait()) def _timeout_task_without_any_return(self): pass def test_timeout_task_without_return_and_max_periodic(self): timer = loopingcall.DynamicLoopingCall( self._timeout_task_without_any_return ) self.assertRaises(RuntimeError, timer.start().wait) def _timeout_task_without_return_but_with_done(self): if self.num_runs == 0: raise loopingcall.LoopingCallDone(False) else: self.num_runs = self.num_runs - 1 @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') def test_timeout_task_without_return(self, sleep_mock): self.num_runs = 1 timer = loopingcall.DynamicLoopingCall( self._timeout_task_without_return_but_with_done ) timer.start(periodic_interval_max=5).wait() sleep_mock.assert_has_calls([mock.call(5)]) @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') def test_interval_adjustment(self, sleep_mock): self.num_runs = 2 timer = loopingcall.DynamicLoopingCall(self._wait_for_zero) timer.start(periodic_interval_max=5).wait() sleep_mock.assert_has_calls([mock.call(5), mock.call(1)]) @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') def test_initial_delay(self, sleep_mock): self.num_runs = 1 timer = loopingcall.DynamicLoopingCall(self._wait_for_zero) timer.start(initial_delay=3).wait() sleep_mock.assert_has_calls([mock.call(3), mock.call(1)]) class TestBackOffLoopingCall(test_base.BaseTestCase): @mock.patch('random.SystemRandom.gauss') @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') def test_exponential_backoff(self, sleep_mock, random_mock): def false(): return False random_mock.return_value = .8 self.assertRaises(loopingcall.LoopingCallTimeOut, loopingcall.BackOffLoopingCall(false).start() .wait) expected_times = [mock.call(1.6), mock.call(2.4000000000000004), mock.call(3.6), mock.call(5.4), mock.call(8.1), mock.call(12.15), mock.call(18.225), mock.call(27.337500000000002), mock.call(41.00625), mock.call(61.509375000000006), mock.call(92.26406250000001)] self.assertEqual(expected_times, sleep_mock.call_args_list) @mock.patch('random.SystemRandom.gauss') @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') def test_exponential_backoff_negative_value(self, sleep_mock, random_mock): def false(): return False # random.gauss() can return negative values random_mock.return_value = -.8 self.assertRaises(loopingcall.LoopingCallTimeOut, loopingcall.BackOffLoopingCall(false).start() .wait) expected_times = [mock.call(1.6), mock.call(2.4000000000000004), mock.call(3.6), mock.call(5.4), mock.call(8.1), mock.call(12.15), mock.call(18.225), mock.call(27.337500000000002), mock.call(41.00625), mock.call(61.509375000000006), mock.call(92.26406250000001)] self.assertEqual(expected_times, sleep_mock.call_args_list) @mock.patch('random.SystemRandom.gauss') @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') def test_no_backoff(self, sleep_mock, random_mock): random_mock.return_value = 1 func = mock.Mock() # func.side_effect func.side_effect = [True, True, True, loopingcall.LoopingCallDone( retvalue='return value')] retvalue = loopingcall.BackOffLoopingCall(func).start().wait() expected_times = [mock.call(1), mock.call(1), mock.call(1)] self.assertEqual(expected_times, sleep_mock.call_args_list) 
self.assertEqual('return value', retvalue) @mock.patch('random.SystemRandom.gauss') @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') def test_no_sleep(self, sleep_mock, random_mock): # Any call that executes properly the first time shouldn't sleep random_mock.return_value = 1 func = mock.Mock() # func.side_effect func.side_effect = loopingcall.LoopingCallDone(retvalue='return value') retvalue = loopingcall.BackOffLoopingCall(func).start().wait() self.assertFalse(sleep_mock.called) self.assertEqual('return value', retvalue) @mock.patch('random.SystemRandom.gauss') @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') def test_max_interval(self, sleep_mock, random_mock): def false(): return False random_mock.return_value = .8 self.assertRaises(loopingcall.LoopingCallTimeOut, loopingcall.BackOffLoopingCall(false).start( max_interval=60) .wait) expected_times = [mock.call(1.6), mock.call(2.4000000000000004), mock.call(3.6), mock.call(5.4), mock.call(8.1), mock.call(12.15), mock.call(18.225), mock.call(27.337500000000002), mock.call(41.00625), mock.call(60), mock.call(60), mock.call(60)] self.assertEqual(expected_times, sleep_mock.call_args_list) class AnException(Exception): pass class UnknownException(Exception): pass class RetryDecoratorTest(test_base.BaseTestCase): """Tests for retry decorator class.""" def test_retry(self): result = "RESULT" @loopingcall.RetryDecorator() def func(*args, **kwargs): return result self.assertEqual(result, func()) def func2(*args, **kwargs): return result retry = loopingcall.RetryDecorator() self.assertEqual(result, retry(func2)()) self.assertTrue(retry._retry_count == 0) def test_retry_with_expected_exceptions(self): result = "RESULT" responses = [AnException(None), AnException(None), result] def func(*args, **kwargs): response = responses.pop(0) if isinstance(response, Exception): raise response return response sleep_time_incr = 0.01 retry_count = 2 retry = loopingcall.RetryDecorator(10, sleep_time_incr, 10, (AnException,)) self.assertEqual(result, retry(func)()) self.assertTrue(retry._retry_count == retry_count) self.assertEqual(retry_count * sleep_time_incr, retry._sleep_time) def test_retry_with_max_retries(self): responses = [AnException(None), AnException(None), AnException(None)] def func(*args, **kwargs): response = responses.pop(0) if isinstance(response, Exception): raise response return response retry = loopingcall.RetryDecorator(2, 0, 0, (AnException,)) self.assertRaises(AnException, retry(func)) self.assertTrue(retry._retry_count == 2) def test_retry_with_unexpected_exception(self): def func(*args, **kwargs): raise UnknownException(None) retry = loopingcall.RetryDecorator() self.assertRaises(UnknownException, retry(func)) self.assertTrue(retry._retry_count == 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_periodic.py0000664000175000017500000003277200000000000023234 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit Tests for periodic_task decorator and PeriodicTasks class.""" from unittest import mock from testtools import matchers from oslo_service import periodic_task from oslo_service.tests import base class AnException(Exception): pass class PeriodicTasksTestCase(base.ServiceBaseTestCase): """Test cases for PeriodicTasks.""" @mock.patch('oslo_service.periodic_task.now') def test_called_thrice(self, mock_now): time = 340 mock_now.return_value = time # Class inside test def to mock 'now' in # the periodic task decorator class AService(periodic_task.PeriodicTasks): def __init__(self, conf): super(AService, self).__init__(conf) self.called = {'doit': 0, 'urg': 0, 'ticks': 0, 'tocks': 0} @periodic_task.periodic_task def doit(self, context): self.called['doit'] += 1 @periodic_task.periodic_task def crashit(self, context): self.called['urg'] += 1 raise AnException('urg') @periodic_task.periodic_task( spacing=10 + periodic_task.DEFAULT_INTERVAL, run_immediately=True) def doit_with_ticks(self, context): self.called['ticks'] += 1 @periodic_task.periodic_task( spacing=10 + periodic_task.DEFAULT_INTERVAL) def doit_with_tocks(self, context): self.called['tocks'] += 1 external_called = {'ext1': 0, 'ext2': 0} @periodic_task.periodic_task def ext1(self, context): external_called['ext1'] += 1 @periodic_task.periodic_task( spacing=10 + periodic_task.DEFAULT_INTERVAL) def ext2(self, context): external_called['ext2'] += 1 serv = AService(self.conf) serv.add_periodic_task(ext1) serv.add_periodic_task(ext2) serv.run_periodic_tasks(None) # Time: 340 self.assertEqual(0, serv.called['doit']) self.assertEqual(0, serv.called['urg']) # New last run will be 350 self.assertEqual(1, serv.called['ticks']) self.assertEqual(0, serv.called['tocks']) self.assertEqual(0, external_called['ext1']) self.assertEqual(0, external_called['ext2']) time = time + periodic_task.DEFAULT_INTERVAL mock_now.return_value = time serv.run_periodic_tasks(None) # Time:400 # New Last run: 420 self.assertEqual(1, serv.called['doit']) self.assertEqual(1, serv.called['urg']) # Closest multiple of 70 is 420 self.assertEqual(1, serv.called['ticks']) self.assertEqual(0, serv.called['tocks']) self.assertEqual(1, external_called['ext1']) self.assertEqual(0, external_called['ext2']) time = time + periodic_task.DEFAULT_INTERVAL / 2 mock_now.return_value = time serv.run_periodic_tasks(None) self.assertEqual(1, serv.called['doit']) self.assertEqual(1, serv.called['urg']) self.assertEqual(2, serv.called['ticks']) self.assertEqual(1, serv.called['tocks']) self.assertEqual(1, external_called['ext1']) self.assertEqual(1, external_called['ext2']) time = time + periodic_task.DEFAULT_INTERVAL mock_now.return_value = time serv.run_periodic_tasks(None) self.assertEqual(2, serv.called['doit']) self.assertEqual(2, serv.called['urg']) self.assertEqual(3, serv.called['ticks']) self.assertEqual(2, serv.called['tocks']) self.assertEqual(2, external_called['ext1']) self.assertEqual(2, external_called['ext2']) @mock.patch('oslo_service.periodic_task.now') def test_called_correct(self, mock_now): time = 360444 mock_now.return_value = time test_spacing = 9 # Class inside test 
def to mock 'now' in # the periodic task decorator class AService(periodic_task.PeriodicTasks): def __init__(self, conf): super(AService, self).__init__(conf) self.called = {'ticks': 0} @periodic_task.periodic_task(spacing=test_spacing) def tick(self, context): self.called['ticks'] += 1 serv = AService(self.conf) for i in range(200): serv.run_periodic_tasks(None) self.assertEqual(int(i / test_spacing), serv.called['ticks']) time += 1 mock_now.return_value = time @mock.patch('oslo_service.periodic_task.now') def test_raises(self, mock_now): time = 230000 mock_now.return_value = time class AService(periodic_task.PeriodicTasks): def __init__(self, conf): super(AService, self).__init__(conf) self.called = {'urg': 0, } @periodic_task.periodic_task def crashit(self, context): self.called['urg'] += 1 raise AnException('urg') serv = AService(self.conf) now = serv._periodic_last_run['crashit'] mock_now.return_value = now + periodic_task.DEFAULT_INTERVAL self.assertRaises(AnException, serv.run_periodic_tasks, None, raise_on_error=True) def test_name(self): class AService(periodic_task.PeriodicTasks): def __init__(self, conf): super(AService, self).__init__(conf) @periodic_task.periodic_task(name='better-name') def tick(self, context): pass @periodic_task.periodic_task def tack(self, context): pass @periodic_task.periodic_task(name='another-name') def foo(self, context): pass serv = AService(self.conf) serv.add_periodic_task(foo) self.assertIn('better-name', serv._periodic_last_run) self.assertIn('another-name', serv._periodic_last_run) self.assertIn('tack', serv._periodic_last_run) class ManagerMetaTestCase(base.ServiceBaseTestCase): """Tests for the meta class which manages creation of periodic tasks.""" def test_meta(self): class Manager(periodic_task.PeriodicTasks): @periodic_task.periodic_task def foo(self): return 'foo' @periodic_task.periodic_task(spacing=4) def bar(self): return 'bar' @periodic_task.periodic_task(enabled=False) def baz(self): return 'baz' m = Manager(self.conf) self.assertThat(m._periodic_tasks, matchers.HasLength(2)) self.assertEqual(periodic_task.DEFAULT_INTERVAL, m._periodic_spacing['foo']) self.assertEqual(4, m._periodic_spacing['bar']) self.assertThat( m._periodic_spacing, matchers.Not(matchers.Contains('baz'))) @periodic_task.periodic_task def external(): return 42 m.add_periodic_task(external) self.assertThat(m._periodic_tasks, matchers.HasLength(3)) self.assertEqual(periodic_task.DEFAULT_INTERVAL, m._periodic_spacing['external']) class ManagerTestCase(base.ServiceBaseTestCase): """Tests the periodic tasks portion of the manager class.""" def setUp(self): super(ManagerTestCase, self).setUp() def test_periodic_tasks_with_idle(self): class Manager(periodic_task.PeriodicTasks): @periodic_task.periodic_task(spacing=200) def bar(self): return 'bar' m = Manager(self.conf) self.assertThat(m._periodic_tasks, matchers.HasLength(1)) self.assertEqual(200, m._periodic_spacing['bar']) # Now a single pass of the periodic tasks idle = m.run_periodic_tasks(None) self.assertAlmostEqual(60, idle, 1) def test_periodic_tasks_constant(self): class Manager(periodic_task.PeriodicTasks): @periodic_task.periodic_task(spacing=0) def bar(self): return 'bar' m = Manager(self.conf) idle = m.run_periodic_tasks(None) self.assertAlmostEqual(60, idle, 1) @mock.patch('oslo_service.periodic_task.now') def test_periodic_tasks_idle_calculation(self, mock_now): fake_time = 32503680000.0 mock_now.return_value = fake_time class Manager(periodic_task.PeriodicTasks): @periodic_task.periodic_task(spacing=10) 
def bar(self, context): return 'bar' m = Manager(self.conf) # Ensure initial values are correct self.assertEqual(1, len(m._periodic_tasks)) task_name, task = m._periodic_tasks[0] # Test task values self.assertEqual('bar', task_name) self.assertEqual(10, task._periodic_spacing) self.assertTrue(task._periodic_enabled) self.assertFalse(task._periodic_external_ok) self.assertFalse(task._periodic_immediate) self.assertAlmostEqual(32503680000.0, task._periodic_last_run) # Test the manager's representation of those values self.assertEqual(10, m._periodic_spacing[task_name]) self.assertAlmostEqual(32503680000.0, m._periodic_last_run[task_name]) mock_now.return_value = fake_time + 5 idle = m.run_periodic_tasks(None) self.assertAlmostEqual(5, idle, 1) self.assertAlmostEqual(32503680000.0, m._periodic_last_run[task_name]) mock_now.return_value = fake_time + 10 idle = m.run_periodic_tasks(None) self.assertAlmostEqual(10, idle, 1) self.assertAlmostEqual(32503680010.0, m._periodic_last_run[task_name]) @mock.patch('oslo_service.periodic_task.now') def test_periodic_tasks_immediate_runs_now(self, mock_now): fake_time = 32503680000.0 mock_now.return_value = fake_time class Manager(periodic_task.PeriodicTasks): @periodic_task.periodic_task(spacing=10, run_immediately=True) def bar(self, context): return 'bar' m = Manager(self.conf) # Ensure initial values are correct self.assertEqual(1, len(m._periodic_tasks)) task_name, task = m._periodic_tasks[0] # Test task values self.assertEqual('bar', task_name) self.assertEqual(10, task._periodic_spacing) self.assertTrue(task._periodic_enabled) self.assertFalse(task._periodic_external_ok) self.assertTrue(task._periodic_immediate) self.assertIsNone(task._periodic_last_run) # Test the manager's representation of those values self.assertEqual(10, m._periodic_spacing[task_name]) self.assertIsNone(m._periodic_last_run[task_name]) idle = m.run_periodic_tasks(None) self.assertAlmostEqual(32503680000.0, m._periodic_last_run[task_name]) self.assertAlmostEqual(10, idle, 1) mock_now.return_value = fake_time + 5 idle = m.run_periodic_tasks(None) self.assertAlmostEqual(5, idle, 1) def test_periodic_tasks_disabled(self): class Manager(periodic_task.PeriodicTasks): @periodic_task.periodic_task(spacing=-1) def bar(self): return 'bar' m = Manager(self.conf) idle = m.run_periodic_tasks(None) self.assertAlmostEqual(60, idle, 1) def test_external_running_here(self): self.config(run_external_periodic_tasks=True) class Manager(periodic_task.PeriodicTasks): @periodic_task.periodic_task(spacing=200, external_process_ok=True) def bar(self): return 'bar' m = Manager(self.conf) self.assertThat(m._periodic_tasks, matchers.HasLength(1)) @mock.patch('oslo_service.periodic_task.now') @mock.patch('random.random') def test_nearest_boundary(self, mock_random, mock_now): mock_now.return_value = 19 mock_random.return_value = 0 self.assertEqual(17, periodic_task._nearest_boundary(10, 7)) mock_now.return_value = 28 self.assertEqual(27, periodic_task._nearest_boundary(13, 7)) mock_now.return_value = 1841 self.assertEqual(1837, periodic_task._nearest_boundary(781, 88)) mock_now.return_value = 1835 self.assertEqual(mock_now.return_value, periodic_task._nearest_boundary(None, 88)) # Add 5% jitter mock_random.return_value = 1.0 mock_now.return_value = 1300 self.assertEqual(1200 + 10, periodic_task._nearest_boundary(1000, 200)) # Add 2.5% jitter mock_random.return_value = 0.5 mock_now.return_value = 1300 self.assertEqual(1200 + 5, periodic_task._nearest_boundary(1000, 200)) 
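# ---- Editor's note: illustrative sketch, not part of oslo.service ----
# test_periodic.py above relies on the pattern sketched here: subclass
# PeriodicTasks, decorate bound methods with @periodic_task.periodic_task,
# and drive them by calling run_periodic_tasks() in a loop. ExampleManager
# and its task bodies are hypothetical; spacing is in seconds.
from oslo_service import periodic_task


class ExampleManager(periodic_task.PeriodicTasks):

    @periodic_task.periodic_task(spacing=10)
    def report_state(self, context):
        pass  # hypothetical task, run at most once every 10 seconds

    @periodic_task.periodic_task(spacing=30, run_immediately=True)
    def refresh_cache(self, context):
        pass  # run_immediately=True schedules the first run right away


# A caller drives the tasks with an oslo.config ConfigOpts instance; the
# return value is the idle time in seconds until the next task is due:
#     manager = ExampleManager(CONF)
#     idle = manager.run_periodic_tasks(None)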
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_service.py0000664000175000017500000006407500000000000023077 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit Tests for service class""" import logging import multiprocessing import os import signal import socket import time import traceback from unittest import mock import eventlet from eventlet import event from oslotest import base as test_base from oslo_service import service from oslo_service.tests import base from oslo_service.tests import eventlet_service LOG = logging.getLogger(__name__) class ExtendedService(service.Service): def test_method(self): return 'service' class ServiceManagerTestCase(test_base.BaseTestCase): """Test cases for Services.""" def test_override_manager_method(self): serv = ExtendedService() serv.start() self.assertEqual('service', serv.test_method()) class ServiceWithTimer(service.Service): def __init__(self, ready_event=None): super(ServiceWithTimer, self).__init__() self.ready_event = ready_event def start(self): super(ServiceWithTimer, self).start() self.timer_fired = 0 self.tg.add_timer(1, self.timer_expired) def wait(self): if self.ready_event: self.ready_event.set() super(ServiceWithTimer, self).wait() def timer_expired(self): self.timer_fired = self.timer_fired + 1 class ServiceCrashOnStart(ServiceWithTimer): def start(self): super(ServiceCrashOnStart, self).start() raise ValueError class ServiceTestBase(base.ServiceBaseTestCase): """A base class for ServiceLauncherTest and ServiceRestartTest.""" def _spawn_service(self, workers=1, service_maker=None, launcher_maker=None): self.workers = workers pid = os.fork() if pid == 0: os.setsid() # NOTE(johannes): We can't let the child processes exit back # into the unit test framework since then we'll have multiple # processes running the same tests (and possibly forking more # processes that end up in the same situation). So we need # to catch all exceptions and make sure nothing leaks out, in # particular SystemExit, which is raised by sys.exit(). We use # os._exit() which doesn't have this problem. 
status = 0 try: serv = service_maker() if service_maker else ServiceWithTimer() if launcher_maker: launcher = launcher_maker() launcher.launch_service(serv, workers=workers) else: launcher = service.launch(self.conf, serv, workers=workers) status = launcher.wait() except SystemExit as exc: status = exc.code except BaseException: # We need to be defensive here too try: traceback.print_exc() except BaseException: print("Couldn't print traceback") status = 2 # Really exit os._exit(status or 0) return pid def _wait(self, cond, timeout): start = time.time() while not cond(): if time.time() - start > timeout: break time.sleep(.1) def setUp(self): super(ServiceTestBase, self).setUp() # NOTE(markmc): ConfigOpts.log_opt_values() uses CONF.config-file self.conf(args=[], default_config_files=[]) self.addCleanup(self.conf.reset) self.addCleanup(self._reap_pid) self.pid = 0 def _reap_pid(self): if self.pid: # Make sure all processes are stopped os.kill(self.pid, signal.SIGTERM) # Make sure we reap our test process self._reap_test() def _reap_test(self): pid, status = os.waitpid(self.pid, 0) self.pid = None return status class ServiceLauncherTest(ServiceTestBase): """Originally from nova/tests/integrated/test_multiprocess_api.py.""" def _spawn(self): self.pid = self._spawn_service(workers=2) # Wait at most 10 seconds to spawn workers cond = lambda: self.workers == len(self._get_workers()) timeout = 10 self._wait(cond, timeout) workers = self._get_workers() self.assertEqual(len(workers), self.workers) return workers def _get_workers(self): f = os.popen('ps ax -o pid,ppid,command') # Skip ps header f.readline() processes = [tuple(int(p) for p in line.strip().split()[:2]) for line in f] return [p for p, pp in processes if pp == self.pid] def test_killed_worker_recover(self): start_workers = self._spawn() # kill one worker and check if new worker can come up LOG.info('pid of first child is %s' % start_workers[0]) os.kill(start_workers[0], signal.SIGTERM) # Wait at most 5 seconds to respawn a worker cond = lambda: start_workers != self._get_workers() timeout = 5 self._wait(cond, timeout) # Make sure worker pids don't match end_workers = self._get_workers() LOG.info('workers: %r' % end_workers) self.assertNotEqual(start_workers, end_workers) def _terminate_with_signal(self, sig): self._spawn() os.kill(self.pid, sig) # Wait at most 5 seconds to kill all workers cond = lambda: not self._get_workers() timeout = 5 self._wait(cond, timeout) workers = self._get_workers() LOG.info('workers: %r' % workers) self.assertFalse(workers, 'No OS processes left.') def test_terminate_sigkill(self): self._terminate_with_signal(signal.SIGKILL) status = self._reap_test() self.assertTrue(os.WIFSIGNALED(status)) self.assertEqual(signal.SIGKILL, os.WTERMSIG(status)) def test_terminate_sigterm(self): self._terminate_with_signal(signal.SIGTERM) status = self._reap_test() self.assertTrue(os.WIFEXITED(status)) self.assertEqual(0, os.WEXITSTATUS(status)) def test_crashed_service(self): service_maker = lambda: ServiceCrashOnStart() self.pid = self._spawn_service(service_maker=service_maker) status = self._reap_test() self.assertTrue(os.WIFEXITED(status)) self.assertEqual(1, os.WEXITSTATUS(status)) def test_child_signal_sighup(self): start_workers = self._spawn() os.kill(start_workers[0], signal.SIGHUP) # Wait at most 5 seconds to respawn a worker cond = lambda: start_workers != self._get_workers() timeout = 5 self._wait(cond, timeout) # Make sure worker pids match end_workers = self._get_workers() LOG.info('workers: %r' % end_workers) 
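# ---- Editor's note: illustrative sketch, not part of oslo.service ----
# The launcher tests in this file follow the pattern below: subclass
# service.Service, then hand an instance to service.launch() together with
# a worker count; with multiple workers a ProcessLauncher forks them and
# respawns any that die (see test_killed_worker_recover above).
# ExampleService, its timer callback, and the conf object are hypothetical.
from oslo_service import service


class ExampleService(service.Service):

    def start(self):
        super(ExampleService, self).start()
        # self.tg is the service's ThreadGroup; timers and threads are
        # added here, e.g. self.tg.add_timer(1, self._tick)


# conf is an oslo.config ConfigOpts instance:
#     launcher = service.launch(conf, ExampleService(), workers=2)
#     launcher.wait()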
self.assertEqual(start_workers, end_workers) def test_parent_signal_sighup(self): start_workers = self._spawn() os.kill(self.pid, signal.SIGHUP) def cond(): workers = self._get_workers() return (len(workers) == len(start_workers) and not set(start_workers).intersection(workers)) # Wait at most 5 seconds to respawn a worker timeout = 10 self._wait(cond, timeout) self.assertTrue(cond()) class ServiceRestartTest(ServiceTestBase): def _spawn(self): ready_event = multiprocessing.Event() service_maker = lambda: ServiceWithTimer(ready_event=ready_event) self.pid = self._spawn_service(service_maker=service_maker) return ready_event def test_service_restart(self): ready = self._spawn() timeout = 5 ready.wait(timeout) self.assertTrue(ready.is_set(), 'Service never became ready') ready.clear() os.kill(self.pid, signal.SIGHUP) ready.wait(timeout) self.assertTrue(ready.is_set(), 'Service never back after SIGHUP') def test_terminate_sigterm(self): ready = self._spawn() timeout = 5 ready.wait(timeout) self.assertTrue(ready.is_set(), 'Service never became ready') os.kill(self.pid, signal.SIGTERM) status = self._reap_test() self.assertTrue(os.WIFEXITED(status)) self.assertEqual(0, os.WEXITSTATUS(status)) def test_mutate_hook_service_launcher(self): """Test mutate_config_files is called by ServiceLauncher on SIGHUP. Not using _spawn_service because ServiceLauncher doesn't fork and it's simplest to stay all in one process. """ mutate = multiprocessing.Event() self.conf.register_mutate_hook(lambda c, f: mutate.set()) launcher = service.launch( self.conf, ServiceWithTimer(), restart_method='mutate') self.assertFalse(mutate.is_set(), "Hook was called too early") launcher.restart() self.assertTrue(mutate.is_set(), "Hook wasn't called") def test_mutate_hook_process_launcher(self): """Test mutate_config_files is called by ProcessLauncher on SIGHUP. Forks happen in _spawn_service and ProcessLauncher. So we get three tiers of processes, the top tier being the test process. self.pid refers to the middle tier, which represents our application. Both service_maker and launcher_maker execute in the middle tier. The bottom tier is the workers. The behavior we want is that when the application (middle tier) receives a SIGHUP, it catches that, calls mutate_config_files and relaunches all the workers. This causes them to inherit the mutated config. 
""" mutate = multiprocessing.Event() ready = multiprocessing.Event() def service_maker(): self.conf.register_mutate_hook(lambda c, f: mutate.set()) return ServiceWithTimer(ready) def launcher_maker(): return service.ProcessLauncher(self.conf, restart_method='mutate') self.pid = self._spawn_service(1, service_maker, launcher_maker) timeout = 5 ready.wait(timeout) self.assertTrue(ready.is_set(), 'Service never became ready') ready.clear() self.assertFalse(mutate.is_set(), "Hook was called too early") os.kill(self.pid, signal.SIGHUP) ready.wait(timeout) self.assertTrue(ready.is_set(), 'Service never back after SIGHUP') self.assertTrue(mutate.is_set(), "Hook wasn't called") class _Service(service.Service): def __init__(self): super(_Service, self).__init__() self.init = event.Event() self.cleaned_up = False def start(self): self.init.send() def stop(self): self.cleaned_up = True super(_Service, self).stop() class LauncherTest(base.ServiceBaseTestCase): def test_graceful_shutdown(self): # test that services are given a chance to clean up: svc = _Service() launcher = service.launch(self.conf, svc) # wait on 'init' so we know the service had time to start: svc.init.wait() launcher.stop() self.assertTrue(svc.cleaned_up) # make sure stop can be called more than once. (i.e. play nice with # unit test fixtures in nova bug #1199315) launcher.stop() @mock.patch('oslo_service.service.ServiceLauncher.launch_service') def _test_launch_single(self, workers, mock_launch): svc = service.Service() service.launch(self.conf, svc, workers=workers) mock_launch.assert_called_with(svc, workers=workers) def test_launch_none(self): self._test_launch_single(None) def test_launch_one_worker(self): self._test_launch_single(1) def test_launch_invalid_workers_number(self): svc = service.Service() for num_workers in [0, -1]: self.assertRaises(ValueError, service.launch, self.conf, svc, num_workers) for num_workers in ["0", "a", "1"]: self.assertRaises(TypeError, service.launch, self.conf, svc, num_workers) @mock.patch('signal.alarm') @mock.patch('oslo_service.service.ProcessLauncher.launch_service') def test_multiple_worker(self, mock_launch, alarm_mock): svc = service.Service() service.launch(self.conf, svc, workers=3) mock_launch.assert_called_with(svc, workers=3) def test_launch_wrong_service_base_class(self): # check that services that do not subclass service.ServiceBase # can not be launched. 
svc = mock.Mock() self.assertRaises(TypeError, service.launch, self.conf, svc) @mock.patch('signal.alarm') @mock.patch("oslo_service.service.Services.add") @mock.patch("oslo_service.eventlet_backdoor.initialize_if_enabled") def test_check_service_base(self, initialize_if_enabled_mock, services_mock, alarm_mock): initialize_if_enabled_mock.return_value = None launcher = service.Launcher(self.conf) serv = _Service() launcher.launch_service(serv) @mock.patch('signal.alarm') @mock.patch("oslo_service.service.Services.add") @mock.patch("oslo_service.eventlet_backdoor.initialize_if_enabled") def test_check_service_base_fails(self, initialize_if_enabled_mock, services_mock, alarm_mock): initialize_if_enabled_mock.return_value = None launcher = service.Launcher(self.conf) class FooService(object): def __init__(self): pass serv = FooService() self.assertRaises(TypeError, launcher.launch_service, serv) class ProcessLauncherTest(base.ServiceBaseTestCase): @mock.patch('signal.alarm') @mock.patch("signal.signal") def test_stop(self, signal_mock, alarm_mock): signal_mock.SIGTERM = 15 launcher = service.ProcessLauncher(self.conf) self.assertTrue(launcher.running) pid_nums = [22, 222] fakeServiceWrapper = service.ServiceWrapper(service.Service(), 1) launcher.children = {pid_nums[0]: fakeServiceWrapper, pid_nums[1]: fakeServiceWrapper} with mock.patch('oslo_service.service.os.kill') as mock_kill: with mock.patch.object(launcher, '_wait_child') as _wait_child: def fake_wait_child(): pid = pid_nums.pop() return launcher.children.pop(pid) _wait_child.side_effect = fake_wait_child with mock.patch('oslo_service.service.Service.stop') as \ mock_service_stop: mock_service_stop.side_effect = lambda: None launcher.stop() self.assertFalse(launcher.running) self.assertFalse(launcher.children) mock_kill.assert_has_calls([mock.call(222, signal_mock.SIGTERM), mock.call(22, signal_mock.SIGTERM)], any_order=True) self.assertEqual(2, mock_kill.call_count) mock_service_stop.assert_called_once_with() def test__handle_signal(self): signal_handler = service.SignalHandler() signal_handler.clear() self.assertEqual(0, len(signal_handler._signal_handlers[signal.SIGTERM])) call_1, call_2 = mock.Mock(), mock.Mock() signal_handler.add_handler('SIGTERM', call_1) signal_handler.add_handler('SIGTERM', call_2) self.assertEqual(2, len(signal_handler._signal_handlers[signal.SIGTERM])) signal_handler._handle_signal(signal.SIGTERM, 'test') # execute pending eventlet callbacks time.sleep(0) for m in signal_handler._signal_handlers[signal.SIGTERM]: m.assert_called_once_with(signal.SIGTERM, 'test') signal_handler.clear() def test_setup_signal_interruption_no_select_poll(self): # NOTE(claudiub): SignalHandler is a singleton, which means that it # might already be initialized. We need to clear to clear the cache # in order to prevent race conditions between tests. service.SignalHandler.__class__._instances.clear() with mock.patch('eventlet.patcher.original', return_value=object()) as get_original: signal_handler = service.SignalHandler() get_original.assert_called_with('select') self.addCleanup(service.SignalHandler.__class__._instances.clear) self.assertFalse( signal_handler._SignalHandler__force_interrupt_on_signal) def test_setup_signal_interruption_select_poll(self): # NOTE(claudiub): SignalHandler is a singleton, which means that it # might already be initialized. We need to clear to clear the cache # in order to prevent race conditions between tests. 
service.SignalHandler.__class__._instances.clear() signal_handler = service.SignalHandler() self.addCleanup(service.SignalHandler.__class__._instances.clear) self.assertTrue( signal_handler._SignalHandler__force_interrupt_on_signal) @mock.patch('signal.alarm') @mock.patch("os.kill") @mock.patch("oslo_service.service.ProcessLauncher.stop") @mock.patch("oslo_service.service.ProcessLauncher._respawn_children") @mock.patch("oslo_service.service.ProcessLauncher.handle_signal") @mock.patch("oslo_config.cfg.CONF.log_opt_values") @mock.patch("oslo_service.systemd.notify_once") @mock.patch("oslo_config.cfg.CONF.reload_config_files") @mock.patch("oslo_service.service._is_sighup_and_daemon") def test_parent_process_reload_config(self, is_sighup_and_daemon_mock, reload_config_files_mock, notify_once_mock, log_opt_values_mock, handle_signal_mock, respawn_children_mock, stop_mock, kill_mock, alarm_mock): is_sighup_and_daemon_mock.return_value = True respawn_children_mock.side_effect = [None, eventlet.greenlet.GreenletExit()] launcher = service.ProcessLauncher(self.conf) launcher.sigcaught = 1 launcher.children = {} wrap_mock = mock.Mock() launcher.children[222] = wrap_mock launcher.wait() reload_config_files_mock.assert_called_once_with() wrap_mock.service.reset.assert_called_once_with() @mock.patch("oslo_service.service.ProcessLauncher._start_child") @mock.patch("oslo_service.service.ProcessLauncher.handle_signal") @mock.patch("eventlet.greenio.GreenPipe") @mock.patch("os.pipe") def test_check_service_base(self, pipe_mock, green_pipe_mock, handle_signal_mock, start_child_mock): pipe_mock.return_value = [None, None] launcher = service.ProcessLauncher(self.conf) serv = _Service() launcher.launch_service(serv, workers=0) @mock.patch("oslo_service.service.ProcessLauncher._start_child") @mock.patch("oslo_service.service.ProcessLauncher.handle_signal") @mock.patch("eventlet.greenio.GreenPipe") @mock.patch("os.pipe") def test_check_service_base_fails(self, pipe_mock, green_pipe_mock, handle_signal_mock, start_child_mock): pipe_mock.return_value = [None, None] launcher = service.ProcessLauncher(self.conf) class FooService(object): def __init__(self): pass serv = FooService() self.assertRaises(TypeError, launcher.launch_service, serv, 0) @mock.patch("oslo_service.service.ProcessLauncher._start_child") @mock.patch("oslo_service.service.ProcessLauncher.handle_signal") @mock.patch("eventlet.greenio.GreenPipe") @mock.patch("os.pipe") def test_double_sighup(self, pipe_mock, green_pipe_mock, handle_signal_mock, start_child_mock): # Test that issuing two SIGHUPs in a row does not exit; then send a # TERM that does cause an exit. 
pipe_mock.return_value = [None, None] launcher = service.ProcessLauncher(self.conf) serv = _Service() launcher.launch_service(serv, workers=0) def stager(): # -1: start state # 0: post-init # 1: first HUP sent # 2: second HUP sent # 3: TERM sent stager.stage += 1 if stager.stage < 3: launcher._handle_hup(1, mock.sentinel.frame) elif stager.stage == 3: launcher._handle_term(15, mock.sentinel.frame) else: self.fail("TERM did not kill launcher") stager.stage = -1 handle_signal_mock.side_effect = stager launcher.wait() self.assertEqual(3, stager.stage) class GracefulShutdownTestService(service.Service): def __init__(self): super(GracefulShutdownTestService, self).__init__() self.finished_task = event.Event() def start(self, sleep_amount): def sleep_and_send(finish_event): time.sleep(sleep_amount) finish_event.send() self.tg.add_thread(sleep_and_send, self.finished_task) def exercise_graceful_test_service(sleep_amount, time_to_wait, graceful): svc = GracefulShutdownTestService() svc.start(sleep_amount) svc.stop(graceful) def wait_for_task(svc): svc.finished_task.wait() return eventlet.timeout.with_timeout(time_to_wait, wait_for_task, svc=svc, timeout_value="Timeout!") class ServiceTest(test_base.BaseTestCase): def test_graceful_stop(self): # Here we wait long enough for the task to gracefully finish. self.assertIsNone(exercise_graceful_test_service(1, 2, True)) def test_ungraceful_stop(self): # Here we stop ungracefully, and will never see the task finish. self.assertEqual("Timeout!", exercise_graceful_test_service(1, 2, False)) class EventletServerProcessLauncherTest(base.ServiceBaseTestCase): def setUp(self): super(EventletServerProcessLauncherTest, self).setUp() self.conf(args=[], default_config_files=[]) self.addCleanup(self.conf.reset) self.workers = 3 def run_server(self): queue = multiprocessing.Queue() # NOTE(bnemec): process_time of 5 needs to be longer than the graceful # shutdown timeout in the "exceeded" test below, but also needs to be # shorter than the timeout in the regular graceful shutdown test. proc = multiprocessing.Process(target=eventlet_service.run, args=(queue,), kwargs={'workers': self.workers, 'process_time': 5}) proc.start() port = queue.get() conn = socket.create_connection(('127.0.0.1', port)) # Send request to make the connection active. conn.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') # NOTE(blk-u): The sleep shouldn't be necessary. There must be a bug in # the server implementation where it takes some time to set up the # server or signal handlers. time.sleep(1) return (proc, conn) def test_shuts_down_on_sigint_when_client_connected(self): proc, conn = self.run_server() # check that server is live self.assertTrue(proc.is_alive()) # send SIGINT to the server and wait for it to exit while client still # connected. os.kill(proc.pid, signal.SIGINT) proc.join() conn.close() def test_graceful_shuts_down_on_sigterm_when_client_connected(self): self.config(graceful_shutdown_timeout=7) proc, conn = self.run_server() # send SIGTERM to the server and wait for it to exit while client still # connected. os.kill(proc.pid, signal.SIGTERM) # server with graceful shutdown must wait forever if # option graceful_shutdown_timeout is not specified. # we can not wait forever ... so 1 second is enough. # NOTE(bnemec): In newer versions of eventlet that drop idle # connections, this needs to be long enough to allow the signal # handler to fire but short enough that our request doesn't complete # or the connection will be closed and the server will stop. 
time.sleep(1) self.assertTrue(proc.is_alive()) conn.close() proc.join() def test_graceful_stop_with_exceeded_graceful_shutdown_timeout(self): # Server must exit if graceful_shutdown_timeout exceeded graceful_shutdown_timeout = 4 self.config(graceful_shutdown_timeout=graceful_shutdown_timeout) proc, conn = self.run_server() time_before = time.time() os.kill(proc.pid, signal.SIGTERM) self.assertTrue(proc.is_alive()) proc.join() self.assertFalse(proc.is_alive()) time_after = time.time() self.assertTrue(time_after - time_before > graceful_shutdown_timeout) class EventletServerServiceLauncherTest(EventletServerProcessLauncherTest): def setUp(self): super(EventletServerServiceLauncherTest, self).setUp() self.workers = 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_sslutils.py0000664000175000017500000001321100000000000023303 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import ssl from unittest import mock from oslo_config import cfg from oslo_service import sslutils from oslo_service.tests import base CONF = cfg.CONF SSL_CERT_DIR = os.path.normpath(os.path.join( os.path.dirname(os.path.abspath(__file__)), 'ssl_cert')) class SslutilsTestCase(base.ServiceBaseTestCase): """Test cases for sslutils.""" def setUp(self): super(SslutilsTestCase, self).setUp() self.cert_file_name = os.path.join(SSL_CERT_DIR, 'certificate.crt') self.key_file_name = os.path.join(SSL_CERT_DIR, 'privatekey.key') self.ca_file_name = os.path.join(SSL_CERT_DIR, 'ca.crt') @mock.patch("%s.RuntimeError" % RuntimeError.__module__) @mock.patch("os.path.exists") def test_is_enabled(self, exists_mock, runtime_error_mock): exists_mock.return_value = True self.conf.set_default("cert_file", self.cert_file_name, group=sslutils.config_section) self.conf.set_default("key_file", self.key_file_name, group=sslutils.config_section) self.conf.set_default("ca_file", self.ca_file_name, group=sslutils.config_section) sslutils.is_enabled(self.conf) self.assertFalse(runtime_error_mock.called) @mock.patch("os.path.exists") def test_is_enabled_no_ssl_cert_file_fails(self, exists_mock): exists_mock.side_effect = [False] self.conf.set_default("cert_file", "/no/such/file", group=sslutils.config_section) self.assertRaises(RuntimeError, sslutils.is_enabled, self.conf) @mock.patch("os.path.exists") def test_is_enabled_no_ssl_key_file_fails(self, exists_mock): exists_mock.side_effect = [True, False] self.conf.set_default("cert_file", self.cert_file_name, group=sslutils.config_section) self.conf.set_default("key_file", "/no/such/file", group=sslutils.config_section) self.assertRaises(RuntimeError, sslutils.is_enabled, self.conf) @mock.patch("os.path.exists") def test_is_enabled_no_ssl_ca_file_fails(self, exists_mock): exists_mock.side_effect = [True, True, False] self.conf.set_default("cert_file", self.cert_file_name, group=sslutils.config_section) self.conf.set_default("key_file", 
self.key_file_name, group=sslutils.config_section) self.conf.set_default("ca_file", "/no/such/file", group=sslutils.config_section) self.assertRaises(RuntimeError, sslutils.is_enabled, self.conf) @mock.patch("ssl.wrap_socket") @mock.patch("os.path.exists") def _test_wrap(self, exists_mock, wrap_socket_mock, **kwargs): exists_mock.return_value = True sock = mock.Mock() self.conf.set_default("cert_file", self.cert_file_name, group=sslutils.config_section) self.conf.set_default("key_file", self.key_file_name, group=sslutils.config_section) ssl_kwargs = {'server_side': True, 'certfile': self.conf.ssl.cert_file, 'keyfile': self.conf.ssl.key_file, 'cert_reqs': ssl.CERT_NONE, } if kwargs: ssl_kwargs.update(**kwargs) sslutils.wrap(self.conf, sock) wrap_socket_mock.assert_called_once_with(sock, **ssl_kwargs) def test_wrap(self): self._test_wrap() def test_wrap_ca_file(self): self.conf.set_default("ca_file", self.ca_file_name, group=sslutils.config_section) ssl_kwargs = {'ca_certs': self.conf.ssl.ca_file, 'cert_reqs': ssl.CERT_REQUIRED } self._test_wrap(**ssl_kwargs) def test_wrap_ciphers(self): self.conf.set_default("ca_file", self.ca_file_name, group=sslutils.config_section) ciphers = ( 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+' 'AES:ECDH+HIGH:DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:' 'RSA+HIGH:RSA+3DES:!aNULL:!eNULL:!MD5:!DSS:!RC4' ) self.conf.set_default("ciphers", ciphers, group=sslutils.config_section) ssl_kwargs = {'ca_certs': self.conf.ssl.ca_file, 'cert_reqs': ssl.CERT_REQUIRED, 'ciphers': ciphers} self._test_wrap(**ssl_kwargs) def test_wrap_ssl_version(self): self.conf.set_default("ca_file", self.ca_file_name, group=sslutils.config_section) self.conf.set_default("version", "tlsv1", group=sslutils.config_section) ssl_kwargs = {'ca_certs': self.conf.ssl.ca_file, 'cert_reqs': ssl.CERT_REQUIRED, 'ssl_version': ssl.PROTOCOL_TLSv1} self._test_wrap(**ssl_kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_systemd.py0000664000175000017500000000502400000000000023114 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
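# ---- Editor's note: illustrative sketch, not part of oslo.service ----
# Summarising the sslutils tests above: certificate paths live in the
# [ssl] config group, is_enabled() checks that the configured files exist
# (raising RuntimeError if one is missing), and wrap() returns the socket
# wrapped for server-side TLS. The helper below is hypothetical.
from oslo_service import sslutils


def wrap_if_configured(conf, sock):
    # conf is an oslo.config ConfigOpts with cert_file/key_file (and
    # optionally ca_file, ciphers, version) set in the [ssl] group.
    if sslutils.is_enabled(conf):
        return sslutils.wrap(conf, sock)
    return sock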
import os import socket from unittest import mock from oslotest import base as test_base from oslo_service import systemd class SystemdTestCase(test_base.BaseTestCase): """Test case for Systemd service readiness.""" def test__abstractify(self): sock_name = '@fake_socket' res = systemd._abstractify(sock_name) self.assertEqual('\0{0}'.format(sock_name[1:]), res) @mock.patch.object(os, 'getenv', return_value='@fake_socket') def _test__sd_notify(self, getenv_mock, unset_env=False): self.ready = False self.closed = False class FakeSocket(object): def __init__(self, family, type): pass def connect(fs, socket): pass def close(fs): self.closed = True def sendall(fs, data): if data == b'READY=1': self.ready = True with mock.patch.object(socket, 'socket', new=FakeSocket): if unset_env: systemd.notify_once() else: systemd.notify() self.assertTrue(self.ready) self.assertTrue(self.closed) def test_notify(self): self._test__sd_notify() def test_notify_once(self): os.environ['NOTIFY_SOCKET'] = '@fake_socket' self._test__sd_notify(unset_env=True) self.assertRaises(KeyError, os.environ.__getitem__, 'NOTIFY_SOCKET') @mock.patch("socket.socket") def test_onready(self, sock_mock): recv_results = [b'READY=1', '', socket.timeout] expected_results = [0, 1, 2] for recv, expected in zip(recv_results, expected_results): if recv == socket.timeout: sock_mock.return_value.recv.side_effect = recv else: sock_mock.return_value.recv.return_value = recv actual = systemd.onready('@fake_socket', 1) self.assertEqual(expected, actual) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_threadgroup.py0000664000175000017500000001406300000000000023753 0ustar00zuulzuul00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
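# ---- Editor's note: illustrative sketch, not part of oslo.service ----
# Summarising test_systemd.py above: notify() and notify_once() send
# "READY=1" over the socket named by $NOTIFY_SOCKET (notify_once also
# removes the variable so child processes do not re-notify), and
# onready(notify_socket, timeout) returns 0 when ready, 1 when not ready
# and 2 on timeout. The helper below is hypothetical.
from oslo_service import systemd


def mark_service_ready():
    # Call once after start-up has finished; this is a no-op when the
    # process is not running under systemd (NOTIFY_SOCKET unset).
    systemd.notify_once()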
""" Unit Tests for thread groups """ import time from eventlet import event from oslotest import base as test_base from oslo_service import threadgroup class ThreadGroupTestCase(test_base.BaseTestCase): """Test cases for thread group.""" def setUp(self): super(ThreadGroupTestCase, self).setUp() self.tg = threadgroup.ThreadGroup() self.addCleanup(self.tg.stop) def test_add_dynamic_timer(self): def foo(*args, **kwargs): pass initial_delay = 1 periodic_interval_max = 2 self.tg.add_dynamic_timer(foo, initial_delay, periodic_interval_max, 'arg', kwarg='kwarg') self.assertEqual(1, len(self.tg.timers)) timer = self.tg.timers[0] self.assertTrue(timer._running) self.assertEqual(('arg',), timer.args) self.assertEqual({'kwarg': 'kwarg'}, timer.kw) def test_add_dynamic_timer_args(self): def foo(*args, **kwargs): pass self.tg.add_dynamic_timer_args(foo, ['arg'], {'kwarg': 'kwarg'}, initial_delay=1, periodic_interval_max=2, stop_on_exception=False) self.assertEqual(1, len(self.tg.timers)) timer = self.tg.timers[0] self.assertTrue(timer._running) self.assertEqual(('arg',), timer.args) self.assertEqual({'kwarg': 'kwarg'}, timer.kw) def test_add_timer(self): def foo(*args, **kwargs): pass self.tg.add_timer(1, foo, 1, 'arg', kwarg='kwarg') self.assertEqual(1, len(self.tg.timers)) timer = self.tg.timers[0] self.assertTrue(timer._running) self.assertEqual(('arg',), timer.args) self.assertEqual({'kwarg': 'kwarg'}, timer.kw) def test_add_timer_args(self): def foo(*args, **kwargs): pass self.tg.add_timer_args(1, foo, ['arg'], {'kwarg': 'kwarg'}, initial_delay=1, stop_on_exception=False) self.assertEqual(1, len(self.tg.timers)) timer = self.tg.timers[0] self.assertTrue(timer._running) self.assertEqual(('arg',), timer.args) self.assertEqual({'kwarg': 'kwarg'}, timer.kw) def test_stop_current_thread(self): stop_event = event.Event() quit_event = event.Event() def stop_self(*args, **kwargs): if args[0] == 1: time.sleep(1) self.tg.stop() stop_event.send('stop_event') quit_event.wait() for i in range(0, 4): self.tg.add_thread(stop_self, i, kwargs='kwargs') stop_event.wait() self.assertEqual(1, len(self.tg.threads)) quit_event.send('quit_event') def test_stop_immediately(self): def foo(*args, **kwargs): time.sleep(1) start_time = time.time() self.tg.add_thread(foo, 'arg', kwarg='kwarg') time.sleep(0) self.tg.stop() end_time = time.time() self.assertEqual(0, len(self.tg.threads)) self.assertTrue(end_time - start_time < 1) self.assertEqual(0, len(self.tg.timers)) def test_stop_gracefully(self): def foo(*args, **kwargs): time.sleep(1) start_time = time.time() self.tg.add_thread(foo, 'arg', kwarg='kwarg') self.tg.stop(True) end_time = time.time() self.assertEqual(0, len(self.tg.threads)) self.assertTrue(end_time - start_time >= 1) self.assertEqual(0, len(self.tg.timers)) def test_cancel_early(self): def foo(*args, **kwargs): time.sleep(1) self.tg.add_thread(foo, 'arg', kwarg='kwarg') self.tg.cancel() self.assertEqual(0, len(self.tg.threads)) def test_cancel_late(self): def foo(*args, **kwargs): time.sleep(0.3) self.tg.add_thread(foo, 'arg', kwarg='kwarg') time.sleep(0) self.tg.cancel() self.assertEqual(1, len(self.tg.threads)) def test_cancel_timeout(self): def foo(*args, **kwargs): time.sleep(0.3) self.tg.add_thread(foo, 'arg', kwarg='kwarg') time.sleep(0) self.tg.cancel(timeout=0.2, wait_time=0.1) self.assertEqual(0, len(self.tg.threads)) def test_stop_timers(self): def foo(*args, **kwargs): pass self.tg.add_timer('1234', foo) self.assertEqual(1, len(self.tg.timers)) self.tg.stop_timers() self.assertEqual(0, 
len(self.tg.timers)) def test_add_and_remove_timer(self): def foo(*args, **kwargs): pass timer = self.tg.add_timer('1234', foo) self.assertEqual(1, len(self.tg.timers)) timer.stop() self.assertEqual(1, len(self.tg.timers)) self.tg.timer_done(timer) self.assertEqual(0, len(self.tg.timers)) def test_add_and_remove_dynamic_timer(self): def foo(*args, **kwargs): pass initial_delay = 1 periodic_interval_max = 2 timer = self.tg.add_dynamic_timer(foo, initial_delay, periodic_interval_max) self.assertEqual(1, len(self.tg.timers)) self.assertTrue(timer._running) timer.stop() self.assertEqual(1, len(self.tg.timers)) self.tg.timer_done(timer) self.assertEqual(0, len(self.tg.timers)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/tests/test_wsgi.py0000664000175000017500000003412300000000000022377 0ustar00zuulzuul00000000000000# Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for `wsgi`.""" import os import platform import socket import tempfile import testtools from unittest import mock import eventlet import eventlet.wsgi import requests import webob from oslo_config import cfg from oslo_service import sslutils from oslo_service.tests import base from oslo_service import wsgi from oslo_utils import netutils SSL_CERT_DIR = os.path.normpath(os.path.join( os.path.dirname(os.path.abspath(__file__)), 'ssl_cert')) CONF = cfg.CONF class WsgiTestCase(base.ServiceBaseTestCase): """Base class for WSGI tests.""" def setUp(self): super(WsgiTestCase, self).setUp() self.conf(args=[], default_config_files=[]) class TestLoaderNothingExists(WsgiTestCase): """Loader tests where os.path.exists always returns False.""" def setUp(self): super(TestLoaderNothingExists, self).setUp() mock_patcher = mock.patch.object(os.path, 'exists', lambda _: False) mock_patcher.start() self.addCleanup(mock_patcher.stop) def test_relpath_config_not_found(self): self.config(api_paste_config='api-paste.ini') self.assertRaises( wsgi.ConfigNotFound, wsgi.Loader, self.conf ) def test_asbpath_config_not_found(self): self.config(api_paste_config='/etc/openstack-srv/api-paste.ini') self.assertRaises( wsgi.ConfigNotFound, wsgi.Loader, self.conf ) class TestLoaderNormalFilesystem(WsgiTestCase): """Loader tests with normal filesystem (unmodified os.path module).""" _paste_config = """ [app:test_app] use = egg:Paste#static document_root = /tmp """ def setUp(self): super(TestLoaderNormalFilesystem, self).setUp() self.paste_config = tempfile.NamedTemporaryFile(mode="w+t") self.paste_config.write(self._paste_config.lstrip()) self.paste_config.seek(0) self.paste_config.flush() self.config(api_paste_config=self.paste_config.name) self.loader = wsgi.Loader(CONF) def test_config_found(self): self.assertEqual(self.paste_config.name, self.loader.config_path) def test_app_not_found(self): self.assertRaises( 
wsgi.PasteAppNotFound, self.loader.load_app, "nonexistent app", ) def test_app_found(self): url_parser = self.loader.load_app("test_app") self.assertEqual("/tmp", url_parser.directory) def tearDown(self): self.paste_config.close() super(TestLoaderNormalFilesystem, self).tearDown() class TestWSGIServer(WsgiTestCase): """WSGI server tests.""" def setUp(self): super(TestWSGIServer, self).setUp() def test_no_app(self): server = wsgi.Server(self.conf, "test_app", None) self.assertEqual("test_app", server.name) def test_custom_max_header_line(self): self.config(max_header_line=4096) # Default value is 16384 wsgi.Server(self.conf, "test_custom_max_header_line", None) self.assertEqual(eventlet.wsgi.MAX_HEADER_LINE, self.conf.max_header_line) def test_start_random_port(self): server = wsgi.Server(self.conf, "test_random_port", None, host="127.0.0.1", port=0) server.start() self.assertNotEqual(0, server.port) server.stop() server.wait() @testtools.skipIf(not netutils.is_ipv6_enabled(), "no ipv6 support") def test_start_random_port_with_ipv6(self): server = wsgi.Server(self.conf, "test_random_port", None, host="::1", port=0) server.start() self.assertEqual("::1", server.host) self.assertNotEqual(0, server.port) server.stop() server.wait() @testtools.skipIf(platform.mac_ver()[0] != '', 'SO_REUSEADDR behaves differently ' 'on OSX, see bug 1436895') def test_socket_options_for_simple_server(self): # test normal socket options has set properly self.config(tcp_keepidle=500) server = wsgi.Server(self.conf, "test_socket_options", None, host="127.0.0.1", port=0) server.start() sock = server.socket self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)) self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)) if hasattr(socket, 'TCP_KEEPIDLE'): self.assertEqual(self.conf.tcp_keepidle, sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE)) self.assertFalse(server._server.dead) server.stop() server.wait() self.assertTrue(server._server.dead) @testtools.skipIf(not hasattr(socket, "AF_UNIX"), 'UNIX sockets not supported') def test_server_with_unix_socket(self): socket_file = self.get_temp_file_path('sock') socket_mode = 0o644 server = wsgi.Server(self.conf, "test_socket_options", None, socket_family=socket.AF_UNIX, socket_mode=socket_mode, socket_file=socket_file) self.assertEqual(socket_file, server.socket.getsockname()) self.assertEqual(socket_mode, os.stat(socket_file).st_mode & 0o777) server.start() self.assertFalse(server._server.dead) server.stop() server.wait() self.assertTrue(server._server.dead) def test_server_pool_waitall(self): # test pools waitall method gets called while stopping server server = wsgi.Server(self.conf, "test_server", None, host="127.0.0.1") server.start() with mock.patch.object(server._pool, 'waitall') as mock_waitall: server.stop() server.wait() mock_waitall.assert_called_once_with() def test_uri_length_limit(self): eventlet.monkey_patch(os=False, thread=False) server = wsgi.Server(self.conf, "test_uri_length_limit", None, host="127.0.0.1", max_url_len=16384, port=33337) server.start() self.assertFalse(server._server.dead) uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x') resp = requests.get(uri, proxies={"http": ""}) eventlet.sleep(0) self.assertNotEqual(requests.codes.REQUEST_URI_TOO_LARGE, resp.status_code) uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x') resp = requests.get(uri, proxies={"http": ""}) eventlet.sleep(0) self.assertEqual(requests.codes.REQUEST_URI_TOO_LARGE, resp.status_code) server.stop() 
server.wait() def test_reset_pool_size_to_default(self): server = wsgi.Server(self.conf, "test_resize", None, host="127.0.0.1", max_url_len=16384) server.start() # Stopping the server, which in turn sets pool size to 0 server.stop() self.assertEqual(0, server._pool.size) # Resetting pool size to default server.reset() server.start() self.assertEqual(CONF.wsgi_default_pool_size, server._pool.size) def test_client_socket_timeout(self): self.config(client_socket_timeout=5) # mocking eventlet spawn method to check it is called with # configured 'client_socket_timeout' value. with mock.patch.object(eventlet, 'spawn') as mock_spawn: server = wsgi.Server(self.conf, "test_app", None, host="127.0.0.1", port=0) server.start() _, kwargs = mock_spawn.call_args self.assertEqual(self.conf.client_socket_timeout, kwargs['socket_timeout']) server.stop() def test_wsgi_keep_alive(self): self.config(wsgi_keep_alive=False) # mocking eventlet spawn method to check it is called with # configured 'wsgi_keep_alive' value. with mock.patch.object(eventlet, 'spawn') as mock_spawn: server = wsgi.Server(self.conf, "test_app", None, host="127.0.0.1", port=0) server.start() _, kwargs = mock_spawn.call_args self.assertEqual(self.conf.wsgi_keep_alive, kwargs['keepalive']) server.stop() def requesting(host, port, ca_certs=None, method="POST", content_type="application/x-www-form-urlencoded", address_familly=socket.AF_INET): frame = bytes("{verb} / HTTP/1.1\r\n\r\n".format(verb=method), "utf-8") with socket.socket(address_familly, socket.SOCK_STREAM) as sock: if ca_certs: with eventlet.wrap_ssl(sock, ca_certs=ca_certs) as wrappedSocket: wrappedSocket.connect((host, port)) wrappedSocket.send(frame) data = wrappedSocket.recv(1024).decode() return data else: sock.connect((host, port)) sock.send(frame) data = sock.recv(1024).decode() return data class TestWSGIServerWithSSL(WsgiTestCase): """WSGI server with SSL tests.""" def setUp(self): super(TestWSGIServerWithSSL, self).setUp() cert_file_name = os.path.join(SSL_CERT_DIR, 'certificate.crt') key_file_name = os.path.join(SSL_CERT_DIR, 'privatekey.key') eventlet.monkey_patch(os=False, thread=False) self.host = "127.0.0.1" self.config(cert_file=cert_file_name, key_file=key_file_name, group=sslutils.config_section) def test_ssl_server(self): def test_app(env, start_response): start_response('200 OK', {}) return ['PONG'] fake_ssl_server = wsgi.Server(self.conf, "fake_ssl", test_app, host=self.host, port=0, use_ssl=True) fake_ssl_server.start() self.assertNotEqual(0, fake_ssl_server.port) response = requesting( method='GET', host=self.host, port=fake_ssl_server.port, ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'), ) self.assertEqual('PONG', response[-4:]) fake_ssl_server.stop() fake_ssl_server.wait() def test_two_servers(self): def test_app(env, start_response): start_response('200 OK', {}) return ['PONG'] fake_ssl_server = wsgi.Server(self.conf, "fake_ssl", test_app, host="127.0.0.1", port=0, use_ssl=True) fake_ssl_server.start() self.assertNotEqual(0, fake_ssl_server.port) fake_server = wsgi.Server(self.conf, "fake", test_app, host="127.0.0.1", port=0) fake_server.start() self.assertNotEqual(0, fake_server.port) response = requesting( method='GET', host='127.0.0.1', port=fake_ssl_server.port, ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'), ) self.assertEqual('PONG', response[-4:]) response = requesting( method='GET', host='127.0.0.1', port=fake_server.port, ) self.assertEqual('PONG', response[-4:]) fake_ssl_server.stop() fake_ssl_server.wait() fake_server.stop() fake_server.wait() 
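# ---- Editor's note: illustrative sketch, not part of oslo.service ----
# The wsgi.Server tests above construct the server directly: pass a WSGI
# application callable plus host/port (port=0 picks a free port, exposed
# afterwards as server.port), optionally use_ssl=True with the [ssl]
# options configured, then start()/stop()/wait(). The application below
# and the conf object are hypothetical.
from oslo_service import wsgi


def ping_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'PONG']


# conf is an oslo.config ConfigOpts instance:
#     server = wsgi.Server(conf, "example", ping_app,
#                          host="127.0.0.1", port=0)
#     server.start()   # server.port now holds the bound port
#     server.stop()
#     server.wait()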
@testtools.skipIf(platform.mac_ver()[0] != '', 'SO_REUSEADDR behaves differently ' 'on OSX, see bug 1436895') def test_socket_options_for_ssl_server(self): # test normal socket options has set properly self.config(tcp_keepidle=500) server = wsgi.Server(self.conf, "test_socket_options", None, host="127.0.0.1", port=0, use_ssl=True) server.start() sock = server.socket self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)) self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)) if hasattr(socket, 'TCP_KEEPIDLE'): self.assertEqual(CONF.tcp_keepidle, sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE)) server.stop() server.wait() def test_app_using_ipv6_and_ssl(self): greetings = 'Hello, World!!!' @webob.dec.wsgify def hello_world(req): return greetings server = wsgi.Server(self.conf, "fake_ssl", hello_world, host="::1", port=0, use_ssl=True) server.start() response = requesting( method='GET', host='::1', port=server.port, ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'), address_familly=socket.AF_INET6 ) self.assertEqual(greetings, response[-15:]) server.stop() server.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/threadgroup.py0000664000175000017500000004315100000000000021552 0ustar00zuulzuul00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import threading import warnings from debtcollector import removals import eventlet from eventlet import greenpool from oslo_service import loopingcall from oslo_utils import timeutils LOG = logging.getLogger(__name__) def _on_thread_done(_greenthread, group, thread): """Callback function to be passed to GreenThread.link() when we spawn(). Calls the :class:`ThreadGroup` to notify it to remove this thread from the associated group. """ group.thread_done(thread) class Thread(object): """Wrapper around a greenthread. Holds a reference to the :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when it has done so it can be removed from the threads list. """ def __init__(self, thread, group, link=True): self.thread = thread if link: self.thread.link(_on_thread_done, group, self) self._ident = id(thread) @property def ident(self): return self._ident def stop(self): """Kill the thread by raising GreenletExit within it.""" self.thread.kill() def wait(self): """Block until the thread completes and return the result.""" return self.thread.wait() def link(self, func, *args, **kwargs): """Schedule a function to be run upon completion of the thread.""" self.thread.link(func, *args, **kwargs) def cancel(self, *throw_args): """Prevent the thread from starting if it has not already done so. :param throw_args: the `exc_info` data to raise from :func:`wait`. """ self.thread.cancel(*throw_args) class ThreadGroup(object): """A group of greenthreads and timers. 
The point of the ThreadGroup class is to: * keep track of timers and greenthreads (making it easier to stop them when need be). * provide an easy API to add timers. .. note:: The API is inconsistent, confusing, and not orthogonal. The same verbs often mean different things when applied to timers and threads, respectively. Read the documentation carefully. """ def __init__(self, thread_pool_size=10): """Create a ThreadGroup with a pool of greenthreads. :param thread_pool_size: the maximum number of threads allowed to run concurrently. """ self.pool = greenpool.GreenPool(thread_pool_size) self.threads = [] self.timers = [] def add_dynamic_timer(self, callback, initial_delay=None, periodic_interval_max=None, *args, **kwargs): """Add a timer that controls its own period dynamically. The period of each iteration of the timer is controlled by the return value of the callback function on the previous iteration. .. warning:: Passing arguments to the callback function is deprecated. Use the :func:`add_dynamic_timer_args` method to pass arguments for the callback function. :param callback: The callback function to run when the timer is triggered. :param initial_delay: The delay in seconds before first triggering the timer. If not set, the timer is liable to be scheduled immediately. :param periodic_interval_max: The maximum interval in seconds to allow the callback function to request. If provided, this is also used as the default delay if None is returned by the callback function. :returns: an :class:`oslo_service.loopingcall.DynamicLoopingCall` instance """ if args or kwargs: warnings.warn("Calling add_dynamic_timer() with arguments to the " "callback function is deprecated. Use " "add_dynamic_timer_args() instead.", DeprecationWarning) return self.add_dynamic_timer_args( callback, args, kwargs, initial_delay=initial_delay, periodic_interval_max=periodic_interval_max) def add_dynamic_timer_args(self, callback, args=None, kwargs=None, initial_delay=None, periodic_interval_max=None, stop_on_exception=True): """Add a timer that controls its own period dynamically. The period of each iteration of the timer is controlled by the return value of the callback function on the previous iteration. :param callback: The callback function to run when the timer is triggered. :param args: A list of positional args to the callback function. :param kwargs: A dict of keyword args to the callback function. :param initial_delay: The delay in seconds before first triggering the timer. If not set, the timer is liable to be scheduled immediately. :param periodic_interval_max: The maximum interval in seconds to allow the callback function to request. If provided, this is also used as the default delay if None is returned by the callback function. :param stop_on_exception: Pass ``False`` to have the timer continue running even if the callback function raises an exception. :returns: an :class:`oslo_service.loopingcall.DynamicLoopingCall` instance """ args = args or [] kwargs = kwargs or {} timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) timer.start(initial_delay=initial_delay, periodic_interval_max=periodic_interval_max, stop_on_exception=stop_on_exception) self.timers.append(timer) return timer def add_timer(self, interval, callback, initial_delay=None, *args, **kwargs): """Add a timer with a fixed period. .. warning:: Passing arguments to the callback function is deprecated. Use the :func:`add_timer_args` method to pass arguments for the callback function. 
:param interval: The minimum period in seconds between calls to the callback function. :param callback: The callback function to run when the timer is triggered. :param initial_delay: The delay in seconds before first triggering the timer. If not set, the timer is liable to be scheduled immediately. :returns: an :class:`oslo_service.loopingcall.FixedIntervalLoopingCall` instance """ if args or kwargs: warnings.warn("Calling add_timer() with arguments to the callback " "function is deprecated. Use add_timer_args() " "instead.", DeprecationWarning) return self.add_timer_args(interval, callback, args, kwargs, initial_delay=initial_delay) def add_timer_args(self, interval, callback, args=None, kwargs=None, initial_delay=None, stop_on_exception=True): """Add a timer with a fixed period. :param interval: The minimum period in seconds between calls to the callback function. :param callback: The callback function to run when the timer is triggered. :param args: A list of positional args to the callback function. :param kwargs: A dict of keyword args to the callback function. :param initial_delay: The delay in seconds before first triggering the timer. If not set, the timer is liable to be scheduled immediately. :param stop_on_exception: Pass ``False`` to have the timer continue running even if the callback function raises an exception. :returns: an :class:`oslo_service.loopingcall.FixedIntervalLoopingCall` instance """ args = args or [] kwargs = kwargs or {} pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) pulse.start(interval=interval, initial_delay=initial_delay, stop_on_exception=stop_on_exception) self.timers.append(pulse) return pulse def add_thread(self, callback, *args, **kwargs): """Spawn a new thread. This call will block until capacity is available in the thread pool. After that, it returns immediately (i.e. *before* the new thread is scheduled). :param callback: the function to run in the new thread. :param args: positional arguments to the callback function. :param kwargs: keyword arguments to the callback function. :returns: a :class:`Thread` object """ gt = self.pool.spawn(callback, *args, **kwargs) th = Thread(gt, self, link=False) self.threads.append(th) gt.link(_on_thread_done, self, th) return th def thread_done(self, thread): """Remove a completed thread from the group. This method is automatically called on completion of a thread in the group, and should not be called explicitly. """ self.threads.remove(thread) def timer_done(self, timer): """Remove a timer from the group. :param timer: The timer object returned from :func:`add_timer` or its analogues. """ self.timers.remove(timer) def _perform_action_on_threads(self, action_func, on_error_func): current = threading.current_thread() # Iterate over a copy of self.threads so thread_done doesn't # modify the list while we're iterating for x in self.threads[:]: if x.ident == current.ident: # Don't perform actions on the current thread. continue try: action_func(x) except eventlet.greenlet.GreenletExit: # nosec # greenlet exited successfully pass except Exception: on_error_func(x) def _stop_threads(self): self._perform_action_on_threads( lambda x: x.stop(), lambda x: LOG.exception('Error stopping thread.')) def stop_timers(self, wait=False): """Stop all timers in the group and remove them from the group. No new invocations of timers will be triggered after they are stopped, but calls that are in progress will not be interrupted. 
To wait for in-progress calls to complete, pass ``wait=True`` - calling :func:`wait` will not have the desired effect as the timers will have already been removed from the group. :param wait: If true, block until all timers have been stopped before returning. """ for timer in self.timers: timer.stop() if wait: self._wait_timers() self.timers = [] def stop(self, graceful=False): """Stop all timers and threads in the group. No new invocations of timers will be triggered after they are stopped, but calls that are in progress will not be interrupted. If ``graceful`` is false, kill all threads immediately by raising GreenletExit. Note that in this case, this method will **not** block until all threads and running timer callbacks have actually exited. To guarantee that all threads have exited, call :func:`wait`. If ``graceful`` is true, do not kill threads. Block until all threads and running timer callbacks have completed. This is equivalent to calling :func:`stop_timers` with ``wait=True`` followed by :func:`wait`. :param graceful: If true, block until all timers have stopped and all threads completed; never kill threads. Otherwise, kill threads immediately and return immediately even if there are timer callbacks still running. """ self.stop_timers(wait=graceful) if graceful: # In case of graceful=True, wait for all threads to be # finished, never kill threads self._wait_threads() else: # In case of graceful=False(Default), kill threads # immediately self._stop_threads() def _wait_timers(self): for x in self.timers: try: x.wait() except eventlet.greenlet.GreenletExit: # nosec # greenlet exited successfully pass except Exception: LOG.exception('Error waiting on timer.') def _wait_threads(self): self._perform_action_on_threads( lambda x: x.wait(), lambda x: LOG.exception('Error waiting on thread.')) def wait(self): """Block until all timers and threads in the group are complete. .. note:: Before calling this method, any timers should be stopped first by calling :func:`stop_timers`, :func:`stop`, or :func:`cancel` with a ``timeout`` argument. Otherwise this will block forever. .. note:: Calling :func:`stop_timers` removes the timers from the group, so a subsequent call to this method will not wait for any in-progress timer calls to complete. Any exceptions raised by the threads will be logged but suppressed. .. note:: This call guarantees only that the threads themselves have completed, **not** that any cleanup functions added via :func:`Thread.link` have completed. """ self._wait_timers() self._wait_threads() def _any_threads_alive(self): current = threading.current_thread() for x in self.threads[:]: if x.ident == current.ident: # Don't check current thread. continue if not x.thread.dead: return True return False @removals.remove(removal_version='?') def cancel(self, *throw_args, **kwargs): """Cancel unstarted threads in the group, and optionally stop the rest. .. warning:: This method is deprecated and should not be used. It will be removed in a future release. If called without the ``timeout`` argument, this does **not** stop any running threads, but prevents any threads in the group that have not yet started from running, then returns immediately. Timers are not affected. If the 'timeout' argument is supplied, then it serves as a grace period to allow running threads to finish. 
After the timeout, any threads in the group that are still running will be killed by raising GreenletExit in them, and all timers will be stopped (so that they are not retriggered - timer calls that are in progress will not be interrupted). This method will **not** block until all threads have actually exited, nor that all in-progress timer calls have completed. To guarantee that all threads have exited, call :func:`wait`. If all threads complete before the timeout expires, timers will be left running; there is no way to then stop those timers, so for consistent behaviour :func`stop_timers` should be called before calling this method. :param throw_args: the `exc_info` data to raise from :func:`Thread.wait` for any of the unstarted threads. (Though note that :func:`ThreadGroup.wait` suppresses exceptions.) :param timeout: time to wait for running threads to complete before calling stop(). If not supplied, threads that are already running continue to completion. :param wait_time: length of time in seconds to sleep between checks of whether any threads are still alive. (Default 1s.) """ self._perform_action_on_threads( lambda x: x.cancel(*throw_args), lambda x: LOG.exception('Error canceling thread.')) timeout = kwargs.get('timeout', None) if timeout is None: return wait_time = kwargs.get('wait_time', 1) watch = timeutils.StopWatch(duration=timeout) watch.start() while self._any_threads_alive(): if not watch.expired(): eventlet.sleep(wait_time) continue LOG.debug("Cancel timeout reached, stopping threads.") self.stop() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/version.py0000664000175000017500000000126400000000000020712 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('oslo.service') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/oslo_service/wsgi.py0000664000175000017500000003032000000000000020171 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
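# A minimal usage sketch (illustrative only, kept as a comment and not part
# of this module) for the ThreadGroup API defined in
# oslo_service/threadgroup.py above. The worker() and tick() callables are
# hypothetical examples; the calls shown follow the signatures documented in
# that module.
#
#     from oslo_service import threadgroup
#
#     def worker(name):
#         print("working on %s" % name)
#
#     def tick():
#         print("timer fired")
#
#     tg = threadgroup.ThreadGroup()
#     tg.add_thread(worker, "job-1")
#     tg.add_timer_args(5, tick, initial_delay=1, stop_on_exception=False)
#     # ... application runs ...
#     tg.stop(graceful=True)   # stop timers, then wait for threads to finish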
"""Utility methods for working with WSGI servers.""" import copy import os import socket import eventlet import eventlet.wsgi import greenlet from paste import deploy import routes.middleware import webob.dec import webob.exc from oslo_log import log as logging from oslo_service._i18n import _ from oslo_service import _options from oslo_service import service from oslo_service import sslutils LOG = logging.getLogger(__name__) def list_opts(): """Entry point for oslo-config-generator.""" return [(None, copy.deepcopy(_options.wsgi_opts))] def register_opts(conf): """Registers WSGI config options.""" return conf.register_opts(_options.wsgi_opts) class InvalidInput(Exception): message = _("Invalid input received: " "Unexpected argument for periodic task creation: %(arg)s.") class Server(service.ServiceBase): """Server class to manage a WSGI server, serving a WSGI application.""" # TODO(eezhova): Consider changing the default host value to prevent # possible binding to all interfaces. The most appropriate value seems # to be 127.0.0.1, but it has to be verified that the change wouldn't # break any consuming project. def __init__(self, conf, name, app, host='0.0.0.0', port=0, # nosec pool_size=None, protocol=eventlet.wsgi.HttpProtocol, backlog=128, use_ssl=False, max_url_len=None, logger_name='eventlet.wsgi.server', socket_family=None, socket_file=None, socket_mode=None): """Initialize, but do not start, a WSGI server. :param conf: Instance of ConfigOpts. :param name: Pretty name for logging. :param app: The WSGI application to serve. :param host: IP address to serve the application. :param port: Port number to server the application. :param pool_size: Maximum number of eventlets to spawn concurrently. :param protocol: Protocol class. :param backlog: Maximum number of queued connections. :param use_ssl: Wraps the socket in an SSL context if True. :param max_url_len: Maximum length of permitted URLs. :param logger_name: The name for the logger. :param socket_family: Socket family. :param socket_file: location of UNIX socket. :param socket_mode: UNIX socket mode. :returns: None :raises: InvalidInput :raises: EnvironmentError """ self.conf = conf self.conf.register_opts(_options.wsgi_opts) self.default_pool_size = self.conf.wsgi_default_pool_size # Allow operators to customize http requests max header line size. eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line self.name = name self.app = app self._server = None self._protocol = protocol self.pool_size = pool_size or self.default_pool_size self._pool = eventlet.GreenPool(self.pool_size) self._logger = logging.getLogger(logger_name) self._use_ssl = use_ssl self._max_url_len = max_url_len self.client_socket_timeout = conf.client_socket_timeout or None if backlog < 1: raise InvalidInput(reason=_('The backlog must be more than 0')) if not socket_family or socket_family in [socket.AF_INET, socket.AF_INET6]: self.socket = self._get_socket(host, port, backlog) elif hasattr(socket, "AF_UNIX") and socket_family == socket.AF_UNIX: self.socket = self._get_unix_socket(socket_file, socket_mode, backlog) else: raise ValueError(_("Unsupported socket family: %s"), socket_family) (self.host, self.port) = self.socket.getsockname()[0:2] if self._use_ssl: sslutils.is_enabled(conf) def _get_socket(self, host, port, backlog): bind_addr = (host, port) # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). 
We need to get around this in the # future or monitor upstream for a fix try: info = socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)[0] family = info[0] bind_addr = info[-1] except Exception: family = socket.AF_INET try: sock = eventlet.listen(bind_addr, family, backlog=backlog) except EnvironmentError: LOG.error("Could not bind to %(host)s:%(port)s", {'host': host, 'port': port}) raise sock = self._set_socket_opts(sock) LOG.info("%(name)s listening on %(host)s:%(port)s", {'name': self.name, 'host': host, 'port': port}) return sock def _get_unix_socket(self, socket_file, socket_mode, backlog): sock = eventlet.listen(socket_file, family=socket.AF_UNIX, backlog=backlog) if socket_mode is not None: os.chmod(socket_file, socket_mode) LOG.info("%(name)s listening on %(socket_file)s:", {'name': self.name, 'socket_file': socket_file}) return sock def start(self): """Start serving a WSGI application. :returns: None """ # The server socket object will be closed after server exits, # but the underlying file descriptor will remain open, and will # give bad file descriptor error. So duplicating the socket object, # to keep file descriptor usable. self.dup_socket = self.socket.dup() if self._use_ssl: self.dup_socket = sslutils.wrap(self.conf, self.dup_socket) wsgi_kwargs = { 'func': eventlet.wsgi.server, 'sock': self.dup_socket, 'site': self.app, 'protocol': self._protocol, 'custom_pool': self._pool, 'log': self._logger, 'log_format': self.conf.wsgi_log_format, 'debug': self.conf.wsgi_server_debug, 'keepalive': self.conf.wsgi_keep_alive, 'socket_timeout': self.client_socket_timeout } if self._max_url_len: wsgi_kwargs['url_length_limit'] = self._max_url_len self._server = eventlet.spawn(**wsgi_kwargs) def _set_socket_opts(self, _socket): _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive _socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): _socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, self.conf.tcp_keepidle) return _socket def reset(self): """Reset server greenpool size to default. :returns: None """ self._pool.resize(self.pool_size) def stop(self): """Stops eventlet server. Doesn't allow accept new connecting. :returns: None """ LOG.info("Stopping WSGI server.") if self._server is not None: # let eventlet close socket self._pool.resize(0) self._server.kill() def wait(self): """Block, until the server has stopped. Waits on the server's eventlet to finish, then returns. :returns: None """ try: if self._server is not None: num = self._pool.running() LOG.debug("Waiting WSGI server to finish %d requests.", num) self._pool.waitall() except greenlet.GreenletExit: LOG.info("WSGI server has stopped.") class Request(webob.Request): pass class Router(object): """WSGI middleware that maps incoming requests to WSGI apps.""" def __init__(self, mapper): """Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be an object that can route the request to the action-specific method. 
Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, '/svrlist', controller=sc, action='list') # Actions are all implicitly defined mapper.resource('server', 'servers', controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Route the incoming request to a controller based on self.map. If no match, return a 404. """ return self._router @staticmethod @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): """Dispatch the request to the appropriate controller. Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return webob.exc.HTTPNotFound() app = match['controller'] return app class ConfigNotFound(Exception): def __init__(self, path): msg = _('Could not find config at %(path)s') % {'path': path} super(ConfigNotFound, self).__init__(msg) class PasteAppNotFound(Exception): def __init__(self, name, path): msg = (_("Could not load paste app '%(name)s' from %(path)s") % {'name': name, 'path': path}) super(PasteAppNotFound, self).__init__(msg) class Loader(object): """Used to load WSGI applications from paste configurations.""" def __init__(self, conf): """Initialize the loader, and attempt to find the config. :param conf: Application config :returns: None """ conf.register_opts(_options.wsgi_opts) self.config_path = None config_path = conf.api_paste_config if not os.path.isabs(config_path): self.config_path = conf.find_file(config_path) elif os.path.exists(config_path): self.config_path = config_path if not self.config_path: raise ConfigNotFound(path=config_path) def load_app(self, name): """Return the paste URLMap wrapped WSGI application. :param name: Name of the application to load. :returns: Paste URLMap object wrapping the requested application. :raises: PasteAppNotFound """ try: LOG.debug("Loading app %(name)s from %(path)s", {'name': name, 'path': self.config_path}) return deploy.loadapp("config:%s" % self.config_path, name=name) except LookupError: LOG.exception("Couldn't lookup app: %s", name) raise PasteAppNotFound(name=name, path=self.config_path) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.272123 oslo.service-3.4.0/releasenotes/0000775000175000017500000000000000000000000016645 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.276123 oslo.service-3.4.0/releasenotes/notes/0000775000175000017500000000000000000000000017775 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/add-timeout-looping-call-5cc396b75597c3c2.yaml0000664000175000017500000000023100000000000027617 0ustar00zuulzuul00000000000000--- features: - | Add a new type of looping call: FixedIntervalWithTimeoutLoopingCall. It is a FixedIntervalLoopingCall with timeout checking. 
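A minimal sketch of the looping call described in the note above (illustrative only, assuming ``FixedIntervalWithTimeoutLoopingCall.start()`` accepts the same ``interval`` argument as ``FixedIntervalLoopingCall.start()`` plus a ``timeout`` in seconds):

    from oslo_service import loopingcall

    def poll():
        # hypothetical periodic task; replace with a real check
        print("polling...")

    call = loopingcall.FixedIntervalWithTimeoutLoopingCall(poll)
    # Run poll() every 2 seconds, but abort the whole loop after 30 seconds;
    # when the timeout is exceeded, waiting on the returned event raises
    # loopingcall.LoopingCallTimeOut.
    call.start(interval=2, timeout=30).wait()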
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/add-wsgi_server_debug-opt-70d818b5b78bfc7c.yaml0000664000175000017500000000043400000000000030223 0ustar00zuulzuul00000000000000--- features: - | A new config options, ``[DEFAULT] wsgi_server_debug``, has been added. This allows admins to configure whether the server should send exception tracebacks to the clients on HTTP 500 errors. This defaults to ``False``, preserving previous behavior. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml0000664000175000017500000000007100000000000024656 0ustar00zuulzuul00000000000000--- other: - Switch to reno for managing release notes.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/drop-python27-support-1cfdf65193a03f3a.yaml0000664000175000017500000000017700000000000027323 0ustar00zuulzuul00000000000000--- upgrade: - | Support for Python 2.7 has been dropped. The minimum version of Python now supported is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/fix-find-object-in-backdoor-487bf78c4c502594.yaml0000664000175000017500000000026400000000000030113 0ustar00zuulzuul00000000000000--- fixes: - | Fix the backdoor helper method fo() to also work when there are objects present in the current python instance that do not have a __class__ attribute. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/native-threads-on-child-7150690c7caa1013.yaml0000664000175000017500000000033700000000000027332 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1983949 `_: Fixed eventlet native threads tpool on child process when parent process has used them before launching the service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/profile-worker-5d3fd0f0251d62b8.yaml0000664000175000017500000000015700000000000026042 0ustar00zuulzuul00000000000000--- features: - | Add support for profiling (capture function calltrace) service's worker processes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/service-children-SIGHUP-15d0cf6d2a1bdbf9.yaml0000664000175000017500000000071200000000000027531 0ustar00zuulzuul00000000000000--- fixes: - | ``SIGHUP`` is now handled properly with ``restart_method='mutate'``, no longer restarting child processes. See `bug 1794708`_ for details. In conjunction with the fix for `bug 1715374`_ in oslo.privsep, the nova-compute service now behaves correctly when it receives ``SIGHUP``. .. _`bug 1794708`: https://bugs.launchpad.net/oslo.service/+bug/1794708 .. 
_`bug 1715374`: https://bugs.launchpad.net/nova/+bug/1715374 ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=oslo.service-3.4.0/releasenotes/notes/support-pid-in-eventlet-backdoor-socket-path-1863eaad1dd08556.yaml 22 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/support-pid-in-eventlet-backdoor-socket-path-1863eaad1dd08556.0000664000175000017500000000050100000000000032725 0ustar00zuulzuul00000000000000--- features: - | The config option backdoor_socket_path now is a format string that supports {pid}, which will be replaced with the PID of the current process. This makes the eventlet backdoor accessible when spawning multiple processes with the same backdoor_socket_path inside the configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/threadgroup-cancel-bd89d72f383a3d9b.yaml0000664000175000017500000000020100000000000026731 0ustar00zuulzuul00000000000000--- deprecations: - | The ``ThreadGroup.cancel()`` method is deprecated and will be removed in a future major release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/timer-args-f578c8f9d08b217d.yaml0000664000175000017500000000137000000000000025171 0ustar00zuulzuul00000000000000--- features: - | The ThreadGroup class has new add_timer_args() and add_dynamic_timer_args() methods to create timers passing the positional and keyword arguments to the callback as a sequence and a mapping. This API provides more flexibility for the addition of timer control options in the future. deprecations: - | The API of the ThreadGroup add_timer() and add_dynamic_timer() methods has been identified as error-prone when passing arguments intended for the callback function. Passing callback arguments in this way is now deprecated. Callers should use the new add_timer_args() or add_dynamic_timer_args() methods (respectively) instead when it is necessary to pass arguments to the timer callback function. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/notes/timer-stop_on_exception-9f21d7c4d6d1b0d9.yaml0000664000175000017500000000037300000000000030040 0ustar00zuulzuul00000000000000--- features: - | The ThreadGroup add_timer_args() and add_dynamic_timer_args() methods now support passing a stop_on_exception=False argument to allow the timer to keep running even when an exception is raised by the callback function. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1708611940.2801232 oslo.service-3.4.0/releasenotes/source/0000775000175000017500000000000000000000000020145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/2023.1.rst0000664000175000017500000000020200000000000021416 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000021417 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1708611940.2801232 oslo.service-3.4.0/releasenotes/source/_static/0000775000175000017500000000000000000000000021573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000024044 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1708611940.2801232 oslo.service-3.4.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000022302 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000024553 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/conf.py0000664000175000017500000002154600000000000021454 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright (C) 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/oslo.service' openstackdocs_bug_project = 'oslo.service' openstackdocs_bug_tag = '' openstackdocs_auto_name = False # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'oslo.service Release Notes' copyright = '2016, oslo.service Developers' # Release notes do not need a version in the title, they span # multiple versions. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. 
version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." 
is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'oslo.serviceReleaseNotesDoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'oslo.serviceReleaseNotes.tex', 'oslo.service Release Notes Documentation', 'oslo.service Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'oslo.serviceReleaseNotes', 'oslo.service Release Notes Documentation', ['oslo.service Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'oslo.serviceReleaseNotes', 'oslo.service Release Notes Documentation', 'oslo.service Developers', 'oslo.serviceReleaseNotes', 'Provides a framework for defining new long-running services using the' ' patterns established by other OpenStack applications', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/index.rst0000664000175000017500000000040500000000000022005 0ustar00zuulzuul00000000000000============================ oslo.service Release Notes ============================ .. 
toctree:: :maxdepth: 1 unreleased 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.272123 oslo.service-3.4.0/releasenotes/source/locale/0000775000175000017500000000000000000000000021404 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.272123 oslo.service-3.4.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000022356 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1708611940.2801232 oslo.service-3.4.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000024143 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000001633200000000000027201 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2023. #zanata msgid "" msgstr "" "Project-Id-Version: oslo.service Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-06-27 14:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2023-06-21 08:13+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.17.0" msgstr "1.17.0" msgid "1.19.0" msgstr "1.19.0" msgid "1.35.0" msgstr "1.35.0" msgid "1.37.0" msgstr "1.37.0" msgid "1.40.0" msgstr "1.40.0" msgid "1.40.2" msgstr "1.40.2" msgid "2.0.0" msgstr "2.0.0" msgid "2.4.1" msgstr "2.4.1" msgid "2.5.1" msgstr "2.5.1" msgid "2.6.1" msgstr "2.6.1" msgid "2.6.2" msgstr "2.6.2" msgid "2.7.0" msgstr "2.7.0" msgid "2.8.0" msgstr "2.8.0" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "" "A new config options, ``[DEFAULT] wsgi_server_debug``, has been added. This " "allows admins to configure whether the server should send exception " "tracebacks to the clients on HTTP 500 errors. This defaults to ``False``, " "preserving previous behavior." msgstr "" "A new config options, ``[DEFAULT] wsgi_server_debug``, has been added. This " "allows admins to configure whether the server should send exception " "tracebacks to the clients on HTTP 500 errors. This defaults to ``False``, " "preserving previous behaviour." msgid "" "Add a new type of looping call: FixedIntervalWithTimeoutLoopingCall. It is a " "FixedIntervalLoopingCall with timeout checking." msgstr "" "Add a new type of looping call: FixedIntervalWithTimeoutLoopingCall. It is a " "FixedIntervalLoopingCall with timeout checking." msgid "" "Add support for profiling (capture function calltrace) service's worker " "processes." msgstr "" "Add support for profiling (capture function calltrace) service's worker " "processes." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Fix the backdoor helper method fo() to also work when there are objects " "present in the current python instance that do not have a __class__ " "attribute." 
msgstr "" "Fix the backdoor helper method fo() to also work when there are objects " "present in the current Python instance that do not have a __class__ " "attribute." msgid "New Features" msgstr "New Features" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Support for Python 2.7 has been dropped. The minimum version of Python now " "supported is Python 3.6." msgstr "" "Support for Python 2.7 has been dropped. The minimum version of Python now " "supported is Python 3.6." msgid "Switch to reno for managing release notes." msgstr "Switch to Reno for managing release notes." msgid "" "The API of the ThreadGroup add_timer() and add_dynamic_timer() methods has " "been identified as error-prone when passing arguments intended for the " "callback function. Passing callback arguments in this way is now deprecated. " "Callers should use the new add_timer_args() or add_dynamic_timer_args() " "methods (respectively) instead when it is necessary to pass arguments to the " "timer callback function." msgstr "" "The API of the ThreadGroup add_timer() and add_dynamic_timer() methods has " "been identified as error-prone when passing arguments intended for the " "callback function. Passing callback arguments in this way is now deprecated. " "Callers should use the new add_timer_args() or add_dynamic_timer_args() " "methods (respectively) instead when it is necessary to pass arguments to the " "timer callback function." msgid "" "The ThreadGroup add_timer_args() and add_dynamic_timer_args() methods now " "support passing a stop_on_exception=False argument to allow the timer to " "keep running even when an exception is raised by the callback function." msgstr "" "The ThreadGroup add_timer_args() and add_dynamic_timer_args() methods now " "support passing a stop_on_exception=False argument to allow the timer to " "keep running even when an exception is raised by the callback function." msgid "" "The ThreadGroup class has new add_timer_args() and add_dynamic_timer_args() " "methods to create timers passing the positional and keyword arguments to the " "callback as a sequence and a mapping. This API provides more flexibility for " "the addition of timer control options in the future." msgstr "" "The ThreadGroup class has new add_timer_args() and add_dynamic_timer_args() " "methods to create timers passing the positional and keyword arguments to the " "callback as a sequence and a mapping. This API provides more flexibility for " "the addition of timer control options in the future." msgid "" "The ``ThreadGroup.cancel()`` method is deprecated and will be removed in a " "future major release." msgstr "" "The ``ThreadGroup.cancel()`` method is deprecated and will be removed in a " "future major release." msgid "" "The config option backdoor_socket_path now is a format string that supports " "{pid}, which will be replaced with the PID of the current process. This " "makes the eventlet backdoor accessible when spawning multiple processes with " "the same backdoor_socket_path inside the configuration." msgstr "" "The config option backdoor_socket_path now is a format string that supports " "{pid}, which will be replaced with the PID of the current process. 
This " "makes the eventlet backdoor accessible when spawning multiple processes with " "the same backdoor_socket_path inside the configuration." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Unreleased Release Notes" msgstr "Unreleased Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "" "``SIGHUP`` is now handled properly with ``restart_method='mutate'``, no " "longer restarting child processes. See `bug 1794708`_ for details. In " "conjunction with the fix for `bug 1715374`_ in oslo.privsep, the nova-" "compute service now behaves correctly when it receives ``SIGHUP``." msgstr "" "``SIGHUP`` is now handled properly with ``restart_method='mutate'``, no " "longer restarting child processes. See `bug 1794708`_ for details. In " "conjunction with the fix for `bug 1715374`_ in oslo.privsep, the nova-" "compute service now behaves correctly when it receives ``SIGHUP``." msgid "oslo.service Release Notes" msgstr "oslo.service Release Notes" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1708611940.272123 oslo.service-3.4.0/releasenotes/source/locale/fr/0000775000175000017500000000000000000000000022013 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1708611940.2801232 oslo.service-3.4.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000023600 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000153200000000000026632 0ustar00zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: oslo.service Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2022-09-09 15:55+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 06:05+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "Other Notes" msgstr "Autres notes" msgid "Switch to reno for managing release notes." msgstr "Commence à utiliser reno pour la gestion des notes de release" msgid "Unreleased Release Notes" msgstr "Note de release pour les changements non déployées" msgid "oslo.service Release Notes" msgstr "Note de release pour oslo.service" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000021761 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. 
release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000021627 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000022174 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000022021 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000022014 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000022020 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/unreleased.rst0000664000175000017500000000014400000000000023025 0ustar00zuulzuul00000000000000========================== Unreleased Release Notes ========================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000022223 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/victoria.rst0000664000175000017500000000021200000000000022512 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: stable/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/wallaby.rst0000664000175000017500000000020600000000000022330 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. 
release-notes:: :branch: stable/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/xena.rst0000664000175000017500000000017200000000000021632 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: stable/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000021626 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/releasenotes/source/zed.rst0000664000175000017500000000016600000000000021464 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: stable/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/requirements.txt0000664000175000017500000000052700000000000017444 0ustar00zuulzuul00000000000000WebOb>=1.7.1 # MIT debtcollector>=1.2.0 # Apache 2.0 eventlet>=0.25.2 # MIT greenlet>=0.4.15 # MIT oslo.utils>=3.40.2 # Apache-2.0 oslo.concurrency>=3.25.0 # Apache-2.0 oslo.config>=5.1.0 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 PasteDeploy>=1.5.0 # MIT Routes>=2.3.1 # MIT Paste>=2.0.2 # MIT Yappi>=1.0 # MIT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1708611940.2801232 oslo.service-3.4.0/setup.cfg0000664000175000017500000000223700000000000016001 0ustar00zuulzuul00000000000000[metadata] name = oslo.service summary = oslo.service library description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/oslo.service/latest/ python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython [files] packages = oslo_service [entry_points] oslo.config.opts = oslo.service.periodic_task = oslo_service.periodic_task:list_opts oslo.service.service = oslo_service.service:list_opts oslo.service.sslutils = oslo_service.sslutils:list_opts oslo.service.wsgi = oslo_service.wsgi:list_opts [upload_sphinx] upload-dir = doc/build/html [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/setup.py0000664000175000017500000000127100000000000015667 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/test-requirements.txt0000664000175000017500000000023100000000000020411 0ustar00zuulzuul00000000000000fixtures>=3.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 requests>=2.14.2 # Apache-2.0 stestr>=2.0.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1708611912.0 oslo.service-3.4.0/tox.ini0000664000175000017500000000341500000000000015472 0ustar00zuulzuul00000000000000[tox] minversion = 3.2.0 envlist = py3,pep8 [testenv] setenv = VIRTUAL_ENV={envdir} BRANCH_NAME=master CLIENT_NAME=oslo.service deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt allowlist_externals = find commands = find . -type f -name "*.py[co]" -delete stestr run --slowest {posargs} [testenv:pep8] deps = pre-commit commands = pre-commit run -a [testenv:venv] commands = {posargs} [testenv:docs] allowlist_externals = rm deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt fixtures>=3.0.0 # Apache-2.0/BSD commands = rm -rf doc/build sphinx-build -W --keep-going -b html doc/source doc/build/html {posargs} [testenv:cover] setenv = PYTHON=coverage run --source oslo_service --parallel-mode commands = coverage erase stestr run --slowest {posargs} coverage combine coverage html -d cover coverage report coverage report --show-missing [flake8] # E123, E125 skipped as they are invalid PEP-8. # E731 skipped as assign a lambda expression # W504 line break after binary operator show-source = True ignore = E123,E125,E731,W504 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build [hacking] import_exceptions = oslo_service._i18n [testenv:debug] commands = oslo_debug_helper -t oslo_service/tests {posargs} [testenv:releasenotes] allowlist_externals = rm deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
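A minimal, untested sketch (not part of the tarball contents above) illustrating the behaviour described in the release note earlier in this archive: when a service is launched with restart_method='mutate', SIGHUP mutates configuration in place and child processes are no longer restarted. The service name, worker count, and timer callback below are purely illustrative assumptions; only oslo_service.service.Service, service.launch(), and the launcher's wait() are taken from the library itself.

    # sketch_mutate_sighup.py -- hypothetical consumer of oslo.service
    from oslo_config import cfg
    from oslo_service import service

    CONF = cfg.CONF


    class ExampleService(service.Service):
        """Hypothetical service; the worker logic here is illustrative only."""

        def start(self):
            super().start()
            # Run a periodic task on the service's thread group.
            self.tg.add_timer(10, self._tick)

        def _tick(self):
            # Placeholder for real periodic work.
            pass

        def reset(self):
            # Called on SIGHUP when launched with restart_method='mutate';
            # children stay alive and only mutable options are reloaded.
            super().reset()


    if __name__ == '__main__':
        # workers=2 selects the ProcessLauncher; restart_method='mutate'
        # opts into in-place config mutation on SIGHUP.
        launcher = service.launch(CONF, ExampleService(), workers=2,
                                  restart_method='mutate')
        launcher.wait()

Under these assumptions, sending SIGHUP to the parent process would reload mutable configuration without tearing down the two worker processes, which is the fix `bug 1794708` refers to in the release note.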