stestr-3.0.0/.coveragerc

[run]
branch = True
source = stestr
omit = stestr/tests/*
parallel = True
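Because ``parallel = True`` is set, each test process writes its own ``.coverage.<host>.<pid>.<random>`` data file instead of sharing a single one. A minimal sketch of the standard coverage.py commands that pair with such a config (the repository's real invocation is wired through the ``cover`` tox environment referenced in ``.travis.yml`` below)::

    coverage combine      # merge the per-process .coverage.* data files into one
    coverage report -m    # summarize coverage, listing the line numbers missed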
stestr-3.0.0/.github/ISSUE_TEMPLATE/bug_report.md

---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'bug'
assignees: ''

---

**Issue description**

Include a description of the issue encountered and what the expected behavior was. Also include any configuration, test suite, or other relevant information needed to reproduce the issue. Note: if you encounter an error message, try re-running the command with `--debug` to print any tracebacks.

**Expected behavior and actual behavior**

**Steps to reproduce the problem**

**Specifications like the version of the project, operating system, or hardware**

**System information**

- OS: [e.g. Windows 10, openSUSE 15.1, Ubuntu 18.04]
- stestr version (`stestr --version`):
- Python release (`python --version`):
- pip packages (`pip freeze`):

**Additional information**

stestr-3.0.0/.github/ISSUE_TEMPLATE/feature_request.md

---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: 'enhancement'
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

stestr-3.0.0/.mailmap

# Entries in this file are made for two reasons:
#   1) to merge multiple git commit authors that correspond to a single author
#   2) to change the canonical name and/or email address of an author.
#
# Format is:
#   Canonical Name <canonical email>  commit name <commit email>
#   \--------------+---------------/  \----------+-------------/
#               replace                        find
# See also: 'git shortlog --help' and 'git check-mailmap --help'.
Andrea Frittoli
Jonathan Lange
Masayuki Igawa Masayuki Igawa
Matthew Treinish
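As the header comment suggests, the mapping can be spot-checked with git itself; a short sketch (the identity below is a made-up example, not an entry from this file)::

    # Print the canonical identity .mailmap resolves a commit identity to
    git check-mailmap "Jane Doe <jane@example.com>"
    # Summarize authors with the mapping applied
    git shortlog -se HEAD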
stestr-3.0.0/.stestr.conf

[DEFAULT]
test_path=./stestr/tests
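With this file in place the runner needs no other setup; a short sketch of typical invocations (the filter string is an illustrative placeholder; any regex matching test ids works)::

    stestr run                                  # discover and run everything under ./stestr/tests
    stestr run --serial                         # the same tests, in a single worker
    stestr list stestr.tests.test_config_file   # list matching test ids without running them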
stestr-3.0.0/.travis/coveralls.sh

#!/bin/sh
if [ "$TOXENV" = "cover" ]; then
    coveralls
fi

stestr-3.0.0/.travis.yml

sudo: false
matrix:
  fast_finish: true
  include:
    - python: "3.5"
      env: TOXENV=py35
    - python: "3.6"
      env: TOXENV=py36
    - os: linux
      python: 3.8
      env: TOXENV=py38
    - os: linux
      dist: xenial
      python: 3.7
      env: TOXENV=py37
      sudo: true
    - python: "3.6"
      env: TOXENV=cover
    - python: "3.6"
      env: TOXENV=pep8
    - python: "3.6"
      env: TOXENV=docs
    - if: tag IS present
      python: "3.6"
      env:
        - TWINE_USERNAME=stestr-release
      install: pip install -U twine
      script:
        - python3 setup.py sdist bdist_wheel --universal
        - twine upload dist/stestr*
cache:
  directories:
    - $HOME/.cache/pip
install: pip install -U tox coveralls
language: python
script:
  - tox
after_success: .travis/coveralls.sh
notifications:
  email: false
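Every job in the matrix above just selects a tox environment via ``TOXENV``, so the individual CI checks can be reproduced locally by name (a sketch assuming tox is installed; the environment names are taken from the matrix)::

    tox -e py37    # unit tests on Python 3.7
    tox -e pep8    # style checks
    tox -e docs    # documentation build
    tox -e cover   # coverage run that .travis/coveralls.sh reports from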
stestr-3.0.0/AUTHORS

Alexei Kornienko
Alfredo Moralejo
Andrea Frittoli
Benji York
Brad Crittenden
Chandan Kumar
Chris Dent
Chris Jones
Clark Boylan
Claudiu Belu
Dirk Mueller
Dirk Mueller
Francesco Banconi
James Page
James Westby
Jelmer Vernooij
Jonathan Lange
Lucian Petrut
Luigi Toscano
Martin
Masayuki Igawa
Matthew Treinish
Monty Taylor
Robert Collins
Sean McGinnis
Sergey Vilgelm
Sorin Sbarnea
Stephen Finucane
Steve Heyman
Thomas Bechtold
Tony Breeds
Zane Bitter
pvinci

stestr-3.0.0/CODE_OF_CONDUCT.md

# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at mtreinish@kortar.org. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

stestr-3.0.0/CONTRIBUTING.rst

Contributing
============

To browse the latest code, see: https://github.com/mtreinish/stestr

To clone the latest code, use: ``git clone https://github.com/mtreinish/stestr.git``

Guidelines for contribution are documented at: http://stestr.readthedocs.io/en/latest/developer_guidelines.html

Use `github pull requests`_ to submit patches. Before you submit a pull request, ensure that all of the automated tests pass by running ``tox`` locally; this runs the test suite and the automated style checks exactly as they will run in CI. If CI fails on your change, it cannot be merged.

.. _github pull requests: https://help.github.com/articles/about-pull-requests/
stestr-3.0.0/ChangeLog

CHANGES
=======

3.0.0
-----

* Update README\_ja.rst
* Update README in preparation for release 3.0.0
* Update test-requirements to be py3 only
* Remove six usage from testr\_to\_stestr
* Bump python-subunit minimum to 1.4.0
* Remove universal wheel tag from setup.cfg
* Remove six from tests and requirements
* Fix line length issue from pyupgrade
* Treat bytes and strings differently
* Remove outdated tox envs
* Run pyupgrade on repo
* Fix import issues
* Remove 2.7 ci jobs
* Remove six usage from stestr code
* Fix typo bug in list\_tests() error code
* Remove sys.version switches for python2.7
* Update package metadata for python2 removal

2.6.0
-----

* Add python 3.8 macOS and windows ci jobs
* Update test\_processor.py
* Update config\_file.py
* Update cli.py
* Remove 3.8 azure pipelines jobs
* Update stestr/scheduler.py
* Update to recent hacking version
* Add Python 3.8 support
* Add --pdb flag to stestr run
* Add unit tests around repo initialize
* Handle empty .stestr directory to initialize
* Update issue templates
* Escape Windows paths
* Stop encoding attachments text

2.5.1
-----

* Remove stray reference to removed short form of --random
* Remove abbreviated option for stestr run --random
* Only use stdlib based unittest runner for py >=3.5

2.5.0
-----

* Handle NoneType timestamps in subunit results
* Add links to translated version README
* Add README\_ja.rst
* Update docs
* Revert to the trimmed stdout usage
* Revert to previous stdout and remove usage modification
* Revert "Add back more of the \_get\_runner() logic"
* Add back more of the \_get\_runner() logic
* Remove unused bits from test runner
* Use StringIO instead of tempfile
* Remove console script entrypoint for the runner
* Add license to package metadata
* Add unit tests for output functions
* Add mailmap file to deduplicate authors list
* Add custom test runner and remove testtools runner dependency
* Fix edge case where there are no test times in output
* Mention IRC channel in README

2.4.0
-----

* Add macOS azure pipelines
* Use Azure DevOps build badge instead of appveyor
* Add Deprecation warning on cli usage with py2.7
* Remove appveyor config
* Install vcpython27 when python.version is 2.7
* Set up CI with Azure Pipelines
* Add release automation for pypi artifacts
* Rework logic for suppress attachments and all attachments
* Add readme note about python 2 eol in readme
* Cap sphinx version <2.0.0 when python2.7
* Add flag to print all attachments for successful tests
* Add skips on windows
* Discover python executable when discover is not used
* Update copyright year
* Use str instead of six.text\_type for python2.7
* Fix pep8 error
* Update stestr/repository/memory.py
* Add support to repositories for storing run metadata
* Use yaml.safe\_load instead of yaml.load
* Take care of 'concurrency is None' case

2.3.1
-----

* Add sanity check unittests
* Use to\_int in both CLI and Python API
* Ensure concurrency is always an int

2.3.0
-----

* Update package metadata in setup.cfg
* Inline CONTRIBUTING.rst in README.rst and add to docs
* Add links to GitHub repo and project-urls to metadata
* Move from get\_description() to class docstrings part-2
* Switch from get\_descripition() to class docstrings
* Fix whitespace lint
* Gracefully handle the case when sys.executable is None
* Add error message for invalid concurrency value
* Use default user config in unit tests
* Turn on debug loggin by default
* Run commands through the same python used for stestr
* Improved test coverage for stestr run --subunit 0 exit status
* Update docs for exit codes with --subunit flags
* Put in quotes the top\_dir and test\_path
* Clarify group\_regex explanation
* Update MANUAL.rst for parallel\_class option
* Update doc/source/MANUAL.rst
* Update MANUAL.rst for parallel\_class option
* Introduce parallel-class option
* Tweak the README for readability

2.2.0
-----

* Change title of project in readme
* Add a better description to README Overview section
* Fix discovery import error formatting on py3
* Cleanup unused parameters in \_run\_tests
* Enable doc8
* Add all stream to repo even if some tests fail
* Extract loading case code to \_load\_case() function
* Make test running serially when just loading
* Fix time measurement for load command too
* Use reported times instead of wall time in subunit-trace
* Add support for test class and method by path on no-discover

2.1.1
-----

* Add support for python 3.7
* Fix handling of unexpected success results
* Allow stestr to be called as a module (#185)
* Make warning and error messages use stderr
* Add error handling for invalid input regexes
* Cleanup the manpage section on dealing with failed tests
* Cleanup argument help text on load command
* Fix description for the stestr run command

2.1.0
-----

* Uncap pbr
* Revert "Cap pip version in windows ci"
* Revert "Uncap pbr"
* Uncap pbr
* Clarify docs on group\_regex
* Remove Python 2.6 code
* Switch python doc reference from 2 to 3
* Cleanup manual section on configuration
* Fix abbreviate option in run subcommand
* Ignore errors on cleanup in test\_return\_codes
* Cap pip version in windows ci
* Add docs and helper script for building man page
* Add documentation for --suppress-attachments option
* Add option to suppress attachment printing on successful tests
* Add unit tests for return code from functions
* Remove windows skips for old subunit versions
* Cap PBR<4.0.0
* Fixed manual page description
* Fix manpage generation
* Add issue description in template
* Add an issue template to project
* Add usage to manual docs page
* Fix the manual docs link in README
* Add alt texts for badges in README
* Use flat-square design badges on README
* Fix typos about whitelist/blacklist option in manual
* Fix a small mistake in manual about test selection
* Update the package summary in setup.cfg metadata

2.0.0
-----

* Add functional tests for user config file
* Fix no attribute 'repo\_type' error when running w/ --slowest option
* Add support for a user config file
* Fix warning message about missing worker tags in subunit-trace
* Add badge for Appveyor status
* Remove unnecessary shebang and execution bit from subunit\_trace
* Fix typos in docs
* Fix pep8 violations
* Add --slowest option to test runs
* Handle zero and negative times in stestr slowest
* list: Deal with [] instead of 'None' for filters
* Fix docs nits
* Update doc for using cliff for CLI layer
* Use cliff for CLI layer
* Implement quiet option for run and load commands
* Deprecate the partial flag
* Allow both group\_regex and group-regex as an optional argument
* Remove pyc files before running stestr in tox
* Update travis job config

1.1.0
-----

* Pass output to output.make\_result() in stestr load
* Remove 32bit windows testing CI config
* Add functional test for analyze-isolation flag
* Fetch the data from last run earlier
* Fix the stestr run --analyze-isolation flag
* Remove contributor docs for using reno
* Fix stestr load from stdin
* Add test\_return\_codes check for valid subunit output
* Respect all selection mechanisms for discovery check
* Fix .testr.conf parsing: test path follows discover
* Fix if logic on output types in stestr load
* Add a --abbreviate flag to stestr run and load
* Add missing run command to stestr manual
* Print just error message when no stestr repo
* Fix whitelist arg usage in stestr run
* Drop reno
* Add missing home-page
* Fix: pass the top\_dir parameter to the 'list' command
* Fix: pass the test\_path parameter to the 'list' command
* Update MANUAL.rst
* Update the trove classifiers
* Use context managers for file open()
* Ensure we always close files in file repository
* Create a Code of Conduct for the project
* Fix coveralls collection

1.0.0
-----

* Handle stestr init with existing path
* Fix commands \_\_all\_\_ and its docs
* Add docs to the list of tox envs for travis
* Fix doc generation - add subunit trace to tree
* Fix return code on no matching tests
* Add stdout argument to command functions
* Allow to import commands APIs from a single module
* Fix docs warnings and treat warning as error
* Add docs for output filters
* Add subunit-trace support to stestr last
* Add support for setting colorized output
* Switch the default output filter to subunit-trace
* Add API docs for the commands
* Copy subunit-trace and colorizer from os-testr
* Fix stestr run --until-failure with subunit output
* Fix stestr --test-path argument for stestr run and list
* Remove the stats command
* Rename the test\_listing\_fixture to test\_processor
* Add a real python api for running commands
* Remove unused class in commands/load.py
* Write subunit2sql db manage output to local stdout
* Add a concurrency option to the manual scheduler
* Get coverage only one time in travis-ci

0.5.0
-----

* Add combine option to stestr run
* Close sqlite tempfile before tests run
* Add file repository skips for windows
* Add appveyor ci config
* Update requirements
* Add pypi version badge
* Remove unused output method
* Cleanup output from test listing fixture
* Cleanup error handling for missing test-path or config file
* Remove unused code for printing skips
* [Trivial] Cleanup typos and word-wrappings
* Improve coverage collection
* Add build status and coverage badges
* Add tox -e cover to .travis.yml to enable coveralls
* Enable coverage with tox

0.4.0
-----

* Add support to stestr load to append to an existing entry
* Make version string return full string with vcs
* Adds Windows support
* Use dbm.dumb rather than gdbm
* Add test\_sql.py
* Fix wrong count condition in SQL
* Add unit test for the fix of bug #35
* Fix error stestr run with bad regex
* Return help message is no command is specified
* Print nice error message when test\_path isn't set
* Move sql requirements to setuptools extras
* Fix scheduler.partition\_tests randomize option
* Enable until-failure option with no-discover option
* Add CLI option to stestr run to randomize test order
* Workaround potential backwards incompat with gnudbm usage
* Use the same dbm version across python versions

0.3.0
-----

* Remove extra dash from force-init option of load
* Add line feed to stats command output
* Add --no-discover option to stestr run
* Fix typos in docs
* Add section to README about migrating from testr

0.2.0
-----

* Add CLI options for config file variables
* Add a postive unit test for selection.filter\_tests()
* Port test selection logic from ostestr
* Update the help strings for the CLI

0.1.0
-----

* Preserve full test\_id in \_get\_test\_times for file repository
* Enable real time DB writes for sql repository
* Strip attrs from test ids in \_get\_test\_times lookup
* Add python 3.6 support to stestr
* Strip attrs from test\_ids before storing time data
* Add option to manual configure the worker balance
* Ensure that we run subprocess with the same version of python
* Add repository presence check to sql RepositoryFactory
* Fix default exception message for RepositoryNotFound
* Fix \_Subunit2SqlRun.get\_test() method
* Add subunit2sql to requirements.txt~
* Cleanup test\_listing\_fixture and add docs
* Remove LINEFEED variable
* Add serial option to CLI
* Fix temp\_dir path in TestUtil
* Add .stestr.sqlite to .gitignore
* Add unit tests for getting default repo url
* Fix typo in sql repository type for failing test list
* Change sql repository type default location to be .stestr.sqlite
* Add docs for the different repository types
* Add reno release note for repository type changes
* Workaround stestr last issue with sql repository type
* Add a warning when using the sql repository type
* Add api and cli to select repository type and url
* Add subunit2sql repository type
* Remove /.testrepository from .gitignore
* Add CONTRIBUTING.rst file
* Add install details to README
* Add docs with details about release notes
* Cleanup the overview a bit more
* Make README overview less subject
0.0.1
-----

* Use version module to get package version
* Cleanup the scheduler tests
* Make unit test attachment fixtures always enabled
* Add unit tests for scheduler
* Clean up docstring in the abstract repository module
* Cleanup the stestr manual
* Add api docs for the output module
* Add more details to the run test operations doc
* Add a note that internal modules aren't stable interfaces
* Add API docs for the scheduler module
* Add api doc page for the selection module
* Split dev guidelines from internal arch doc files
* Add api docs for the config\_file module
* Start adding developer documentation
* Add more details to the README
* Add missing apache header
* Start using reno to manage release notes
* Add tool to convert testr.conf to stestr.conf
* Add unit tests for file repository
* Cleanup the config file model
* Add missing blank newline to end of LICENSE
* Add a note about get\_id arg in CLITestResult class
* Remove extra six import
* Fix several issues with stestr failing
* Add tests to verify the return code of run command
* Remove external path from iter\_streams
* Add missing periods from license headers
* Separate out the abstract repository class
* Relicense project under just Apache 2.0
* Fix stestr run on py2
* Fix --subunit flag on run command
* Fix serialized subunit stream output from stestr run
* Fix typo to stestr
* Fix pep8 issue in utils.py
* Fix concurrency option type to int
* Add Python 3.5 to setup.cfg
* Make run command actually run tests
* Remove fixtures directory
* Add py35 to tox.ini envlist
* Fix testr list command again
* Remove unecessary testcommand module
* Add pro forma test
* Nuke the testrepository unit tests
* Fix stestr last command
* Fix issues with parallel runner handling
* Cleanup more debug prints
* Cleanup the manual section in docs a bit
* Fix stestr load from stdin
* Switch to stestr for the executable name
* Remove a stray debug print
* Fix testr list with a regex
* Fix testr list with a regex
* Fix the testr list command
* Move and fix test\_listing fixture
* Remove debug prints from iter\_streams functions
* Add a CLITestResults class and use it for load
* Add ouptut\_summary method
* Cleanup load() a bit
* Cleanup .testr.conf in repo
* Just create a repository with testr run if it doens't exist
* Add testr init call to travis yaml
* Add doc builds to gitignore
* Add pbr generated files to gitignore
* Start to cleanup docs
* Cleanup some leftove files from testrepository
* Remove TestCommand class
* Add .stestr directory to .gitignore
* Turn off travis email notifications for now
* Update test-requirements to include missing packages
* Fix spacing in travis.yml
* Add travis.yml for initial project start
* Remove bzrignore file
* Add missed files
* Fix pep8 issues (except for tests)
* Reowrk things as stestr
* Pivot repo to stestr
* Release 0.0.20
* Tests will be reliably tagged with worker-%d
* Release 0.0.19
* Fix 0 timestamps for enumerated but not run tests
* Update docs for the move to github
* Make subunit 0.0.18 be a hard dependency
* Move to subunit v2 for output of stored streams
* Make sure output\_stream can handle non-utf8 bytes
* Ignore .swp files
* Added \*.pyc to gitignore
* Fix python3 filtering support
* Ignore files in git
* Improve error rendering of listing issues
* \* When list-tests encounters an error, a much clearer response will now be shown. (Robert Collins, #1271133)
* \* \`\`run\`\` was outputting bad MIME types - test/plain, not text/plain. (Robert Collins)
* Release 0.0.18
* \* \`\`run\`\` now accepts \`\`--isolated\`\` as a parameter, which will cause each selected test to be run independently. This can be useful to both workaround isolation bugs and detect tests that can not be run independently. (Robert Collins)
* \* \`\`capture\_ids\`\` in test\_run now returns a list of captures, permitting tests that need to test multiple runs to do so. (Robert Collins)
* 0.0.17 ++++++
* Release 0.0.16
* \* When test listing fails, testr will now report an error rather than incorrectly trying to run zero tests. A test listing failure is detected by the returncode of the test listing process. (Robert Collins, #1185231)
* \* A new testr.conf option \`\`group\_regex\`\` can be used for grouping tests so that they get run in the same backend runner. (Matthew Treinish)
* \* The scheduler can now groups tests together permitting co-dependent tests to always be scheduled onto the same backend. Note that this does not force co-dependent tests to be executed, so partial test runs (e.g. --failing) may still fail. (Matthew Treinish, Robert Collins)
* Make an assertion format check more lenient, for Python 3.3
* Fix an erroneous unicode to bytes comparison
* Make failure order stable from memory repository for testing
* Deal with Python 3 rejecting comparisons between None and None
* \* Fix Python 3.\* support for entrypoints; the initial code was Python3 incompatible. (Robert Collins, Clark Boylan, #1187192)
* Add group\_regex option to .testr.conf to leverage use of group\_regex for scheduling
* Add group regex scheduling hint to the test partitioner
* Remove unneeded test setup
* Teach load how to cause a failure in response to stdin
* \* Switch to using multiprocessing to determine CPU counts. (Chris Jones, #1092276)
* Document the change of CPU core detection
* Switch to multiprocessing for detecting CPU core count. This is present as of Python 2.6
* Only return streams from CLI.\_iter\_stream if the type matches the first type the command declared
* Simplify load a little
* Consolidate on the StreamResult API for make\_results' return values
* Missing (C) header
* Cleanup crufty imports
* Drop unused class TestResultFilter
* Drop the ExtendedToStream wrapping around UI.make\_result
* Move tag based test filtering into the UI: many test things won't be filtered, such as slow tests and pdb debugging, so the UI has to see the tests. Moving the responsibility into the UI may lead to repetition in other UI's if not made easy to reuse, but that seems like the lesser of evils for now
* Simplify commands.failing
* Release 0.0.15, with minimal subunit v2 support
* Fix subunit v1 parallel test execution
* Add test for --subunit support - the UI was previously only loosely tested
* Change get\_test() APIs to return StreamResult rather than TestResult emitting test objects
* Migrate to new streamresult concurrent test suite API
* Split out event forwarding and summarising roles for make\_result
* More reasoning for \n in return-code injection
* Move internal get\_inserter to be StreamResult based
* Cleanup the switch to using inserter state
* Change run\_id to be read from the object rather than a local binding
* Fix test\_cli for python 2.x
* Use ConcurrentStreamResult always
* Start getting streamresult into the innards
* \* Expects subunit v2 if the local library has v2 support in the subunit library. This should be seamless if the system under test shares the Python libraries. If it doesn't, either arrange to use \`\`subunit-2to1\`\` or upgrade the subunit libraries for the system under test. (Robert Collins)
* Update releasing docs and really release 0.0.14
* 0.0.14 ++++++
* Release 0.0.13
* \* \`\`setup.py testr\`\` was not indicating test failures via it's return code. (Monty Taylor)
* Actually return from \_run\_testr
* Release 0.0.12
* \* There is now a setuptools extension provided by \`\`testrespository\`\` making it easy to invoke testr from setup.py driven workflows. (Monty Taylor, Robert Collins)
* \* BSD license file incorrectly claimed copyright by subunit contributors. (Monty Taylor)
* Correct a typo in setup.py
* \* .testr.conf is now shipped in the source distribution to aid folk wanting to validate that testrepository works correctly on their machine. (Robert Collins)
* Add setuptools commands for running testr and coverage
* Release 0.0.11
* Tweak docs
* \* Fix another incompatability with Mac OS X - gdbm dbm modules don't support get. (Robert Collins, #1094330)
* ReST fixes for docs
* Release 0.0.10
* \* It's now possible to configure \`\`test\_run\_concurrency\`\` in \`\`.testr.conf\`\` to have concurrency defined by a callout. (Robert Collins)
* Update testr help run docs for new options
* Refactor, making the test run case also spin up in advance
* Support creating test execution environments at runtime
* Implement test listing and execution with test execution instances
* Revert out a test tweak, and add test for behaviour without per-instance execution logic
* Tweak some docs
* Actually implement instance disposal
* Enforce setUp before get\_run\_command
* \* TestCommand is now a fixture. This is used to ensure cached test instances are disposed of - if using the object to run or list tests, you will need to adjust your calls. (Robert Collins)
* \* It's now possible to configure \`\`test\_run\_concurrency\`\` in \`\`.testr.conf\`\` to have concurrency defined by a callout. (Robert Collins)
* Document overview
* Release 0.0.9
* \* On OSX the \`\`anydbm\`\` module by default returns an implementation that doesn't support update(). Workaround that by falling back to a loop. (Robert Collins, #1091500)
* Document workaround
* Workaround Apple's OSX Python having a brain -damanged bsddb module
* Fix .testr.conf example to match what works out there in the wild
* \* \`\`testr --analyze-improvements\`\` now honours test regex filters and only analyzes matching tests. (Robert Collins)
* Better documentation for setup of .testr.conf
* Better keywords
* Release 0.0.8
* \* \`\`testr run --analyze-isolation\`\` will search the current failing tests for spurious failures caused by interactions with other tests. (Robert Collins, #684069)
* First, horribly untested, version of --analyze-failures
* Science fiction
* Make failing --subunit always output 0 and document process exit codes for last and failing
* \* \`\`testr last\`\` now supports \`\`--subunit\`\` and when passed will output the stored subunit stream. Note that the exit code is always 0 when this is done (unless an exception occurs reading the stream) - subunit consumers should parse the subunit to determine success/failure. (Robert Collins)
* Slight code cleanup
* \* \`\`test failing --subunit\`\` now exits 0 if there are no failing tests. (Robert Collins)
* Document --until-failure
* \* \`\`testr run --until-failure\`\` will repeat a test run until interrupted by ctrl-C or until a failure occurs. (Robert Collins, #680995)
* \* \`\`testr run --failing\`\` will no longer run any tests at all if there are no failing tests. (Robert Collins, #904400)
* Fixup last commit
* \* \`\`testr run --failing\`\` will no longer run any tests at all if there are no failing tests. (Robert Collins, #904400)
* More accurate docs
* More accurate getting-going docs
* Remove some sci-fi
* \* \`\`testr help command\`\` now shows the docstring for commands (Robert Collins)
* \* \`\`testr help command\`\` now shows the docstring for commands (Robert Collins)
* Reference the online docs
* \* New argument type \`\`ExistingPathArgument\`\` for use when commands want to take the name of a file. (Robert Collins)
* \* \`\`testr load\`\` now supports passing filenames to subunit streams to load. (Robert Collins, #620386)
* \* New argument type \`\`ExistingPathArgument\`\` for use when commands want to take the name of a file. (Robert Collins)
* \* Test tags are now shown in failures. Of particular interest for folk debgging cross-test interactions will be the worker-N tags which indicate which backend test process executed a given test. (Robert Collins)
* \* Sphinx has been added to tie the documentation toghether (And it is available on testrepository.readthedocs.org). (Robert Collins)
* \* As a side effect of fixing bug #597060 additional arguments passed to testr run or testr list are only passed to the underlying test runner if they are preceeded by '--'. (Robert Collins, #597060)
* \* As a side effect of fixing bug #597060 additional arguments passed to testr run or testr list are only passed to the underlying test runner if they are preceeded by '--'. (Robert Collins, #597060)
* Pyflakes cleanup
* Add more docs and tricks, including the fact that this poking isn't actually the desired UI
* \* \`\`testr run --load-list FILENAME\`\` will limit the tests run to the test ids supplied in the list file FILENAME. This is useful for manually specifying the tests to run, or running testr subordinate to testr (e.g. on remote machines). (Robert Collins, partial fix for #597060)
* \`\`testr\`\` will now show the version. (Robert Collins)
* NEWS
* Show the version in testr --version
* \`\`testr run\`\` will now fail a test run if the test process exits non-zero. As a side effect of this change, if the test program closes its stdout but does not exit, \`\`testr run\`\` will hang (waiting for the test program to exit). (Robert Collins)
* Improvements from review - better docs, avoid race condition with process exit
* \* \`\`testr run\`\` will now fail a test run if the test process exits non-zero. (Robert Collins)
* Test that failures load detects are propogated to the exit code of run
* Ignore testrepository.egg-info
* Sketch out remote operation in manual
* IMPROVEMENTS ------------
* Release 0.0.7
* Change to using distribute
* \* stream loading will now synthesise datestamps before demultiplexing rather than on insertion into the repository. This fixes erroneously short times being recorded on non timestamped streams. Additionally, moving the automatic addition of timestamp material in front of the demuxer has removed the skew that caused test times to be reported as longer than the stream could indicate (by the amount of time the test runner took to start outputting subunit). This time may be something we want to track later, but the prior mechanism was inconsistent between the current run and reporting on prior runs, which lead to a very confusing UI. Now it is consistent, but totally ignores that overhead. (Robert Collins, #1048126, #980950)
* Stop test\_id\_list\_default masking test\_list\_option with some horribly convoluted code
* \* \`\`testr run\`\` now accepts a --concurrency option, allowing command line override of the number of workers spawned. This allows conccurency on operating systems where autodetection is not yet implemented, or just debugging problems with concurrent test suites. (Robert Collins, #957145)
* open 0.0.7
* Release 0.0.6
* \* Various cleanups of recent structural drift
* Implement hiding of tagged tests
* Compat with subunit trunk
* Refactor, pushing the result creation into the desired shape
* Glue into the active commands a way to get a customised filter, which the next patch will parameterise with tag stripping support
* Another case of the DBM API breakage in Python2.7 fixed
* \* \`\`testr\`\` will drop into PDB from its command line UI if the environment variable TESTR\_PDB is set. (Robert Collins)
* fix test failure when using trunk of testtools
* add worker-id tagging with fix to work with testtools trunk
* merge from trunk
* simplify test
* remove unneeded import
* add test for worker-id-tagging wrapper
* Update docstrings
* Test behaviour, not types
* Fixed description for --subunit
* Add --subunit and --full-results to load command
* Add --full-results switch to get unfiltered output
* add worker ID tagging, needs tests
* Add --subunit option to the run command to get subunit output
* Use an error object that consistently returns unicode, regardless of locale
* Changes suggested in review
* New revision
* New revision
* Fixed outpit encoding when stream has encoding set to None
* Handling non-ascii characters in output
* Add config option to set filter tags
* \* Test partitioning now handles a corner case where multiple tests have a reported duration of 0. Previously they could all accumulate into one partition, now they split across partitions (the length of a partition is used as a tie breaker if two partitions have the same duration). (Robert Collins, #914359)
* Show a delta between the last run and the run before it on 'testr load', 'testr run' and 'testr last'
* Show a plus when there are more tests
* Add a 'slowest' command that shows the slowest tests from the latest run
* Settle for what we have now
* Maybe cleaner
* Fix documentation
* Put in research findings
* Flag the autotimer extra code
* New TODO
* Already-done TODO
* Show the diff between last and the one previous run
* Include failures delta
* Include test run delta
* Time delta information
* Flakes
* Memory repository wasn't including time, which made it really confusing to get time information from previous test runs
* Pass through previous run
* Test to verify behaviour on empty repo
* Make the base UI a summary result
* Create a SummarizingResult separate from the UI
* Extend make\_result to accept previous run
* Convenience for getting the last run
* Add get\_id to TestRun interface
* Display test run time after loading
* Mark upstreaming
* Change the test filter to forward time results even for filtered tests
* Make the tests correspond closer to what they need to test
* Extract timedelta\_to\_seconds helper
* TODOs
* End with new line
* Actually use output\_summary. Update all of the tests for its new API
* No delta test for values
* Combining test to make sure
* Support other values. Change the API so that we pass in tests & time stuff explicitly
* Actually hook up output\_summary
* Handling for number of tests run and time taken
* Start of the testing structure for summary code
* Put output\_summary in the UI interface
* Flakes
* Fix a NameError in memory.py
* Push the implementation of gathering test ids in to the repo
* Add an option to show all rows
* Add another XXX
* Format the times such that the decimal places are aligned
* Limit the number of rows shown by default
* Add a header to the table
* First steps to having a "slowest" command
* Memory repository was only storing integer time. Make it store the actual given time. (Comes with test.)
* Add a test for jelmer's fix
* \* The test 'test\_outputs\_results\_to\_stdout' was sensitive to changes in testtools and has been made more generic. (Robert Collins)
* \* A horrible thinko in the testrepository test suite came to light and has been fixed. How the tests ever ran is a mystery. (Robert Collins, #881497)
* Fix up addUnexpectedSuccess
* Refactor the support for counting tests observed vs shown
* \* Python2.7 changed the interface for DBM.update, this has been worked around. (Robert Collins, #775214)
* And open up 0.0.6
* Mark current version as 0.0.5
* \* \`\`testr init-repo\`\` now has a \`\`--force-init\`\` option which when provided will cause a repository to be created just-in-time. (Jonathan Lange)
* Use assertThat
* Restore NEWS item lost in conflict
* Add a '--force-init' option and only initialize repo on load if it's passed
* Variable expansion no longer does python \ escape expansion. (Robert Collins, #694800)
* Make setup.py smoke test more specific again as requested in review
* Use str.replace not re.sub for testcommand interpolation to paper over design flaw
* Fix four of the remaining six failures here
* Add repository backend for samba buildfarm
* Use test timing data to allocate test partitions in --parallel
* Refactor to make it possible to use repository test times in parallel test partitioning
* The \`\`testrepository.repository.Repository\`\` interface now tracks test times for use in estimating test run duration and parallel test partitioning. (Robert Collins)
* Use parallel in self testing
* \* \`\`testr load\`\` and \`\`testr run\`\` now have a flag \`\`--partial\`\`. When set this will cause existing failures to be preserved. When not set, doing a load will reset existing failures. The \`\`testr run\`\` flag \`\`--failing\`\` implicitly sets \`\`--partial\`\` (so that an interrupted incremental test run does not incorrectly discard a failure record). The \`\`--partial\`\` flag exists so that deleted or renamed tests do not persist forever in the database. (Robert Collins)
* Improve help
* \* \`\`testr run\`\` no longer attempts to expand unknown variables. This permits the use of environmen variables to control the test run. For instance, ${PYTHON:-python} in the test\_command setting will run the command with $PYTHON or python if $PYTHON is not set. (Robert Collins, #595295)
* Improve MANUAL for parallel testing
* Parallel testing implemented
* \* \`\`testr list-tests\`\` is a new command that will list the tests for a project when \`\`.testr.conf\`\` has been configured with a \`\`test\_list\_option\`\`. (Robert Collins)
* Refactor: move responsibility for running tests into the TestCommand fixture
* Woops, forgot the decorator.py file
* \* \`\`testr run\`\` uses an in-process load rather than reinvoking testr. This should be faster on Windows and avoids the issue with running the wrong testr when PYTHONPATH but not PATH is set. (Robert Collins, #613129)
* Add a decorating UI for in-process reinvocation of subcommands
* Change model UI to permit passing in file objects as input streams
* \* \`\`testr load\`\` now loads all input streams in parallel. This has no impact on the CLI as yet, but permits API users to load from parallel processes. (Robert Collins)
* Some workarounds for Python 2.4 and windows issues
* Fix typo in INSTALL.txt
* \* \`\`testr run\`\` now uses a unique file name rather than hard coding failing.list - while not as clear, this permits concurrent testr invocations, or parallel testing from within testr, to execute safely. (Robert Collins)
* Allow hyphenated command names
* Change to using load-list in the testrepository .testr.conf now that it is available for testtools/subunit
* \* \`\`testr run\`\` now resets the SIGPIPE handler to default - which is what most Unix processes expect. (Robert Collins)
* \* \`\`testr run\`\` will now pass \`\`-q\`\` down to \`\`testr load\`\`. (Robert Collins, #529701)
* \* \`\`testr run\`\` will now pass -d to the \`\`testr load\`\` invocation, so that running \`\`testr run -d /some/path\`\` will work correctly. (Robert Collins, #529698)
* Unlink temporary failing files if the update of failing is interrupted
* \* Updates to next-stream are done via a temporary file to reduce the chance of an empty next-stream being written to disk. (Robert Collins, #531664)
* Nicer error message when reading next-stream fails
* Pull out some common test code
* \`\`setup.py\`\` will read the version number from PKG-INFO when it is running without a bzr tree : this makes it easier to snapshot without doing a release. (Jonathan Lange)
* Use PKG-INFO if it's present
* Also update INSTALL docs
* \* The testrepository test suite depends on testtools 0.9.8. (Robert Collins)
* testr load, last and failing now shows failures as they come
* Review comments
* Probably not
* Tests for results module
* Delete unused output\_run
* Oops
* Give the UI's TestResult object full responsibility for summing up the result of the test,
* Use Wildcard to make matching UI output a little nicer
* Wildcard object equal to everything
* Refactor the CLITestResult tests so they don't care so much about how results are constructed
* make\_result now takes a callable that returns the id of the test run. Not actually used yet
* Open 0.0.5
* Release 0.0.4
* Record a bugfix that was skipped
* Extend the UI interface to have support for listing tests. Use that support to list failing tests
* NEWS message
* Save some doc updates
* Add a --list option to failing to display list of failures
* Flakes
* Remove output\_results, not needed
* output\_results isn't needed
* Change 'failing' to delegate to the test result object
* output\_run doesn't need the output stream any more
* Change last to have the same structure as load, delegating to the UI's TestResult. Makes last() display output incrementally too
* Unnecessary variable
* As far as I can tell, this ought to display results incrementally
* Minor rephrasing to reduce the size of the change
* Use make\_result internally
* Add make\_result to the UI contract
* Flakes
* Display errors as soon as the ui finds out about them
* Extract out the logic for formatting errors
* Flakes
* Cleanups
* Expand tilde when initializing and opening file repositories
* Initialise the repository if we cannot find it
* Interface documentation. Because I care
* Create a RepositoryNotFound exception and raise it when we cannot open a repository
* Display values in foo=bar, baz=qux form, making it easier to match names to values
* Remove unnecessary lines
* Minor docstring improvement for Command
* Open 0.0.4 for development
* Release 0.0.3
* Add a .testr.conf - dogfooding ftw
* \`\`run\`\` can also supply test ids on the command, for test runners that want that
* \`\`run\`\` also passes arguments and options down to the child process
* New argument type \`\`StringArgument\`\` for use when a supplied argument is just a string, rather than a typed argument
* New subcommand \`\`run\`\` added which reads a .testr.conf file to figure out how to run tests with subunit output. It then runs them and pipes into testr load. This allows simpler integration and permits a programming interface so that tools like Tribunal/Eclipe etc can refresh tests in a testrepository
* \`\`failing\`\` now correctly calls \`\`repository.get\_failing\`\` and will this track all seen failures rather than just the latest observed failures
* Add a indirected subprocess.Popen to permit testing of the planned run command
* Fix last on a new repository
* Make failing track just failures properly
* Tweak MANUAL.txt docs
* Add a NEWS file and tweak MANUAL.txt
* Output errors during run
* Add error reporting facility
* First cut at failing --subunit [shows all tests so not right yet]
* Add stream output support for the UI
* Allow commands to add options
* Really add a 'failing' command
* Open 0.0.3
* 0.0.2: include all commands
* Make README.txt actual ReST
* Open 0.0.2
* Cut a 0.0.1 release
* Get a basic failing command going
* Basic last command
* Create an API for getting at inserted runs
* Add repo.latest\_id()
* Sketchy contract test for repo.count()
* Add a small stats command
* Add quickstart command
* Make the no-commands help invoke the help command
* Implement help command
* Hook arg parsing into command execution
* Add CommandArgument
* Define parsing for arguments
* Introduce an arguments package
* More dev docs
* Improve testrepository.tests.test\_suite to be more clear
* Design docs
* Add an output\_rest method to UI
* Remove unneeded import
* Add a command commands
* Add output\_table to ui
* Add commands.iter\_commands
* Separate StubPackages should use different temp dirs
* Close the loop on the test failure output from load
* Preserve timing data from loaded tests
* Show failures when loading tests
* Add ui.output\_results()
* Change ui.model to note what sort of output in preparation for output\_results
* Show skip counts too
* Show the run id too
* Report on failures
* Enforce per-stream output in load
* Output the test count from load
* Add a test that load -q doesn't output anything
* Add a very rudimentary structured output facility
* Add access to options and a global quiet option
* Make stopTestRun return a db reference
* Return stream status from load, and change make to use testr itself
* Create an explicit RepositoryFactory concept, a load command and implement opening of file based repositories
* Python2.4 does not have NamedTemporaryFile...delete=False, so use mkstemp directly
* Basic repository can-store-runs
* Teach UI about -d HERE and the concept of 'here'
* Outline streams API for ui objects, and tell ui objects about their command
* Change direct use of file.Repository to use a factory in the init command, and provide a repository\_factory to all commands
* Make repository initialisation a consistent interface on the Repository class
* Get the path to the repository from os.getcwd
* Finish minimal wiring up of Command.run
* run\_argv returns the result of command.execute()
* Command line commands are run with a CLI UI
* No commands -> some blurb
* Find commands after options
* Start on run\_argv
* Have initialise actually do something
* Start on the init command
* Switch to using Popen for test\_setup
* Add IRC channel to README.txt
* Outline the UI module and it's responsibilities
* Add command lookup logic
* Create testr executable
* Install a testr script
* Add CI link to README.txt
* Test that setup.py works
* Sync README with homepage notes
* Ignore test.xml (output from make check-xml)
* More docs, CI system info and a check-xml to permit easy CI integration
* Some more fictional docs
* Base project infrastructure

stestr-3.0.0/LICENSE

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

   (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

   (b) You must cause any modified files to carry prominent notices stating that You changed the files; and

   (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

   (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5754259 stestr-3.0.0/PKG-INFO0000644000175000017500000002026400000000000020310 0ustar00computertrekercomputertreker00000000000000Metadata-Version: 2.1 Name: stestr Version: 3.0.0 Summary: A parallel Python test runner built around subunit Home-page: http://stestr.readthedocs.io/en/latest/ Author: Matthew Treinish Author-email: mtreinish@kortar.org License: Apache-2.0 Project-URL: Documentation, https://stestr.readthedocs.io Project-URL: Source Code, https://github.com/mtreinish/stestr Project-URL: Bug Tracker, https://github.com/mtreinish/stestr/issues Description: stestr ====== .. image:: https://img.shields.io/travis/mtreinish/stestr/master.svg?style=flat-square :target: https://travis-ci.org/mtreinish/stestr :alt: Build status .. 
image:: https://dev.azure.com/stestr/stestr/_apis/build/status/mtreinish.stestr?branchName=master :target: https://dev.azure.com/stestr/stestr/_build/latest?definitionId=1&branchName=master :alt: Azure DevOps build status .. image:: https://img.shields.io/coveralls/github/mtreinish/stestr/master.svg?style=flat-square :target: https://coveralls.io/github/mtreinish/stestr?branch=master :alt: Code coverage .. image:: https://img.shields.io/pypi/v/stestr.svg?style=flat-square :target: https://pypi.python.org/pypi/stestr :alt: Latest Version * Read this in other languages: `English`_, `日本語`_ * You can see the full rendered docs at: http://stestr.readthedocs.io/en/latest/ * The code of the project is on Github: https://github.com/mtreinish/stestr .. _English: https://github.com/mtreinish/stestr/blob/master/README.rst .. _日本語: https://github.com/mtreinish/stestr/blob/master/README_ja.rst .. note:: stestr v2.x.x release series will be the last series that supports Python 2. Support for Python 2.7 was dropped in stestr release 3.0.0. Overview -------- stestr is parallel Python test runner designed to execute `unittest`_ test suites using multiple processes to split up execution of a test suite. It also will store a history of all test runs to help in debugging failures and optimizing the scheduler to improve speed. To accomplish this goal it uses the `subunit`_ protocol to facilitate streaming and storing results from multiple workers. .. _unittest: https://docs.python.org/3/library/unittest.html .. _subunit: https://github.com/testing-cabal/subunit stestr originally started as a fork of the `testrepository`_ project. But, instead of being an interface for any test runner that used subunit, like testrepository, stestr concentrated on being a dedicated test runner for python projects. While stestr was originally forked from testrepository it is not backwards compatible with testrepository. At a high level the basic concepts of operation are shared between the two projects but the actual usage is not exactly the same. .. _testrepository: https://testrepository.readthedocs.org/en/latest Installing stestr ----------------- stestr is available via pypi, so all you need to do is run:: pip install -U stestr to get stestr on your system. If you need to use a development version of stestr you can clone the repo and install it locally with:: git clone https://github.com/mtreinish/stestr.git && pip install -e stestr which will install stestr in your python environment in editable mode for local development Using stestr ------------ After you install stestr to use it to run tests is pretty straightforward. The first thing you'll want to do is create a ``.stestr.conf`` file for your project. This file is used to tell stestr where to find tests and basic information about how tests are run. A basic minimal example of the contents of this is:: [DEFAULT] test_path=./project_source_dir/tests which just tells stestr the relative path for the directory to use for test discovery. This is the same as ``--start-directory`` in the standard `unittest discovery`_. .. _unittest discovery: https://docs.python.org/3/library/unittest.html#test-discovery After this file is created you should be all set to start using stestr to run tests. To run tests just use:: stestr run it will first create a results repository at ``.stestr/`` in the current working directory and then execute all the tests found by test discovery. 
If you're just running a single test (or module) and want to avoid the overhead of doing test discovery you can use the ``--no-discover``/``-n`` option to specify that test. For all the details on these commands and more thorough explanation of options see the stestr manual: https://stestr.readthedocs.io/en/latest/MANUAL.html Migrating from testrepository ----------------------------- If you have a project that is already using testrepository stestr's source repo contains a helper script for migrating your repo to use stestr. This script just creates a ``.stestr.conf`` file from a ``.testr.conf`` file. (assuming it uses a standard subunit.run test command format) To run this from your project repo just call:: $STESTR_SOURCE_DIR/tools/testr_to_stestr.py and you'll have a ``.stestr.conf`` created. Building a manpage ------------------ The stestr manual has been formatted so that it renders well as html and as a manpage. The html output and is autogenerated and published to: https://stestr.readthedocs.io/en/latest/MANUAL.html but the manpage has to be generated by hand. To do this you have to manually run sphinx-build with the manpage builder. This has been automated in a small script that should be run from the root of the stestr repository:: tools/build_manpage.sh which will generate the troff file in doc/build/man/stestr.1 which is ready to be packaged and or put in your system's man pages. Contributing ------------ To browse the latest code, see: https://github.com/mtreinish/stestr To clone the latest code, use: ``git clone https://github.com/mtreinish/stestr.git`` Guidelines for contribution are documented at: http://stestr.readthedocs.io/en/latest/developer_guidelines.html Use `github pull requests`_ to submit patches. Before you submit a pull request ensure that all the automated testing will pass by running ``tox`` locally. This will run the test suite and also the automated style rule checks just as they will in CI. If CI fails on your change it will not be able to merge. .. _github pull requests: https://help.github.com/articles/about-pull-requests/ Community --------- Besides Github interactions there is also a stestr IRC channel: #stestr on Freenode feel free to join to ask questions, or just discuss stestr. Platform: UNKNOWN Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: Software Development :: Testing Classifier: Topic :: Software Development :: Quality Assurance Provides-Extra: sql Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585224131.0 stestr-3.0.0/README.rst0000644000175000017500000001353400000000000020704 0ustar00computertrekercomputertreker00000000000000stestr ====== .. image:: https://img.shields.io/travis/mtreinish/stestr/master.svg?style=flat-square :target: https://travis-ci.org/mtreinish/stestr :alt: Build status .. 
image:: https://dev.azure.com/stestr/stestr/_apis/build/status/mtreinish.stestr?branchName=master
    :target: https://dev.azure.com/stestr/stestr/_build/latest?definitionId=1&branchName=master
    :alt: Azure DevOps build status

.. image:: https://img.shields.io/coveralls/github/mtreinish/stestr/master.svg?style=flat-square
    :target: https://coveralls.io/github/mtreinish/stestr?branch=master
    :alt: Code coverage

.. image:: https://img.shields.io/pypi/v/stestr.svg?style=flat-square
    :target: https://pypi.python.org/pypi/stestr
    :alt: Latest Version

* Read this in other languages: `English`_, `日本語`_
* You can see the full rendered docs at: http://stestr.readthedocs.io/en/latest/
* The code of the project is on Github: https://github.com/mtreinish/stestr

.. _English: https://github.com/mtreinish/stestr/blob/master/README.rst
.. _日本語: https://github.com/mtreinish/stestr/blob/master/README_ja.rst

.. note:: The stestr v2.x.x release series will be the last series that
    supports Python 2. Support for Python 2.7 was dropped in stestr release
    3.0.0.

Overview
--------

stestr is a parallel Python test runner designed to execute `unittest`_ test
suites using multiple processes to split up execution of a test suite. It
will also store a history of all test runs to help in debugging failures and
in optimizing the scheduler to improve speed. To accomplish this goal it uses
the `subunit`_ protocol to facilitate streaming and storing results from
multiple workers.

.. _unittest: https://docs.python.org/3/library/unittest.html
.. _subunit: https://github.com/testing-cabal/subunit

stestr originally started as a fork of the `testrepository`_ project. But,
instead of being an interface for any test runner that used subunit, like
testrepository, stestr concentrates on being a dedicated test runner for
python projects. While stestr was originally forked from testrepository it is
not backwards compatible with testrepository. At a high level the basic
concepts of operation are shared between the two projects but the actual
usage is not exactly the same.

.. _testrepository: https://testrepository.readthedocs.org/en/latest

Installing stestr
-----------------

stestr is available via pypi, so all you need to do is run::

    pip install -U stestr

to get stestr on your system. If you need to use a development version of
stestr you can clone the repo and install it locally with::

    git clone https://github.com/mtreinish/stestr.git && pip install -e stestr

which will install stestr in your python environment in editable mode for
local development.

Using stestr
------------

After you install stestr, using it to run tests is pretty straightforward.
The first thing you'll want to do is create a ``.stestr.conf`` file for your
project. This file is used to tell stestr where to find tests and basic
information about how tests are run. A basic minimal example of the contents
of this is::

    [DEFAULT]
    test_path=./project_source_dir/tests

which just tells stestr the relative path for the directory to use for test
discovery. This is the same as ``--start-directory`` in the standard
`unittest discovery`_.

.. _unittest discovery: https://docs.python.org/3/library/unittest.html#test-discovery

After this file is created you should be all set to start using stestr to run
tests. To run tests just use::

    stestr run

This will first create a results repository at ``.stestr/`` in the current
working directory and then execute all the tests found by test discovery.
If you're just running a single test (or module) and want to avoid the
overhead of doing test discovery you can use the ``--no-discover``/``-n``
option to specify that test.

For all the details on these commands and a more thorough explanation of the
options see the stestr manual:
https://stestr.readthedocs.io/en/latest/MANUAL.html

Migrating from testrepository
-----------------------------

If you have a project that is already using testrepository, stestr's source
repo contains a helper script for migrating your repo to use stestr. This
script just creates a ``.stestr.conf`` file from a ``.testr.conf`` file
(assuming it uses a standard subunit.run test command format). To run this
from your project repo just call::

    $STESTR_SOURCE_DIR/tools/testr_to_stestr.py

and you'll have a ``.stestr.conf`` created.

Building a manpage
------------------

The stestr manual has been formatted so that it renders well as html and as a
manpage. The html output is autogenerated and published to:
https://stestr.readthedocs.io/en/latest/MANUAL.html but the manpage has to be
generated by hand. To do this you have to manually run sphinx-build with the
manpage builder. This has been automated in a small script that should be run
from the root of the stestr repository::

    tools/build_manpage.sh

which will generate the troff file in doc/build/man/stestr.1, ready to be
packaged and/or put in your system's man pages.

Contributing
------------

To browse the latest code, see: https://github.com/mtreinish/stestr

To clone the latest code, use:
``git clone https://github.com/mtreinish/stestr.git``

Guidelines for contribution are documented at:
http://stestr.readthedocs.io/en/latest/developer_guidelines.html

Use `github pull requests`_ to submit patches. Before you submit a pull
request ensure that all the automated testing will pass by running ``tox``
locally. This will run the test suite and also the automated style rule
checks just as they will in CI. If CI fails on your change it will not be
able to merge.

.. _github pull requests: https://help.github.com/articles/about-pull-requests/

Community
---------

Besides Github interactions there is also a stestr IRC channel: #stestr on
Freenode. Feel free to join to ask questions, or just discuss stestr.

stestr-3.0.0/README_ja.rst

stestr(日本語訳)
===================

.. image:: https://img.shields.io/travis/mtreinish/stestr/master.svg?style=flat-square
    :target: https://travis-ci.org/mtreinish/stestr
    :alt: Build status

.. image:: https://dev.azure.com/stestr/stestr/_apis/build/status/mtreinish.stestr?branchName=master
    :target: https://dev.azure.com/stestr/stestr/_build/latest?definitionId=1&branchName=master
    :alt: Azure DevOps build status

.. image:: https://img.shields.io/coveralls/github/mtreinish/stestr/master.svg?style=flat-square
    :target: https://coveralls.io/github/mtreinish/stestr?branch=master
    :alt: Code coverage

.. image:: https://img.shields.io/pypi/v/stestr.svg?style=flat-square
    :target: https://pypi.python.org/pypi/stestr
    :alt: Latest Version

* 他の言語で読む場合はこちら: `English`_, `日本語`_
* フルレンダリングされたドキュメントはこちら: http://stestr.readthedocs.io/en/latest/
* プロジェクトのコードは GitHub にあります: https://github.com/mtreinish/stestr

.. _English: https://github.com/mtreinish/stestr/blob/master/README.rst
.. _日本語: https://github.com/mtreinish/stestr/blob/master/README_ja.rst

..
note:: stestr v2.x.x リリースシリーズは、Python 2 をサポートする最後のシリ ーズとなります。Python 2.7のサポートは「stestr リリース 3.0.0」 にて打ち切られました。 概要 ---- stestr は、パラレル Python テスト実行プログラムであり、一つのテストスイート を、分割実行するために、複数プロセスを使い、 `unittest`_ テストスイートを、 実行するようデザインされています。また、実行失敗のデバッグや実行速度改善に向け たスケジューラ最適化のために、すべてのテスト実行履歴を保存しています。この目標 達成のため、stestrでは、 `subunit`_ プロトコルを使用し、ストリーミングや、 複数ワーカーからの結果を保存することを容易にしています。 .. _unittest: https://docs.python.org/3/library/unittest.html .. _subunit: https://github.com/testing-cabal/subunit stestr は、元々 `testrepository`_ プロジェクトのフォークとして始まりました。 しかし、subunit を使用する testrepository のようなあらゆるテストランナー インターフェースとなる代わりに、stestr は、python プロジェクトに特化・集中 したテストランナーです。stestr は、元々 testrepository からフォークしました が、testrepository との後方互換性はありません。高いレベルでの基本的な実行 コンセプトは、それら2つのプロジェクトの間で共有されているものの、実際の使用法 は、完全に同一というわけでありません。 .. _testrepository: https://testrepository.readthedocs.org/en/latest stestr のインストール ----------------------- stestr は、pypi 経由で利用可能です。そのため、以下を実行するだけで:: pip install -U stestr あなたのシステムに、stestr を取得することができます。もし、開発バージョンの stestr を使う必要があれば、リポジトリをクローンし、ローカルにインストール することができます:: git clone https://github.com/mtreinish/stestr.git && pip install -e stestr この操作で、stestr をあなたの python 環境のローカル開発環境に対し、編集可能 モードでインストールできます。 stestr の利用 ----------------- stestr のインストール後、テスト実行のために使う方法は、とても簡単です。まずはじめに、 ``.stestr.conf`` ファイルをあなたのプロジェクトのために作成します。この ファイルは、「どこにテストがあるのか」「どうやってテストを実行する のか」という基本的な情報を stestr に伝えます。基本最小限の内容例としては次の ようなものとなります:: [DEFAULT] test_path=./project_source_dir/tests この記述は、テスト探索のために使われるディレクトリの相対パスを、stestr に伝え ます。これは、標準的な `unittest discovery`_ の ``--start-directory`` と 同様です。 .. _unittest discovery: https://docs.python.org/3/library/unittest.html#test-discovery このファイルを作成すれば、stestr を使い始めるためにやるべきことはすべて完了 です。テストを実行するためには、単に次のように使うだけです:: stestr run これにより、まず、結果を保持するためのリポジトリが、カレントワーキング ディレクトリの ``.stestr/`` に作成され、テスト探索により見つかったテストが すべて実行されます。もし、あなたが、単にひとつのテスト(あるいはモジュール)を 実行し、テスト探索によるオーバーヘッドを避けたいのであれば、``--no-discover`` もしくは ``-n`` オプションをそのテストに対して指定し、実行することにより 可能です。 これらのコマンドの詳細は、さらなるオプションの説明は、stestr マニュアルを 参照してください: https://stestr.readthedocs.io/en/latest/MANUAL.html testrepository からの移行 ----------------------------- もし、testrepository を既に使用しているプロジェクトを持っているのであれば、 stestr のソースリポジトリには、あなたのリポジトリを stestr を利用するように 移行するための、ヘルパースクリプトがあります。このスクリプトは、単に、 ``.testr.conf`` ファイルから、 ``.stestr.conf`` ファイルを作成します。 (標準的な subunit.run テストコマンド形式を利用していることを想定しています) これを実行するためには、あなたのプロジェクトリポジトリで、以下を実行します:: $STESTR_SOURCE_DIR/tools/testr_to_stestr.py これにより、 ``.stestr.conf`` が作成されます。 manpage の生成 ------------------ stestr マニュアルは、htmlと同様に、manpage としてもレンダリングするために整形 されています。html 出力物と自動生成され公開されているものはこちらです: https://stestr.readthedocs.io/en/latest/MANUAL.html しかしながら、その manpage は、手動で生成する必要があります。このためには、手動で sphinx-build コマンドを manpage builder とともに実行する必要があります。これは、簡単なスクリプトで 自動化されており、 stestr リポジトリのルートディレクトリで以下を実行します:: tools/build_manpage.sh これにより、troff ファイルが doc/build/man/stestr.1 に作成され、それは、 パッケージ可能で、あなたのシステムの man page としても配置可能です。 コントリビューション方法 ------------------------ 最新コードの参照: https://github.com/mtreinish/stestr 最新コードのクローン: ``git clone https://github.com/mtreinish/stestr.git`` コントリビューションのガイドラインドキュメント: http://stestr.readthedocs.io/en/latest/developer_guidelines.html パッチを出すためには、`github pull requests`_ を使用してください。 プルリクエストを出す前には、手元の環境で ``tox`` を実行して、すべての自動 テストがパスすることを確認してください。これは、CI環境で実行されるものと同様の テストスイートならびに、自動スタイルチェックを実行します。もし、あなたの変更に より、CI が fail となった場合、その変更はマージすることができません。 .. 
_github pull requests: https://help.github.com/articles/about-pull-requests/

コミュニティ
-------------

GitHub でのやり取りに加え、stestr の IRC チャネルもあります: Freenode の
#stestr チャネル

stestr に関する質問、もしくは議論をしていますので、気軽に参加してください。

stestr-3.0.0/azure-pipelines.yml

# Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish
# to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python

trigger:
- master

jobs:
- job: 'Windows_Tests'
  pool: {vmImage: 'vs2017-win2016'}
  strategy:
    matrix:
      Python35:
        python.version: '3.5'
      Python36:
        python.version: '3.6'
      Python37:
        python.version: '3.7'
      Python38:
        python.version: '3.8'
  steps:
  - task: UsePythonVersion@0
    inputs:
      versionSpec: '$(python.version)'
    displayName: 'Use Python $(python.version)'
  - script: choco install vcpython27 --yes
    condition: eq(variables['python.version'], '2.7')
    displayName: 'Install vcpython27'
  - script: |
      python -m pip install --upgrade pip
      pip install -U tox
    displayName: 'Install dependencies'
  - script: tox -e py
    displayName: 'Run Tox'

- job: 'macOS_Tests'
  pool: {vmImage: 'macOS-10.14'}
  strategy:
    matrix:
      Python35:
        python.version: '3.5'
      Python36:
        python.version: '3.6'
      Python37:
        python.version: '3.7'
      Python38:
        python.version: '3.8'
  steps:
  - task: UsePythonVersion@0
    inputs:
      versionSpec: '$(python.version)'
    displayName: 'Use Python $(python.version)'
  - script: |
      python -m pip install --upgrade pip
      pip install -U tox
    displayName: 'Install dependencies'
  - script: tox -e py
    displayName: 'Run Tox'

stestr-3.0.0/doc/source/CONTRIBUTING.rst

Contributing
============

To browse the latest code, see: https://github.com/mtreinish/stestr

To clone the latest code, use:
``git clone https://github.com/mtreinish/stestr.git``

Guidelines for contribution are documented at:
http://stestr.readthedocs.io/en/latest/developer_guidelines.html

Use `github pull requests`_ to submit patches. Before you submit a pull
request ensure that all the automated testing will pass by running ``tox``
locally. This will run the test suite and also the automated style rule
checks just as they will in CI. If CI fails on your change it will not be
able to merge.

.. _github pull requests: https://help.github.com/articles/about-pull-requests/

stestr-3.0.0/doc/source/MANUAL.rst

.. _manual:

stestr user manual
==================

Usage
-----
.. autoprogram-cliff:: stestr.cli.StestrCLI
   :application: stestr

.. autoprogram-cliff:: stestr.cm
   :application: stestr

Overview
--------

stestr is an application for running and tracking test results. Any test run
that can be represented as a subunit stream can be inserted into a
repository. However, the test running mechanism assumes python is being used.
It is originally forked from the testrepository project so the usage is
similar.

A typical basic example workflow is::

    # Create a store to manage test results in.
    $ stestr init
    # Do a test run
    $ stestr run

Most commands in stestr have comprehensive online help, and the commands::

    $ stestr --help
    $ stestr [command] --help

will be useful to explore the system.

Configuration
-------------

To configure stestr for a project you can write a stestr configuration file.
This lets you set basic information about how tests are run for a project. By
default the config file needs to be ``.stestr.conf`` in the same directory
that stestr is run from, normally the root of a project's repository.
However, the ``--config``/``-c`` CLI argument can specify an alternate path
for it.

The two most important options in the stestr config file are ``test_path``
and ``top_dir``. These two options are used to set the `unittest discovery`_
options for stestr. (``test_path`` is the same as ``--start-directory`` and
``top_dir`` is the same as ``--top-level-directory`` in the doc.) Only
``test_path`` is a required field in the config file; if ``top_dir`` is not
specified it defaults to ``./``. It's also worth noting that shell variables
for these two config options (and only these two options) are expanded on
platforms that have a shell. This enables you to have conditional discovery
paths based on your environment.

.. _unittest discovery: https://docs.python.org/3/library/unittest.html#test-discovery

For example, having a config file like::

    [DEFAULT]
    test_path=${TEST_PATH:-./foo/tests}

will let you override the discovery start path using the TEST_PATH
environment variable.

A full example config file is::

    [DEFAULT]
    test_path=./project/tests
    top_dir=./
    group_regex=([^\.]*\.)*

The ``group_regex`` option provides a scheduler hint for how tests should be
divided between test runners. See the :ref:`group_regex` section for more
information on how this works. You can also specify ``parallel_class=True``
instead of ``group_regex`` to group tests in the stestr scheduler together by
class. Since grouping by class is a common use case this enables it without
needing to memorize the complicated ``group_regex`` needed to do this.

There is also an option to specify all the options in the config file via the
CLI. This way you can run stestr directly without having to write a config
file and manually specify the test_path like above with the
``--test-path``/``-t`` CLI argument.
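For instance, combining the shell-variable expansion shown above with an
environment override gives a quick way to switch discovery paths per run.
This is only a sketch: the ``TEST_PATH`` variable comes from the example
config above, and the alternate path is hypothetical::

    # use the default path baked into .stestr.conf
    $ stestr run

    # point discovery at a different test tree for this run only
    $ TEST_PATH=./foo/integration_tests stestr run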
Running tests
-------------

To run tests the ``stestr run`` command is used. By default this will run all
tests discovered using the discovery parameters in the stestr config file.

If you'd like to avoid the overhead of test discovery and just manually
execute a single test (test class, or module) you can do this using the
``--no-discover``/``-n`` option. For example::

    $ stestr run --no-discover project.tests.test_foo.TestFoo

You can also give it a file path and stestr will convert that to the proper
python path under the covers (assuming your project doesn't manually
manipulate import paths). For example::

    $ stestr run --no-discover project/tests/test_foo.py

will also bypass discovery and directly call the test runner on the module
specified. Additionally you can specify a specific class or method within
that file using ``::`` to separate the class and method. For example::

    $ stestr run --no-discover project/tests/test_foo.py::TestFoo::test_method

will skip discovery and directly call the test runner on the test method in
the specified test class.

Running with pdb
''''''''''''''''

If you'd like to run pdb during the execution of the tests you should use the
``--pdb`` flag on ``stestr run``. This flag behaves the same way as the
``--no-discover`` flag except that it does not launch an external process to
run the tests. This enables pdb to work as expected without any issues, with
the tradeoff that output from the test runner will occur after tests have
finished execution.

It's also worth noting that if you are using a fixture to capture stdout
(which is a common practice for parallel test execution) you'll likely want
to disable that fixture when running with pdb. Those fixtures can often
interfere with pdb's output and will sometimes capture output from pdb.

Test Selection
--------------

Arguments passed to ``stestr run`` are used to filter test ids that will be
run. stestr will perform unittest discovery to get a list of all test ids and
then apply each argument as a regex filter. Tests that match any of the given
filters will be run. For example, if you called ``stestr run foo bar`` this
will only run the tests that have a regex match with foo **or** a regex match
with bar.

stestr allows you to do simple test exclusion by passing a rejection/black
regexp::

    $ stestr run --black-regex 'slow_tests|bad_tests'

stestr also allows you to combine these arguments::

    $ stestr run --black-regex 'slow_tests|bad_tests' ui\.interface

Here we first selected all tests which match ``ui\.interface``, and then
dropped all tests which match ``slow_tests|bad_tests`` from the final list.

stestr also allows you to specify a blacklist file to define a set of regexes
to exclude. You can specify a blacklist file with the
``--blacklist-file``/``-b`` option, for example::

    $ stestr run --blacklist-file $path_to_file

The format for the file is line separated regex, with ``#`` used to signify
the start of a comment on a line. For example::

    # Blacklist File
    ^regex1 # Excludes these tests
    .*regex2 # exclude those tests

The regexes used in the blacklist file or passed as arguments will be used to
drop tests from the initial selection list. This example will generate a list
which excludes any tests matching ``^regex1`` or ``.*regex2``. If a blacklist
file is used in conjunction with the normal filters then the regex filters
passed in as arguments will be used for the initial test selection, and the
exclusion regexes from the blacklist file are applied on top of that.

The dual of the blacklist file is the whitelist file which will include any
tests matching the regexes in the file. You can specify the path to the file
with ``--whitelist-file``/``-w``, for example::

    $ stestr run --whitelist-file $path_to_file

The format for the file is more or less identical to the blacklist file::

    # Whitelist File
    ^regex1 # Include these tests
    .*regex2 # include those tests

However, instead of excluding the matches it will include them.

It's also worth noting that you can use the test list option to dry run any
selection arguments you are using. You just need to use ``stestr list`` with
your selection options to do this, for example::

    $ stestr list 'regex3.*' --blacklist-file blacklist.txt

This will list all the tests which will be run by stestr using that
combination of arguments.
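Putting these selection mechanisms together, a workflow might look like the
following sketch (the ``skip.txt`` file and the filter are purely
illustrative)::

    $ cat skip.txt
    # too slow for a local development loop
    .*slow_tests.*

    # dry run the selection first, then execute it
    $ stestr list 'ui\.interface' --blacklist-file skip.txt
    $ stestr run --blacklist-file skip.txt 'ui\.interface'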
Adjusting test run output
-------------------------

By default the ``stestr run`` command uses an output filter called
subunit-trace, as does the ``stestr last`` command. This displays the tests
as they are finished executing, as well as their worker and status. It also
prints aggregate numbers about the run at the end. You can read more about
subunit-trace in the module doc: :ref:`subunit_trace`.

However, the test run output is configurable. You can disable this output
with the ``--no-subunit-trace`` flag, which will be completely silent except
for any failures it encounters. There is also the ``--color`` flag which will
enable colorization of the subunit-trace output. If you prefer to deal with
the raw subunit yourself and run your own output rendering or filtering you
can use the ``--subunit`` flag to output the result stream as raw subunit v2.

There is also an ``--abbreviate`` flag available; when this is used a single
character is printed for each test as it is executed: a ``.`` is printed for
a successful test, an ``F`` for a failed test, and an ``S`` for a skipped
test.

In the default subunit-trace output any captured output to stdout and stderr
is printed after test execution, for both successful and failed tests.
However, in some cases printing these attachments on a successful test is not
the preferred behavior. You can use the ``--suppress-attachments`` flag to
disable printing stdout or stderr attachments for successful tests.

While by default attachments for captured stdout and stderr are printed, it
is also possible that a test has other text attachments (a common example is
python logging) which are not printed on successful test execution, only on
failures. If you would like to have these attachments also printed for
successful tests you can use the ``--all-attachments`` flag to print all text
attachments on both successful and failed tests.

Both ``--all-attachments`` and ``--suppress-attachments`` can not be set at
the same time. If both are set in the user config file then the
``suppress-attachments`` flag will take priority and no attachments will be
printed for successful tests. If either ``--suppress-attachments`` or
``--all-attachments`` is set via the CLI it will take precedence over
matching options set in the user config file.

Combining Test Results
----------------------

There is sometimes a use case for running a single test suite split between
multiple invocations of the ``stestr run`` command. For example, running a
subset of tests with a different concurrency. In these cases you can use the
``--combine`` flag on ``stestr run``. When this flag is specified stestr will
append the subunit stream from the test run into the most recent entry in the
repository.

Alternatively, you can manually load the test results from a subunit stream
into an existing test result in the repository using the ``--id``/``-i`` flag
on the ``stestr load`` command. This will append the results from the input
subunit stream to the specified id.
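For example, a split run that keeps everything in a single repository entry
might look like this sketch (the ``api\.serial`` filter, the run id ``0``,
and the stream file name are hypothetical)::

    # run most of the suite in parallel
    $ stestr run
    # append a serial-only subset to the same run
    $ stestr run --combine --concurrency 1 'api\.serial'
    # or append an externally generated subunit v2 stream to run 0
    $ stestr load --id 0 < external-results.subunit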
Running previously failed tests
-------------------------------

``stestr run`` also enables you to run just the tests that failed in the
previous run. To do this you can use the ``--failing`` argument. A common
workflow using this is:

#. Run tests (and some fail)::

    $ stestr run

#. Fix currently broken tests - repeat until there are no failures::

    $ stestr run --failing

#. Do a full run to find anything that regressed during the reduction
   process::

    $ stestr run

Another common use case is repeating a failure that occurred on a remote
machine (e.g. during a jenkins test run). There are a few common ways to
approach this. Firstly, if you have a subunit stream from the run you can
just load it::

    $ stestr load < failing-stream

and then run the tests which failed from that loaded run::

    $ stestr run --failing

If using a file type repository (which is the default) the streams generated
by test runs are in the repository path, which defaults to *.stestr/* in the
working directory, and are stored in files named for their run id - e.g.
.stestr/0 is the first run.

.. note:: For right now these files are stored in the subunit v1 format, but
    all of the stestr commands, including load, only work with the subunit v2
    format. This can be converted using the **subunit-1to2** tool in the
    `python-subunit`_ package.

.. _python-subunit: https://pypi.org/project/python-subunit/

If you have access to the remote machine you can also get the subunit stream
by running::

    $ stestr last --subunit > failing-stream

This is often a bit easier than trying to manually pull the stream file out
of the .stestr directory. (Also, it will already be in the subunit v2
format.)

If you do not have a stream or access to the machine you may be able to use a
list file. If you can get a file that contains one test id per line, you can
run the named tests like this::

    $ stestr run --load-list FILENAME

This can also be useful when dealing with sporadically failing tests, or
tests that only fail in combination with some other test - you can bisect the
tests that were run to get smaller and smaller (or larger and larger) test
subsets until the error is pinpointed.

``stestr run --until-failure`` will run your test suite again and again and
again, stopping only when interrupted or a failure occurs. This is useful for
repeating timing-related test failures.

Listing tests
-------------

To see a list of tests found by stestr you can use the ``stestr list``
command. This will list all tests found by discovery.

You can also use this to see what tests will be run by a given stestr run
command. For instance, the tests that ``stestr run myfilter`` will run are
shown by ``stestr list myfilter``. As with the run command, arguments to list
are used to regex filter the tests.
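Since the list output is one test id per line, it can also be used to build a
list file for ``--load-list``. A minimal sketch, with an illustrative filter
and file name::

    $ stestr list '^project\.tests\.test_foo' > my-tests.list
    $ stestr run --load-list my-tests.list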
Parallel testing
----------------

stestr runs tests in parallel by default::

    $ stestr run

This will first list the tests, partition the tests into one partition per
CPU on the machine, and then invoke multiple test runners at the same time,
with each test runner getting one partition. Currently the partitioning
algorithm is simple round-robin for tests that stestr has not seen run
before, and equal-time buckets for tests that stestr has seen run.

To determine how many CPUs are present in the machine, stestr will use the
multiprocessing Python module. On operating systems where this is not
implemented, or if you need to control the number of workers that are used,
the ``--concurrency`` option will let you do so::

    $ stestr run --concurrency=2

When running tests in parallel, stestr adds a tag for each test to the
subunit stream to show which worker executed that test. The tags are of the
form ``worker-%d`` and are usually used to reproduce test isolation failures,
where knowing exactly what test ran on a given worker is important. The %d
that is substituted in is the partition number of tests from the test run -
all tests in a single run with the same worker-N ran in the same test runner
instance.

To find out which worker a failing test ran on just look at the 'tags' line
in its test error::

    ======================================================================
    label: testrepository.tests.ui.TestDemo.test_methodname
    tags: foo worker-0
    ----------------------------------------------------------------------
    error text

And then find tests with that tag::

    $ stestr last --subunit | subunit-filter -s --xfail --with-tag=worker-3 | subunit-ls > worker-3.list

.. _group_regex:

Grouping Tests
--------------

In certain scenarios you may want to group tests of a certain type together
so that they will be run by the same worker process. The ``group_regex``
option in the stestr config file permits this. When set, tests are grouped by
the entire matching portion of the regex. The match must begin at the start
of the string. Tests with no match are not grouped.

For example, setting the following option in the stestr config file will
group tests in the same class together (the last '.' splits the class and
test method)::

    group_regex=([^\.]+\.)+

However, because grouping tests at the class level is a common use case there
is also a config option, ``parallel_class``, to do this. For example, you can
use::

    parallel_class=True

and it will group tests in the same class together.

.. note:: The ``parallel_class`` option takes priority over the
    ``group_regex`` option. And if a value is set both on the CLI and in the
    config file, the CLI option is used rather than the one in the config
    file. For example, if ``--group-regex`` is set on the CLI and
    ``parallel-class`` is set in a config file, ``--group-regex`` takes the
    higher priority.
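To make the grouping rule concrete, here is a minimal Python sketch (not
stestr's internal API, just the documented regex applied by hand) showing how
the matching portion of a test id becomes the group key::

    import re

    group_regex = re.compile(r'([^\.]+\.)+')
    test_id = 'project.tests.test_foo.TestFoo.test_method'
    # The entire matched portion is the grouping key; the final
    # unmatched segment is the test method name.
    match = group_regex.match(test_id)
    print(match.group(0))  # -> 'project.tests.test_foo.TestFoo.'

Every test id that yields the same key runs in the same worker, which is why
this regex groups tests by class.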
Test Scheduling
---------------

By default stestr schedules the tests by first checking if there is any
historical timing data on any tests. It then sorts the tests by that timing
data, loops over the tests in order, and adds one to each worker that it will
launch. For tests without timing data, the same is done, except the tests are
in alphabetical order instead of being based on timing data. If a group regex
is used the same algorithm is used with groups instead of individual tests.

However there are options to adjust how stestr will schedule tests. The
primary option to do this is to manually schedule all the tests run. To do
this use the ``--worker-file`` option for stestr run. This takes a path to a
yaml file that instructs stestr how to run tests. It is formatted as a list
of dicts, each with a single element containing a list describing the tests
to run on each worker. For example::

    - worker:
      - regex 1
    - worker:
      - regex 2
      - regex 3

would create 2 workers. The first would run all tests that match regex 1, and
the second would run all tests that match regex 2 or regex 3.

In addition, if you need to mix manual scheduling and the standard scheduling
mechanisms you can accomplish this with the ``concurrency`` field on a worker
in the yaml. For example, building on the previous example::

    - worker:
      - regex 1
    - worker:
      - regex 2
      - regex 3
    - worker:
      - regex 4
      concurrency: 3

In this case the tests that match regex 4 will be run against 3 workers and
the tests will be partitioned across those workers with the normal scheduler.
This includes respecting the other scheduler options, like ``group_regex`` or
``--random``.

There is also an option on ``stestr run``, ``--random``, to randomize the
order of tests as they are passed to the workers. This is useful in certain
use cases, especially when you want to test isolation between test cases.

User Config Files
-----------------

If you prefer to have a different default output or setting for a particular
command stestr enables you to write a user config file to override the
defaults for some options on some commands. By default stestr will look for
this config file in ``~/.stestr.yaml`` and ``~/.config/stestr.yaml`` in that
order. You can also specify the path to a config file with the
``--user-config`` parameter.

The config file is a yaml file that has a top level key for the command and
then a sub key for each option. For example, a fully populated config file
that changes the default on all available options in the config file is::

    run:
      concurrency: 42  # This can be any integer value >= 0
      random: True
      no-subunit-trace: True
      color: True
      abbreviate: True
      slowest: True
      suppress-attachments: True
      all-attachments: True
    failing:
      list: True
    last:
      no-subunit-trace: True
      color: True
      suppress-attachments: True
      all-attachments: True
    load:
      force-init: True
      subunit-trace: True
      color: True
      abbreviate: True
      suppress-attachments: True
      all-attachments: True

If you choose to use a user config file you can specify any subset of the
options and commands you choose.

Automated test isolation bisection
----------------------------------

As mentioned above, it's possible to manually analyze test isolation issues
by interrogating the repository for which tests ran on which worker, and then
creating a list file with those tests, re-running only half of them, checking
the error still happens, rinse and repeat. However that is tedious. stestr
can perform this analysis for you::

    $ stestr run --analyze-isolation

The process is:

1. The last run in the repository is used as a basis for analysing against -
   tests are only cross checked against tests run in the same worker in that
   run. This means that failures accrued from several different runs would
   not be processed with the right basis tests - you should do a full test
   run to seed your repository. This can be local, or just ``stestr load`` a
   full run from your Jenkins or other remote run environment.

2. Each test that is currently listed as a failure is run in a test process
   given just that id to run.

3. Tests that fail are excluded from analysis - they are broken on their own.

4. The remaining failures are then individually analysed one by one.

5. For each failing test, it gets run in one worker along with the first 1/2
   of the tests that were run prior to it.

6. If the test now passes, that set of prior tests are discarded, and the
   other half of the tests is promoted to be the full list. If the test fails
   then the other half of the tests is discarded and the current set
   promoted.

7. Go back to running the failing test along with 1/2 of the current list of
   priors unless the list only has 1 test in it. If the failing test still
   failed with that test, we have found the isolation issue. If it did not
   then either the isolation issue is racy, or it is a 3-or-more test
   isolation issue. Neither of those cases are automated today.
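In practice, the seeding requirement from step 1 means a typical bisection
session is just two commands run back to back::

    # seed the repository with a full run
    $ stestr run
    # bisect the failures recorded in that run
    $ stestr run --analyze-isolation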
Forcing isolation
-----------------

Sometimes it is useful to force a separate test runner instance for each test
executed. The ``--isolated`` flag will cause stestr to execute a separate
runner per test::

    $ stestr run --isolated

In this mode stestr first determines tests to run (either automatically
listed, using the failing set, or a user supplied load-list), and then spawns
one test runner per test it runs. To avoid cross-test-runner interactions
concurrency is disabled in this mode. ``--analyze-isolation`` supersedes
``--isolated`` if they are both supplied.

Repositories
------------

stestr uses a data repository to keep track of previous test runs. There are
different backend types that each offer different advantages. There are
currently 2 repository types to choose from, **file** and **sql**. You can
choose which repository type you want with the ``--repo-type``/``-r`` cli
flag; **file** is the current default. You can also specify an alternative
repository location with the ``--repo-url``/``-u`` cli flag. The default for
a **file** repository type is to use the directory ``$CWD/.stestr``; for a
**sql** repository type it is a sqlite database located at
``$CWD/.stestr.sqlite``.

.. note:: Make sure you put these flags before the cli subcommand

.. note:: Different repository types that use local storage will conflict
    with each other in the same directory. If you initialize one repository
    type and then try to use another in the same directory, it will not work.

File
''''

The default stestr repository type has a very simple disk structure. It
contains the following files:

* format: This file identifies the precise layout of the repository, in case
  future changes are needed.

* next-stream: This file contains the serial number to be used when adding
  another stream to the repository.

* failing: This file is a stream containing just the known failing tests. It
  is updated whenever a new stream is added to the repository, so that it
  only references known failing tests.

* #N - all the streams inserted in the repository are given a serial number.

SQL
'''

This is an experimental repository backend that is based on the
`subunit2sql`_ library. It's currently still under development and should be
considered experimental for the time being. Eventually it'll replace the File
repository type.

.. note:: The sql repository type requirements are not installed by default.
    They are listed under the 'sql' setuptools extras. You can install them
    with pip by running: ``pip install 'stestr[sql]'``

.. _subunit2sql:
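Recapping the note above that the repository flags are global options and go
before the subcommand, selecting a non-default repository location looks like
this sketch (the path is only illustrative)::

    $ stestr --repo-type file --repo-url /tmp/myrepo init
    $ stestr --repo-type file --repo-url /tmp/myrepo run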
_github pull requests: https://help.github.com/articles/about-pull-requests/ コミュニティ ------------- GitHub でのやり取りに加え、stestr の IRC チャネルもあります: Freenode の #stestr チャネル stestr に関する質問、もしくは議論をしていますので、気軽に参加してください。 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585232236.565426 stestr-3.0.0/doc/source/api/0000755000175000017500000000000000000000000022025 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585232236.565426 stestr-3.0.0/doc/source/api/commands/0000755000175000017500000000000000000000000023626 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501647450.0 stestr-3.0.0/doc/source/api/commands/__init__.rst0000644000175000017500000000014600000000000026120 0ustar00computertrekercomputertreker00000000000000.. _commands: stestr Commands =============== .. automodule:: stestr.commands.__init__ :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/doc/source/api/commands/failing.rst0000644000175000017500000000017200000000000025771 0ustar00computertrekercomputertreker00000000000000.. _failing_command: stestr failing Command ====================== .. automodule:: stestr.commands.failing :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/doc/source/api/commands/init.rst0000644000175000017500000000015600000000000025325 0ustar00computertrekercomputertreker00000000000000.. _init_command: stestr init Command =================== .. automodule:: stestr.commands.init :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/doc/source/api/commands/last.rst0000644000175000017500000000015600000000000025325 0ustar00computertrekercomputertreker00000000000000.. _last_command: stestr last Command =================== .. automodule:: stestr.commands.last :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/doc/source/api/commands/list.rst0000644000175000017500000000015600000000000025335 0ustar00computertrekercomputertreker00000000000000.. _list_command: stestr list Command =================== .. automodule:: stestr.commands.list :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/doc/source/api/commands/load.rst0000644000175000017500000000015600000000000025301 0ustar00computertrekercomputertreker00000000000000.. _load_command: stestr load Command =================== .. automodule:: stestr.commands.load :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/doc/source/api/commands/run.rst0000644000175000017500000000015200000000000025162 0ustar00computertrekercomputertreker00000000000000.. _run_command: stestr run Command ================== .. automodule:: stestr.commands.run :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/doc/source/api/commands/slowest.rst0000644000175000017500000000017200000000000026060 0ustar00computertrekercomputertreker00000000000000.. _slowest_command: stestr slowest Command ====================== .. 
automodule:: stestr.commands.slowest :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/doc/source/api/config_file.rst0000644000175000017500000000055300000000000025026 0ustar00computertrekercomputertreker00000000000000.. _api_config_file: Configuration File Module ========================= This module is used to deal with anything related to the stestr config file. This includes actually parsing it, and also dealing with interpreting some of it's contents (like generating a test_processor based on a config file's contents). .. automodule:: stestr.config_file :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1483401835.0 stestr-3.0.0/doc/source/api/output.rst0000644000175000017500000000057300000000000024124 0ustar00computertrekercomputertreker00000000000000.. _api_output: The Output Module ================= This module provides functions for dealing with any output from stestr. This mostly just means helper functions to properly write output to stdout (or another file) Any function or class in this module that has a docstring is a stable interface and should be backwards compatible. .. automodule:: stestr.output :members: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5687592 stestr-3.0.0/doc/source/api/repository/0000755000175000017500000000000000000000000024244 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1483395719.0 stestr-3.0.0/doc/source/api/repository/abstract.rst0000644000175000017500000000021300000000000026575 0ustar00computertrekercomputertreker00000000000000.. _api_repository_abstract: Abstract Repository Class ========================= .. automodule:: stestr.repository.abstract :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1483395719.0 stestr-3.0.0/doc/source/api/repository/file.rst0000644000175000017500000000017100000000000025714 0ustar00computertrekercomputertreker00000000000000.. _api_repository_file: File Repository Type ==================== .. automodule:: stestr.repository.file :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1483395719.0 stestr-3.0.0/doc/source/api/repository/memory.rst0000644000175000017500000000020100000000000026277 0ustar00computertrekercomputertreker00000000000000.. _api_repository_memory: Memory Repository Type ====================== .. automodule:: stestr.repository.memory :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1485574959.0 stestr-3.0.0/doc/source/api/repository/sql.rst0000644000175000017500000000016600000000000025600 0ustar00computertrekercomputertreker00000000000000.. _api_repository_sql: SQL Repository Type ==================== .. automodule:: stestr.repository.sql :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1483399883.0 stestr-3.0.0/doc/source/api/scheduler.rst0000644000175000017500000000031300000000000024532 0ustar00computertrekercomputertreker00000000000000.. _api_scheduler: The Scheduler Module ==================== This module is used to deal with anything related to test scheduling/partitioning in stestr. .. 
automodule:: stestr.scheduler :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1483398977.0 stestr-3.0.0/doc/source/api/selection.rst0000644000175000017500000000027700000000000024552 0ustar00computertrekercomputertreker00000000000000.. _api_selection: Test Selection Module ===================== This module is used to deal with anything related to test selection in stestr. .. automodule:: stestr.selection :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501647450.0 stestr-3.0.0/doc/source/api/subunit_trace.rst0000644000175000017500000000014300000000000025424 0ustar00computertrekercomputertreker00000000000000.. _subunit_trace: Subunit Trace ============= .. automodule:: stestr.subunit_trace :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/doc/source/api/test_processor.rst0000644000175000017500000000231200000000000025633 0ustar00computertrekercomputertreker00000000000000.. _api_test_processor: Test Processor Module ===================== This module contains the definition of the ``TestProcessorFixture`` fixture class. This fixture is used for handling the actual spawning of worker processes for running tests, or listing tests. It is constructed as a `fixture`_ to handle the lifecycle of the test id list files which are used to pass test ids to the workers processes running the tests. .. _fixture: https://pypi.python.org/pypi/fixtures In the normal workflow a ``TestProcessorFixture`` get's returned by the :ref:`api_config_file`'s ``get_run_command()`` function. The config file parses the config file and the cli options to create a ``TestProcessorFixture`` with the correct options. This Fixture then gets returned to the CLI commands to enable them to run the commands. The ``TestProcessorFixture`` class is written to be fairly generic in the command it's executing. This is an artifact of being forked from testrepository where the test command is defined in the configuration file. In stestr the command is hard coded ``stestr.config_file`` module so this extra flexibility isn't really needed. API Reference ------------- .. automodule:: stestr.test_processor :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/doc/source/api.rst0000644000175000017500000000570100000000000022562 0ustar00computertrekercomputertreker00000000000000.. _api: Internal API Reference ====================== This document serves as a reference for the python API used in stestr. It should serve as a guide for both internal and external use of stestr components via python. The majority of the contents here are built from internal docstrings in the actual code. Repository ---------- .. toctree:: :maxdepth: 2 api/repository/abstract api/repository/file api/repository/memory api/repository/sql Commands -------- These modules are used for the operation of all the various subcommands in stestr. As of the 1.0.0 release each of these commands should be considered a stable interface that can be relied on externally. Each command module conforms to a basic format that is based on the `cliff`_ framework. 
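For illustration only, a command module following this format might be sketched as shown next (``Example`` and ``example_command`` are invented names for this sketch, not actual stestr code; the three required functions are described in detail below)::

    from cliff import command


    class Example(command.Command):
        """An illustrative subcommand sketch."""

        def get_description(self):
            # The string used for the subcommand help
            return "An example subcommand sketch"

        def get_parser(self, prog_name):
            parser = super(Example, self).get_parser(prog_name)
            # Any subcommand specific arguments get defined here
            parser.add_argument('--flag', action='store_true',
                                help='An illustrative option')
            return parser

        def take_action(self, parsed_args):
            # Called wrapped in sys.exit(), so return an integer exit code
            return example_command(flag=parsed_args.flag)


    def example_command(flag=False):
        """Stable functional interface that performs the real work."""
        return 0
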
The basic structure for these modules is the following three functions in each class:: def get_description(): """This function returns a string that is used for the subcommand help""" help_str = "A descriptive help string about the command" return help_str def get_parser(prog_name): """This function takes a parser and any subcommand arguments are defined here""" parser.add_argument(...) def take_action(parsed_args): """This is where the real work for the command is performed. This is the function that is called when the command is executed. This function is called being wrapped by sys.exit() so an integer return is expected that will be used for the command's return code. The arguments input parsed_args is the argparse.Namespace object from the parsed CLI options.""" return call_foo(...) .. _cliff: https://docs.openstack.org/cliff/latest/reference/index.html The command class will not work if all 3 of these function are not defined. However, to make the commands externally consumable each module also contains another public function which performs the real work for the command. Each one of these functions has a defined stable Python API signature with args and kwargs so that people can easily call the functions from other python programs. This function is what can be expected to be used outside of stestr as the stable interface. All the stable functions can be imported the command module directly:: from stestr import command def my_list(): command.list_command(...) .. toctree:: :maxdepth: 2 api/commands/__init__ api/commands/failing api/commands/init api/commands/last api/commands/list api/commands/load api/commands/run api/commands/slowest Internal APIs ------------- The modules in this list do not necessarily have any external api contract, they are intended for internal use inside of stestr. If anything in these provides a stable contract and is intended for usage outside of stestr it will be noted in the api doc. .. toctree:: :maxdepth: 2 api/config_file api/selection api/scheduler api/output api/test_processor api/subunit_trace ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/doc/source/conf.py0000644000175000017500000001730600000000000022562 0ustar00computertrekercomputertreker00000000000000# -*- coding: utf-8 -*- # # stestr documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'cliff.sphinxext', ] # Enable todos in the output todo_include_todos = True # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'stestr' copyright = u'2016-2019, Matthew Treinish' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = 'trunk' # The full version, including alpha/beta/rc tags. release = 'trunk' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. 
#html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'stestrdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'stestr.tex', u'stestr Documentation', u'Matthew Treinish', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('MANUAL', 'stestr', u'A parallel Python test runner built around subunit', [u'Matthew Treinish'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'stestr', u'stestr Documentation', u'stestr Contributors', 'stestr', '', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1518910744.0 stestr-3.0.0/doc/source/developer_guidelines.rst0000644000175000017500000000134600000000000026207 0ustar00computertrekercomputertreker00000000000000.. _dev_guidelines: Development Guidelines for stestr ================================= Coding style ------------ PEP-8 is used for changes. We enforce running flake8 prior to landing any commits. Testing and QA -------------- For stestr please add tests where possible. 
There is no requirement for one test per change (because somethings are much harder to automatically test than the benefit from such tests). But, if unit testing is reasonable it will be expected to be present before it can merge. Running the tests ----------------- Generally just ``tox`` is all that is needed to run all the tests. However if dropping into pdb, it is currently more convenient to use ``python -m testtools.run testrepository.tests.test_suite``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/doc/source/index.rst0000644000175000017500000000066700000000000023126 0ustar00computertrekercomputertreker00000000000000.. Test Repository documentation master file, created by sphinx-quickstart on Mon Dec 3 23:24:00 2012. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. stestr ====== Contents: .. toctree:: :maxdepth: 2 README README_ja MANUAL CONTRIBUTING developer_guidelines internal_arch api Indices and tables ================== * :ref:`genindex` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/doc/source/internal_arch.rst0000644000175000017500000001055300000000000024623 0ustar00computertrekercomputertreker00000000000000Internal Architecture ===================== This document is an attempt to explain at a high level how stestr is constructed. It'll likely go stale quickly as the code changes, but hopefully it'll be a useful starting point for new developers to understand how the stestr is built. Full API documentation can be found at :ref:`api`. It's also worth noting that any explanation of workflow or internal operation is not necessarily an exact call path, but instead just a high level explanation of how the components operate. Basic Structure --------------- At a high level there are a couple different major components to stestr: the repository, and the cli layer. The repository is how stestr stores all results from test runs and the source of any data needed by any stestr operations that require past runs. There are actually multiple repository types which are different implementations of an abstract API. Right now there is only one complete implementation, the file repository type, which is useful in practice but that may not be the case in the future. The CLI layer is where the different stestr commands are defined and provides the command line interface for performing the different stestr operations. CLI Layer --------- The CLI layer is built using the `cliff.command`_ module. The stestr.cli module defines a basic interface using cliff. Each subcommand has its own module in stestr.commands and has 3 required functions to work properly: #. get_parser(prog_name) #. get_description() #. take_action(parsed_args) NOTE: To keep the api compatibility in stestr.commands, we still have each subcommands there. .. _cliff.command: https://docs.openstack.org/cliff/latest/reference/index.html get_parser(prog_name) ''''''''''''''''''''' This function is used to define subcommand arguments. It has a single argparse parser object passed into it. The intent of this function is to have any command specific arguments defined on the provided parser object by calling `parser.add_argument()`_ for each argument. .. 
_parser.add_argument(): https://docs.python.org/3/library/argparse.html#the-add-argument-method get_description() ''''''''''''''''' The intent of this function is to return an command specific help information. It is expected to return a string that will be used when the subcommand is defined in argparse and will be displayed before the arguments when ``--help`` is used on the subcommand. take_action(parsed_args) '''''''''''''''''''''''' This is where the real work for the command is performed. This is the function that is called when the command is executed. This function is called being wrapped by sys.exit() so an integer return is expected that will be used for the command's return code. The arguments input parsed_args is the argparse.Namespace object from the parsed CLI options. Operations for Running Tests ---------------------------- The basic flow when stestr run is called at a high level is fairly straight forward. In the default case when run is called the first operation performed is unittest discovery which is used to get a complete list of tests present. This list is then filtered by any user provided selection mechanisms. (for example a cli regex filter) This is used to select which tests the user actually intends to run. For more details on test selection see: :ref:`api_selection` which defines the functions which are used to actually perform the filtering. Once there is complete list of tests that will be run the list gets passed to the scheduler/partitioner. The scheduler takes the list of tests and splits it into N groups where N is the concurrency that stestr will use to run tests. If there is any timing data available in the repository from previous runs this is used by the scheduler to try balancing the test load between the workers. For the full details on how the partitioning is performed see: :ref:`api_scheduler`. With the tests split into multiple groups for each worker process we're ready to start executing the tests. Each group of tests is used to launch a test runner worker subprocess. As the name implies this is a test runner that emits a subunit stream to stdout. These stdout streams are combined in real time and stored in the repository at the end of the run (using the load command). The combined stream is also used for the CLI output either in a summary view or with a real time subunit output (which is enabled with the ``--subunit`` argument) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/requirements.txt0000644000175000017500000000070700000000000022477 0ustar00computertrekercomputertreker00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
future pbr!=2.1.0,>=2.0.0,!=4.0.0,!=4.0.1,!=4.0.2,!=4.0.3 # Apache-2.0 cliff>=2.8.0 # Apache-2.0 python-subunit>=1.4.0 # Apache-2.0/BSD fixtures>=3.0.0 # Apache-2.0/BSD testtools>=2.2.0 # MIT PyYAML>=3.10.0 # MIT voluptuous>=0.8.9 # BSD License ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585232236.578759 stestr-3.0.0/setup.cfg0000644000175000017500000000350000000000000021026 0ustar00computertrekercomputertreker00000000000000[metadata] name = stestr summary = A parallel Python test runner built around subunit description-file = README.rst author = Matthew Treinish author-email = mtreinish@kortar.org home-page = http://stestr.readthedocs.io/en/latest/ license = Apache-2.0 classifier = Intended Audience :: Information Technology Intended Audience :: System Administrators Intended Audience :: Developers License :: OSI Approved :: Apache Software License Operating System :: OS Independent Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Topic :: Software Development :: Testing Topic :: Software Development :: Quality Assurance project_urls = Documentation = https://stestr.readthedocs.io Source Code = https://github.com/mtreinish/stestr Bug Tracker = https://github.com/mtreinish/stestr/issues requires-python = >=3.5 [files] packages = stestr [entry_points] console_scripts = stestr = stestr.cli:main stestr.cm = run = stestr.commands.run:Run failing = stestr.commands.failing:Failing init = stestr.commands.init:Init last = stestr.commands.last:Last list = stestr.commands.list:List load = stestr.commands.load:Load slowest = stestr.commands.slowest:Slowest [extras] sql = subunit2sql>=1.8.0 [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 warning-is-error = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = stestr/locale domain = stestr [update_catalog] domain = stestr output_dir = stestr/locale input_file = stestr/locale/stestr.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = stestr/locale/stestr.pot [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1558359749.0 stestr-3.0.0/setup.py0000644000175000017500000000200600000000000020717 0ustar00computertrekercomputertreker00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. 
# solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5720925 stestr-3.0.0/stestr/0000755000175000017500000000000000000000000020533 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1479415047.0 stestr-3.0.0/stestr/__init__.py0000644000175000017500000000000000000000000022632 0ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/__main__.py0000644000175000017500000000120700000000000022625 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from stestr.cli import main if __name__ == '__main__': sys.exit(main(sys.argv[1:])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/bisect_tests.py0000644000175000017500000001361200000000000023603 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
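# This module implements stestr's test isolation analysis (used by
# ``stestr run --analyze-isolation``): for each spurious failure it
# bisects the tests that ran before the failing test on the same worker,
# re-running successively narrower candidate slices until the single
# conflicting test is found or the failure fails to reproduce.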
import math import testtools from stestr import output class IsolationAnalyzer(object): def __init__(self, latest_run, conf, run_func, repo, test_path=None, top_dir=None, group_regex=None, repo_type='file', repo_url=None, serial=False, concurrency=0): super(IsolationAnalyzer, self).__init__() self._worker_to_test = None self._test_to_worker = None self.latest_run = latest_run self.conf = conf self.group_regex = group_regex self.repo_type = repo_type self.repo_url = repo_url self.serial = serial self.concurrency = concurrency self.test_path = test_path self.top_dir = top_dir self.run_func = run_func self.repo = repo def bisect_tests(self, spurious_failures): test_conflicts = {} if not spurious_failures: raise ValueError('No failures provided to bisect the cause of') for spurious_failure in spurious_failures: candidate_causes = self._prior_tests(self.latest_run, spurious_failure) bottom = 0 top = len(candidate_causes) width = top - bottom while width: check_width = int(math.ceil(width / 2.0)) test_ids = candidate_causes[ bottom:bottom + check_width] + [spurious_failure] cmd = self.conf.get_run_command( test_ids, group_regex=self.group_regex, repo_type=self.repo_type, repo_url=self.repo_url, serial=self.serial, concurrency=self.concurrency, test_path=self.test_path, top_dir=self.top_dir) self.run_func(cmd, False, pretty_out=False, repo_type=self.repo_type, repo_url=self.repo_url) # check that the test we're probing still failed - still # awkward. found_fail = [] def find_fail(test_dict): if test_dict['id'] == spurious_failure: found_fail.append(True) checker = testtools.StreamToDict(find_fail) checker.startTestRun() try: self.repo.get_failing().get_test().run(checker) finally: checker.stopTestRun() if found_fail: # Our conflict is in bottom - clamp the range down. top = bottom + check_width if width == 1: # found the cause test_conflicts[ spurious_failure] = candidate_causes[bottom] width = 0 else: width = top - bottom else: # Conflict in the range we did not run: discard bottom. bottom = bottom + check_width if width == 1: # there will be no more to check, so we didn't # reproduce the failure. width = 0 else: width = top - bottom if spurious_failure not in test_conflicts: # Could not determine cause test_conflicts[spurious_failure] = 'unknown - no conflicts' if test_conflicts: table = [('failing test', 'caused by test')] for failure in sorted(test_conflicts): causes = test_conflicts[failure] table.append((failure, causes)) output.output_table(table) return 3 return 0 def _prior_tests(self, run, failing_id): """Calculate what tests from the test run run ran before test_id. Tests that ran in a different worker are not included in the result. """ if not getattr(self, '_worker_to_test', False): case = run.get_test() # Use None if there is no worker-N tag # If there are multiple, map them all. 
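# Both maps below are built in a single pass over the stored subunit
# stream and cached on the instance, so repeated bisection probes are
# cheap.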
# (worker-N -> [testid, ...]) worker_to_test = {} # (testid -> [workerN, ...]) test_to_worker = {} def map_test(test_dict): tags = test_dict['tags'] id = test_dict['id'] workers = [] for tag in tags: if tag.startswith('worker-'): workers.append(tag) if not workers: workers = [None] for worker in workers: worker_to_test.setdefault(worker, []).append(id) test_to_worker.setdefault(id, []).extend(workers) mapper = testtools.StreamToDict(map_test) mapper.startTestRun() try: case.run(mapper) finally: mapper.stopTestRun() self._worker_to_test = worker_to_test self._test_to_worker = test_to_worker failing_workers = self._test_to_worker[failing_id] prior_tests = [] for worker in failing_workers: worker_tests = self._worker_to_test[worker] prior_tests.extend(worker_tests[:worker_tests.index(failing_id)]) return prior_tests ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/cli.py0000644000175000017500000001242400000000000021657 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from cliff import app from cliff import commandmanager from stestr import version __version__ = version.version_info.version_string_with_vcs() class StestrCLI(app.App): def __init__(self): super(StestrCLI, self).__init__( description="A parallel Python test runner built around subunit", version=__version__, command_manager=commandmanager.CommandManager('stestr.cm'), deferred_help=True, ) def initialize_app(self, argv): self.options.debug = True self.LOG.debug('initialize_app') def prepare_to_run_command(self, cmd): self.LOG.debug('prepare_to_run_command %s', cmd.__class__.__name__) group_regex = r'([^\.]*\.)*' \ if cmd.app_args.parallel_class else cmd.app_args.group_regex cmd.app_args.group_regex = group_regex def clean_up(self, cmd, result, err): self.LOG.debug('clean_up %s', cmd.__class__.__name__) if err: self.LOG.debug('got an error: %s', err) def build_option_parser(self, description, version, argparse_kwargs=None): parser = super(StestrCLI, self).build_option_parser(description, version, argparse_kwargs) parser = self._set_common_opts(parser) return parser def _set_common_opts(self, parser): parser.add_argument('--user-config', dest='user_config', default=None, help='An optional path to a default user config ' 'file if one is not specified ~/.stestr.yaml ' 'and ~/.config/stestr.yaml will be tried in ' 'that order') parser.add_argument('-d', '--here', dest='here', help="Set the directory or url that a command " "should run from. This affects all default " "path lookups but does not affect paths " "supplied to the command.", default=None, type=str) parser.add_argument('--config', '-c', dest='config', default='.stestr.conf', help="Set a stestr config file to use with this " "command. 
If one isn't specified then " ".stestr.conf in the directory that a command" " is running from is used") parser.add_argument('--repo-type', '-r', dest='repo_type', choices=['file', 'sql'], default='file', help="Select the repo backend to use") parser.add_argument('--repo-url', '-u', dest='repo_url', default=None, help="Set the repo url to use. An acceptable value" " for this depends on the repository type " "used.") parser.add_argument('--test-path', '-t', dest='test_path', default=None, help="Set the test path to use for unittest " "discovery. If both this and the " "corresponding config file option are set, " "this value will be used.") parser.add_argument('--top-dir', dest='top_dir', default=None, help="Set the top dir to use for unittest " "discovery. If both this and the " "corresponding config file option are set, " "this value will be used.") parser.add_argument('--group-regex', '--group_regex', '-g', dest='group_regex', default=None, help="Set a group regex to use for grouping tests" " together in the stestr scheduler. If " "both this and the corresponding config file " "option are set this value will be used.") parser.add_argument('--parallel-class', '-p', action='store_true', default=False, help="Set the flag to group tests by class. NOTE: " "This flag takes priority over the " "`--group-regex` option even if it's set.") return parser def main(argv=sys.argv[1:]): cli = StestrCLI() return cli.run(argv) if __name__ == '__main__': sys.exit(main(sys.argv[1:])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/colorizer.py0000644000175000017500000000717100000000000023123 0ustar00computertrekercomputertreker00000000000000# Copyright 2015 NEC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Colorizer Code is borrowed from Twisted: # Copyright (c) 2001-2010 Twisted Matrix Laboratories. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
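# Usage sketch (assumed, but consistent with the classes defined below):
# callers probe supported() and fall back to the no-op colorizer, e.g.:
#
#   cls = AnsiColorizer if AnsiColorizer.supported(stream) else NullColorizer
#   cls(stream).write('PASSED', 'green')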
import sys class AnsiColorizer(object): """A colorizer is an object that loosely wraps around a stream allowing callers to write text to the stream in a particular color. Colorizer classes must implement C{supported()} and C{write(text, color)}. """ _colors = dict(black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37) def __init__(self, stream): self.stream = stream @classmethod def supported(cls, stream=sys.stdout): """Check the current platform supports coloring terminal output A class method that returns True if the current platform supports coloring terminal output using this method. Returns False otherwise. """ if not stream.isatty(): return False # auto color only on TTYs try: import curses except ImportError: return False else: try: try: return curses.tigetnum("colors") > 2 except curses.error: curses.setupterm() return curses.tigetnum("colors") > 2 except Exception: # guess false in case of error return False def write(self, text, color): """Write the given text to the stream in the given color. @param text: Text to be written to the stream. @param color: A string label for a color. e.g. 'red', 'white'. """ color = self._colors[color] self.stream.write('\x1b[{};1m{}\x1b[0m'.format(color, text)) class NullColorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): self.stream = stream @classmethod def supported(cls, stream=sys.stdout): return True def write(self, text, color): self.stream.write(text) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5720925 stestr-3.0.0/stestr/commands/0000755000175000017500000000000000000000000022334 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501647450.0 stestr-3.0.0/stestr/commands/__init__.py0000644000175000017500000000204700000000000024450 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stestr.commands.failing import failing as failing_command from stestr.commands.init import init as init_command from stestr.commands.last import last as last_command from stestr.commands.list import list_command from stestr.commands.load import load as load_command from stestr.commands.run import run_command from stestr.commands.slowest import slowest as slowest_command __all__ = ['failing_command', 'init_command', 'last_command', 'list_command', 'load_command', 'run_command', 'slowest_command'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/commands/failing.py0000644000175000017500000001161300000000000024321 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Show the current failures in the repository.""" import sys from cliff import command import testtools from stestr import output from stestr.repository import util from stestr import results from stestr import user_config class Failing(command.Command): """Show the current failures known by the repository. Without --subunit, the process exit code will be non-zero if the previous test run was not successful and test failures are shown. But, with --subunit, the process exit code is non-zero only if the subunit stream could not be generated successfully from any failures. The test results and run status are included in the subunit stream emitted for the failed tests, so the stream should be used for interpretting the failing tests. If no subunit stream is emitted with --subunit and a zero exit code then there were no failures in the most recent run in the repository. """ def get_parser(self, prog_name): parser = super(Failing, self).get_parser(prog_name) parser.add_argument( "--subunit", action="store_true", default=False, help="Show output as a subunit stream.") parser.add_argument( "--list", action="store_true", default=False, help="Show only a list of failing tests.") return parser def take_action(self, parsed_args): user_conf = user_config.get_user_config(self.app_args.user_config) args = parsed_args if getattr(user_conf, 'failing', False): list_opt = args.list or user_conf.failing.get('list', False) else: list_opt = args.list return failing(repo_type=self.app_args.repo_type, repo_url=self.app_args.repo_url, list_tests=list_opt, subunit=args.subunit) def _show_subunit(run): stream = run.get_subunit_stream() if getattr(sys.stdout, 'buffer', False): sys.stdout.buffer.write(stream.read()) else: sys.stdout.write(stream.read()) return 0 def _make_result(repo, list_tests=False, stdout=sys.stdout): if list_tests: list_result = testtools.StreamSummary() return list_result, list_result else: def _get_id(): return repo.get_latest_run().get_id() output_result = results.CLITestResult(_get_id, stdout, None) summary_result = output_result.get_summary() return output_result, summary_result def failing(repo_type='file', repo_url=None, list_tests=False, subunit=False, stdout=sys.stdout): """Print the failing tests from the most recent run in the repository This function will print to STDOUT whether there are any tests that failed in the last run. It optionally will print the test_ids for the failing tests if ``list_tests`` is true. If ``subunit`` is true a subunit stream with just the failed tests will be printed to STDOUT. Note this function depends on the cwd for the repository if `repo_type` is set to file and `repo_url` is not specified it will use the repository located at CWD/.stestr :param str repo_type: This is the type of repository to use. Valid choices are 'file' and 'sql'. :param str repo_url: The url of the repository to use. :param bool list_test: Show only a list of failing tests. :param bool subunit: Show output as a subunit stream. :param file stdout: The output file to write all output to. By default this is sys.stdout :return return_code: The exit code for the command. 
0 for success and > 0 for failures. :rtype: int """ if repo_type not in ['file', 'sql']: stdout.write('Repository type %s is not a valid type' % repo_type) return 1 repo = util.get_repo_open(repo_type, repo_url) run = repo.get_failing() if subunit: return _show_subunit(run) case = run.get_test() failed = False result, summary = _make_result(repo, list_tests=list_tests) result.startTestRun() try: case.run(result) finally: result.stopTestRun() failed = not results.wasSuccessful(summary) if failed: result = 1 else: result = 0 if list_tests: failing_tests = [ test for test, _ in summary.errors + summary.failures] output.output_tests(failing_tests, output=stdout) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1582122864.0 stestr-3.0.0/stestr/commands/init.py0000644000175000017500000000363600000000000023661 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Initialise a new repository.""" import errno import sys from cliff import command from stestr.repository import util class Init(command.Command): """Create a new repository.""" def take_action(self, parsed_args): init(self.app_args.repo_type, self.app_args.repo_url) def init(repo_type='file', repo_url=None, stdout=sys.stdout): """Initialize a new repository This function will initialize a new repository if one does not already exist. If one exists the command will fail. Note this function depends on the cwd for the repository if `repo_type` is set to file and `repo_url` is not specified it will use the repository located at CWD/.stestr :param str repo_type: This is the type of repository to use. Valid choices are 'file' and 'sql'. :param str repo_url: The url of the repository to use. :return return_code: The exit code for the command. 0 for success and > 0 for failures. :rtype: int """ try: util.get_repo_initialise(repo_type, repo_url) except OSError as e: if e.errno != errno.EEXIST: raise repo_path = repo_url or './.stestr' stdout.write('The specified repository directory %s already exists. ' 'Please check if the repository already exists or ' 'select a different path\n' % repo_path) return 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462488.0 stestr-3.0.0/stestr/commands/last.py0000644000175000017500000002136200000000000023655 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
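# Note: take_action() below merges the parsed CLI flags with any 'last'
# settings from the user config file; explicitly passed CLI options take
# precedence over config file values.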
"""Show the last run loaded into a repository.""" import sys from cliff import command from stestr import output from stestr.repository import abstract from stestr.repository import util from stestr import results from stestr import subunit_trace from stestr import user_config class Last(command.Command): """Show the last run loaded into a repository. Failing tests are shown on the console and a summary of the run is printed at the end. Without --subunit, the process exit code will be non-zero if the test run was not successful. With --subunit, the process exit code is non-zero only if the subunit stream could not be generated successfully. The test results and run status are included in the subunit stream, so the stream should be used to determining the result of the run instead of the exit code when using the --subunit flag. """ def get_parser(self, prog_name): parser = super(Last, self).get_parser(prog_name) parser.add_argument( "--subunit", action="store_true", default=False, help="Show output as a subunit stream.") parser.add_argument("--no-subunit-trace", action='store_true', default=False, help="Disable output with the subunit-trace " "output filter") parser.add_argument('--force-subunit-trace', action='store_true', default=False, help='Force subunit-trace output regardless of any' 'other options or config settings') parser.add_argument('--color', action='store_true', default=False, help='Enable color output in the subunit-trace ' 'output, if subunit-trace output is enabled. ' '(this is the default). If subunit-trace is ' 'disable this does nothing.') parser.add_argument('--suppress-attachments', action='store_true', dest='suppress_attachments', help='If set do not print stdout or stderr ' 'attachment contents on a successful test ' 'execution') parser.add_argument('--all-attachments', action='store_true', dest='all_attachments', help='If set print all text attachment contents on' ' a successful test execution') parser.add_argument('--show-binary-attachments', action='store_true', dest='show_binary_attachments', help='If set, show non-text attachments. 
This is ' 'generally only useful for debug purposes.') return parser def take_action(self, parsed_args): user_conf = user_config.get_user_config(self.app_args.user_config) args = parsed_args if args.suppress_attachments and args.all_attachments: msg = ("The --suppress-attachments and --all-attachments " "options are mutually exclusive, you can not use both " "at the same time") print(msg) sys.exit(1) if getattr(user_conf, 'last', False): if not user_conf.last.get('no-subunit-trace'): if not args.no_subunit_trace: pretty_out = True else: pretty_out = False else: pretty_out = False pretty_out = args.force_subunit_trace or pretty_out color = args.color or user_conf.last.get('color', False) suppress_attachments_conf = user_conf.run.get( 'suppress-attachments', False) all_attachments_conf = user_conf.run.get( 'all-attachments', False) if not args.suppress_attachments and not args.all_attachments: suppress_attachments = suppress_attachments_conf all_attachments = all_attachments_conf elif args.suppress_attachments: all_attachments = False suppress_attachments = args.suppress_attachments elif args.all_attachments: suppress_attachments = False all_attachments = args.all_attachments else: pretty_out = args.force_subunit_trace or not args.no_subunit_trace color = args.color suppress_attachments = args.suppress_attachments all_attachments = args.all_attachments return last(repo_type=self.app_args.repo_type, repo_url=self.app_args.repo_url, subunit_out=args.subunit, pretty_out=pretty_out, color=color, suppress_attachments=suppress_attachments, all_attachments=all_attachments, show_binary_attachments=args.show_binary_attachments) def last(repo_type='file', repo_url=None, subunit_out=False, pretty_out=True, color=False, stdout=sys.stdout, suppress_attachments=False, all_attachments=False, show_binary_attachments=False): """Show the last run loaded into a repository This function will print the results from the last run in the repository to STDOUT. It can optionally print the subunit stream for the last run to STDOUT if the ``subunit_out`` option is set to true. Note this function depends on the cwd for the repository if `repo_type` is set to file and `repo_url` is not specified it will use the repository located at CWD/.stestr :param str repo_type: This is the type of repository to use. Valid choices are 'file' and 'sql'. :param str repo_url: The url of the repository to use. :param bool subunit_out: Show output as a subunit stream. :param pretty_out: Use the subunit-trace output filter. :param color: Enable colorized output with the subunit-trace output filter. :param file stdout: The output file to write all output to. By default this is sys.stdout :param bool suppress_attachments: When set true subunit_trace will not print attachments on successful test execution. :param bool all_attachments: When set true subunit_trace will print all text attachments on successful test execution. :param bool show_binary_attachments: When set to true, subunit_trace will print binary attachments in addition to text attachments. :return return_code: The exit code for the command. 0 for success and > 0 for failures.
:rtype: int """ try: repo = util.get_repo_open(repo_type, repo_url) except abstract.RepositoryNotFound as e: stdout.write(str(e) + '\n') return 1 try: latest_run = repo.get_latest_run() except KeyError as e: stdout.write(str(e) + '\n') return 1 if subunit_out: stream = latest_run.get_subunit_stream() output.output_stream(stream, output=stdout) # Exits 0 if we successfully wrote the stream. return 0 case = latest_run.get_test() try: if repo_type == 'file': previous_run = repo.get_test_run(repo.latest_id() - 1) # TODO(mtreinish): add a repository api to get the previous_run to # unify this logic else: previous_run = None except KeyError: previous_run = None failed = False if not pretty_out: output_result = results.CLITestResult(latest_run.get_id, stdout, previous_run) summary = output_result.get_summary() output_result.startTestRun() try: case.run(output_result) finally: output_result.stopTestRun() failed = not results.wasSuccessful(summary) else: stream = latest_run.get_subunit_stream() failed = subunit_trace.trace( stream, stdout, post_fails=True, color=color, suppress_attachments=suppress_attachments, all_attachments=all_attachments, show_binary_attachments=show_binary_attachments) if failed: return 1 else: return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/commands/list.py0000644000175000017500000001415100000000000023663 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """List the tests from a project and show them.""" from io import BytesIO import sys from cliff import command from stestr import config_file from stestr import output class List(command.Command): """List the tests for a project. You can use a filter just like with the run command to see exactly what tests match. """ def get_parser(self, prog_name): parser = super(List, self).get_parser(prog_name) parser.add_argument("filters", nargs="*", default=None, help="A list of string regex filters to initially " "apply on the test list. Tests that match any of " "the regexes will be used. (assuming any other " "filtering specified also uses it)") parser.add_argument('--blacklist-file', '-b', default=None, dest='blacklist_file', help='Path to a blacklist file, this file ' 'contains a separate regex exclude on each ' 'newline') parser.add_argument('--whitelist-file', '-w', default=None, dest='whitelist_file', help='Path to a whitelist file, this file ' 'contains a separate regex on each newline.') parser.add_argument('--black-regex', '-B', default=None, dest='black_regex', help='Test rejection regex. If a test cases name ' 'matches on re.search() operation , ' 'it will be removed from the final test list. ' 'Effectively the black-regexp is added to ' ' black regexp list, but you do need to edit a ' 'file. 
The black filtering happens after the ' 'initial white selection, which by default is ' 'everything.') return parser def take_action(self, parsed_args): args = parsed_args filters = parsed_args.filters or None return list_command(config=self.app_args.config, repo_type=self.app_args.repo_type, repo_url=self.app_args.repo_url, group_regex=self.app_args.group_regex, test_path=self.app_args.test_path, top_dir=self.app_args.top_dir, blacklist_file=args.blacklist_file, whitelist_file=args.whitelist_file, black_regex=args.black_regex, filters=filters) def list_command(config='.stestr.conf', repo_type='file', repo_url=None, test_path=None, top_dir=None, group_regex=None, blacklist_file=None, whitelist_file=None, black_regex=None, filters=None, stdout=sys.stdout): """Print a list of test_ids for a project This function will print the test_ids for tests in a project. You can filter the output just like with the run command to see exactly what will be run. :param str config: The path to the stestr config file. Must be a string. :param str repo_type: This is the type of repository to use. Valid choices are 'file' and 'sql'. :param str repo_url: The url of the repository to use. :param str test_path: Set the test path to use for unittest discovery. If both this and the corresponding config file option are set, this value will be used. :param str top_dir: The top dir to use for unittest discovery. This takes precedence over the value in the config file. (if one is present in the config file) :param str group_regex: Set a group regex to use for grouping tests together in the stestr scheduler. If both this and the corresponding config file option are set this value will be used. :param str blacklist_file: Path to a blacklist file, this file contains a separate regex exclude on each newline. :param str whitelist_file: Path to a whitelist file, this file contains a separate regex on each newline. :param str black_regex: Test rejection regex. If a test cases name matches on re.search() operation, it will be removed from the final test list. :param list filters: A list of string regex filters to initially apply on the test list. Tests that match any of the regexes will be used. (assuming any other filtering specified also uses it) :param file stdout: The output file to write all output to. By default this is sys.stdout """ ids = None conf = config_file.TestrConf(config) cmd = conf.get_run_command( regexes=filters, repo_type=repo_type, repo_url=repo_url, group_regex=group_regex, blacklist_file=blacklist_file, whitelist_file=whitelist_file, black_regex=black_regex, test_path=test_path, top_dir=top_dir) not_filtered = filters is None and blacklist_file is None\ and whitelist_file is None and black_regex is None try: cmd.setUp() # List tests if the fixture has not already needed to to filter. if not_filtered: ids = cmd.list_tests() else: ids = cmd.test_ids stream = BytesIO() for id in ids: stream.write(('%s\n' % id).encode('utf8')) stream.seek(0) output.output_stream(stream, output=stdout) return 0 finally: cmd.cleanUp() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462488.0 stestr-3.0.0/stestr/commands/load.py0000644000175000017500000003370400000000000023634 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Load data into a repository."""

import errno
import functools
import os
import sys
import warnings

from cliff import command
import subunit
import testtools

from stestr import output
from stestr.repository import abstract as repository
from stestr.repository import util
from stestr import results
from stestr import subunit_trace
from stestr import user_config
from stestr import utils


class Load(command.Command):
    """Load a subunit stream into a repository.

    Failing tests are shown on the console and a summary of the stream is
    printed at the end.

    Without --subunit, the process exit code will be non-zero if the test
    run was not successful. With --subunit, the process exit code is
    non-zero if the subunit stream could not be generated successfully. The
    test results and run status are included in the subunit stream, so the
    stream should be used to determine the result of the run instead of the
    exit code when using the --subunit flag.
    """

    def get_parser(self, prog_name):
        parser = super(Load, self).get_parser(prog_name)
        parser.add_argument("files", nargs="*", default=False,
                            help="The subunit v2 stream files to load into "
                            "the repository")
        parser.add_argument("--partial", action="store_true",
                            default=False,
                            help="DEPRECATED: The stream being loaded was a "
                            "partial run. This option is deprecated and no "
                            "longer does anything. It will be removed in "
                            "the future")
        parser.add_argument("--force-init", action="store_true",
                            default=False,
                            help="Initialise the repository if it does not "
                            "exist already")
        parser.add_argument("--subunit", action="store_true",
                            default=False,
                            help="Display results in subunit format.")
        parser.add_argument("--id", "-i", default=None,
                            help="Append the stream into an existing entry "
                            "in the repository")
        parser.add_argument("--subunit-trace", action='store_true',
                            default=False,
                            help="Display the loaded stream through the "
                            "subunit-trace output filter")
        parser.add_argument('--color', action='store_true', default=False,
                            help='Enable color output in the subunit-trace '
                            'output, if subunit-trace output is enabled. If '
                            'subunit-trace is disabled this does nothing.')
        parser.add_argument('--abbreviate', action='store_true',
                            dest='abbreviate',
                            help='Print one character status for each test')
        parser.add_argument('--suppress-attachments', action='store_true',
                            dest='suppress_attachments',
                            help='If set do not print stdout or stderr '
                            'attachment contents on a successful test '
                            'execution')
        parser.add_argument('--all-attachments', action='store_true',
                            dest='all_attachments',
                            help='If set print all text attachment contents '
                            'on a successful test execution')
        parser.add_argument('--show-binary-attachments', action='store_true',
                            dest='show_binary_attachments',
                            help='If set, show non-text attachments. 
This is ' 'generally only useful for debug purposes.') return parser def take_action(self, parsed_args): user_conf = user_config.get_user_config(self.app_args.user_config) args = parsed_args if args.suppress_attachments and args.all_attachments: msg = ("The --suppress-attachments and --all-attachments " "options are mutually exclusive, you can not use both " "at the same time") print(msg) sys.exit(1) if getattr(user_conf, 'load', False): force_init = args.force_init or user_conf.load.get('force-init', False) pretty_out = args.subunit_trace or user_conf.load.get( 'subunit-trace', False) color = args.color or user_conf.load.get('color', False) abbreviate = args.abbreviate or user_conf.load.get('abbreviate', False) suppress_attachments_conf = user_conf.run.get( 'suppress-attachments', False) all_attachments_conf = user_conf.run.get( 'all-attachments', False) if not args.suppress_attachments and not args.all_attachments: suppress_attachments = suppress_attachments_conf all_attachments = all_attachments_conf elif args.suppress_attachments: all_attachments = False suppress_attachments = args.suppress_attachments elif args.all_attachments: suppress_attachments = False all_attachments = args.all_attachments else: force_init = args.force_init pretty_out = args.subunit_trace color = args.color abbreviate = args.abbreviate suppress_attachments = args.suppress_attachments all_attachments = args.all_attachments verbose_level = self.app.options.verbose_level stdout = open(os.devnull, 'w') if verbose_level == 0 else sys.stdout load(repo_type=self.app_args.repo_type, repo_url=self.app_args.repo_url, partial=args.partial, subunit_out=args.subunit, force_init=force_init, streams=args.files, pretty_out=pretty_out, color=color, stdout=stdout, abbreviate=abbreviate, suppress_attachments=suppress_attachments, serial=True, all_attachments=all_attachments, show_binary_attachments=args.show_binary_attachments) def load(force_init=False, in_streams=None, partial=False, subunit_out=False, repo_type='file', repo_url=None, run_id=None, streams=None, pretty_out=False, color=False, stdout=sys.stdout, abbreviate=False, suppress_attachments=False, serial=False, all_attachments=False, show_binary_attachments=False): """Load subunit streams into a repository This function will load subunit streams into the repository. It will output to STDOUT the results from the input stream. Internally this is used by the run command to both output the results as well as store the result in the repository. :param bool force_init: Initialize the specified repository if it hasn't been created. :param list in_streams: A list of file objects that will be saved into the repository :param bool partial: DEPRECATED: Specify the input is a partial stream. This option is deprecated and no longer does anything. It will be removed in the future. :param bool subunit_out: Output the subunit stream to stdout :param str repo_type: This is the type of repository to use. Valid choices are 'file' and 'sql'. :param str repo_url: The url of the repository to use. :param run_id: The optional run id to save the subunit stream to. :param list streams: A list of file paths to read for the input streams. :param bool pretty_out: Use the subunit-trace output filter for the loaded stream. :param bool color: Enabled colorized subunit-trace output :param file stdout: The output file to write all output to. 
By default this is sys.stdout :param bool abbreviate: Use abbreviated output if set true :param bool suppress_attachments: When set true attachments subunit_trace will not print attachments on successful test execution. :param bool all_attachments: When set true subunit_trace will print all text attachments on successful test execution. :param bool show_binary_attachments: When set to true, subunit_trace will print binary attachments in addition to text attachments. :return return_code: The exit code for the command. 0 for success and > 0 for failures. :rtype: int """ if partial: warnings.warn('The partial flag is deprecated and has no effect ' 'anymore') try: repo = util.get_repo_open(repo_type, repo_url) except repository.RepositoryNotFound: if force_init: try: repo = util.get_repo_initialise(repo_type, repo_url) except OSError as e: if e.errno != errno.EEXIST: raise repo_path = repo_url or './stestr' stdout.write('The specified repository directory %s already ' 'exists. Please check if the repository already ' 'exists or select a different path\n' % repo_path) exit(1) else: raise # Not a full implementation of TestCase, but we only need to iterate # back to it. Needs to be a callable - its a head fake for # testsuite.add. if in_streams: streams = utils.iter_streams(in_streams, 'subunit') elif streams: opener = functools.partial(open, mode='rb') streams = map(opener, streams) else: streams = [sys.stdin] def mktagger(pos, result): return testtools.StreamTagger([result], add=['worker-%d' % pos]) def make_tests(): for pos, stream in enumerate(streams): # Calls StreamResult API. case = subunit.ByteStreamToStreamResult( stream, non_subunit_name='stdout') decorate = functools.partial(mktagger, pos) case = testtools.DecorateTestCaseResult(case, decorate) yield (case, str(pos)) if not run_id: inserter = repo.get_inserter() else: inserter = repo.get_inserter(run_id=run_id) retval = 0 if serial: for stream in streams: # Calls StreamResult API. 
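            # Descriptive note on the call below: ByteStreamToStreamResult
            # parses the raw subunit v2 byte stream into StreamResult events,
            # and non_subunit_name='stdout' routes any non-subunit bytes
            # through as file attachments named 'stdout' instead of treating
            # them as a parse error.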
case = subunit.ByteStreamToStreamResult( stream, non_subunit_name='stdout') result = _load_case(inserter, repo, case, subunit_out, pretty_out, color, stdout, abbreviate, suppress_attachments, all_attachments, show_binary_attachments) if result or retval: retval = 1 else: retval = 0 else: case = testtools.ConcurrentStreamTestSuite(make_tests) retval = _load_case(inserter, repo, case, subunit_out, pretty_out, color, stdout, abbreviate, suppress_attachments, all_attachments, show_binary_attachments) return retval def _load_case(inserter, repo, case, subunit_out, pretty_out, color, stdout, abbreviate, suppress_attachments, all_attachments, show_binary_attachments): if subunit_out: output_result, summary_result = output.make_result(inserter.get_id, output=stdout) elif pretty_out: outcomes = testtools.StreamToDict( functools.partial(subunit_trace.show_outcome, stdout, enable_color=color, abbreviate=abbreviate, suppress_attachments=suppress_attachments, all_attachments=all_attachments, show_binary_attachments=show_binary_attachments)) summary_result = testtools.StreamSummary() output_result = testtools.CopyStreamResult([outcomes, summary_result]) output_result = testtools.StreamResultRouter(output_result) cat = subunit.test_results.CatFiles(stdout) output_result.add_rule(cat, 'test_id', test_id=None) else: try: previous_run = repo.get_latest_run() except KeyError: previous_run = None output_result = results.CLITestResult( inserter.get_id, stdout, previous_run) summary_result = output_result.get_summary() result = testtools.CopyStreamResult([inserter, output_result]) result.startTestRun() try: case.run(result) finally: result.stopTestRun() if pretty_out and not subunit_out: start_times = [] stop_times = [] for worker in subunit_trace.RESULTS: for test in subunit_trace.RESULTS[worker]: if not test['timestamps'][0] or not test['timestamps'][1]: continue start_times.append(test['timestamps'][0]) stop_times.append(test['timestamps'][1]) if not start_times or not stop_times: sys.stderr.write("\nNo tests were successful during the run") return 1 start_time = min(start_times) stop_time = max(stop_times) elapsed_time = stop_time - start_time subunit_trace.print_fails(stdout) subunit_trace.print_summary(stdout, elapsed_time) if not results.wasSuccessful(summary_result): return 1 else: return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462488.0 stestr-3.0.0/stestr/commands/run.py0000644000175000017500000007552100000000000023524 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Run a projects tests and load them into stestr.""" import errno import functools import io import os import subprocess import sys import warnings from cliff import command import subunit import testtools from stestr import bisect_tests from stestr.commands import load from stestr.commands import slowest from stestr import config_file from stestr import output from stestr.repository import abstract as repository from stestr.repository import util from stestr import results from stestr.subunit_runner import program from stestr.subunit_runner import run as subunit_run from stestr.testlist import parse_list from stestr import user_config def _to_int(possible, default=0, out=sys.stderr): try: i = int(possible) except (ValueError, TypeError): i = default msg = ('Unable to convert "%s" to an integer. Using %d.\n' % (possible, default)) out.write(str(msg)) return i class Run(command.Command): """Run the tests for a project and store them into the repository. Without --subunit, the process exit code will be non-zero if the test run was not successful. However, with --subunit, the process exit code is non-zero only if the subunit stream could not be generated successfully. The test results and run status are included in the subunit stream, so the stream should be used to determining the result of the run instead of the exit code when using the --subunit flag. """ def get_parser(self, prog_name): parser = super(Run, self).get_parser(prog_name) parser.add_argument("filters", nargs="*", default=None, help="A list of string regex filters to initially " "apply on the test list. Tests that match any of " "the regexes will be used. (assuming any other " "filtering specified also uses it)") parser.add_argument("--failing", action="store_true", default=False, help="Run only tests known to be failing.") parser.add_argument("--serial", action="store_true", default=False, help="Run tests in a serial process.") parser.add_argument("--concurrency", action="store", default=None, type=int, help="How many processes to use. The default (0) " "autodetects your CPU count.") parser.add_argument("--load-list", default=None, help="Only run tests listed in the named file."), parser.add_argument("--partial", action="store_true", default=False, help="DEPRECATED: Only some tests will be run. " "Implied by --failing. This option is deprecated " "and no longer does anything. 
It will be removed " "in the future") parser.add_argument("--subunit", action="store_true", default=False, help="Display results in subunit format.") parser.add_argument("--until-failure", action="store_true", default=False, help="Repeat the run again and again until " "failure occurs.") parser.add_argument("--analyze-isolation", action="store_true", default=False, help="Search the last test run for 2-test test " "isolation interactions.") parser.add_argument("--isolated", action="store_true", default=False, help="Run each test id in a separate test runner.") parser.add_argument("--worker-file", action="store", default=None, dest='worker_path', help="Optional path of a manual worker grouping " "file to use for the run") parser.add_argument('--blacklist-file', '-b', default=None, dest='blacklist_file', help='Path to a blacklist file, this file ' 'contains a separate regex exclude on each ' 'newline') parser.add_argument('--whitelist-file', '-w', default=None, dest='whitelist_file', help='Path to a whitelist file, this file ' 'contains a separate regex on each newline.') parser.add_argument('--black-regex', '-B', default=None, dest='black_regex', help='Test rejection regex. If a test cases name ' 'matches on re.search() operation , ' 'it will be removed from the final test list. ' 'Effectively the black-regexp is added to ' ' black regexp list, but you do need to edit a ' 'file. The black filtering happens after the ' 'initial white selection, which by default is ' 'everything.') parser.add_argument('--no-discover', '-n', default=None, metavar='TEST_ID', help="Takes in a single test to bypasses test " "discover and just execute the test specified. A " "file may be used in place of a test name.") parser.add_argument('--pdb', default=None, metavar='TEST_ID', help="Run a single test id with the intent of " "using pdb. This does not launch any separate " "processes to ensure pdb works as expected. It " "will bypass test discovery and just execute the " "test specified. A file may be used in place of a " "test name.") parser.add_argument('--random', action="store_true", default=False, help="Randomize the test order after they are " "partitioned into separate workers") parser.add_argument('--combine', action='store_true', default=False, help="Combine the results from the test run with " "the last run in the repository") parser.add_argument('--no-subunit-trace', action='store_true', default=False, help='Disable the default subunit-trace output ' 'filter') parser.add_argument('--force-subunit-trace', action='store_true', default=False, help='Force subunit-trace output regardless of any' 'other options or config settings') parser.add_argument('--color', action='store_true', default=False, help='Enable color output in the subunit-trace ' 'output, if subunit-trace output is enabled. ' '(this is the default). 
If subunit-trace is ' 'disable this does nothing.') parser.add_argument('--slowest', action='store_true', default=False, help='After the test run, print the slowest ' 'tests.') parser.add_argument('--abbreviate', action='store_true', dest='abbreviate', help='Print one character status for each test') parser.add_argument('--suppress-attachments', action='store_true', dest='suppress_attachments', help='If set do not print stdout or stderr ' 'attachment contents on a successful test ' 'execution') parser.add_argument('--all-attachments', action='store_true', dest='all_attachments', help='If set print all text attachment contents on' ' a successful test execution') parser.add_argument('--show-binary-attachments', action='store_true', dest='show_binary_attachments', help='If set, show non-text attachments. This is ' 'generally only useful for debug purposes.') return parser def take_action(self, parsed_args): user_conf = user_config.get_user_config(self.app_args.user_config) filters = parsed_args.filters args = parsed_args if args.suppress_attachments and args.all_attachments: msg = ("The --suppress-attachments and --all-attachments " "options are mutually exclusive, you can not use both " "at the same time") print(msg) sys.exit(1) if getattr(user_conf, 'run', False): if not user_conf.run.get('no-subunit-trace'): if not args.no_subunit_trace: pretty_out = True else: pretty_out = False else: pretty_out = False pretty_out = args.force_subunit_trace or pretty_out if args.concurrency is None: concurrency = user_conf.run.get('concurrency', 0) else: concurrency = args.concurrency random = args.random or user_conf.run.get('random', False) color = args.color or user_conf.run.get('color', False) abbreviate = args.abbreviate or user_conf.run.get( 'abbreviate', False) suppress_attachments_conf = user_conf.run.get( 'suppress-attachments', False) all_attachments_conf = user_conf.run.get( 'all-attachments', False) if not args.suppress_attachments and not args.all_attachments: suppress_attachments = suppress_attachments_conf all_attachments = all_attachments_conf elif args.suppress_attachments: all_attachments = False suppress_attachments = args.suppress_attachments elif args.all_attachments: suppress_attachments = False all_attachments = args.all_attachments else: pretty_out = args.force_subunit_trace or not args.no_subunit_trace concurrency = args.concurrency or 0 random = args.random color = args.color abbreviate = args.abbreviate suppress_attachments = args.suppress_attachments all_attachments = args.all_attachments verbose_level = self.app.options.verbose_level stdout = open(os.devnull, 'w') if verbose_level == 0 else sys.stdout # Make sure all (python) callers have provided an int() concurrency = _to_int(concurrency) if concurrency and concurrency < 0: msg = ("The provided concurrency value: %s is not valid. 
An " "integer >= 0 must be used.\n" % concurrency) stdout.write(msg) return 2 result = run_command( config=self.app_args.config, repo_type=self.app_args.repo_type, repo_url=self.app_args.repo_url, test_path=self.app_args.test_path, top_dir=self.app_args.top_dir, group_regex=self.app_args.group_regex, failing=args.failing, serial=args.serial, concurrency=concurrency, load_list=args.load_list, partial=args.partial, subunit_out=args.subunit, until_failure=args.until_failure, analyze_isolation=args.analyze_isolation, isolated=args.isolated, worker_path=args.worker_path, blacklist_file=args.blacklist_file, whitelist_file=args.whitelist_file, black_regex=args.black_regex, no_discover=args.no_discover, random=random, combine=args.combine, filters=filters, pretty_out=pretty_out, color=color, stdout=stdout, abbreviate=abbreviate, suppress_attachments=suppress_attachments, all_attachments=all_attachments, show_binary_attachments=args.show_binary_attachments, pdb=args.pdb) # Always output slowest test info if requested, regardless of other # test run options user_slowest = False if getattr(user_conf, 'run', False): user_slowest = user_conf.run.get('slowest', False) if args.slowest or user_slowest: slowest.slowest(repo_type=self.app_args.repo_type, repo_url=self.app_args.repo_url) return result def _find_failing(repo): run = repo.get_failing() case = run.get_test() ids = [] def gather_errors(test_dict): if test_dict['status'] == 'fail': ids.append(test_dict['id']) result = testtools.StreamToDict(gather_errors) result.startTestRun() try: case.run(result) finally: result.stopTestRun() return ids def run_command(config='.stestr.conf', repo_type='file', repo_url=None, test_path=None, top_dir=None, group_regex=None, failing=False, serial=False, concurrency=0, load_list=None, partial=False, subunit_out=False, until_failure=False, analyze_isolation=False, isolated=False, worker_path=None, blacklist_file=None, whitelist_file=None, black_regex=None, no_discover=False, random=False, combine=False, filters=None, pretty_out=True, color=False, stdout=sys.stdout, abbreviate=False, suppress_attachments=False, all_attachments=False, show_binary_attachments=True, pdb=False): """Function to execute the run command This function implements the run command. It will run the tests specified in the parameters based on the provided config file and/or arguments specified in the way specified by the arguments. The results will be printed to STDOUT and loaded into the repository. :param str config: The path to the stestr config file. Must be a string. :param str repo_type: This is the type of repository to use. Valid choices are 'file' and 'sql'. :param str repo_url: The url of the repository to use. :param str test_path: Set the test path to use for unittest discovery. If both this and the corresponding config file option are set, this value will be used. :param str top_dir: The top dir to use for unittest discovery. This takes precedence over the value in the config file. (if one is present in the config file) :param str group_regex: Set a group regex to use for grouping tests together in the stestr scheduler. If both this and the corresponding config file option are set this value will be used. :param bool failing: Run only tests known to be failing. :param bool serial: Run tests serially :param int concurrency: "How many processes to use. The default (0) autodetects your CPU count and uses that. :param str load_list: The path to a list of test_ids. If specified only tests listed in the named file will be run. 
:param bool partial: DEPRECATED: Only some tests will be run. Implied by
        `--failing`. This flag is deprecated, no longer does anything, and
        will be removed in a future release.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str blacklist_file: Path to a blacklist file, this file contains a
        separate regex exclude on each newline.
    :param str whitelist_file: Path to a whitelist file, this file contains a
        separate regex on each newline.
    :param str black_regex: Test rejection regex. If a test case's name
        matches on a re.search() operation, it will be removed from the final
        test list.
    :param str no_discover: Takes in a single test_id to bypass test
        discovery and just execute the test specified. A file name may be
        used in place of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default
        this is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true attachments subunit_trace
        will not print attachments on successful test execution.
    :param bool all_attachments: When set true subunit_trace will print all
        text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.
    :param str pdb: Takes in a single test_id to bypass test discovery and
        just execute the test specified without launching any additional
        processes. A file name may be used in place of a test name.
    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    if partial:
        warnings.warn('The partial flag is deprecated and has no effect '
                      'anymore')
    try:
        repo = util.get_repo_open(repo_type, repo_url)
    # If a repo is not found, and a testr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            msg = ("No config file found and --test-path not specified. "
                   "Either create or specify a .stestr.conf or use "
                   "--test-path ")
            stdout.write(msg)
            exit(1)
        try:
            repo = util.get_repo_initialise(repo_type, repo_url)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            repo_path = repo_url or './stestr'
            stdout.write('The specified repository directory %s already '
                         'exists. Please check if the repository already '
                         'exists or select a different path\n' % repo_path)
            return 1
    combine_id = None
    concurrency = _to_int(concurrency)
    if concurrency and concurrency < 0:
        msg = ("The provided concurrency value: %s is not valid. "
               "An integer >= 0 must be used.\n" % concurrency)
        stdout.write(msg)
        return 2
    if combine:
        latest_id = repo.latest_id()
        combine_id = str(latest_id)
    if no_discover and pdb:
        msg = ("--no-discover and --pdb are mutually exclusive options, "
               "only specify one at a time")
        stdout.write(msg)
        return 2
    if pdb and until_failure:
        msg = ("pdb mode does not function with the --until-failure flag, "
               "only specify one at a time")
        stdout.write(msg)
        return 2
    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        stestr_python = sys.executable
        if os.environ.get('PYTHON'):
            python_bin = os.environ.get('PYTHON')
        elif stestr_python:
            python_bin = stestr_python
        else:
            raise RuntimeError("The Python interpreter was not found and "
                               "PYTHON is not set")
        run_cmd = python_bin + ' -m stestr.subunit_runner.run ' + ids

        def run_tests():
            run_proc = [('subunit', output.ReturnCodeToSubunit(
                subprocess.Popen(run_cmd, shell=True,
                                 stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments,
                             all_attachments=all_attachments,
                             show_binary_attachments=show_binary_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result
    if pdb:
        ids = pdb
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        runner = subunit_run.SubunitTestRunner
        stream = io.BytesIO()
        program.TestProgram(module=None, argv=['stestr', ids],
                            testRunner=functools.partial(runner,
                                                         stdout=stream))
        stream.seek(0)
        run_proc = [('subunit', stream)]
        return load.load(in_streams=run_proc,
                         subunit_out=subunit_out,
                         repo_type=repo_type,
                         repo_url=repo_url,
                         run_id=combine_id,
                         pretty_out=pretty_out,
                         color=color,
                         stdout=stdout,
                         abbreviate=abbreviate,
                         suppress_attachments=suppress_attachments,
                         all_attachments=all_attachments,
                         show_binary_attachments=show_binary_attachments)
    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
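            # For example (illustrative ids), failing ids {'t.A', 't.B'}
            # intersected with a load-list of {'t.B', 't.C'} reduces the
            # run to just {'t.B'}; ordering is irrelevant here since the
            # scheduler partitions the ids later.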
ids = list_ids.intersection(ids) conf = config_file.TestrConf(config) if not analyze_isolation: cmd = conf.get_run_command( ids, regexes=filters, group_regex=group_regex, repo_type=repo_type, repo_url=repo_url, serial=serial, worker_path=worker_path, concurrency=concurrency, blacklist_file=blacklist_file, whitelist_file=whitelist_file, black_regex=black_regex, top_dir=top_dir, test_path=test_path, randomize=random) if isolated: result = 0 cmd.setUp() try: ids = cmd.list_tests() finally: cmd.cleanUp() for test_id in ids: # TODO(mtreinish): add regex cmd = conf.get_run_command( [test_id], filters, group_regex=group_regex, repo_type=repo_type, repo_url=repo_url, serial=serial, worker_path=worker_path, concurrency=concurrency, blacklist_file=blacklist_file, whitelist_file=whitelist_file, black_regex=black_regex, randomize=random, test_path=test_path, top_dir=top_dir) run_result = _run_tests( cmd, until_failure, subunit_out=subunit_out, combine_id=combine_id, repo_type=repo_type, repo_url=repo_url, pretty_out=pretty_out, color=color, abbreviate=abbreviate, stdout=stdout, suppress_attachments=suppress_attachments, all_attachments=all_attachments, show_binary_attachments=show_binary_attachments) if run_result > result: result = run_result return result else: return _run_tests(cmd, until_failure, subunit_out=subunit_out, combine_id=combine_id, repo_type=repo_type, repo_url=repo_url, pretty_out=pretty_out, color=color, stdout=stdout, abbreviate=abbreviate, suppress_attachments=suppress_attachments, all_attachments=all_attachments, show_binary_attachments=show_binary_attachments) else: # Where do we source data about the cause of conflicts. latest_run = repo.get_latest_run() # Stage one: reduce the list of failing tests (possibly further # reduced by testfilters) to eliminate fails-on-own tests. spurious_failures = set() for test_id in ids: # TODO(mtrienish): Add regex cmd = conf.get_run_command( [test_id], group_regex=group_regex, repo_type=repo_type, repo_url=repo_url, serial=serial, worker_path=worker_path, concurrency=concurrency, blacklist_file=blacklist_file, whitelist_file=whitelist_file, black_regex=black_regex, randomize=random, test_path=test_path, top_dir=top_dir) if not _run_tests(cmd, until_failure): # If the test was filtered, it won't have been run. if test_id in repo.get_test_ids(repo.latest_id()): spurious_failures.add(test_id) # This is arguably ugly, why not just tell the system that # a pass here isn't a real pass? [so that when we find a # test that is spuriously failing, we don't forget # that it is actually failing. # Alternatively, perhaps this is a case for data mining: # when a test starts passing, keep a journal, and allow # digging back in time to see that it was a failure, # what it failed with etc... # The current solution is to just let it get marked as # a pass temporarily. if not spurious_failures: # All done. return 0 bisect_runner = bisect_tests.IsolationAnalyzer( latest_run, conf, _run_tests, repo, test_path=test_path, top_dir=top_dir, group_regex=group_regex, repo_type=repo_type, repo_url=repo_url, serial=serial, concurrency=concurrency) # spurious-failure -> cause. 
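        # Rough description of the call below: bisect_tests() re-runs each
        # spuriously failing test together with bisected subsets of the
        # tests that preceded it in the last run, narrowing down which
        # earlier test triggers the cross-test interaction.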
return bisect_runner.bisect_tests(spurious_failures) def _run_tests(cmd, until_failure, subunit_out=False, combine_id=None, repo_type='file', repo_url=None, pretty_out=True, color=False, stdout=sys.stdout, abbreviate=False, suppress_attachments=False, all_attachments=False, show_binary_attachments=False): """Run the tests cmd was parameterised with.""" cmd.setUp() try: def run_tests(): run_procs = [('subunit', output.ReturnCodeToSubunit( proc)) for proc in cmd.run_tests()] if not run_procs: stdout.write("The specified regex doesn't match with anything") return 1 return load.load((None, None), in_streams=run_procs, subunit_out=subunit_out, repo_type=repo_type, repo_url=repo_url, run_id=combine_id, pretty_out=pretty_out, color=color, stdout=stdout, abbreviate=abbreviate, suppress_attachments=suppress_attachments, all_attachments=all_attachments, show_binary_attachments=show_binary_attachments) if not until_failure: return run_tests() else: while True: result = run_tests() # If we're using subunit output we want to make sure to check # the result from the repository because load() returns 0 # always on subunit output if subunit_out: repo = util.get_repo_open(repo_type, repo_url) summary = testtools.StreamSummary() last_run = repo.get_latest_run().get_subunit_stream() stream = subunit.ByteStreamToStreamResult(last_run) summary.startTestRun() try: stream.run(summary) finally: summary.stopTestRun() if not results.wasSuccessful(summary): result = 1 if result: return result finally: cmd.cleanUp() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/commands/slowest.py0000644000175000017500000000663500000000000024420 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Show the longest running tests in the repository.""" import math from operator import itemgetter import sys from cliff import command from stestr import output from stestr.repository import util class Slowest(command.Command): """Show the slowest tests from the last test run. This command shows a table, with the longest running tests at the top. 
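
    Example output (illustrative names and values only)::

        Test id                                Runtime (s)
        -------------------------------------- -----------
        stestr.tests.test_foo.TestFoo.test_bar 2.501
        stestr.tests.test_foo.TestFoo.test_baz 0.421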
""" def get_parser(self, prog_name): parser = super(Slowest, self).get_parser(prog_name) parser.add_argument( "--all", action="store_true", default=False, help="Show timing for all tests.") return parser def take_action(self, parsed_args): args = parsed_args return slowest(repo_type=self.app_args.repo_type, repo_url=self.app_args.repo_url, show_all=args.all) def format_times(times): times = list(times) precision = 3 digits_before_point = 1 for time in times: if time[1] <= 0: continue digits_before_point = int(math.log10(time[1])) + 1 break min_length = digits_before_point + precision + 1 def format_time(time): # Limit the number of digits after the decimal # place, and also enforce a minimum width # based on the longest duration return "%*.*f" % (min_length, precision, time) times = [(name, format_time(time)) for name, time in times] return times def slowest(repo_type='file', repo_url=None, show_all=False, stdout=sys.stdout): """Print the slowest times from the last run in the repository This function will print to STDOUT the 10 slowests tests in the last run. Optionally, using the ``show_all`` argument, it will print all the tests, instead of just 10. sorted by time. :param str repo_type: This is the type of repository to use. Valid choices are 'file' and 'sql'. :param str repo_url: The url of the repository to use. :param bool show_all: Show timing for all tests. :param file stdout: The output file to write all output to. By default this is sys.stdout :return return_code: The exit code for the command. 0 for success and > 0 for failures. :rtype: int """ repo = util.get_repo_open(repo_type, repo_url) try: latest_id = repo.latest_id() except KeyError: return 3 # what happens when there is no timing info? test_times = repo.get_test_times(repo.get_test_ids(latest_id)) known_times = list(test_times['known'].items()) known_times.sort(key=itemgetter(1), reverse=True) if len(known_times) > 0: # By default show 10 rows if not show_all: known_times = known_times[:10] known_times = format_times(known_times) header = ('Test id', 'Runtime (s)') rows = [header] + known_times output.output_table(rows, output=stdout) return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/config_file.py0000644000175000017500000001743600000000000023364 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import sys import configparser from stestr.repository import util from stestr import test_processor class TestrConf(object): """Create a TestrConf object to represent a specified config file This class is used to represent an stestr config file. It :param str config_file: The path to the config file to use """ _escape_trailing_backslash_re = re.compile(r'(?<=[^\\])\\$') def __init__(self, config_file): self.parser = configparser.ConfigParser() self.parser.read(config_file) self.config_file = config_file def _sanitize_path(self, path): if os.sep == '\\': # Trailing backslashes have to be escaped. 
Otherwise, the
            # command we're issuing will be incorrectly interpreted on
            # Windows.
            path = self._escape_trailing_backslash_re.sub(r'\\\\', path)
        return path

    def get_run_command(self, test_ids=None, regexes=None, test_path=None,
                        top_dir=None, group_regex=None, repo_type='file',
                        repo_url=None, serial=False, worker_path=None,
                        concurrency=0, blacklist_file=None,
                        whitelist_file=None, black_regex=None,
                        randomize=False, parallel_class=None):
        """Get a test_processor.TestProcessorFixture for this config file

        Any parameters about running tests will be used to initialize the
        output fixture so the settings are correct when that fixture is used
        to run tests. Parameters will take precedence over values in the
        config file.

        :param list test_ids: an optional list of test_ids to use when
            running tests
        :param list regexes: an optional list of regex strings to use for
            filtering the tests to run. See the test_filters parameter in
            TestProcessorFixture to see how this is used.
        :param str test_path: Set the test path to use for unittest
            discovery. If both this and the corresponding config file option
            are set, this value will be used.
        :param str top_dir: The top dir to use for unittest discovery. This
            takes precedence over the value in the config file. (if one is
            present in the config file)
        :param str group_regex: Set a group regex to use for grouping tests
            together in the stestr scheduler. If both this and the
            corresponding config file option are set this value will be used.
        :param str repo_type: This is the type of repository to use. Valid
            choices are 'file' and 'sql'.
        :param str repo_url: The url of the repository to use.
        :param bool serial: If tests are run from the returned fixture, they
            will be run serially
        :param str worker_path: Optional path of a manual worker grouping
            file to use for the run.
        :param int concurrency: How many processes to use. The default (0)
            autodetects your CPU count and uses that.
        :param str blacklist_file: Path to a blacklist file, this file
            contains a separate regex exclude on each newline.
        :param str whitelist_file: Path to a whitelist file, this file
            contains a separate regex on each newline.
        :param str black_regex: Test rejection regex. If a test case's name
            matches on a re.search() operation, it will be removed from the
            final test list.
        :param bool randomize: Randomize the test order after they are
            partitioned into separate workers
        :param bool parallel_class: Set the flag to group tests together in
            the stestr scheduler by class. If both this and the corresponding
            config file option which includes `group-regex` are set, this
            value will be used.
        :returns: a TestProcessorFixture object for the specified config file
            and any arguments passed into this function
        :rtype: test_processor.TestProcessorFixture
        """
        if not test_path and self.parser.has_option('DEFAULT', 'test_path'):
            test_path = self.parser.get('DEFAULT', 'test_path')
        elif not test_path:
            sys.exit("No test_path can be found in either the command line "
                     "options nor in the specified config file {}. "
"Please specify a test path either in the config file or "
                     "via the --test-path argument".format(self.config_file))
        if not top_dir and self.parser.has_option('DEFAULT', 'top_dir'):
            top_dir = self.parser.get('DEFAULT', 'top_dir')
        elif not top_dir:
            top_dir = './'
        test_path = self._sanitize_path(test_path)
        top_dir = self._sanitize_path(top_dir)
        stestr_python = sys.executable
        # let's try to be explicit, even if it means a longer set of ifs
        if sys.platform == 'win32':
            # it may happen, albeit rarely
            if not stestr_python:
                raise RuntimeError("The Python interpreter was not found")
            python = stestr_python
        else:
            if os.environ.get('PYTHON'):
                python = '${PYTHON}'
            elif stestr_python:
                python = stestr_python
            else:
                raise RuntimeError("The Python interpreter was not found and "
                                   "PYTHON is not set")
        # The python binary path may contain whitespace; quote it so that
        # the command below is interpreted as a single executable path.
        if ' ' in python:
            python = '"%s"' % python
        command = '%s -m stestr.subunit_runner.run discover -t "%s" "%s" ' \
                  '$LISTOPT $IDOPTION' % (python, top_dir, test_path)
        listopt = "--list"
        idoption = "--load-list $IDFILE"
        # Use a group regex if one is defined
        if parallel_class:
            group_regex = r'([^\.]*\.)*'
        if not group_regex \
                and self.parser.has_option('DEFAULT', 'parallel_class') \
                and self.parser.getboolean('DEFAULT', 'parallel_class'):
            group_regex = r'([^\.]*\.)*'
        if not group_regex and self.parser.has_option('DEFAULT',
                                                      'group_regex'):
            group_regex = self.parser.get('DEFAULT', 'group_regex')
        if group_regex:
            def group_callback(test_id, regex=re.compile(group_regex)):
                match = regex.match(test_id)
                if match:
                    return match.group(0)
        else:
            group_callback = None
        # Handle the results repository
        repository = util.get_repo_open(repo_type, repo_url)
        return test_processor.TestProcessorFixture(
            test_ids, command, listopt, idoption, repository,
            test_filters=regexes, group_callback=group_callback,
            serial=serial, worker_path=worker_path, concurrency=concurrency,
            blacklist_file=blacklist_file, black_regex=black_regex,
            whitelist_file=whitelist_file, randomize=randomize)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/output.py0000644000175000017500000001627500000000000022454 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import io
import sys

import subunit
import testtools


def output_table(table, output=sys.stdout):
    """Display a table of information.

    :param table: A list of rows (tuples or lists) in the table. Each
        element in a row represents a column in the table.
    :param output: The output file object to write the table to. 
By default this is sys.stdout """ # stringify contents = [] for row in table: new_row = [] for column in row: new_row.append(str(column)) contents.append(new_row) if not contents: return widths = [0] * len(contents[0]) for row in contents: for idx, column in enumerate(row): if widths[idx] < len(column): widths[idx] = len(column) # Show a row outputs = [] def show_row(row): for idx, column in enumerate(row): outputs.append(column) if idx == len(row) - 1: outputs.append('\n') return # spacers for the next column outputs.append(' ' * (widths[idx] - len(column))) outputs.append(' ') show_row(contents[0]) # title spacer for idx, width in enumerate(widths): outputs.append('-' * width) if idx == len(widths) - 1: outputs.append('\n') continue outputs.append(' ') for row in contents[1:]: show_row(row) output.write(''.join(outputs)) def output_tests(tests, output=sys.stdout): """Display a list of tests. :param tests: A list of test objects to output :param output: The output file object to write the list to. By default this is sys.stdout """ for test in tests: id_str = test.id() output.write(str(id_str)) output.write('\n') def make_result(get_id, output=sys.stdout): serializer = subunit.StreamResultToBytes(output) # By pass user transforms - just forward it all, result = serializer # and interpret everything as success. summary = testtools.StreamSummary() summary.startTestRun() summary.stopTestRun() return result, summary def output_summary(successful, tests, tests_delta, time, time_delta, values, output=sys.stdout): """Display a summary view for the test run. :param bool successful: Was the test run successful :param int tests: The number of tests that ran :param int tests_delta: The change in the number of tests that ran since the last run :param float time: The number of seconds that it took for the run to execute :param float time_delta: The change in run time since the last run :param values: A list of sets that are used for a breakdown of statuses other than success. Each set is in the format: (status, number of tests, change in number of tests). :param output: The output file object to use. This defaults to stdout """ summary = [] a = summary.append if tests: a("Ran {}".format(tests)) if tests_delta: a(" (%+d)" % (tests_delta,)) a(" tests") if time: if not summary: a("Ran tests") a(" in {:0.3f}s".format(time)) if time_delta: a(" ({:+0.3f}s)".format(time_delta)) if summary: a("\n") if successful: a('PASSED') else: a('FAILED') if values: a(' (') values_strings = [] for name, value, delta in values: value_str = '{}={}'.format(name, value) if delta: value_str += ' (%+d)' % (delta,) values_strings.append(value_str) a(', '.join(values_strings)) a(')') output.write(''.join(summary) + '\n') def output_stream(stream, output=sys.stdout): _binary_stdout = subunit.make_stream_binary(output) contents = stream.read(65536) assert type(contents) is bytes, \ "Bad stream contents %r" % type(contents) # If there are unflushed bytes in the text wrapper, we need to sync.. output.flush() while contents: _binary_stdout.write(contents) contents = stream.read(65536) _binary_stdout.flush() class ReturnCodeToSubunit(object): """Converts a process return code to a subunit error on the process stdout. The ReturnCodeToSubunit object behaves as a read-only stream, supplying the read, readline and readlines methods. If the process exits non-zero a synthetic test is added to the output, making the error accessible to subunit stream consumers. 
If the process closes its stdout and then does not terminate, reading from
    the ReturnCodeToSubunit stream will hang.

    :param process: A subprocess.Popen object that is generating subunit.
    """

    def __init__(self, process):
        """Adapt a process to a readable stream."""
        self.proc = process
        self.done = False
        self.source = self.proc.stdout
        self.lastoutput = b'\n'[0]

    def _append_return_code_as_test(self):
        if self.done is True:
            return
        self.source = io.BytesIO()
        returncode = self.proc.wait()
        if returncode != 0:
            if self.lastoutput != b'\n'[0]:
                # Subunit V1 is line orientated, it has to start on a fresh
                # line. V2 needs to start on any fresh utf8 character border
                # - which is not guaranteed in an arbitrary stream endpoint,
                # so injecting a \n gives us such a guarantee.
                self.source.write(b'\n')
            stream = subunit.StreamResultToBytes(self.source)
            stream.status(test_id='process-returncode', test_status='fail',
                          file_name='traceback',
                          mime_type='text/plain;charset=utf8',
                          file_bytes=(
                              'returncode %d' % returncode).encode('utf8'))
            self.source.seek(0)
        self.done = True

    def read(self, count=-1):
        if count == 0:
            return b''
        result = self.source.read(count)
        if result:
            self.lastoutput = result[-1]
            return result
        self._append_return_code_as_test()
        return self.source.read(count)

    def readline(self):
        result = self.source.readline()
        if result:
            self.lastoutput = result[-1]
            return result
        self._append_return_code_as_test()
        return self.source.readline()

    def readlines(self):
        result = self.source.readlines()
        if result:
            self.lastoutput = result[-1][-1]
        self._append_return_code_as_test()
        result.extend(self.source.readlines())
        return result
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5720925 stestr-3.0.0/stestr/repository/0000755000175000017500000000000000000000000022752 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1482362144.0 stestr-3.0.0/stestr/repository/__init__.py0000644000175000017500000000000000000000000025051 0ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1579641056.0 stestr-3.0.0/stestr/repository/abstract.py0000644000175000017500000001651200000000000025134 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Storage of test results.

A Repository provides storage and indexing of results.

The AbstractRepository class defines the contract to which any Repository
implementation must adhere.

The stestr.repository.file module (see: :ref:`api_repository_file`) is the
usual repository that will be used. The stestr.repository.memory module
(see: :ref:`api_repository_memory`) provides a memory only repository
useful for internal testing.

Repositories are identified by their URL, and new ones are made by calling
the initialize function in the appropriate repository module.
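
A minimal usage sketch (illustrative only; it assumes a repository created
by ``stestr init`` already exists under ./.stestr in the current working
directory)::

    from stestr.repository import util

    repo = util.get_repo_open('file')
    latest = repo.get_latest_run()
    print(latest.get_id())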
""" from testtools import StreamToDict class AbstractRepositoryFactory(object): """Interface for making or opening repositories.""" def initialise(self, url): """Create a repository at URL. Call on the class of the repository you wish to create. """ raise NotImplementedError(self.initialise) def open(self, url): """Open the repository at url. Raise RepositoryNotFound if there is no repository at the given url. """ raise NotImplementedError(self.open) class AbstractRepository(object): """The base class for Repository implementations. There are no interesting attributes or methods as yet. """ def count(self): """Return the number of test runs this repository has stored. :return count: The count of test runs stored in the repository. """ raise NotImplementedError(self.count) def get_failing(self): """Get a TestRun that contains all of and only current failing tests. :return: a TestRun. """ raise NotImplementedError(self.get_failing) def get_inserter(self, partial=False, run_id=None, metadata=None): """Get an inserter that will insert a test run into the repository. Repository implementations should implement _get_inserter. get_inserter() does not add timing data to streams: it should be provided by the caller of get_inserter (e.g. commands.load). :param partial: DEPREACTED: If True, the stream being inserted only executed some tests rather than all the projects tests. This option is deprecated and no longer does anything. It will be removed in the future. :return an inserter: Inserters meet the extended TestResult protocol that testtools 0.9.2 and above offer. The startTestRun and stopTestRun methods in particular must be called. """ return self._get_inserter(partial, run_id, metadata) def _get_inserter(self, partial=False, run_id=None, metadata=None): """Get an inserter for get_inserter. The result is decorated with an AutoTimingTestResultDecorator. """ raise NotImplementedError(self._get_inserter) def get_latest_run(self): """Return the latest run. Equivalent to get_test_run(latest_id()). """ return self.get_test_run(self.latest_id()) def get_test_run(self, run_id): """Retrieve a TestRun object for run_id. :param run_id: The test run id to retrieve. :return: A TestRun object. """ raise NotImplementedError(self.get_test_run) def get_test_times(self, test_ids): """Retrieve estimated times for the tests test_ids. :param test_ids: The test ids to query for timing data. :return: A dict with two keys: 'known' and 'unknown'. The unknown key contains a set with the test ids that did run. The known key contains a dict mapping test ids to time in seconds. """ test_ids = frozenset(test_ids) known_times = self._get_test_times(test_ids) unknown_times = test_ids - set(known_times) return dict(known=known_times, unknown=unknown_times) def _get_test_times(self, test_ids): """Retrieve estimated times for tests test_ids. :param test_ids: The test ids to query for timing data. :return: A dict mapping test ids to duration in seconds. Tests that no timing data is present for should not be returned - the base class get_test_times function will collate the missing test ids and put that in to its result automatically. """ raise NotImplementedError(self._get_test_times) def latest_id(self): """Return the run id for the most recently inserted test run.""" raise NotImplementedError(self.latest_id) def get_test_ids(self, run_id): """Return the test ids from the specified run. :param run_id: the id of the test run to query. :return: a list of test ids for the tests that were part of the specified test run. 
""" run = self.get_test_run(run_id) ids = [] def gather(test_dict): ids.append(test_dict['id']) result = StreamToDict(gather) result.startTestRun() try: run.get_test().run(result) finally: result.stopTestRun() return ids def find_metadata(self, metadata): """Return the list of run_ids for a given metadata string. :param: metadata: the metadata string to search for. :return: a list of any test_ids that have that metadata value. """ raise NotImplementedError(self.find_metadata) class AbstractTestRun(object): """A test run that has been stored in a repository. Should implement the StreamResult protocol as well as the stestr specific methods documented here. """ def get_id(self): """Get the id of the test run. Sometimes test runs will not have an id, e.g. test runs for 'failing'. In that case, this should return None. """ raise NotImplementedError(self.get_id) def get_subunit_stream(self): """Get a subunit stream for this test run.""" raise NotImplementedError(self.get_subunit_stream) def get_test(self): """Get a testtools.TestCase-like object that can be run. :return: A TestCase like object which can be run to get the individual tests reported to a testtools.StreamResult/TestResult. (Clients of repository should provide an ExtendedToStreamDecorator decorator to permit either API to be used). """ raise NotImplementedError(self.get_test) def get_metadata(self): """Get the metadata value for the test run. :return: A string of the metadata or None if it doesn't exist. """ raise NotImplementedError(self.get_metadata) class RepositoryNotFound(Exception): """Raised when we try to open a repository that isn't there.""" def __init__(self, url): self.url = url msg = 'No repository found in %s. Create one by running "stestr init".' Exception.__init__(self, msg % url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1582122864.0 stestr-3.0.0/stestr/repository/file.py0000644000175000017500000003117100000000000024246 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Persistent storage of test results.""" import errno from io import BytesIO from operator import methodcaller import os import sys import tempfile from future.moves.dbm import dumb as my_dbm from subunit import TestProtocolClient import subunit.v2 import testtools from testtools.compat import _b from stestr.repository import abstract as repository from stestr import utils def atomicish_rename(source, target): if os.name != "posix" and os.path.exists(target): os.remove(target) os.rename(source, target) class RepositoryFactory(repository.AbstractRepositoryFactory): def initialise(klass, url): """Create a repository at url/path.""" base = os.path.join(os.path.expanduser(url), '.stestr') try: os.mkdir(base) except OSError as e: if e.errno == errno.EEXIST and not os.listdir(base): # It shouldn't be harmful initializing an empty dir pass else: raise with open(os.path.join(base, 'format'), 'wt') as stream: stream.write('1\n') result = Repository(base) result._write_next_stream(0) return result def open(self, url): path = os.path.expanduser(url) base = os.path.join(path, '.stestr') try: stream = open(os.path.join(base, 'format'), 'rt') except (IOError, OSError) as e: if e.errno == errno.ENOENT: raise repository.RepositoryNotFound(url) raise with stream: if '1\n' != stream.read(): raise ValueError(url) return Repository(base) class Repository(repository.AbstractRepository): """Disk based storage of test results. This repository stores each stream it receives as a file in a directory. Indices are then built on top of this basic store. This particular disk layout is subject to change at any time, as its primarily a bootstrapping exercise at this point. Any changes made are likely to have an automatic upgrade process. """ def __init__(self, base): """Create a file-based repository object for the repo at 'base'. :param base: The path to the repository. """ self.base = base def _allocate(self): # XXX: lock the file. K?! value = self.count() self._write_next_stream(value + 1) return value def _next_stream(self): with open(os.path.join(self.base, 'next-stream'), 'rt') as fp: next_content = fp.read() try: return int(next_content) except ValueError: raise ValueError("Corrupt next-stream file: %r" % next_content) def count(self): return self._next_stream() def latest_id(self): result = self._next_stream() - 1 if result < 0: raise KeyError("No tests in repository") return result def get_failing(self): try: with open(os.path.join(self.base, "failing"), 'rb') as fp: run_subunit_content = fp.read() except IOError: err = sys.exc_info()[1] if err.errno == errno.ENOENT: run_subunit_content = _b('') else: raise return _DiskRun(None, run_subunit_content) def _get_metadata(self, run_id): db = my_dbm.open(self._path('meta.dbm'), 'c') try: metadata = db.get(str(run_id)) finally: db.close() return metadata def get_test_run(self, run_id): try: with open(os.path.join(self.base, str(run_id)), 'rb') as fp: run_subunit_content = fp.read() except IOError as e: if e.errno == errno.ENOENT: raise KeyError("No such run.") else: raise metadata = self._get_metadata(run_id) return _DiskRun(run_id, run_subunit_content, metadata=metadata) def _get_inserter(self, partial, run_id=None, metadata=None): return _Inserter(self, partial, run_id, metadata=metadata) def _get_test_times(self, test_ids): # May be too slow, but build and iterate. # 'c' because an existing repo may be missing a file. 
try: db = my_dbm.open(self._path('times.dbm'), 'c') except my_dbm.error: os.remove(self._path('times.dbm')) db = my_dbm.open(self._path('times.dbm'), 'c') try: result = {} for test_id in test_ids: if type(test_id) != str: test_id = test_id.encode('utf8') stripped_test_id = utils.cleanup_test_name(test_id) # gdbm does not support get(). try: duration = db[stripped_test_id] except KeyError: duration = None if duration is not None: result[test_id] = float(duration) return result finally: db.close()
def _path(self, suffix): return os.path.join(self.base, suffix)
def _write_next_stream(self, value): # Note that this is unlocked and not threadsafe: single # user, repo-per-working-tree model makes this acceptable in the short # term. Likewise we don't fsync - this data isn't valuable enough to # force disk IO. prefix = self._path('next-stream') with open(prefix + '.new', 'wt') as stream: stream.write('%d\n' % value) atomicish_rename(prefix + '.new', prefix)
def find_metadata(self, metadata): run_ids = [] db = my_dbm.open(self._path('meta.dbm'), 'c') try: for run_id in db: if db.get(run_id) == metadata: run_ids.append(run_id) finally: db.close() return run_ids
class _DiskRun(repository.AbstractTestRun): """A test run that was inserted into the repository.""" def __init__(self, run_id, subunit_content, metadata=None): """Create a _DiskRun with the content subunit_content.""" self._run_id = run_id self._content = subunit_content assert type(subunit_content) is bytes self._metadata = metadata
def get_id(self): return self._run_id
def get_subunit_stream(self): # Transcode - we want V2. v1_stream = BytesIO(self._content) v1_case = subunit.ProtocolTestCase(v1_stream) output = BytesIO() output_stream = subunit.v2.StreamResultToBytes(output) output_stream = testtools.ExtendedToStreamDecorator(output_stream) output_stream.startTestRun() try: v1_case.run(output_stream) finally: output_stream.stopTestRun() output.seek(0) return output
def get_test(self): # case = subunit.ProtocolTestCase(self.get_subunit_stream()) case = subunit.ProtocolTestCase(BytesIO(self._content)) def wrap_result(result): # Wrap in a router to mask out startTestRun/stopTestRun from the # ExtendedToStreamDecorator. result = testtools.StreamResultRouter( result, do_start_stop_run=False) # Wrap that in ExtendedToStreamDecorator to convert v1 calls to # StreamResult. return testtools.ExtendedToStreamDecorator(result) return testtools.DecorateTestCaseResult( case, wrap_result, methodcaller('startTestRun'), methodcaller('stopTestRun'))
def get_metadata(self): return self._metadata
class _SafeInserter(object): def __init__(self, repository, partial=False, run_id=None, metadata=None): # XXX: Perhaps should factor into a decorator and use an unaltered # TestProtocolClient. self._repository = repository self._run_id = run_id self._metadata = metadata if not self._run_id: fd, name = tempfile.mkstemp(dir=self._repository.base) self.fname = name stream = os.fdopen(fd, 'wb') else: self.fname = os.path.join(self._repository.base, self._run_id) stream = open(self.fname, 'ab') self.partial = partial # The time taken by each test, flushed at the end.
self._times = {} self._test_start = None self._time = None subunit_client = testtools.StreamToExtendedDecorator( TestProtocolClient(stream)) self.hook = testtools.CopyStreamResult([ subunit_client, testtools.StreamToDict(self._handle_test)]) self._stream = stream def _handle_test(self, test_dict): start, stop = test_dict['timestamps'] if test_dict['status'] == 'exists' or None in (start, stop): return test_id = utils.cleanup_test_name(test_dict['id']) self._times[test_id] = str((stop - start).total_seconds()) def startTestRun(self): self.hook.startTestRun() def stopTestRun(self): self.hook.stopTestRun() self._stream.flush() self._stream.close() run_id = self._name() if not self._run_id: final_path = os.path.join(self._repository.base, str(run_id)) atomicish_rename(self.fname, final_path) if self._metadata: db = my_dbm.open(self._repository._path('meta.dbm'), 'c') try: dbm_run_id = str(run_id) db[dbm_run_id] = str(self._metadata) finally: db.close() # May be too slow, but build and iterate. db = my_dbm.open(self._repository._path('times.dbm'), 'c') try: db_times = {} for key, value in self._times.items(): if type(key) != str: key = key.encode('utf8') db_times[key] = value if getattr(db, 'update', None): db.update(db_times) else: for key, value in db_times.items(): db[key] = value finally: db.close() if not self._run_id: self._run_id = run_id def status(self, *args, **kwargs): self.hook.status(*args, **kwargs) def _cancel(self): """Cancel an insertion.""" self._stream.close() os.unlink(self.fname) def get_id(self): return self._run_id class _FailingInserter(_SafeInserter): """Insert a stream into the 'failing' file.""" def _name(self): return "failing" class _Inserter(_SafeInserter): def _name(self): if not self._run_id: return self._repository._allocate() else: return self._run_id def stopTestRun(self): super(_Inserter, self).stopTestRun() # XXX: locking (other inserts may happen while we update the failing # file). # Combine failing + this run : strip passed tests, add failures. # use memory repo to aggregate. a bit awkward on layering ;). # Should just pull the failing items aside as they happen perhaps. # Or use a router and avoid using a memory object at all. from stestr.repository import memory repo = memory.Repository() if self.partial: # Seed with current failing inserter = testtools.ExtendedToStreamDecorator(repo.get_inserter()) inserter.startTestRun() failing = self._repository.get_failing() failing.get_test().run(inserter) inserter.stopTestRun() inserter = testtools.ExtendedToStreamDecorator( repo.get_inserter(partial=True)) inserter.startTestRun() run = self._repository.get_test_run(self.get_id()) run.get_test().run(inserter) inserter.stopTestRun() # and now write to failing inserter = _FailingInserter(self._repository) _inserter = testtools.ExtendedToStreamDecorator(inserter) _inserter.startTestRun() try: repo.get_failing().get_test().run(_inserter) except Exception: inserter._cancel() raise else: _inserter.stopTestRun() return self.get_id() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1582122864.0 stestr-3.0.0/stestr/repository/memory.py0000644000175000017500000001517200000000000024642 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """In memory storage of test results.""" from extras import try_import from io import BytesIO from operator import methodcaller import subunit import testtools from stestr.repository import abstract as repository OrderedDict = try_import('collections.OrderedDict', dict) class RepositoryFactory(repository.AbstractRepositoryFactory): """A factory that can initialise and open memory repositories. This is used for testing where a repository may be created and later opened, but tests should not see each others repositories. """ def __init__(self): self.repos = {} def initialise(self, url): self.repos[url] = Repository() return self.repos[url] def open(self, url): try: return self.repos[url] except KeyError: raise repository.RepositoryNotFound(url) class Repository(repository.AbstractRepository): """In memory storage of test results.""" def __init__(self): # Test runs: self._runs = [] self._failing = OrderedDict() # id -> test self._times = {} # id -> duration def count(self): return len(self._runs) def get_failing(self): return _Failures(self) def get_test_run(self, run_id): if run_id < 0: raise KeyError("No such run.") return self._runs[run_id] def latest_id(self): result = self.count() - 1 if result < 0: raise KeyError("No tests in repository") return result def _get_inserter(self, partial, run_id=None, metadata=None): return _Inserter(self, partial, run_id, metadata) def _get_test_times(self, test_ids): result = {} for test_id in test_ids: duration = self._times.get(test_id, None) if duration is not None: result[test_id] = duration return result # XXX: Too much duplication between this and _Inserter class _Failures(repository.AbstractTestRun): """Report on failures from a memory repository.""" def __init__(self, repository): self._repository = repository def get_id(self): return None def get_subunit_stream(self): result = BytesIO() serialiser = subunit.v2.StreamResultToBytes(result) serialiser = testtools.ExtendedToStreamDecorator(serialiser) serialiser.startTestRun() try: self.run(serialiser) finally: serialiser.stopTestRun() result.seek(0) return result def get_test(self): def wrap_result(result): # Wrap in a router to mask out startTestRun/stopTestRun from the # ExtendedToStreamDecorator. result = testtools.StreamResultRouter(result, do_start_stop_run=False) # Wrap that in ExtendedToStreamDecorator to convert v1 calls to # StreamResult. return testtools.ExtendedToStreamDecorator(result) return testtools.DecorateTestCaseResult( self, wrap_result, methodcaller('startTestRun'), methodcaller('stopTestRun')) def run(self, result): # Speaks original V1 protocol. 
for case in self._repository._failing.values(): case.run(result) class _Inserter(repository.AbstractTestRun): """Insert test results into a memory repository.""" def __init__(self, repository, partial, run_id=None, metadata=None): self._repository = repository self._partial = partial self._tests = [] # Subunit V2 stream for get_subunit_stream self._subunit = None self._run_id = run_id self._metadata = metadata def startTestRun(self): self._subunit = BytesIO() serialiser = subunit.v2.StreamResultToBytes(self._subunit) self._hook = testtools.CopyStreamResult([ testtools.StreamToDict(self._handle_test), serialiser]) self._hook.startTestRun() def _handle_test(self, test_dict): self._tests.append(test_dict) start, stop = test_dict['timestamps'] if test_dict['status'] == 'exists' or None in (start, stop): return duration_delta = stop - start duration_seconds = ( (duration_delta.microseconds + ( duration_delta.seconds + duration_delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6) self._repository._times[test_dict['id']] = duration_seconds def stopTestRun(self): self._hook.stopTestRun() self._repository._runs.append(self) if not self._run_id: self._run_id = len(self._repository._runs) - 1 if not self._partial: self._repository._failing = OrderedDict() for test_dict in self._tests: test_id = test_dict['id'] if test_dict['status'] == 'fail': case = testtools.testresult.real.test_dict_to_case(test_dict) self._repository._failing[test_id] = case else: self._repository._failing.pop(test_id, None) return self._run_id def status(self, *args, **kwargs): self._hook.status(*args, **kwargs) def get_id(self): return self._run_id def get_subunit_stream(self): self._subunit.seek(0) return self._subunit def get_test(self): def wrap_result(result): # Wrap in a router to mask out startTestRun/stopTestRun from the # ExtendedToStreamDecorator. result = testtools.StreamResultRouter(result, do_start_stop_run=False) # Wrap that in ExtendedToStreamDecorator to convert v1 calls to # StreamResult. return testtools.ExtendedToStreamDecorator(result) return testtools.DecorateTestCaseResult( self, wrap_result, methodcaller('startTestRun'), methodcaller('stopTestRun')) def run(self, result): # Speaks original. for test_dict in self._tests: case = testtools.testresult.real.test_dict_to_case(test_dict) case.run(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/repository/sql.py0000644000175000017500000002771400000000000024136 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
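# NOTE: this backend needs the optional subunit2sql dependencies; per
# stestr/repository/util.py they are installed with:
#
#   pip install 'stestr[sql]'
#
# initialise() below shells out to subunit2sql-db-manage to build the
# schema, equivalent to running (the sqlite URL is just an example):
#
#   subunit2sql-db-manage --database-connection sqlite:///.stestr.sqlite upgrade head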
"""Persistent storage of test results.""" from __future__ import print_function import datetime import io import os.path import re import subprocess import sys import sqlalchemy from sqlalchemy import orm import subunit.v2 from subunit2sql.db import api as db_api from subunit2sql import read_subunit from subunit2sql import shell from subunit2sql import write_subunit import testtools from stestr.repository import abstract as repository from stestr import utils def atomicish_rename(source, target): if os.name != "posix" and os.path.exists(target): os.remove(target) os.rename(source, target) class RepositoryFactory(repository.AbstractRepositoryFactory): def initialise(klass, url): """Create a repository at url/path.""" print("WARNING: The SQL repository type is still experimental. You " "might encounter issues while using it.", file=sys.stderr) result = Repository(url) # TODO(mtreinish): Figure out the python api to run the migrations for # setting up the schema. proc = subprocess.Popen(['subunit2sql-db-manage', '--database-connection', url, 'upgrade', 'head'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() sys.stdout.write(str(out)) sys.stderr.write(str(err)) return result def open(self, url): repo = Repository(url) # To test the repository's existence call get_ids_for_all_tests() # if it raises an OperationalError that means the DB doesn't exist or # it couldn't connect, either way the repository was not found. try: session = repo.session_factory() db_api.get_ids_for_all_tests(session=session) session.close() except sqlalchemy.exc.OperationalError: raise repository.RepositoryNotFound(url) return repo class Repository(repository.AbstractRepository): """subunit2sql based storage of test results. This repository stores each stream in a subunit2sql DB. Refer to the subunit2sql documentation for """ def __init__(self, url): """Create a subunit2sql-based repository object for the repo at 'url'. :param base: The path to the repository. 
""" self.base = url self.engine = sqlalchemy.create_engine(url) self.session_factory = orm.sessionmaker(bind=self.engine) # TODO(mtreinish): We need to add a subunit2sql api to get the count def count(self): super(Repository, self).count() def _get_latest_run(self): session = self.session_factory() latest_run = db_api.get_latest_run(session) session.close() if not latest_run: raise KeyError("No tests in repository") return latest_run def latest_id(self): return self._get_latest_run().uuid def get_failing(self): latest_run = self._get_latest_run() session = self.session_factory() failed_test_runs = db_api.get_test_runs_by_status_for_run_ids( 'fail', [latest_run.id], session=session) session.close() return _Subunit2SqlRun(self.base, None, test_runs=failed_test_runs) def get_test_run(self, run_id): return _Subunit2SqlRun(self.base, run_id) def _get_inserter(self, partial, run_id=None, metadata=None): return _SqlInserter(self, partial, run_id, metadata) def _get_test_times(self, test_ids): result = {} # TODO(mtreinish): after subunit2sql adds a bulk query for getting # multiple tests by test_id at once remove the for loop session = self.session_factory() for test_id in test_ids: stripped_test_id = utils.cleanup_test_name(test_id) test = db_api.get_test_by_test_id(stripped_test_id, session=session) if test: # NOTE(mtreinish): We need to make sure the test_id with attrs # is used in the output dict, otherwise the scheduler won't # see it result[test_id] = test.run_time session.close() return result def find_metadata(self, metadata): session = self.session_factory() runs = db_api.get_runs_by_key_value('stestr_run_meta', metadata, session=session) return [x.uuid for x in runs] class _Subunit2SqlRun(repository.AbstractTestRun): """A test run that was inserted into the repository.""" def __init__(self, url, run_id, test_runs=None): engine = sqlalchemy.create_engine(url) self.session_factory = orm.sessionmaker(bind=engine) self._run_id = run_id self._test_runs = test_runs def get_id(self): return self._run_id def get_subunit_stream(self): stream = io.BytesIO() if self._run_id: session = self.session_factory() test_runs = db_api.get_tests_run_dicts_from_run_id(self._run_id, session) session.close() else: test_runs = self._test_runs output = subunit.v2.StreamResultToBytes(stream) output.startTestRun() for test_id in test_runs: test = test_runs[test_id] # NOTE(mtreinish): test_run_metadata is not guaranteed to be # present for the test run metadata = test.get('metadata', None) write_subunit.write_test(output, test['start_time'], test['stop_time'], test['status'], test_id, metadata) output.stopTestRun() stream.seek(0) return stream def get_test(self): stream = self.get_subunit_stream() case = subunit.ByteStreamToStreamResult(stream) return case def get_metadata(self): if self._run_id: session = self.session_factory() metadata = db_api.get_run_metadata(self._run_id, session=session) for meta in metadata: if meta.key == 'stestr_run_meta': return meta.value return None class _SqlInserter(repository.AbstractTestRun): """Insert test results into a sql repository.""" def __init__(self, repository, partial=False, run_id=None, metadata=None): self._repository = repository self.partial = partial self._subunit = None self._run_id = run_id self._metadata = metadata # Create a new session factory self.engine = sqlalchemy.create_engine(self._repository.base) self.session_factory = orm.sessionmaker(bind=self.engine, autocommit=True) def startTestRun(self): self._subunit = io.BytesIO() self.subunit_stream = 
subunit.v2.StreamResultToBytes(self._subunit) self.hook = testtools.CopyStreamResult([ testtools.StreamToDict(self._handle_test), self.subunit_stream]) self.hook.startTestRun() self.start_time = datetime.datetime.utcnow() session = self.session_factory() if not self._run_id: self.run = db_api.create_run(session=session) if self._metadata: db_api.add_run_metadata({'stestr_run_meta': self._metadata}, self.run.id, session=session) self._run_id = self.run.uuid else: int_id = db_api.get_run_id_from_uuid(self._run_id, session=session) self.run = db_api.get_run_by_id(int_id, session=session) session.close() self.totals = {} def _update_test(self, test_dict, session, start_time, stop_time): test_id = utils.cleanup_test_name(test_dict['id']) db_test = db_api.get_test_by_test_id(test_id, session) if not db_test: if test_dict['status'] == 'success': success = 1 fails = 0 elif test_dict['status'] == 'fail': fails = 1 success = 0 else: fails = 0 success = 0 run_time = read_subunit.get_duration(start_time, stop_time) db_test = db_api.create_test(test_id, (success + fails), success, fails, run_time, session) else: test_dict['start_time'] = start_time test_dict['end_time'] = stop_time test_values = shell.increment_counts(db_test, test_dict) # If skipped nothing to update if test_values: db_api.update_test(test_values, db_test.id, session) return db_test def _get_attrs(self, test_id): attr_regex = re.compile(r'\[(.*)\]') matches = attr_regex.search(test_id) attrs = None if matches: attrs = matches.group(1) return attrs def _handle_test(self, test_dict): start, end = test_dict.pop('timestamps') if test_dict['status'] == 'exists' or None in (start, end): return elif test_dict['id'] == 'process-returncode': return session = self.session_factory() try: # Update the run counts if test_dict['status'] not in self.totals: self.totals[test_dict['status']] = 1 else: self.totals[test_dict['status']] += 1 values = {} if test_dict['status'] in ('success', 'xfail'): values['passes'] = self.totals['success'] elif test_dict['status'] in ('fail', 'uxsuccess'): values['fails'] = self.totals['fail'] elif test_dict['status'] == 'skip': values['skips'] = self.totals['skip'] db_api.update_run(values, self.run.id, session=session) # Update the test totals db_test = self._update_test(test_dict, session, start, end) # Add the test run test_run = db_api.create_test_run(db_test.id, self.run.id, test_dict['status'], start, end, session) metadata = {} attrs = self._get_attrs(test_dict['id']) if attrs: metadata['attrs'] = attrs if test_dict.get('tags', None): metadata['tags'] = ",".join(test_dict['tags']) if metadata: db_api.add_test_run_metadata( metadata, test_run.id, session) # TODO(mtreinish): Add attachments support to the DB. 
session.close() except Exception: session.rollback() raise def stopTestRun(self): self.hook.stopTestRun() stop_time = datetime.datetime.utcnow() self._subunit.seek(0) values = {} values['run_time'] = read_subunit.get_duration(self.start_time, stop_time) session = self.session_factory() db_api.update_run(values, self.run.id, session=session) session.close() def status(self, *args, **kwargs): self.hook.status(*args, **kwargs) def get_id(self): return self._run_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/repository/util.py0000644000175000017500000000510100000000000024276 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import os import sys def _get_default_repo_url(repo_type): if repo_type == 'sql': repo_file = os.path.join(os.getcwd(), '.stestr.sqlite') repo_url = 'sqlite:///' + repo_file elif repo_type == 'file': repo_url = os.getcwd() else: raise TypeError('Unrecognized repository type %s' % repo_type) return repo_url def get_repo_open(repo_type, repo_url=None): """Return an already initialized repo object given the parameters :param str repo_type: The repo module to use for the returned repo :param str repo_url: An optional repo url, if one is not specified the default $CWD/.stestr will be used. """ try: repo_module = importlib.import_module('stestr.repository.' + repo_type) except ImportError: if repo_type == 'sql': sys.exit("sql repository type requirements aren't installed. To " "use the sql repository ensure you installed the extra " "requirements with `pip install 'stestr[sql]'`") else: raise if not repo_url: repo_url = _get_default_repo_url(repo_type) return repo_module.RepositoryFactory().open(repo_url) def get_repo_initialise(repo_type, repo_url=None): """Return a newly initialized repo object given the parameters :param str repo_type: The repo module to use for the returned repo :param str repo_url: An optional repo url, if one is not specified the default $CWD/.stestr will be used. """ try: repo_module = importlib.import_module('stestr.repository.' + repo_type) except ImportError: if repo_type == 'sql': sys.exit("sql repository type requirements aren't installed. 
To " "use the sql repository ensure you installed the extra " "requirements with `pip install 'stestr[sql]'`") else: raise if not repo_url: repo_url = _get_default_repo_url(repo_type) return repo_module.RepositoryFactory().initialise(repo_url) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5720925 stestr-3.0.0/stestr/repository/vcs/0000755000175000017500000000000000000000000023545 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1578172206.0 stestr-3.0.0/stestr/repository/vcs/__init__.py0000644000175000017500000000000000000000000025644 0ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1578235415.0 stestr-3.0.0/stestr/repository/vcs/detect.py0000644000175000017500000000132100000000000025364 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stestr.repository.vcs import git def get_vcs_version(): # TODO(mtreinish): Add additional VCS systems if git.is_repo(): return git.get_revision() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1578324089.0 stestr-3.0.0/stestr/repository/vcs/git.py0000644000175000017500000000211600000000000024702 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Interact with git repos.""" import subprocess def is_repo(): cmd = ['git', 'rev-parse', '--is-inside-work-tree'] proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE) proc.communicate() if proc.returncode > 0: return False return True def get_revision(): cmd = ['git', 'rev-list', '-1', 'HEAD'] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) sha1, _ = proc.communicate() return sha1.decode('utf8') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/results.py0000644000175000017500000001434000000000000022610 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import subunit import testtools from stestr import output def wasSuccessful(summary): return not (summary.errors or summary.failures or summary.unexpectedSuccesses) class SummarizingResult(testtools.StreamSummary): def __init__(self): super(SummarizingResult, self).__init__() def startTestRun(self): super(SummarizingResult, self).startTestRun() self._first_time = None self._last_time = None def status(self, *args, **kwargs): if kwargs.get('timestamp') is not None: timestamp = kwargs['timestamp'] if self._last_time is None: self._first_time = timestamp self._last_time = timestamp if timestamp < self._first_time: self._first_time = timestamp if timestamp > self._last_time: self._last_time = timestamp super(SummarizingResult, self).status(*args, **kwargs) def get_num_failures(self): return len(self.failures) + len(self.errors) def get_time_taken(self): if None in (self._last_time, self._first_time): return None return (self._last_time - self._first_time).total_seconds() class CatFiles(testtools.StreamResult): """Cat file attachments received to a stream.""" def __init__(self, byte_stream): self.stream = subunit.make_stream_binary(byte_stream) self.last_file = None def status(self, test_id=None, test_status=None, test_tags=None, runnable=True, file_name=None, file_bytes=None, eof=False, mime_type=None, route_code=None, timestamp=None): if file_name is None: return if self.last_file != file_name: self.stream.write(("--- %s ---\n" % file_name).encode('utf8')) self.last_file = file_name self.stream.write(file_bytes) self.stream.flush() class CLITestResult(testtools.StreamResult): """A TestResult for the CLI.""" def __init__(self, get_id, stream, previous_run=None): """Construct a CLITestResult writing to stream. :param get_id: A nullary callable that returns the id of the test run. This expects a callable instead of the actual value because in some repository backends the run_id is only generated after stopTestRun() is called. 
:param stream: The stream to use for result :param previous_run: The CLITestResult for the previous run """ super(CLITestResult, self).__init__() self._previous_run = previous_run self._summary = SummarizingResult() self.stream = testtools.compat.unicode_output_stream(stream) self.sep1 = testtools.compat._u('=' * 70 + '\n') self.sep2 = testtools.compat._u('-' * 70 + '\n') self.filterable_states = {'success', 'uxsuccess', 'xfail', 'skip'} self.get_id = get_id def _format_error(self, label, test, error_text, test_tags=None): test_tags = test_tags or () tags = ' '.join(test_tags) if tags: tags = str('tags: %s\n' % tags) return str(''.join([ self.sep1, str('{}: {}\n'.format(label, test.id())), tags, self.sep2, error_text, ])) def status(self, **kwargs): super(CLITestResult, self).status(**kwargs) self._summary.status(**kwargs) test_status = kwargs.get('test_status') test_tags = kwargs.get('test_tags') if test_status == 'fail': self.stream.write( self._format_error(str('FAIL'), *(self._summary.errors[-1]), test_tags=test_tags)) if test_status not in self.filterable_states: return def _get_previous_summary(self): if self._previous_run is None: return None previous_summary = SummarizingResult() previous_summary.startTestRun() test = self._previous_run.get_test() test.run(previous_summary) previous_summary.stopTestRun() return previous_summary def _output_summary(self, run_id): """Output a test run. :param run_id: The run id. """ time = self._summary.get_time_taken() time_delta = None num_tests_run_delta = None num_failures_delta = None values = [('id', run_id, None)] failures = self._summary.get_num_failures() previous_summary = self._get_previous_summary() if failures: if previous_summary: num_failures_delta = failures - \ previous_summary.get_num_failures() values.append(('failures', failures, num_failures_delta)) if previous_summary: num_tests_run_delta = self._summary.testsRun - \ previous_summary.testsRun if time: previous_time_taken = previous_summary.get_time_taken() if previous_time_taken: time_delta = time - previous_time_taken skips = len(self._summary.skipped) if skips: values.append(('skips', skips, None)) output.output_summary( not bool(failures), self._summary.testsRun, num_tests_run_delta, time, time_delta, values, output=self.stream) def startTestRun(self): super(CLITestResult, self).startTestRun() self._summary.startTestRun() def stopTestRun(self): super(CLITestResult, self).stopTestRun() run_id = self.get_id() self._summary.stopTestRun() self._output_summary(run_id) def get_summary(self): return self._summary ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1582122864.0 stestr-3.0.0/stestr/scheduler.py0000644000175000017500000001734600000000000023076 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
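# A sketch of how this module is typically driven (names here are
# illustrative, not part of the API):
#
#   partitions = partition_tests(test_ids, concurrency=4,
#                                repository=repo, group_callback=None)
#
# generate_worker_partitions() instead reads a worker definition file, a
# YAML list of mappings such as:
#
#   - worker:
#     - regex_one
#   - worker:
#     - regex_two
#     concurrency: 2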
import collections import itertools import multiprocessing import operator import random import yaml from stestr import selection
def partition_tests(test_ids, concurrency, repository, group_callback, randomize=False): """Partition test_ids by concurrency. Test durations from the repository are used to get partitions which have roughly the same expected runtime. New tests - those with no recorded duration - are allocated in round-robin fashion to the partitions created using test durations. :param list test_ids: The list of test_ids to be partitioned :param int concurrency: The concurrency that will be used for running the tests. This is the number of partitions that test_ids will be split into. :param repository: A repository object that is used to look up timing data for the tests. :param group_callback: A callback function that is used as a scheduler hint to group test_ids together and treat them as a single unit for scheduling. This function expects a single test_id parameter and it will return a group identifier. Test ids that have the same group identifier will be kept on the same worker. :param bool randomize: If true each partition's test order will be randomized :return: A list where each element is a distinct subset of test_ids, and the union of all the elements is equal to set(test_ids). """ def noop(_): return None _group_callback = group_callback partitions = [list() for i in range(concurrency)] timed_partitions = [[0.0, partition] for partition in partitions] time_data = {} if repository: time_data = repository.get_test_times(test_ids) timed_tests = time_data['known'] unknown_tests = time_data['unknown'] else: timed_tests = {} unknown_tests = set(test_ids)
# Group tests: generate group_id -> test_ids. group_ids = collections.defaultdict(list) if _group_callback is None: group_callback = noop else: group_callback = _group_callback for test_id in test_ids: group_id = group_callback(test_id) or test_id group_ids[group_id].append(test_id)
# Time groups: generate three sets of groups: # - fully timed dict(group_id -> time), # - partially timed dict(group_id -> time) and # - unknown (set of group_id) # We may in future treat partially timed differently for scheduling, but # at least today we just schedule them after the fully timed groups. timed = {} partial = {} unknown = [] for group_id, group_tests in group_ids.items(): untimed_ids = unknown_tests.intersection(group_tests) group_time = sum( [timed_tests[test_id] for test_id in untimed_ids.symmetric_difference( group_tests)]) if not untimed_ids: timed[group_id] = group_time elif group_time: partial[group_id] = group_time else: unknown.append(group_id)
# Scheduling is NP complete in general, so we avoid aiming for # perfection. A quick approximation that is sufficient for our general # needs: # sort the groups by time # allocate to partitions by putting each group into the partition with # the current (lowest time, shortest length [in tests]) def consume_queue(groups): queue = sorted( groups.items(), key=operator.itemgetter(1), reverse=True) for group_id, duration in queue: timed_partitions[0][0] = timed_partitions[0][0] + duration timed_partitions[0][1].extend(group_ids[group_id]) timed_partitions.sort(key=lambda item: (item[0], len(item[1]))) consume_queue(timed) consume_queue(partial) # Assign groups with entirely unknown times in round robin fashion to # the partitions.
for partition, group_id in zip(itertools.cycle(partitions), unknown): partition.extend(group_ids[group_id]) if randomize: out_parts = [] for partition in partitions: temp_part = list(partition) random.shuffle(temp_part) out_parts.append(list(temp_part)) return out_parts else: return partitions def local_concurrency(): """Get the number of available CPUs on the system. :return: An int for the number of cpus. Or None if it couldn't be found """ try: return multiprocessing.cpu_count() except NotImplementedError: # No concurrency logic known. return None def generate_worker_partitions(ids, worker_path, repository=None, group_callback=None, randomize=False): """Parse a worker yaml file and generate test groups :param list ids: A list of test ids too be partitioned :param path worker_path: The path to a worker file :param repository: A repository object that will be used for looking up timing data. This is optional, and also will only be used for scheduling if there is a count field on a worker. :param group_callback: A callback function that is used as a scheduler hint to group test_ids together and treat them as a single unit for scheduling. This function expects a single test_id parameter and it will return a group identifier. Tests_ids that have the same group identifier will be kept on the same worker. This is optional and also will only be used for scheduling if there is a count field on a worker. :param bool randomize: If true each partition's test order will be randomized. This is optional and also will only be used for scheduling if there is a count field on a worker. :returns: A list where each element is a distinct subset of test_ids. """ with open(worker_path, 'r') as worker_file: workers_desc = yaml.safe_load(worker_file.read()) worker_groups = [] for worker in workers_desc: if isinstance(worker, dict) and 'worker' in worker.keys(): if isinstance(worker['worker'], list): local_worker_list = selection.filter_tests( worker['worker'], ids) if 'concurrency' in worker.keys() and worker[ 'concurrency'] > 1: partitioned_tests = partition_tests( local_worker_list, worker['concurrency'], repository, group_callback, randomize) worker_groups.extend(partitioned_tests) else: # If a worker partition is empty don't add it to the output if local_worker_list: worker_groups.append(local_worker_list) else: raise TypeError('The input yaml is the incorrect format') else: raise TypeError('The input yaml is the incorrect format') return worker_groups ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/selection.py0000644000175000017500000001245200000000000023076 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import contextlib import re import sys def filter_tests(filters, test_ids): """Filter test_ids by the test_filters. :param list filters: A list of regex filters to apply to the test_ids. 
The output will contain any test_ids which have a re.search() match for any of the regexes in this list. If this is None all test_ids will be returned :param list test_ids: A list of test_ids that will be filtered :return: A list of test ids. """ if filters is None: return test_ids _filters = [] for f in filters: if isinstance(f, str): try: _filters.append(re.compile(f)) except re.error: print("Invalid regex: %s provided in filters" % f, file=sys.stderr) sys.exit(5) else: _filters.append(f) def include(test_id): for pred in _filters: if pred.search(test_id): return True return list(filter(include, test_ids)) def black_reader(blacklist_file): with contextlib.closing(open(blacklist_file, 'r')) as black_file: regex_comment_lst = [] # tuple of (regex_compiled, msg, skipped_lst) for line in black_file: raw_line = line.strip() split_line = raw_line.split('#') # Before the # is the regex line_regex = split_line[0].strip() if len(split_line) > 1: # After the # is a comment comment = ''.join(split_line[1:]).strip() else: comment = 'Skipped because of regex %s:' % line_regex if not line_regex: continue try: regex_comment_lst.append((re.compile(line_regex), comment, [])) except re.error: print("Invalid regex: %s in provided blacklist file" % line_regex, file=sys.stderr) sys.exit(5) return regex_comment_lst def _get_regex_from_whitelist_file(file_path): lines = [] for line in open(file_path).read().splitlines(): split_line = line.strip().split('#') # Before the # is the regex line_regex = split_line[0].strip() if line_regex: try: lines.append(re.compile(line_regex)) except re.error: print("Invalid regex: %s in provided whitelist file" % line_regex, file=sys.stderr) sys.exit(5) return lines def construct_list(test_ids, blacklist_file=None, whitelist_file=None, regexes=None, black_regex=None): """Filters the discovered test cases :param list test_ids: The set of test_ids to be filtered :param str blacklist_file: The path to a blacklist file :param str whitelist_file: The path to a whitelist file :param list regexes: A list of regex filters to apply to the test_ids. The output will contain any test_ids which have a re.search() match for any of the regexes in this list. If this is None all test_ids will be returned :param str black_regex: :return: iterable of strings. 
The strings are full test_ids :rtype: set """ if not regexes: regexes = None # handle the other false things white_re = None if whitelist_file: white_re = _get_regex_from_whitelist_file(whitelist_file) if not regexes and white_re: regexes = white_re elif regexes and white_re: regexes += white_re if blacklist_file: black_data = black_reader(blacklist_file) else: black_data = None if black_regex: msg = "Skipped because of regexp provided as a command line argument:" try: record = (re.compile(black_regex), msg, []) except re.error: print("Invalid regex: %s used for black_regex" % black_regex, file=sys.stderr) sys.exit(5) if black_data: black_data.append(record) else: black_data = [record] list_of_test_cases = filter_tests(regexes, test_ids) set_of_test_cases = set(list_of_test_cases) if not black_data: return set_of_test_cases # NOTE(afazekas): We might use a faster logic when the # print option is not requested for (rex, msg, s_list) in black_data: for test_case in list_of_test_cases: if rex.search(test_case): # NOTE(mtreinish): In the case of overlapping regex the test # case might have already been removed from the set of tests if test_case in set_of_test_cases: set_of_test_cases.remove(test_case) s_list.append(test_case) return set_of_test_cases ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5720925 stestr-3.0.0/stestr/subunit_runner/0000755000175000017500000000000000000000000023615 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/subunit_runner/__init__.py0000644000175000017500000000000000000000000025714 0ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/subunit_runner/program.py0000644000175000017500000002166400000000000025647 0ustar00computertrekercomputertreker00000000000000# Copyright 2019 Matthew Treinish # Copyright (c) 2009 testtools developers. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import os import sys import unittest import extras def filter_by_ids(suite_or_case, test_ids): """Remove tests from suite_or_case where their id is not in test_ids. :param suite_or_case: A test suite or test case. :param test_ids: Something that supports the __contains__ protocol. :return: suite_or_case, unless suite_or_case was a case that itself fails the predicate when it will return a new unittest.TestSuite with no contents. For subclasses of TestSuite, filtering is done by: - attempting to call suite.filter_by_ids(test_ids) - if there is no method, iterating the suite and identifying tests to remove, then removing them from _tests, manually recursing into each entry. 
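For example, a compatible custom suite could be sketched as (hypothetical and deliberately non-recursive)::

    class FilterableSuite(unittest.TestSuite):
        def filter_by_ids(self, test_ids):
            return FilterableSuite(
                [case for case in self if case.id() in test_ids])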
For objects with an id() method - TestCases, filtering is done by: - attempting to return case.filter_by_ids(test_ids) - if there is no such method, checking for case.id() in test_ids and returning case if it is, or TestSuite() if it is not. For anything else, it is not filtered - it is returned as-is. To provide compatibility with this routine for a custom TestSuite, just define a filter_by_ids() method that will return a TestSuite equivalent to the original minus any tests not in test_ids. Similarly to provide compatibility for a custom TestCase that does something unusual define filter_by_ids to return a new TestCase object that will only run test_ids that are in the provided container. If none would run, return an empty TestSuite(). The contract for this function does not require mutation - each filtered object can choose to return a new object with the filtered tests. However because existing custom TestSuite classes in the wild do not have this method, we need a way to copy their state correctly which is tricky: thus the backwards-compatible code paths attempt to mutate in place rather than guessing how to reconstruct a new suite. """ # Compatible objects if extras.safe_hasattr(suite_or_case, 'filter_by_ids'): return suite_or_case.filter_by_ids(test_ids) # TestCase objects. if extras.safe_hasattr(suite_or_case, 'id'): if suite_or_case.id() in test_ids: return suite_or_case else: return unittest.TestSuite() # Standard TestSuites or derived classes [assumed to be mutable]. if isinstance(suite_or_case, unittest.TestSuite): filtered = [] for item in suite_or_case: filtered.append(filter_by_ids(item, test_ids)) suite_or_case._tests[:] = filtered # Everything else: return suite_or_case def iterate_tests(test_suite_or_case): """Iterate through all of the test cases in 'test_suite_or_case'.""" try: suite = iter(test_suite_or_case) except TypeError: yield test_suite_or_case else: for test in suite: for subtest in iterate_tests(test): yield subtest def list_test(test): """Return the test ids that would be run if test() was run. When things fail to import they can be represented as well, though we use an ugly hack (see http://bugs.python.org/issue19746 for details) to determine that. The difference matters because if a user is filtering tests to run on the returned ids, a failed import can reduce the visible tests but it can be impossible to tell that the selected test would have been one of the imported ones. :return: A tuple of test ids that would run and error strings describing things that failed to import. """ unittest_import_strs = { 'unittest2.loader.ModuleImportFailure.', 'unittest.loader.ModuleImportFailure.', 'discover.ModuleImportFailure.' } test_ids = [] errors = [] for test in iterate_tests(test): # Much ugly. 
for prefix in unittest_import_strs: if test.id().startswith(prefix): errors.append(test.id()[len(prefix):]) break else: test_ids.append(test.id()) return test_ids, errors class TestProgram(unittest.TestProgram): # defaults for testing module = None verbosity = 1 failfast = catchbreak = buffer = progName = None _discovery_parser = None def __init__(self, module='__main__', defaultTest=None, argv=None, testRunner=None, testLoader=unittest.defaultTestLoader, exit=False, verbosity=1, failfast=None, catchbreak=None, buffer=None, warnings=None, tb_locals=False): if isinstance(module, str): self.module = __import__(module) for part in module.split('.')[1:]: self.module = getattr(self.module, part) else: self.module = module if argv is None: argv = sys.argv self.exit = exit self.failfast = failfast self.catchbreak = catchbreak self.verbosity = verbosity self.buffer = buffer self.tb_locals = tb_locals if warnings is None and not sys.warnoptions: # even if DeprecationWarnings are ignored by default # print them anyway unless other warnings settings are # specified by the warnings arg or the -W python flag self.warnings = 'default' else: # here self.warnings is set either to the value passed # to the warnings args or to None. # If the user didn't pass a value self.warnings will # be None. This means that the behavior is unchanged # and depends on the values passed to -W. self.warnings = warnings self.defaultTest = defaultTest # XXX: Local edit (see http://bugs.python.org/issue22860) self.listtests = False self.load_list = None self.testRunner = testRunner self.testLoader = testLoader self.progName = os.path.basename(argv[0]) self.parseArgs(argv) # XXX: Local edit (see http://bugs.python.org/issue22860) if self.load_list: # TODO(mtreinish): preserve existing suites (like testresources # does in OptimisingTestSuite.add, but with a standard protocol). # This is needed because the load_tests hook allows arbitrary # suites, even if that is rarely used. 
with open(self.load_list, 'rb') as source: lines = source.readlines() test_ids = {line.strip().decode('utf-8') for line in lines} self.test = filter_by_ids(self.test, test_ids) # XXX: Local edit (see http://bugs.python.org/issue22860) if not self.listtests: self.runTests() else: runner = self._get_runner() if extras.safe_hasattr(runner, 'list'): try: runner.list(self.test, loader=self.testLoader) except TypeError: runner.list(self.test) else: for test in iterate_tests(self.test): sys.stdout.write('%s\n' % test.id()) def _getParentArgParser(self): parser = super(TestProgram, self)._getParentArgParser() # XXX: Local edit (see http://bugs.python.org/issue22860) parser.add_argument( '-l', '--list', dest='listtests', default=False, action='store_true', help='List tests rather than executing them') parser.add_argument( '--load-list', dest='load_list', default=None, help='Specifies a file containing test ids; only tests matching ' 'those ids are executed') return parser def _get_runner(self): testRunner = self.testRunner try: testRunner = self.testRunner(failfast=self.failfast, tb_locals=self.tb_locals) except TypeError: testRunner = self.testRunner() # If for some reason we failed to initialize the runner, initialize # with defaults if isinstance(testRunner, functools.partial): testRunner = self.testRunner() return testRunner def runTests(self): if self.catchbreak: unittest.installHandler() testRunner = self._get_runner() self.result = testRunner.run(self.test) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/subunit_runner/run.py0000644000175000017500000000577000000000000025004 0ustar00computertrekercomputertreker00000000000000# Copyright 2019 Matthew Treinish # Copyright (C) Jelmer Vernooij 2007 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from functools import partial import os import sys from subunit import StreamResultToBytes from subunit.test_results import AutoTimingTestResultDecorator from testtools import ExtendedToStreamDecorator from stestr.subunit_runner import program class SubunitTestRunner(object): def __init__(self, failfast=False, tb_locals=False, stdout=sys.stdout): """Create a Test Runner. :param failfast: Stop running tests at the first failure. :param stdout: Output stream parameter, defaults to sys.stdout :param tb_locals: If set to true, local variables will be shown in tracebacks All output is written to the stdout stream supplied at construction time. """ self.failfast = failfast self.stream = stdout self.tb_locals = tb_locals def run(self, test): "Run the given test case or test suite." result, _ = self._list(test) result = ExtendedToStreamDecorator(result) result = AutoTimingTestResultDecorator(result) if self.failfast is not None: result.failfast = self.failfast result.tb_locals = self.tb_locals result.startTestRun() try: test(result) finally: result.stopTestRun() return result def list(self, test, loader=None): "List the test."
result, errors = self._list(test) if loader is not None: errors = loader.errors if errors: failed_descr = '\n'.join(errors).encode('utf8') result.status(file_name="import errors", runnable=False, file_bytes=failed_descr, mime_type="text/plain;charset=utf8") sys.exit(2) def _list(self, test): test_ids, errors = program.list_test(test) try: fileno = self.stream.fileno() except Exception: fileno = None if fileno is not None: stream = os.fdopen(fileno, 'wb', 0) else: stream = self.stream result = StreamResultToBytes(stream) for test_id in test_ids: result.status(test_id=test_id, test_status='exists') return result, errors def main(): runner = SubunitTestRunner program.TestProgram( module=None, argv=sys.argv, testRunner=partial(runner, stdout=sys.stdout)) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462488.0 stestr-3.0.0/stestr/subunit_trace.py0000644000175000017500000003677100000000000023762 0ustar00computertrekercomputertreker00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # Copyright 2014 Samsung Electronics # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Trace a subunit stream in reasonable detail and high accuracy.""" from __future__ import absolute_import from __future__ import print_function import argparse import functools import os import re import sys import pbr.version import subunit import testtools from stestr import colorizer from stestr import results # NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module # was renamed to dbm.ndbm, this block takes that into account try: import anydbm as dbm except ImportError: import dbm DAY_SECONDS = 60 * 60 * 24 FAILS = [] RESULTS = {} def total_seconds(timedelta): # NOTE(mtreinish): This method is built-in to the timedelta class in # python >= 2.7; it is here to enable its use on older versions return ((timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6 + timedelta.microseconds) / 10 ** 6 def cleanup_test_name(name, strip_tags=True, strip_scenarios=False): """Clean up the test name for display. By default we strip out the tags in the test because they don't help us in matching the test that was run to its result. Make it possible to strip out the testscenarios information (not to be confused with tempest scenarios); however, that information is often needed to identify generated negative tests.
""" if strip_tags: tags_start = name.find('[') tags_end = name.find(']') if tags_start > 0 and tags_end > tags_start: newname = name[:tags_start] newname += name[tags_end + 1:] name = newname if strip_scenarios: tags_start = name.find('(') tags_end = name.find(')') if tags_start > 0 and tags_end > tags_start: newname = name[:tags_start] newname += name[tags_end + 1:] name = newname return name def get_duration(timestamps): start, end = timestamps if not start or not end: duration = '' else: delta = end - start duration = '%d.%06ds' % ( delta.days * DAY_SECONDS + delta.seconds, delta.microseconds) return duration def find_worker(test): """Get the worker number. If there are no workers because we aren't in a concurrent environment, assume the worker number is 0. """ for tag in test['tags']: if tag.startswith('worker-'): return int(tag[7:]) return 0 # Print out stdout/stderr if it exists, always def print_attachments(stream, test, all_channels=False, show_binary_attachments=False): """Print out subunit attachments. Print out subunit attachments that contain content. This runs in 2 modes, one for successes where we print out just stdout and stderr, and an override that dumps all the attachments. """ channels = ('stdout', 'stderr') for name, detail in test['details'].items(): # NOTE(sdague): the subunit names are a little crazy, and actually # are in the form pythonlogging:'' (with the colon and quotes) name = name.split(':')[0] if detail.content_type.type == 'test': detail.content_type.type = 'text' if all_channels or name in channels: title = "Captured %s:" % name stream.write("\n{}\n{}\n".format(title, ('~' * len(title)))) # indent attachment lines 4 spaces to make them visually # offset if detail.content_type.type == 'text': for line in detail.iter_text(): stream.write(" %s\n" % line) elif show_binary_attachments: # binary for line in detail.iter_bytes(): stream.write(" %s\n" % line) def find_test_run_time_diff(test_id, run_time): times_db_path = os.path.join(os.path.join(os.getcwd(), '.testrepository'), 'times.dbm') if os.path.isfile(times_db_path): try: test_times = dbm.open(times_db_path) except Exception: return False try: avg_runtime = float(test_times.get(str(test_id), False)) except Exception: try: avg_runtime = float(test_times[str(test_id)]) except Exception: avg_runtime = False if avg_runtime and avg_runtime > 0: run_time = float(run_time.rstrip('s')) perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100 return perc_diff return False def show_outcome(stream, test, print_failures=False, failonly=False, enable_diff=False, threshold='0', abbreviate=False, enable_color=False, suppress_attachments=False, all_attachments=False, show_binary_attachments=True): global RESULTS status = test['status'] # TODO(sdague): ask lifeless why on this? if status == 'exists': return worker = find_worker(test) name = cleanup_test_name(test['id']) duration = get_duration(test['timestamps']) if worker not in RESULTS: RESULTS[worker] = [] RESULTS[worker].append(test) # don't count the end of the return code as a fail if name == 'process-returncode': return for color in [colorizer.AnsiColorizer, colorizer.NullColorizer]: if not enable_color: color = colorizer.NullColorizer(stream) break if color.supported(): color = color(stream) break if status == 'fail' or status == 'uxsuccess': FAILS.append(test) if abbreviate: color.write('F', 'red') else: stream.write('{{{}}} {} [{}] ... 
'.format( worker, name, duration)) color.write('FAILED', 'red') stream.write('\n') if not print_failures: print_attachments( stream, test, all_channels=True, show_binary_attachments=show_binary_attachments) elif not failonly: if status == 'success' or status == 'xfail': if abbreviate: color.write('.', 'green') else: out_string = '{{{}}} {} [{}'.format(worker, name, duration) perc_diff = find_test_run_time_diff(test['id'], duration) if enable_diff: if perc_diff and abs(perc_diff) >= abs(float(threshold)): if perc_diff > 0: out_string = out_string + ' +%.2f%%' % perc_diff else: out_string = out_string + ' %.2f%%' % perc_diff stream.write(out_string + '] ... ') color.write('ok', 'green') stream.write('\n') if not suppress_attachments: print_attachments( stream, test, all_channels=all_attachments, show_binary_attachments=show_binary_attachments) elif status == 'skip': if abbreviate: color.write('S', 'blue') else: reason = test['details'].get('reason', '') if reason: reason = ': ' + reason.as_text() stream.write('{{{}}} {} ... '.format( worker, name)) color.write('SKIPPED', 'blue') stream.write('%s' % (reason)) stream.write('\n') else: if abbreviate: stream.write('%s' % test['status'][0]) else: stream.write('{{{}}} {} [{}] ... {}\n'.format( worker, name, duration, test['status'])) if not print_failures: print_attachments( stream, test, all_channels=True, show_binary_attachments=show_binary_attachments) stream.flush() def print_fails(stream): """Print summary failure report. Currently unused, however there remains debate on inline vs. at end reporting, so leave the utility function for later use. """ if not FAILS: return stream.write("\n==============================\n") stream.write("Failed %s tests - output below:" % len(FAILS)) stream.write("\n==============================\n") for f in FAILS: stream.write("\n%s\n" % f['id']) stream.write("%s\n" % ('-' * len(f['id']))) print_attachments(stream, f, all_channels=True) stream.write('\n') def count_tests(key, value): count = 0 for k, v in RESULTS.items(): for item in v: if key in item: if re.search(value, item[key]): count += 1 return count def run_time(): runtime = 0.0 for k, v in RESULTS.items(): for test in v: test_dur = get_duration(test['timestamps']).strip('s') # NOTE(toabctl): get_duration() can return an empty string # which leads to a ValueError when casting to float if test_dur: runtime += float(test_dur) return runtime def worker_stats(worker): tests = RESULTS[worker] num_tests = len(tests) stop_time = tests[-1]['timestamps'][1] start_time = tests[0]['timestamps'][0] if not start_time or not stop_time: delta = 'N/A' else: delta = stop_time - start_time return num_tests, str(delta) def print_summary(stream, elapsed_time): stream.write("\n======\nTotals\n======\n") stream.write("Ran: {} tests in {:.4f} sec.\n".format( count_tests('status', '.*'), total_seconds(elapsed_time))) stream.write(" - Passed: %s\n" % count_tests('status', '^success$')) stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$')) stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$')) stream.write(" - Unexpected Success: %s\n" % count_tests('status', '^uxsuccess$')) stream.write(" - Failed: %s\n" % count_tests('status', '^fail$')) stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time()) # we could have no results, especially as we filter out the process-codes if RESULTS: stream.write("\n==============\nWorker Balance\n==============\n") for w in range(max(RESULTS.keys()) + 1): if w not in RESULTS: stream.write( " - 
WARNING: missing Worker %s!\n" % w) else: num, time = worker_stats(w) out_str = " - Worker {} ({} tests) => {}".format(w, num, time) if time.isdigit(): out_str += 's' out_str += '\n' stream.write(out_str) __version__ = pbr.version.VersionInfo('stestr').version_string() def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version='%s' % __version__) parser.add_argument('--no-failure-debug', '-n', action='store_true', dest='print_failures', help='Disable printing failure ' 'debug information in real time') parser.add_argument('--fails', '-f', action='store_true', dest='post_fails', help='Print failure debug ' 'information after the stream is processed') parser.add_argument('--failonly', action='store_true', dest='failonly', help="Don't print success items", default=( os.environ.get('TRACE_FAILONLY', False) is not False)) parser.add_argument('--abbreviate', '-a', action='store_true', dest='abbreviate', help='Print one character status ' 'for each test') parser.add_argument('--perc-diff', '-d', action='store_true', dest='enable_diff', help="Print percent change in run time on each test") parser.add_argument('--diff-threshold', '-t', dest='threshold', help="Threshold to use for displaying percent change " "from the avg run time. If one is not specified, " "the percent change will always be displayed") parser.add_argument('--no-summary', action='store_true', help="Don't print the summary of the test run after " "it completes") parser.add_argument('--color', action='store_true', help="Print results with colors") return parser.parse_args() def trace(stdin, stdout, print_failures=False, failonly=False, enable_diff=False, abbreviate=False, color=False, post_fails=False, no_summary=False, suppress_attachments=False, all_attachments=False, show_binary_attachments=False): stream = subunit.ByteStreamToStreamResult( stdin, non_subunit_name='stdout') outcomes = testtools.StreamToDict( functools.partial(show_outcome, stdout, print_failures=print_failures, failonly=failonly, enable_diff=enable_diff, abbreviate=abbreviate, enable_color=color, suppress_attachments=suppress_attachments, all_attachments=all_attachments, show_binary_attachments=show_binary_attachments)) summary = testtools.StreamSummary() result = testtools.CopyStreamResult([outcomes, summary]) result = testtools.StreamResultRouter(result) cat = subunit.test_results.CatFiles(stdout) result.add_rule(cat, 'test_id', test_id=None) result.startTestRun() try: stream.run(result) finally: result.stopTestRun() if count_tests('status', '.*') == 0: print("The test run didn't actually run any tests", file=sys.stderr) return 1 # NOTE: the empty-run check above must happen before the min()/max() # calls below, which raise ValueError on empty sequences start_times = [] stop_times = [] for worker in RESULTS: start_times += [x['timestamps'][0] for x in RESULTS[worker]] stop_times += [x['timestamps'][1] for x in RESULTS[worker]] start_time = min(start_times) stop_time = max(stop_times) elapsed_time = stop_time - start_time if post_fails: print_fails(stdout) if not no_summary: print_summary(stdout, elapsed_time) # NOTE(mtreinish): Ideally this should live in testtools streamSummary # this is just in place until the behavior lands there (if it ever does) if count_tests('status', '^success$') == 0: print("\nNo tests were successful during the run", file=sys.stderr) return 1 return 0 if results.wasSuccessful(summary) else 1 def main(): args = parse_args() sys.exit(trace(sys.stdin, sys.stdout, args.print_failures, args.failonly, args.enable_diff, args.abbreviate, args.color, args.post_fails, args.no_summary)) if __name__ == '__main__': main()
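# Example (an illustrative sketch, not part of this module's code): trace()
# can be driven programmatically by handing it a binary subunit stream and an
# output stream; the file name below is hypothetical:
#
#     import sys
#     from stestr import subunit_trace
#
#     with open('my_run.subunit', 'rb') as stdin:
#         exit_code = subunit_trace.trace(stdin, sys.stdout, color=True)
#
# trace() returns 0 on success and 1 when no tests ran or any test failed,
# matching the exit code main() passes to sys.exit().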
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/test_processor.py0000644000175000017500000002575700000000000024173 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import os import re import signal import subprocess import sys import tempfile import fixtures from subunit import v2 from stestr import results from stestr import scheduler from stestr import selection from stestr import testlist class TestProcessorFixture(fixtures.Fixture): """Write a temporary file to disk with test ids in it. The TestProcessorFixture is used to handle the lifecycle of running the subunit.run commands. A fixture is used for this class to handle the creation of the temporary list files. :param test_ids: The test_ids to use. May be None indicating that no ids are known and they should be discovered by listing or configuration if they must be known to run tests. Test ids are needed to run tests when filtering or partitioning is needed: if the run concurrency is > 1 partitioning is needed, and filtering is needed if the user has passed in filters. :param cmd_template: string to be used for the command that will be filled out with the IDFILE when it is created. :param listopt: Option to substitute into LISTOPT to cause test listing to take place. :param idoption: Option to substitute into cmd when supplying any test ids. :param repository: The repository to query for test times, if needed. :param parallel: If not True, prohibit parallel use: used to implement --parallel run recursively. :param listpath: The file listing path to use. If None, a unique path is created. :param test_filters: An optional list of test filters to apply. Each filter should be a string suitable for passing to re.compile. Filters are applied using search() rather than match(), so if anchoring is needed it should be included in the regex. The test ids used for executing are the union of all the individual filters: to take the intersection instead, craft a single regex that matches all your criteria. Filters are automatically applied by run_tests(), or can be applied by calling filter_tests(test_ids). :param group_callback: If supplied, should be a function that accepts a test id and returns a group id. A group id is an arbitrary value used as a dictionary key in the scheduler. All test ids with the same group id are scheduled onto the same backend test process. :param bool serial: Run tests serially :param path worker_path: Optional path of a manual worker grouping file to use for the run :param int concurrency: How many processes to use. The default (0) autodetects your CPU count and uses that. :param path blacklist_file: Path to a blacklist file; this file contains one exclusion regex per line. :param str black_regex: A single exclusion regex applied from the command line; tests matching it are excluded, like a one-entry blacklist file. :param path whitelist_file: Path to a whitelist file; this file contains one regex per line.
:param boolean randomize: Randomize the order of tests after they are partitioned into separate workers """ def __init__(self, test_ids, cmd_template, listopt, idoption, repository, parallel=True, listpath=None, test_filters=None, group_callback=None, serial=False, worker_path=None, concurrency=0, blacklist_file=None, black_regex=None, whitelist_file=None, randomize=False): """Create a TestProcessorFixture.""" self.test_ids = test_ids self.template = cmd_template self.listopt = listopt self.idoption = idoption self.repository = repository self.parallel = parallel if serial: self.parallel = False self._listpath = listpath self.test_filters = test_filters self._group_callback = group_callback self.worker_path = worker_path self.concurrency_value = concurrency self.blacklist_file = blacklist_file self.whitelist_file = whitelist_file self.black_regex = black_regex self.randomize = randomize def setUp(self): super(TestProcessorFixture, self).setUp() variable_regex = r'\$(IDOPTION|IDFILE|IDLIST|LISTOPT)' variables = {} list_variables = {'LISTOPT': self.listopt} cmd = self.template default_idstr = None def list_subst(match): return list_variables.get(match.groups(1)[0], '') self.list_cmd = re.sub(variable_regex, list_subst, cmd) nonparallel = not self.parallel selection_logic = (self.test_filters or self.blacklist_file or self.whitelist_file or self.black_regex) if nonparallel: self.concurrency = 1 else: self.concurrency = None if self.concurrency_value: self.concurrency = int(self.concurrency_value) if not self.concurrency: self.concurrency = scheduler.local_concurrency() if not self.concurrency: self.concurrency = 1 if self.test_ids is None: if self.concurrency == 1: if default_idstr: self.test_ids = default_idstr.split() if self.concurrency != 1 or selection_logic or self.worker_path: # Have to be able to tell each worker what to run / filter # tests. self.test_ids = self.list_tests() if self.test_ids is None: # No test ids to supply to the program. self.list_file_name = None name = '' idlist = '' else: self.test_ids = selection.construct_list( self.test_ids, blacklist_file=self.blacklist_file, whitelist_file=self.whitelist_file, regexes=self.test_filters, black_regex=self.black_regex) name = self.make_listfile() variables['IDFILE'] = name idlist = ' '.join(self.test_ids) variables['IDLIST'] = idlist def subst(match): return variables.get(match.groups(1)[0], '') if self.test_ids is None: # No test ids, no id option. idoption = '' else: idoption = re.sub(variable_regex, subst, self.idoption) variables['IDOPTION'] = idoption self.cmd = re.sub(variable_regex, subst, cmd) def make_listfile(self): name = None try: if self._listpath: name = self._listpath stream = open(name, 'wb') else: fd, name = tempfile.mkstemp() stream = os.fdopen(fd, 'wb') with stream: self.list_file_name = name testlist.write_list(stream, self.test_ids) except Exception: if name: os.unlink(name) raise self.addCleanup(os.unlink, name) return name def _clear_SIGPIPE(self): """Clear SIGPIPE: child processes expect the default handler.""" signal.signal(signal.SIGPIPE, signal.SIG_DFL) def _start_process(self, cmd): # NOTE(claudiub): Windows does not support passing in a preexec_fn # argument. preexec_fn = None if sys.platform == 'win32' else self._clear_SIGPIPE return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, preexec_fn=preexec_fn) def list_tests(self): """List the tests returned by list_cmd. :return: A list of test ids.
""" run_proc = self._start_process(self.list_cmd) out, err = run_proc.communicate() if run_proc.returncode != 0: sys.stdout.write("\n=========================\n" "Failures during discovery" "\n=========================\n") new_out = io.BytesIO() v2.ByteStreamToStreamResult( io.BytesIO(out), 'stdout').run( results.CatFiles(new_out)) out = new_out.getvalue() if out: sys.stdout.write(out.decode('utf8')) if err: sys.stderr.write(err.decode('utf8')) sys.stdout.write("\n" + "=" * 80 + "\n" "The above traceback was encountered during " "test discovery which imports all the found test" " modules in the specified test_path.\n") exit(100) ids = testlist.parse_enumeration(out) return ids def run_tests(self): """Run the tests defined by the command :return: A list of spawned processes. """ result = [] test_ids = self.test_ids # Handle the single worker case (this is also run recursively per # worker in the parallel case) if self.concurrency == 1 and (test_ids is None or test_ids): run_proc = self._start_process(self.cmd) # Prevent processes stalling if they read from stdin; we could # pass this through in future, but there is no point doing that # until we have a working can-run-debugger-inline story. run_proc.stdin.close() return [run_proc] # If there is a worker path, use that to get worker groups elif self.worker_path: test_id_groups = scheduler.generate_worker_partitions( test_ids, self.worker_path, self.repository, self._group_callback, self.randomize) # If we have multiple workers partition the tests and recursively # create single worker TestProcessorFixtures for each worker else: test_id_groups = scheduler.partition_tests(test_ids, self.concurrency, self.repository, self._group_callback) for test_ids in test_id_groups: if not test_ids: # No tests in this partition continue fixture = self.useFixture( TestProcessorFixture(test_ids, self.template, self.listopt, self.idoption, self.repository, parallel=False)) result.extend(fixture.run_tests()) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/testlist.py0000644000175000017500000000352300000000000022763 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handling of lists of tests - common code to --load-list etc.""" import io from extras import try_import bytestream_to_streamresult = try_import('subunit.ByteStreamToStreamResult') stream_result = try_import('testtools.testresult.doubles.StreamResult') def write_list(stream, test_ids): """Write test_ids out to stream. :param stream: A file-like object. :param test_ids: An iterable of test ids. """ # May need utf8 explicitly? stream.write(bytes(( '\n'.join(list(test_ids) + [''])).encode('utf8'))) def parse_list(list_bytes): """Parse list_bytes into a list of test ids.""" return _v1(list_bytes) def parse_enumeration(enumeration_bytes): """Parse enumeration_bytes into a list of test_ids.""" # If subunit v2 is available, use it. 
if bytestream_to_streamresult is not None: return _v2(enumeration_bytes) else: return _v1(enumeration_bytes) def _v1(list_bytes): return [id.strip() for id in list_bytes.decode('utf8').split( str('\n')) if id.strip()] def _v2(list_bytes): parser = bytestream_to_streamresult(io.BytesIO(list_bytes), non_subunit_name='stdout') result = stream_result() parser.run(result) return [event[1] for event in result._events if event[2] == 'exists'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5754259 stestr-3.0.0/stestr/tests/0000755000175000017500000000000000000000000021675 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1481236334.0 stestr-3.0.0/stestr/tests/__init__.py0000644000175000017500000000000000000000000023774 0ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1573747621.0 stestr-3.0.0/stestr/tests/base.py0000644000175000017500000000207600000000000023166 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import testtools class TestCase(testtools.TestCase): def setUp(self): super(TestCase, self).setUp() stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, level=None)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5754259 stestr-3.0.0/stestr/tests/files/0000755000175000017500000000000000000000000022777 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1482362144.0 stestr-3.0.0/stestr/tests/files/__init__.py0000644000175000017500000000000000000000000025076 0ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1518910744.0 stestr-3.0.0/stestr/tests/files/bisect-fail-serial-tests0000644000175000017500000000144000000000000027520 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
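# NOTE: This fixture intentionally leaks state between tests through the
# module-level FOO global below: test_A sets FOO to True, so test_B fails
# only when it runs after test_A in the same process. As the file name
# suggests, this gives the isolation-bisection tests a deterministic
# ordering-dependent failure to track down.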
import testtools FOO = None class TestFakeClass(testtools.TestCase): def test_A(self): global FOO FOO = True self.assertTrue(FOO) def test_B(self): global FOO print(FOO) self.assertFalse(FOO) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/tests/files/failing-tests0000644000175000017500000000167500000000000025504 0ustar00computertrekercomputertreker00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools class FakeTestClass(testtools.TestCase): def test_pass(self): self.assertTrue(False) def test_pass_list(self): test_list = ['test', 'a', 'b'] self.assertIn('fail', test_list) def test_unexpected_pass(self): self.expectFailure("we are sad", self.assertEqual, 1, 1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/tests/files/passing-tests0000644000175000017500000000166200000000000025533 0ustar00computertrekercomputertreker00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
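# NOTE: Companion fixture to failing-tests above: every test in this file
# succeeds, including test_xfail, which uses expectFailure and therefore
# reports as an expected failure rather than an error.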
import testtools class FakeTestClass(testtools.TestCase): def test_pass(self): self.assertTrue(True) def test_pass_list(self): test_list = ['test', 'a', 'b'] self.assertIn('test', test_list) def test_xfail(self): self.expectFailure("we are sad", self.assertEqual, 1, 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1482362144.0 stestr-3.0.0/stestr/tests/files/setup.cfg0000644000175000017500000000114200000000000024616 0ustar00computertrekercomputertreker00000000000000[metadata] name = tempest_unit_tests version = 1 summary = Fake Project for testing wrapper scripts author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ classifier = Intended Audience :: Information Technology Intended Audience :: System Administrators Intended Audience :: Developers License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [global] setup-hooks = pbr.hooks.setup_hook ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/tests/files/stestr.yaml0000644000175000017500000000000000000000000025175 0ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1482442131.0 stestr-3.0.0/stestr/tests/files/testr-conf0000644000175000017500000000006400000000000025006 0ustar00computertrekercomputertreker00000000000000[DEFAULT] test_path=./tests group_regex=([^\.]*\.)* ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5754259 stestr-3.0.0/stestr/tests/repository/0000755000175000017500000000000000000000000024114 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1482539873.0 stestr-3.0.0/stestr/tests/repository/__init__.py0000644000175000017500000000000000000000000026213 0ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1582122864.0 stestr-3.0.0/stestr/tests/repository/test_file.py0000644000175000017500000001532000000000000026445 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
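# A condensed view of the repository lifecycle exercised by the tests in
# this module (an illustrative sketch, assuming an empty working directory):
#
#     from stestr.repository import file
#
#     repo = file.RepositoryFactory().initialise('.')  # creates ./.stestr
#     inserter = repo.get_inserter()
#     inserter.startTestRun()
#     inserter.stopTestRun()
#     inserter.get_id()  # -> 0 for the first run stored in the repository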
"""Tests for the file repository implementation.""" import os.path import shutil import tempfile import fixtures import testtools from testtools import matchers from stestr.repository import file from stestr.tests import base class FileRepositoryFixture(fixtures.Fixture): def __init__(self, path=None, initialise=True): super(FileRepositoryFixture, self).__init__() self.path = path self.initialise = initialise def setUp(self): super(FileRepositoryFixture, self).setUp() if self.path and os.path.isdir(self.path): self.tempdir = self.path else: self.tempdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tempdir) if self.initialise: self.repo = file.RepositoryFactory().initialise(self.tempdir) class HomeDirTempDir(fixtures.Fixture): """Creates a temporary directory in ~.""" def setUp(self): super(HomeDirTempDir, self).setUp() home_dir = os.path.expanduser('~') self.temp_dir = tempfile.mkdtemp(dir=home_dir) self.addCleanup(shutil.rmtree, self.temp_dir) self.short_path = os.path.join('~', os.path.basename(self.temp_dir)) class TestFileRepository(base.TestCase): def setUp(self): super(TestFileRepository, self).setUp() self.tempdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tempdir) def test_initialise(self): self.useFixture(FileRepositoryFixture(path=self.tempdir)) base = os.path.join(self.tempdir, '.stestr') self.assertTrue(os.path.isdir(base)) self.assertTrue(os.path.isfile(os.path.join(base, 'format'))) with open(os.path.join(base, 'format'), 'rt') as stream: contents = stream.read() self.assertEqual("1\n", contents) with open(os.path.join(base, 'next-stream'), 'rt') as stream: contents = stream.read() self.assertEqual("0\n", contents) def test_initialise_empty_dir(self): self.useFixture(FileRepositoryFixture(path=self.tempdir, initialise=False)) base = os.path.join(self.tempdir, '.stestr') os.mkdir(base) self.assertFalse(os.path.isfile(os.path.join(base, 'format'))) self.repo = file.RepositoryFactory().initialise(self.tempdir) self.assertTrue(os.path.isdir(base)) self.assertTrue(os.path.isfile(os.path.join(base, 'format'))) with open(os.path.join(base, 'format'), 'rt') as stream: contents = stream.read() self.assertEqual("1\n", contents) with open(os.path.join(base, 'next-stream'), 'rt') as stream: contents = stream.read() self.assertEqual("0\n", contents) def test_initialise_non_empty_dir(self): self.useFixture(FileRepositoryFixture(path=self.tempdir, initialise=False)) base = os.path.join(self.tempdir, '.stestr') os.mkdir(base) with open(os.path.join(base, 'foo'), 'wt') as stream: stream.write('1\n') factory = file.RepositoryFactory() self.assertRaises(OSError, factory.initialise, self.tempdir) # Skip if windows since ~ in a path doesn't work there @testtools.skipIf(os.name == 'nt', "Windows doesn't support '~' expand") def test_initialise_expands_user_directory(self): short_path = self.useFixture(HomeDirTempDir()).short_path repo = file.RepositoryFactory().initialise(short_path) self.assertTrue(os.path.exists(repo.base)) def test_inserter_output_path(self): repo = self.useFixture(FileRepositoryFixture()).repo inserter = repo.get_inserter() inserter.startTestRun() inserter.stopTestRun() self.assertTrue(os.path.exists(os.path.join(repo.base, '0'))) def test_inserting_creates_id(self): # When inserting a stream, an id is returned from stopTestRun. 
repo = self.useFixture(FileRepositoryFixture()).repo result = repo.get_inserter() result.startTestRun() result.stopTestRun() self.assertEqual(0, result.get_id()) # Skip if windows since ~ in a path doesn't work there @testtools.skipIf(os.name == 'nt', "Windows doesn't support '~' expand") def test_open_expands_user_directory(self): short_path = self.useFixture(HomeDirTempDir()).short_path repo1 = file.RepositoryFactory().initialise(short_path) repo2 = file.RepositoryFactory().open(short_path) self.assertEqual(repo1.base, repo2.base) def test_next_stream_corruption_error(self): repo = self.useFixture(FileRepositoryFixture()).repo open(os.path.join(repo.base, 'next-stream'), 'wb').close() self.assertThat(repo.count, matchers.Raises( matchers.MatchesException( ValueError("Corrupt next-stream file: ''")))) # Skip if windows since chmod doesn't work there @testtools.skipIf(os.name == 'nt', "Windows doesn't support chmod") def test_get_test_run_unexpected_ioerror_errno(self): repo = self.useFixture(FileRepositoryFixture()).repo inserter = repo.get_inserter() inserter.startTestRun() inserter.stopTestRun() self.assertTrue(os.path.isfile(os.path.join(repo.base, '0'))) os.chmod(os.path.join(repo.base, '0'), 0000) self.assertRaises(IOError, repo.get_test_run, '0') def test_get_metadata(self): repo = self.useFixture(FileRepositoryFixture()).repo result = repo.get_inserter(metadata='fun') result.startTestRun() result.stopTestRun() run = repo.get_test_run(result.get_id()) self.assertEqual(b'fun', run.get_metadata()) def test_find_metadata(self): repo = self.useFixture(FileRepositoryFixture()).repo result = repo.get_inserter(metadata='fun') result.startTestRun() result.stopTestRun() result_bad = repo.get_inserter(metadata='not_fun') result_bad.startTestRun() result_bad.stopTestRun() run_ids = repo.find_metadata(b'fun') run_ids_int = [int(x) for x in run_ids] self.assertIn(result.get_id(), run_ids_int) self.assertNotIn(result_bad.get_id(), run_ids_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/tests/repository/test_sql.py0000644000175000017500000000725100000000000026331 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
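# The sql backend mirrors the file backend's API but is addressed by a
# database URL (an illustrative sketch; the URL below is hypothetical, and
# the backend warns at runtime that it is still experimental):
#
#     from stestr.repository import sql
#
#     repo = sql.RepositoryFactory().initialise('sqlite:///stestr.sqlite')
#     result = repo.get_inserter(metadata='fun')
#     result.startTestRun()
#     result.stopTestRun()
#     repo.get_test_run(result.get_id()).get_metadata()  # -> 'fun'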
"""Tests for the sql repository implementation.""" import os import os.path import tempfile import uuid import fixtures import testtools from stestr.repository import sql from stestr.tests import base class SqlRepositoryFixture(fixtures.Fixture): def __init__(self, url=None): super(SqlRepositoryFixture, self).__init__() self.url = url def setUp(self): super(SqlRepositoryFixture, self).setUp() self.repo = sql.RepositoryFactory().initialise(self.url) class TestSqlRepository(base.TestCase): def setUp(self): super(TestSqlRepository, self).setUp() # NOTE(mtreinish): Windows likes to fail if the file is already open # when we access it later, so lets explicitly close it before we move # forward _close_me, self.tempfile = tempfile.mkstemp(suffix='.sqlite') os.close(_close_me) self.addCleanup(os.remove, self.tempfile) self.url = 'sqlite:///' + self.tempfile def test_initialise(self): self.useFixture(SqlRepositoryFixture(url=self.url)) def test_get_failing(self): repo = self.useFixture(SqlRepositoryFixture(url=self.url)).repo # NOTE: "No tests in repository" self.assertRaises(KeyError, repo.get_failing) inserter = repo.get_inserter() inserter.startTestRun() inserter.stopTestRun() self.assertIsInstance(repo.get_failing(), sql._Subunit2SqlRun) def test_inserter_output_path(self): repo = self.useFixture(SqlRepositoryFixture(url=self.url)).repo inserter = repo.get_inserter() inserter.startTestRun() inserter.stopTestRun() run_id = inserter.get_id() run_uuid = uuid.UUID(run_id) self.assertEqual(uuid.UUID(repo.latest_id()), run_uuid) def test_run_get_subunit_stream(self): repo = self.useFixture(SqlRepositoryFixture(url=self.url)).repo inserter = repo.get_inserter() inserter.startTestRun() inserter.stopTestRun() run_id = inserter.get_id() run = repo.get_test_run(run_id) stream = run.get_subunit_stream() self.assertIsNotNone(stream) self.assertTrue(stream.readable()) self.assertEqual([], stream.readlines()) @testtools.skipIf(os.name == 'nt', 'tempfile fails on appveyor') def test_get_metadata(self): repo = self.useFixture(SqlRepositoryFixture(url=self.url)).repo result = repo.get_inserter(metadata='fun') result.startTestRun() result.stopTestRun() run = repo.get_test_run(result.get_id()) self.assertEqual('fun', run.get_metadata()) @testtools.skipIf(os.name == 'nt', 'tempfile fails on appveyor') def test_find_metadata(self): repo = self.useFixture(SqlRepositoryFixture(url=self.url)).repo result = repo.get_inserter(metadata='fun') result.startTestRun() result.stopTestRun() result_bad = repo.get_inserter(metadata='not_fun') result_bad.startTestRun() result_bad.stopTestRun() run_ids = repo.find_metadata('fun') self.assertIn(result.get_id(), run_ids) self.assertNotIn(result_bad.get_id(), run_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/stestr/tests/repository/test_util.py0000644000175000017500000000453200000000000026506 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import shutil import tempfile from unittest import mock from stestr.repository import util from stestr.tests import base class TestUtil(base.TestCase): def setUp(self): super(TestUtil, self).setUp() self.temp_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.temp_dir) cwd = os.getcwd() os.chdir(self.temp_dir) self.temp_dir = os.getcwd() self.addCleanup(os.chdir, cwd) def test_get_default_url_sql(self): repo_url = util._get_default_repo_url('sql') self.assertEqual('sqlite:///' + os.path.join(self.temp_dir, '.stestr.sqlite'), repo_url) def test_get_default_url_file(self): repo_url = util._get_default_repo_url('file') self.assertEqual(self.temp_dir, repo_url) def test_get_default_url_invalid_type(self): self.assertRaises(TypeError, util._get_default_repo_url, 'invalid_type') @mock.patch('importlib.import_module', side_effect=ImportError) def test_sql_get_repo_init_no_deps(self, import_mock): self.assertRaises(SystemExit, util.get_repo_initialise, 'sql') @mock.patch('importlib.import_module', side_effect=ImportError) def test_non_sql_get_repo_init_no_deps_import_error(self, import_mock): self.assertRaises(ImportError, util.get_repo_initialise, 'file') @mock.patch('importlib.import_module', side_effect=ImportError) def test_sql_get_repo_open_no_deps(self, import_mock): self.assertRaises(SystemExit, util.get_repo_open, 'sql') @mock.patch('importlib.import_module', side_effect=ImportError) def test_non_sql_get_repo_open_no_deps_import_error(self, import_mock): self.assertRaises(ImportError, util.get_repo_open, 'file') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5754259 stestr-3.0.0/stestr/tests/sample_streams/0000755000175000017500000000000000000000000024714 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1501428487.0 stestr-3.0.0/stestr/tests/sample_streams/all_skips.subunit0000644000175000017500000000462500000000000030317 0ustar00computertrekercomputertreker00000000000000+@\VKۧX@IsetUpClass (tempest.api.data_processing.test_data_sources.DataSourceTest)1`+p@VKۧX@IsetUpClass (tempest.api.data_processing.test_data_sources.DataSourceTest)text/plain;charset=utf8reasonSahara support is requiredkγ+@fVKۧX@IsetUpClass (tempest.api.data_processing.test_data_sources.DataSourceTest)worker-1++@LVKۮ:setUpClass (tempest.api.data_processing.test_jobs.JobTest)D-T+p@VKۮ:setUpClass (tempest.api.data_processing.test_jobs.JobTest)text/plain;charset=utf8reasonSahara support is required[lK+@VVKۮ:setUpClass (tempest.api.data_processing.test_jobs.JobTest)worker-1ϛȳ+@fVKڪ@SsetUpClass (tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest)+p@VKڪ@SsetUpClass (tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest)text/plain;charset=utf8reasonSahara support is required=w޳+@pVKڪ@SsetUpClass (tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest)worker-38!+@kVK+@XsetUpClass (tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest)ԁsh+p@VK+@XsetUpClass (tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest)text/plain;charset=utf8reasonSahara support is requiredV+@uVK+@XsetUpClass (tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest)worker-2>+@kVK@XsetUpClass (tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest)sZU+p@VK@XsetUpClass 
[binary subunit stream data omitted: tail of the preceding sample stream (skipped tempest data_processing tests, reason "Sahara support is required")]
stestr-3.0.0/stestr/tests/sample_streams/failure.subunit
[binary subunit stream data omitted: recorded stestr self-test run containing failures; embedded stdout/stderr attachments include alembic migration logs and "SQL repository type is still experimental" warnings]
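These sample files are raw subunit v2 byte streams, which is why they do not render as text. A minimal sketch of reading one back, using the same subunit/testtools APIs this test suite uses elsewhere (the file path is the repository-relative one above; assumes it is run from a source checkout):

    import io

    import subunit
    import testtools

    # Parse a recorded stream and collect one dict per test; each dict
    # carries keys such as 'id', 'status', and 'timestamps'.
    with open('stestr/tests/sample_streams/failure.subunit', 'rb') as f:
        raw = io.BytesIO(f.read())

    tests = []
    case = subunit.ByteStreamToStreamResult(raw)
    result = testtools.StreamToDict(tests.append)
    result.startTestRun()
    try:
        case.run(result)
    finally:
        result.stopTestRun()

    print(sorted({t['status'] for t in tests}))  # e.g. ['fail', 'success']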
stestr-3.0.0/stestr/tests/sample_streams/successful.subunit
[binary subunit stream data omitted: recorded passing test run]
stestr-3.0.0/stestr/tests/test_bisect_return_codes.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import shutil
import subprocess
import tempfile

from stestr.tests import base


class TestBisectReturnCodes(base.TestCase):
    def setUp(self):
        super(TestBisectReturnCodes, self).setUp()
        # Setup test dirs
        self.directory = tempfile.mkdtemp(prefix='stestr-unit')
        self.addCleanup(shutil.rmtree, self.directory)
        self.test_dir = os.path.join(self.directory, 'tests')
        os.mkdir(self.test_dir)
        # Setup test files
        self.testr_conf_file = os.path.join(self.directory, '.stestr.conf')
        self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
        self.init_file = os.path.join(self.test_dir, '__init__.py')
        self.setup_py = os.path.join(self.directory, 'setup.py')
        self.user_config = os.path.join(self.directory, 'stestr.yaml')
        shutil.copy('stestr/tests/files/testr-conf', self.testr_conf_file)
        shutil.copy('setup.py', self.setup_py)
        shutil.copy('stestr/tests/files/setup.cfg', self.setup_cfg_file)
        shutil.copy('stestr/tests/files/__init__.py', self.init_file)
        shutil.copy('stestr/tests/files/stestr.yaml', self.user_config)
        # Move the test code into place
        self.serial_fail_file = os.path.join(self.test_dir,
                                             'test_serial_fails.py')
        shutil.copy('stestr/tests/files/bisect-fail-serial-tests',
                    self.serial_fail_file)
        # Change directory, run wrapper and check result
        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
        os.chdir(self.directory)
        subprocess.call('stestr init', shell=True)

    def test_bisect_serial_fail_detected(self):
        p = subprocess.Popen(
            "stestr run --serial", shell=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        self.assertEqual(1, p.returncode,
                         'stestr run returned an unexpected return code\n'
                         'Stdout: %s\nStderr: %s' % (out, err))
        p_analyze = subprocess.Popen(
            "stestr --user-config stestr.yaml run --analyze-isolation",
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p_analyze.communicate()
        out = out.decode('utf-8')
        # For debugging potential failures
        lines = str(out.rstrip()).splitlines()
        self.assertEqual(3, p_analyze.returncode,
                         'Analyze isolation returned an unexpected return '
                         'code\nStdout: %s\nStderr: %s' % (out, err))
        last_line = ('tests.test_serial_fails.TestFakeClass.test_B '
                     'tests.test_serial_fails.TestFakeClass.test_A')
        self.assertEqual(last_line, lines[-1])

stestr-3.0.0/stestr/tests/test_bisect_tests.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
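#
# The fake runs below hand-build subunit byte streams. A minimal sketch of
# that pattern (illustrative only; it mirrors FakeFailedTestRunWithTags
# further down and is not itself part of this module):
#
#     buf = io.BytesIO()
#     writer = subunit.StreamResultToBytes(buf)
#     writer.status(test_id='test_a', test_status='inprogress',
#                   test_tags=['worker-0'])
#     writer.status(test_id='test_a', test_status='success',
#                   test_tags=['worker-0'])
#     content = buf.getvalue()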
import io import operator from unittest import mock import subunit import testtools from stestr import bisect_tests from stestr.repository import abstract from stestr.tests import base class FakeTestRun(abstract.AbstractTestRun): def get_test(self): case = subunit.ByteStreamToStreamResult(io.BytesIO(self._content)) def wrap_result(result): # Wrap in a router to mask out startTestRun/stopTestRun from the # ExtendedToStreamDecorator. result = testtools.StreamResultRouter( result, do_start_stop_run=False) # Wrap that in ExtendedToStreamDecorator to convert v1 calls to # StreamResult. return testtools.ExtendedToStreamDecorator(result) return testtools.DecorateTestCaseResult( case, wrap_result, operator.methodcaller('startTestRun'), operator.methodcaller('stopTestRun')) def get_id(self): return self.id class FakeFailedTestRunNoTags(FakeTestRun): def __init__(self, failure=True): # Generate a subunit stream self.id = 2 stream_buf = io.BytesIO() stream = subunit.StreamResultToBytes(stream_buf) stream.status(test_id='test_a', test_status='inprogress') stream.status(test_id='test_a', test_status='success') stream.status(test_id='test_b', test_status='inprogress') stream.status(test_id='test_b', test_status='success') stream.status(test_id='test_c', test_status='inprogress') stream.status(test_id='test_c', test_status='fail') stream_buf.seek(0) self._content = stream_buf.getvalue() class FakeFailingWithTags(FakeTestRun): def __init__(self, failure=True): # Generate a subunit stream self.id = None stream_buf = io.BytesIO() stream = subunit.StreamResultToBytes(stream_buf) stream.status(test_id='test_c', test_status='inprogress', test_tags=['worker-0']) stream.status(test_id='test_c', test_status='fail', test_tags=['worker-0']) stream_buf.seek(0) self._content = stream_buf.getvalue() class FakeNoFailing(FakeTestRun): def __init__(self, failure=True): # Generate a subunit stream stream_buf = io.BytesIO(bytes(b'')) self._content = stream_buf.getvalue() self.id = None class FakeFailedTestRunWithTags(FakeTestRun): def __init__(self, failure=True): # Generate a subunit stream stream_buf = io.BytesIO() stream = subunit.StreamResultToBytes(stream_buf) stream.status(test_id='test_a', test_status='inprogress', test_tags=['worker-0']) stream.status(test_id='test_a', test_status='success', test_tags=['worker-0']) stream.status(test_id='test_b', test_status='inprogress', test_tags=['worker-1']) stream.status(test_id='test_b', test_status='success', test_tags=['worker-1']) stream.status(test_id='test_c', test_status='inprogress', test_tags=['worker-0']) stream.status(test_id='test_c', test_status='fail', test_tags=['worker-0']) stream_buf.seek(0) self._content = stream_buf.getvalue() self.id = 2 class FakeFailedMultiWorkerTestRunWithTags(FakeTestRun): def __init__(self, failure=True): # Generate a subunit stream stream_buf = io.BytesIO() stream = subunit.StreamResultToBytes(stream_buf) stream.status(test_id='test_a', test_status='inprogress', test_tags=['worker-0']) stream.status(test_id='test_a', test_status='success', test_tags=['worker-0']) stream.status(test_id='test_b', test_status='inprogress', test_tags=['worker-1']) stream.status(test_id='test_b', test_status='fail', test_tags=['worker-1']) stream.status(test_id='test_c', test_status='inprogress', test_tags=['worker-0']) stream.status(test_id='test_c', test_status='fail', test_tags=['worker-0']) stream_buf.seek(0) self._content = stream_buf.getvalue() self.id = 2 class TestBisectTests(base.TestCase): def setUp(self): super(TestBisectTests, self).setUp() 
self.repo_mock = mock.create_autospec( 'stestr.repository.file.Repository') self.conf_mock = mock.create_autospec('stestr.config_file.TestrConf') self.run_func_mock = mock.MagicMock() self.latest_run_mock = mock.MagicMock() def test_bisect_no_failures_provided(self): bisector = bisect_tests.IsolationAnalyzer( self.latest_run_mock, self.conf_mock, self.run_func_mock, self.repo_mock) self.assertRaises(ValueError, bisector.bisect_tests, []) def test_prior_tests_invlaid_test_id(self): bisector = bisect_tests.IsolationAnalyzer( self.latest_run_mock, self.conf_mock, self.run_func_mock, self.repo_mock) run = FakeFailedTestRunNoTags() self.assertRaises(KeyError, bisector._prior_tests, run, 'bad_test_id') def test_get_prior_tests_no_tags(self): bisector = bisect_tests.IsolationAnalyzer( self.latest_run_mock, self.conf_mock, self.run_func_mock, self.repo_mock) run = FakeFailedTestRunNoTags() prior_tests = bisector._prior_tests(run, 'test_c') self.assertEqual(['test_a', 'test_b'], prior_tests) def test_get_prior_tests_with_tags(self): bisector = bisect_tests.IsolationAnalyzer( self.latest_run_mock, self.conf_mock, self.run_func_mock, self.repo_mock) run = FakeFailedTestRunWithTags() prior_tests = bisector._prior_tests(run, 'test_c') self.assertEqual(['test_a'], prior_tests) @mock.patch('stestr.output.output_table') def test_bisect_tests_isolated_failure(self, table_mock): run = FakeFailedTestRunWithTags() self.conf_mock.get_run_command = mock.MagicMock() def get_failures(*args, **kwargs): return FakeNoFailing() self.repo_mock.get_failing = get_failures bisector = bisect_tests.IsolationAnalyzer( run, self.conf_mock, self.run_func_mock, self.repo_mock) return_code = bisector.bisect_tests(['test_c']) expected_issue = [('failing test', 'caused by test'), ('test_c', 'unknown - no conflicts')] table_mock.assert_called_once_with(expected_issue) self.assertEqual(3, return_code) @mock.patch('stestr.output.output_table') def test_bisect_tests_not_isolated_failure(self, table_mock): run = FakeFailedTestRunWithTags() self.conf_mock.get_run_command = mock.MagicMock() def get_failures(*args, **kwargs): return FakeFailingWithTags() self.repo_mock.get_failing = get_failures bisector = bisect_tests.IsolationAnalyzer( run, self.conf_mock, self.run_func_mock, self.repo_mock) return_code = bisector.bisect_tests(['test_c']) expected_issue = [('failing test', 'caused by test'), ('test_c', 'test_a')] table_mock.assert_called_once_with(expected_issue) self.assertEqual(3, return_code) @mock.patch('stestr.output.output_table') def test_bisect_tests_not_isolated_multiworker_failures(self, table_mock): run = FakeFailedMultiWorkerTestRunWithTags() self.conf_mock.get_run_command = mock.MagicMock() def get_failures(*args, **kwargs): return FakeFailingWithTags() self.repo_mock.get_failing = get_failures bisector = bisect_tests.IsolationAnalyzer( run, self.conf_mock, self.run_func_mock, self.repo_mock) return_code = bisector.bisect_tests(['test_b', 'test_c']) expected_issue = [('failing test', 'caused by test'), ('test_b', 'unknown - no conflicts'), ('test_c', 'test_a')] table_mock.assert_called_once_with(expected_issue) self.assertEqual(3, return_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/stestr/tests/test_config_file.py0000644000175000017500000001231500000000000025554 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from stestr import config_file from stestr.tests import base @ddt.ddt class TestTestrConf(base.TestCase): @mock.patch.object(config_file.configparser, 'ConfigParser') def setUp(self, mock_ConfigParser): super(TestTestrConf, self).setUp() self._testr_conf = config_file.TestrConf(mock.sentinel.config_file) self._testr_conf.parser = mock.Mock() @mock.patch.object(config_file.util, 'get_repo_open') @mock.patch.object(config_file.test_processor, 'TestProcessorFixture') @mock.patch.object(config_file, 'sys') @mock.patch('os.path.exists', new=lambda x: True) def _check_get_run_command(self, mock_sys, mock_TestProcessorFixture, mock_get_repo_open, platform='win32', group_regex='.*', parallel_class=False, sys_executable='/usr/bin/python', expected_python='/usr/bin/python', expected_group_callback=mock.ANY, environment=None): mock_sys.platform = platform mock_sys.executable = sys_executable if environment is None: environment = {'PYTHON': ''} with mock.patch.dict('os.environ', environment): fixture = \ self._testr_conf.get_run_command(test_path='fake_test_path', top_dir='fake_top_dir', group_regex=group_regex, parallel_class=parallel_class) self.assertEqual(mock_TestProcessorFixture.return_value, fixture) mock_get_repo_open.assert_called_once_with('file', None) command = '"%s" -m stestr.subunit_runner.run discover -t "%s" "%s" ' \ '$LISTOPT $IDOPTION' % (expected_python, 'fake_top_dir', 'fake_test_path') # Ensure TestProcessorFixture is created with defaults except for where # we specfied and with the correct python. 
mock_TestProcessorFixture.assert_called_once_with( None, command, "--list", "--load-list $IDFILE", mock_get_repo_open.return_value, black_regex=None, blacklist_file=None, concurrency=0, group_callback=expected_group_callback, test_filters=None, randomize=False, serial=False, whitelist_file=None, worker_path=None) @mock.patch.object(config_file, 'sys') def _check_get_run_command_exception(self, mock_sys, platform='win32', sys_executable='/usr/bin/python', environment=None): mock_sys.platform = platform mock_sys.executable = sys_executable if environment is None: environment = {'PYTHON': ''} with mock.patch.dict('os.environ', environment): self.assertRaises(RuntimeError, self._testr_conf.get_run_command, test_path='fake_test_path', top_dir='fake_top_dir') def test_get_run_command_linux(self): self._check_get_run_command( platform='linux2', expected_python='/usr/bin/python') def test_get_run_command_emptysysexecutable_noenv(self): self._check_get_run_command_exception( platform='linux2', sys_executable=None) def test_get_run_command_emptysysexecutable_win32(self): self._check_get_run_command_exception( platform='win32', sys_executable=None, environment={'PYTHON': 'python3'}) def test_get_run_command_emptysysexecutable_withenv(self): self._check_get_run_command( platform='linux2', sys_executable=None, expected_python='${PYTHON}', environment={'PYTHON': '/usr/bin/python3'}) def test_get_run_command_win32(self): self._check_get_run_command() def test_get_run_command_parallel_class(self): self._check_get_run_command(parallel_class=True) def test_get_run_command_nogroup_regex_noparallel_class(self): self._testr_conf.parser.has_option.return_value = False self._check_get_run_command(group_regex='', expected_group_callback=None) @ddt.data(('.\\', '.\\\\'), ('a\\b\\', 'a\\b\\\\'), ('a\\b', 'a\\b')) @ddt.unpack @mock.patch('os.sep', new='\\') def test_sanitize_dir_win32(self, path, expected): sanitized = self._testr_conf._sanitize_path(path) self.assertEqual(expected, sanitized) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1570913994.0 stestr-3.0.0/stestr/tests/test_load.py0000644000175000017500000000161400000000000024227 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io from stestr.commands import load from stestr.tests import base class TestLoadCommand(base.TestCase): def test_empty_with_pretty_out(self): stream = io.BytesIO() output = io.BytesIO() res = load.load(in_streams=[('subunit', stream)], pretty_out=True, stdout=output) self.assertEqual(1, res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576262237.0 stestr-3.0.0/stestr/tests/test_output.py0000644000175000017500000000507400000000000024654 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io from stestr import output from stestr.tests import base class TestOutput(base.TestCase): def test_output_table(self): table = [['Header 1', 'Header 2', 'Header 999'], [1, '0000000002', 'foo'], ['bar', 6, 'This is a content.']] expected = \ "Header 1 Header 2 Header 999\n" \ "-------- ---------- ------------------\n" \ "1 0000000002 foo\n" \ "bar 6 This is a content.\n" with io.StringIO() as f: output.output_table(table, f) actual = f.getvalue() self.assertEqual(expected, actual) def test_output_tests(self): class Test(object): def __init__(self, i): self.i = i def id(self): return self.i tests = [Test('a'), Test('b'), Test('foo')] expected = "a\nb\nfoo\n" with io.StringIO() as f: output.output_tests(tests, f) actual = f.getvalue() self.assertEqual(expected, actual) def test_output_summary_passed(self): expected = 'Ran 10 (+5) tests in 1.100s (+0.100s)\n' \ 'PASSED (id=99 (+1), id=100 (+2))\n' with io.StringIO() as f: output.output_summary( successful=True, tests=10, tests_delta=5, time=1.1, time_delta=0.1, values=[('id', 99, 1), ('id', '100', 2)], output=f) actual = f.getvalue() self.assertEqual(expected, actual) def test_output_summary_failed(self): expected = 'Ran 10 (+5) tests in 1.100s (+0.100s)\n' \ 'FAILED (id=99 (+1), id=100 (+2))\n' with io.StringIO() as f: output.output_summary( successful=False, tests=10, tests_delta=5, time=1.1, time_delta=0.1, values=[('id', 99, 1), ('id', '100', 2)], output=f) actual = f.getvalue() self.assertEqual(expected, actual) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/tests/test_return_codes.py0000644000175000017500000004167400000000000026016 0ustar00computertrekercomputertreker00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
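#
# These tests drive the real stestr CLI in a subprocess and assert on its
# exit code. A minimal standalone sketch of the pattern used throughout
# (illustrative only, not part of this module):
#
#     p = subprocess.Popen('stestr run passing', shell=True,
#                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#     out, err = p.communicate()
#     assert p.returncode == 0, (out, err)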
import functools import io import os import re import shutil import subprocess import tempfile import fixtures import subunit as subunit_lib import testtools from stestr.commands import list as list_cmd from stestr.commands import run from stestr.tests import base class TestReturnCodes(base.TestCase): def setUp(self): super(TestReturnCodes, self).setUp() # Setup test dirs self.directory = tempfile.mkdtemp(prefix='stestr-unit') self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True) self.test_dir = os.path.join(self.directory, 'tests') os.mkdir(self.test_dir) # Setup Test files self.testr_conf_file = os.path.join(self.directory, '.stestr.conf') self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg') self.passing_file = os.path.join(self.test_dir, 'test_passing.py') self.failing_file = os.path.join(self.test_dir, 'test_failing.py') self.init_file = os.path.join(self.test_dir, '__init__.py') self.setup_py = os.path.join(self.directory, 'setup.py') self.user_config = os.path.join(self.directory, 'stestr.yaml') shutil.copy('stestr/tests/files/testr-conf', self.testr_conf_file) shutil.copy('stestr/tests/files/passing-tests', self.passing_file) shutil.copy('stestr/tests/files/failing-tests', self.failing_file) shutil.copy('setup.py', self.setup_py) shutil.copy('stestr/tests/files/setup.cfg', self.setup_cfg_file) shutil.copy('stestr/tests/files/__init__.py', self.init_file) shutil.copy('stestr/tests/files/stestr.yaml', self.user_config) self.stdout = io.StringIO() self.stderr = io.StringIO() # Change directory, run wrapper and check result self.addCleanup(os.chdir, os.path.abspath(os.curdir)) os.chdir(self.directory) subprocess.call('stestr init', shell=True) def _check_subunit(self, output_stream): stream = subunit_lib.ByteStreamToStreamResult(output_stream) starts = testtools.StreamResult() summary = testtools.StreamSummary() tests = [] def _add_dict(test): tests.append(test) outcomes = testtools.StreamToDict(functools.partial(_add_dict)) result = testtools.CopyStreamResult([starts, outcomes, summary]) result.startTestRun() try: stream.run(result) finally: result.stopTestRun() self.assertThat(len(tests), testtools.matchers.GreaterThan(0)) def assertRunExit(self, cmd, expected, subunit=False, stdin=None): if stdin: p = subprocess.Popen( "%s" % cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate(stdin) else: p = subprocess.Popen( "%s" % cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if not subunit: self.assertEqual( p.returncode, expected, "Stdout: {}; Stderr: {}".format(out, err)) return (out, err) else: self.assertEqual(p.returncode, expected, "Expected return code: %s doesn't match actual " "return code of: %s" % (expected, p.returncode)) output_stream = io.BytesIO(out) stream = subunit_lib.ByteStreamToStreamResult(output_stream) starts = testtools.StreamResult() summary = testtools.StreamSummary() tests = [] def _add_dict(test): tests.append(test) outcomes = testtools.StreamToDict(functools.partial(_add_dict)) result = testtools.CopyStreamResult([starts, outcomes, summary]) result.startTestRun() try: stream.run(result) finally: result.stopTestRun() self.assertThat(len(tests), testtools.matchers.GreaterThan(0)) return (out, err) def test_parallel_passing(self): self.assertRunExit('stestr run passing', 0) def test_parallel_passing_bad_regex(self): self.assertRunExit('stestr run bad.regex.foobar', 1) def test_parallel_fails(self): self.assertRunExit('stestr run', 1) def 
test_parallel_passing_xfail(self): self.assertRunExit('stestr run xfail', 0) def test_parallel_fails_unxsuccess(self): self.assertRunExit('stestr run unexpected', 1) def test_parallel_blacklist(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) with os.fdopen(fd, 'w') as blacklist: blacklist.write('fail') cmd = 'stestr run --blacklist-file %s' % path self.assertRunExit(cmd, 0) def test_parallel_whitelist(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) with os.fdopen(fd, 'w') as whitelist: whitelist.write('passing') cmd = 'stestr run --whitelist-file %s' % path self.assertRunExit(cmd, 0) def test_serial_passing(self): self.assertRunExit('stestr run --serial passing', 0) def test_serial_fails(self): self.assertRunExit('stestr run --serial', 1) def test_serial_blacklist(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) with os.fdopen(fd, 'w') as blacklist: blacklist.write('fail') cmd = 'stestr run --serial --blacklist-file %s' % path self.assertRunExit(cmd, 0) def test_serial_whitelist(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) with os.fdopen(fd, 'w') as whitelist: whitelist.write('passing') cmd = 'stestr run --serial --whitelist-file %s' % path self.assertRunExit(cmd, 0) def test_serial_subunit_passing(self): self.assertRunExit('stestr --user-config stestr.yaml run --subunit ' '--serial passing', 0, subunit=True) def test_serial_subunit_failing(self): self.assertRunExit('stestr --user-config stestr.yaml run --subunit ' '--serial failing', 0, subunit=True) def test_parallel_subunit_passing(self): self.assertRunExit('stestr --user-config stestr.yaml run --subunit ' 'passing', 0, subunit=True) def test_parallel_subunit_failing(self): self.assertRunExit('stestr --user-config stestr.yaml run --subunit ' 'failing', 0, subunit=True) def test_slowest_passing(self): self.assertRunExit('stestr run --slowest passing', 0) def test_slowest_failing(self): self.assertRunExit('stestr run --slowest failing', 1) def test_until_failure_fails(self): self.assertRunExit('stestr run --until-failure', 1) def test_until_failure_with_subunit_fails(self): self.assertRunExit('stestr --user-config stestr.yaml run ' '--until-failure --subunit', 1, subunit=True) def test_with_parallel_class(self): # NOTE(masayukig): Ideally, it's better to figure out the # difference between with --parallel-class and without # --parallel-class. However, it's difficult to make such a # test from a command line based test. self.assertRunExit('stestr --parallel-class run passing', 0) def test_no_repo_dir(self): stestr_repo_dir = os.path.join(self.directory, '.stestr') shutil.rmtree(stestr_repo_dir, ignore_errors=True) # We can use stestr run even if there's no repo directory. self.assertRunExit('stestr run passing', 0) def test_empty_repo_dir(self): stestr_repo_dir = os.path.join(self.directory, '.stestr') shutil.rmtree(stestr_repo_dir, ignore_errors=True) os.mkdir(stestr_repo_dir) # We can initialize an empty repo directory. self.assertRunExit('stestr run passing', 0) def test_non_empty_repo_dir(self): stestr_repo_dir = os.path.join(self.directory, '.stestr') shutil.rmtree(stestr_repo_dir, ignore_errors=True) os.mkdir(stestr_repo_dir) with open(os.path.join(stestr_repo_dir, 'foo'), 'wt') as stream: stream.write('1\n') # We can't initialize a non-empty repo directory. 
self.assertRunExit('stestr run passing', 1) def test_list(self): self.assertRunExit('stestr list', 0) def _get_cmd_stdout(self, cmd): p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) out = p.communicate() self.assertEqual(0, p.returncode) return out def test_combine_results(self): self.assertRunExit('stestr run passing', 0) stdout = self._get_cmd_stdout( 'stestr last --no-subunit-trace') stdout = str(stdout[0]) test_count_split = stdout.split(' ') test_count = test_count_split[1] test_count = int(test_count) id_regex = re.compile(r'\(id=(.*?)\)') test_id = id_regex.search(stdout).group(0) self.assertRunExit('stestr run --combine passing', 0) combine_stdout = self._get_cmd_stdout( 'stestr last --no-subunit-trace')[0] combine_stdout = str(combine_stdout) combine_test_count_split = combine_stdout.split(' ') combine_test_count = combine_test_count_split[1] combine_test_count = int(combine_test_count) combine_test_id = id_regex.search(combine_stdout).group(0) self.assertEqual(test_id, combine_test_id) # The test results from running the same tests twice with combine # should return a test count 2x as big at the end of the run self.assertEqual(test_count * 2, combine_test_count) def test_load_from_stdin(self): self.assertRunExit('stestr run passing', 0) stream = self._get_cmd_stdout( 'stestr last --subunit')[0] self.assertRunExit('stestr load', 0, stdin=stream) def test_load_force_init(self): self.assertRunExit('stestr run passing', 0) stream = self._get_cmd_stdout( 'stestr last --subunit')[0] # NOTE: --force-init should work here because there is an properly # initialized repository. self.assertRunExit('stestr load --force-init', 0, stdin=stream) def test_load_force_init_invalid(self): self.assertRunExit('stestr run passing', 0) stream = self._get_cmd_stdout( 'stestr last --subunit')[0] os.remove(os.path.join(self.directory, '.stestr', 'format')) # NOTE: --force-init should fail here because there is an invalid # repository. self.assertRunExit('stestr load --force-init', 1, stdin=stream) def test_load_from_stdin_quiet(self): out, err = self.assertRunExit('stestr --user-config stestr.yaml -q ' 'run passing', 0) self.assertEqual(out.decode('utf-8'), '') # FIXME(masayukig): We get some warnings when we run a coverage job. # So, just ignore 'err' here. 
stream = self._get_cmd_stdout('stestr last --subunit')[0] out, err = self.assertRunExit('stestr --user-config stestr.yaml -q ' 'load', 0, stdin=stream) self.assertEqual(out.decode('utf-8'), '') self.assertEqual(err.decode('utf-8'), '') def test_no_subunit_trace_force_subunit_trace(self): out, err = self.assertRunExit( 'stestr run --no-subunit-trace --force-subunit-trace passing', 0) out = str(out) self.assertNotIn('PASSED (id=0)', out) self.assertIn('Totals', out) self.assertIn('Worker Balance', out) self.assertIn('Sum of execute time for each test:', out) def test_parallel_passing_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(0, run.run_command(filters=['passing'], stdout=stdout.stream)) def test_parallel_passing_bad_regex_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(1, run.run_command(filters=['bad.regex.foobar'], stdout=stdout.stream)) def test_parallel_fails_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(1, run.run_command(stdout=stdout.stream)) def test_serial_passing_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(0, run.run_command(filters=['passing'], serial=True, stdout=stdout.stream)) def test_str_concurrency_passing_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(0, run.run_command(filters=['passing'], concurrency='1', stdout=stdout.stream)) def test_str_concurrency_fails_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(1, run.run_command(concurrency='1', stdout=stdout.stream)) def test_serial_fails_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(1, run.run_command(serial=True, stdout=stdout.stream)) def test_serial_subunit_passing_from_func(self): stdout = io.BytesIO() self.assertEqual(0, run.run_command(subunit_out=True, serial=True, filters=['passing'], stdout=stdout)) stdout.seek(0) self._check_subunit(stdout) def test_parallel_subunit_passing_from_func(self): stdout = io.BytesIO() self.assertEqual(0, run.run_command(subunit_out=True, filters=['passing'], stdout=stdout)) stdout.seek(0) self._check_subunit(stdout) def test_until_failure_fails_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(1, run.run_command(until_failure=True, stdout=stdout.stream)) def test_until_failure_with_subunit_fails_from_func(self): stdout = io.BytesIO() self.assertEqual(1, run.run_command(until_failure=True, subunit_out=True, stdout=stdout)) stdout.seek(0) self._check_subunit(stdout) def test_list_from_func(self): stdout = fixtures.StringStream('stdout') self.useFixture(stdout) self.assertEqual(0, list_cmd.list_command(stdout=stdout.stream)) def test_run_no_discover_pytest_path(self): passing_string = 'tests/test_passing.py::FakeTestClass::test_pass_list' out, err = self.assertRunExit('stestr run -n %s' % passing_string, 0) lines = out.decode('utf8').splitlines() self.assertIn(' - Passed: 1', lines) self.assertIn(' - Failed: 0', lines) def test_run_no_discover_pytest_path_failing(self): passing_string = 'tests/test_failing.py::FakeTestClass::test_pass_list' out, err = self.assertRunExit('stestr run -n %s' % passing_string, 1) lines = out.decode('utf8').splitlines() self.assertIn(' - Passed: 0', lines) self.assertIn(' - Failed: 1', lines) def test_run_no_discover_file_path(self): passing_string 
= 'tests/test_passing.py' out, err = self.assertRunExit('stestr run -n %s' % passing_string, 0) lines = out.decode('utf8').splitlines() self.assertIn(' - Passed: 2', lines) self.assertIn(' - Failed: 0', lines) self.assertIn(' - Expected Fail: 1', lines) def test_run_no_discover_file_path_failing(self): passing_string = 'tests/test_failing.py' out, err = self.assertRunExit('stestr run -n %s' % passing_string, 1) lines = out.decode('utf8').splitlines() self.assertIn(' - Passed: 0', lines) self.assertIn(' - Failed: 2', lines) self.assertIn(' - Unexpected Success: 1', lines) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/stestr/tests/test_run.py0000644000175000017500000000321300000000000024111 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io from stestr.commands import run from stestr.tests import base class TestRunCommand(base.TestCase): def test_to_int_positive_int(self): self.assertEqual(29, run._to_int(29)) def test_to_int_positive_int_str(self): self.assertEqual(42, run._to_int('42')) def test_to_int_negative_int(self): self.assertEqual(-2, run._to_int(-2)) def test_to_int_negative_int_str(self): self.assertEqual(-45, run._to_int('-45')) def test_to_int_invalid_str(self): fake_stderr = io.StringIO() out = run._to_int('I am not an int', out=fake_stderr) expected = ( 'Unable to convert "I am not an int" to an integer. ' 'Using 0.\n') self.assertEqual(fake_stderr.getvalue(), expected) self.assertEqual(0, out) def test_to_int_none(self): fake_stderr = io.StringIO() out = run._to_int(None, out=fake_stderr) expected = ( 'Unable to convert "None" to an integer. ' 'Using 0.\n') self.assertEqual(fake_stderr.getvalue(), expected) self.assertEqual(0, out) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/stestr/tests/test_scheduler.py0000644000175000017500000002142200000000000025265 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
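#
# Several tests below exercise scheduler.generate_worker_partitions with a
# mocked yaml.safe_load. Based on that fake data, an equivalent worker
# definition file would look like this (illustrative sketch, not a file
# shipped with the tests):
#
#     - worker:
#         - test_
#     - worker:
#         - a_thing
#       concurrency: 2
#
# Each worker entry lists regexes selecting tests for that worker; an
# optional concurrency key splits the matched tests across that many
# workers.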
import datetime import re from unittest import mock from subunit import iso8601 from stestr.repository import memory from stestr import scheduler from stestr.tests import base class TestScheduler(base.TestCase): def _add_timed_test(self, id, duration, result): start = datetime.datetime.now() start = start.replace(tzinfo=iso8601.UTC) result.status(test_id=id, test_status='inprogress', timestamp=start) timestamp = start + datetime.timedelta(seconds=duration) result.status(test_id=id, test_status='success', timestamp=timestamp) def test_partition_tests(self): repo = memory.RepositoryFactory().initialise('memory:') result = repo.get_inserter() result.startTestRun() self._add_timed_test("slow", 3, result) self._add_timed_test("fast1", 1, result) self._add_timed_test("fast2", 1, result) result.stopTestRun() test_ids = frozenset(['slow', 'fast1', 'fast2', 'unknown1', 'unknown2', 'unknown3', 'unknown4']) partitions = scheduler.partition_tests(test_ids, 2, repo, None) self.assertTrue('slow' in partitions[0]) self.assertFalse('fast1' in partitions[0]) self.assertFalse('fast2' in partitions[0]) self.assertFalse('slow' in partitions[1]) self.assertTrue('fast1' in partitions[1]) self.assertTrue('fast2' in partitions[1]) self.assertEqual(3, len(partitions[0])) self.assertEqual(4, len(partitions[1])) def test_random_partitions(self): repo = memory.RepositoryFactory().initialise('memory:') test_ids = frozenset(['a_test', 'b_test', 'c_test', 'd_test']) random_parts = scheduler.partition_tests(test_ids, 2, repo, None, randomize=True) # NOTE(masayukig): We can't test this randomness. So just checking # what we should get here. self.assertEqual(2, len(random_parts)) self.assertTrue(isinstance(random_parts, list)) self.assertTrue(isinstance(random_parts[0], list)) self.assertTrue(isinstance(random_parts[1], list)) flatten_random_parts = [] for i, j in random_parts: flatten_random_parts.append(i) flatten_random_parts.append(j) for i in test_ids: self.assertIn(i, flatten_random_parts) def test_partition_tests_with_zero_duration(self): repo = memory.RepositoryFactory().initialise('memory:') result = repo.get_inserter() result.startTestRun() self._add_timed_test("zero1", 0, result) self._add_timed_test("zero2", 0, result) result.stopTestRun() # Partitioning by two should generate two one-entry partitions. 
test_ids = frozenset(['zero1', 'zero2']) partitions = scheduler.partition_tests(test_ids, 2, repo, None) self.assertEqual(1, len(partitions[0])) self.assertEqual(1, len(partitions[1])) def test_partition_tests_with_grouping(self): repo = memory.RepositoryFactory().initialise('memory:') result = repo.get_inserter() result.startTestRun() self._add_timed_test("TestCase1.slow", 3, result) self._add_timed_test("TestCase2.fast1", 1, result) self._add_timed_test("TestCase2.fast2", 1, result) result.stopTestRun() test_ids = frozenset(['TestCase1.slow', 'TestCase1.fast', 'TestCase1.fast2', 'TestCase2.fast1', 'TestCase3.test1', 'TestCase3.test2', 'TestCase2.fast2', 'TestCase4.test', 'testdir.testfile.TestCase5.test']) def group_id(test_id, regex=re.compile('TestCase[0-5]')): match = regex.match(test_id) if match: return match.group(0) partitions = scheduler.partition_tests(test_ids, 2, repo, group_id) # Timed groups are deterministic: self.assertTrue('TestCase2.fast1' in partitions[0]) self.assertTrue('TestCase2.fast2' in partitions[0]) self.assertTrue('TestCase1.slow' in partitions[1]) self.assertTrue('TestCase1.fast' in partitions[1]) self.assertTrue('TestCase1.fast2' in partitions[1]) # Untimed groups just need to be in the same partition: if 'TestCase3.test1' in partitions[0]: self.assertTrue('TestCase3.test2' in partitions[0]) if 'TestCase4.test' not in partitions[0]: self.assertTrue('TestCase4.test' in partitions[1]) if 'testdir.testfile.TestCase5.test' not in partitions[0]: self.assertTrue('testdir.testfile.TestCase5.test' in partitions[1]) @mock.patch('builtins.open', mock.mock_open(), create=True) def test_generate_worker_partitions(self): test_ids = ['test_a', 'test_b', 'your_test'] fake_worker_yaml = [ {'worker': ['test_']}, {'worker': ['test']}, ] with mock.patch('yaml.safe_load', return_value=fake_worker_yaml): groups = scheduler.generate_worker_partitions(test_ids, 'fakepath') expected_grouping = [ ['test_a', 'test_b'], ['test_a', 'test_b', 'your_test'], ] self.assertEqual(expected_grouping, groups) @mock.patch('builtins.open', mock.mock_open(), create=True) def test_generate_worker_partitions_group_without_list(self): test_ids = ['test_a', 'test_b', 'your_test'] fake_worker_yaml = [ {'worker': ['test_']}, {'worker': 'test'}, ] with mock.patch('yaml.safe_load', return_value=fake_worker_yaml): self.assertRaises(TypeError, scheduler.generate_worker_partitions, test_ids, 'fakepath') @mock.patch('builtins.open', mock.mock_open(), create=True) def test_generate_worker_partitions_no_worker_tag(self): test_ids = ['test_a', 'test_b', 'your_test'] fake_worker_yaml = [ {'worker-foo': ['test_']}, {'worker': ['test']}, ] with mock.patch('yaml.safe_load', return_value=fake_worker_yaml): self.assertRaises(TypeError, scheduler.generate_worker_partitions, test_ids, 'fakepath') @mock.patch('builtins.open', mock.mock_open(), create=True) def test_generate_worker_partitions_group_without_match(self): test_ids = ['test_a', 'test_b', 'your_test'] fake_worker_yaml = [ {'worker': ['test_']}, {'worker': ['test']}, {'worker': ['foo']} ] with mock.patch('yaml.safe_load', return_value=fake_worker_yaml): groups = scheduler.generate_worker_partitions(test_ids, 'fakepath') expected_grouping = [ ['test_a', 'test_b'], ['test_a', 'test_b', 'your_test'], ] self.assertEqual(expected_grouping, groups) @mock.patch('builtins.open', mock.mock_open(), create=True) def test_generate_worker_partitions_with_count(self): test_ids = ['test_a', 'test_b', 'your_test', 'a_thing1', 'a_thing2'] fake_worker_yaml = [ {'worker': 
['test_']}, {'worker': ['test']}, {'worker': ['a_thing'], 'concurrency': 2}, ] with mock.patch('yaml.safe_load', return_value=fake_worker_yaml): groups = scheduler.generate_worker_partitions(test_ids, 'fakepath') expected_grouping = [ ['test_a', 'test_b'], ['test_a', 'test_b', 'your_test'], ['a_thing1'], ['a_thing2'], ] for worker in expected_grouping: self.assertIn(worker, groups) @mock.patch('builtins.open', mock.mock_open(), create=True) def test_generate_worker_partitions_with_count_1(self): test_ids = ['test_a', 'test_b', 'your_test'] fake_worker_yaml = [ {'worker': ['test_']}, {'worker': ['test'], 'count': 1}, ] with mock.patch('yaml.safe_load', return_value=fake_worker_yaml): groups = scheduler.generate_worker_partitions(test_ids, 'fakepath') expected_grouping = [ ['test_a', 'test_b'], ['test_a', 'test_b', 'your_test'], ] self.assertEqual(expected_grouping, groups) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/stestr/tests/test_selection.py0000644000175000017500000001547200000000000025304 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import re from unittest import mock from stestr import selection from stestr.tests import base class TestSelection(base.TestCase): def test_filter_tests_no_filter(self): test_list = ['a', 'b', 'c'] result = selection.filter_tests(None, test_list) self.assertEqual(test_list, result) def test_filter_tests(self): test_list = ['a', 'b', 'c'] result = selection.filter_tests(['a'], test_list) self.assertEqual(['a'], result) def test_filter_invalid_regex(self): test_list = ['a', 'b', 'c'] with mock.patch('sys.exit', side_effect=ImportError) as mock_exit: self.assertRaises(ImportError, selection.filter_tests, ['fake_regex_with_bad_part[The-BAD-part]'], test_list) mock_exit.assert_called_once_with(5) class TestBlackReader(base.TestCase): def test_black_reader(self): blacklist_file = io.StringIO() for i in range(4): blacklist_file.write('fake_regex_%s\n' % i) blacklist_file.write('fake_regex_with_note_%s # note\n' % i) blacklist_file.seek(0) with mock.patch('builtins.open', return_value=blacklist_file): result = selection.black_reader('fake_path') self.assertEqual(2 * 4, len(result)) note_cnt = 0 # not assuming ordering, mainly just testing the type for r in result: self.assertEqual(r[2], []) if r[1] == 'note': note_cnt += 1 self.assertIn('search', dir(r[0])) # like a compiled regexp self.assertEqual(note_cnt, 4) def test_invalid_regex(self): blacklist_file = io.StringIO() blacklist_file.write("fake_regex_with_bad_part[The-BAD-part]") blacklist_file.seek(0) with mock.patch('builtins.open', return_value=blacklist_file): with mock.patch('sys.exit') as mock_exit: selection.black_reader('fake_path') mock_exit.assert_called_once_with(5) class TestConstructList(base.TestCase): def test_simple_re(self): test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])'] result = selection.construct_list(test_lists, regexes=['foo']) 
self.assertEqual(list(result), ['fake_test(scen)[egg,foo])']) def test_simple_black_re(self): test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])'] result = selection.construct_list(test_lists, black_regex='foo') self.assertEqual(list(result), ['fake_test(scen)[tag,bar])']) def test_invalid_black_re(self): test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])'] invalid_regex = "fake_regex_with_bad_part[The-BAD-part]" with mock.patch('sys.exit', side_effect=ImportError) as exit_mock: self.assertRaises(ImportError, selection.construct_list, test_lists, black_regex=invalid_regex) exit_mock.assert_called_once_with(5) def test_blacklist(self): black_list = [(re.compile('foo'), 'foo not liked', [])] test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])'] with mock.patch('stestr.selection.black_reader', return_value=black_list): result = selection.construct_list(test_lists, blacklist_file='file', regexes=['fake_test']) self.assertEqual(list(result), ['fake_test(scen)[tag,bar])']) def test_whitelist(self): white_list = [re.compile('fake_test1'), re.compile('fake_test2')] test_lists = ['fake_test1[tg]', 'fake_test2[tg]', 'fake_test3[tg]'] white_getter = 'stestr.selection._get_regex_from_whitelist_file' with mock.patch(white_getter, return_value=white_list): result = selection.construct_list(test_lists, whitelist_file='file') self.assertEqual(set(result), {'fake_test1[tg]', 'fake_test2[tg]'}) def test_whitelist_invalid_regex(self): whitelist_file = io.StringIO() whitelist_file.write("fake_regex_with_bad_part[The-BAD-part]") whitelist_file.seek(0) with mock.patch('builtins.open', return_value=whitelist_file): with mock.patch('sys.exit') as mock_exit: selection._get_regex_from_whitelist_file('fake_path') mock_exit.assert_called_once_with(5) def test_whitelist_blacklist_re(self): white_list = [re.compile('fake_test1'), re.compile('fake_test2')] test_lists = ['fake_test1[tg]', 'fake_test2[spam]', 'fake_test3[tg,foo]', 'fake_test4[spam]'] black_list = [(re.compile('spam'), 'spam not liked', [])] white_getter = 'stestr.selection._get_regex_from_whitelist_file' with mock.patch(white_getter, return_value=white_list): with mock.patch('stestr.selection.black_reader', return_value=black_list): result = selection.construct_list(test_lists, 'black_file', 'white_file', ['foo']) self.assertEqual(set(result), {'fake_test1[tg]', 'fake_test3[tg,foo]'}) def test_overlapping_black_regex(self): black_list = [(re.compile('compute.test_keypairs.KeypairsTestV210'), '', []), (re.compile('compute.test_keypairs.KeypairsTestV21'), '', [])] test_lists = [ 'compute.test_keypairs.KeypairsTestV210.test_create_keypair', 'compute.test_keypairs.KeypairsTestV21.test_create_keypair', 'compute.test_fake.FakeTest.test_fake_test'] with mock.patch('stestr.selection.black_reader', return_value=black_list): result = selection.construct_list(test_lists, blacklist_file='file', regexes=['fake_test']) self.assertEqual( list(result), ['compute.test_fake.FakeTest.test_fake_test']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1535037630.0 stestr-3.0.0/stestr/tests/test_slowest.py0000644000175000017500000000254600000000000025015 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stestr.commands import slowest from stestr.tests import base class TestSlowest(base.TestCase): def test_format_times(self): times = [('test_id_a', 12.34), ('test_id_b', 1.34)] res = slowest.format_times(times) self.assertEqual([('test_id_a', '12.340'), ('test_id_b', ' 1.340')], res) def test_format_times_with_zero(self): times = [('test_id_a', 0), ('test_id_b', 1.34)] res = slowest.format_times(times) self.assertEqual([('test_id_a', '0.000'), ('test_id_b', '1.340')], res) def test_format_times_all_zero(self): times = [('test_id_a', 0), ('test_id_b', 0.00)] res = slowest.format_times(times) self.assertEqual([('test_id_a', '0.000'), ('test_id_b', '0.000')], res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/stestr/tests/test_subunit_trace.py0000644000175000017500000000752500000000000026166 0ustar00computertrekercomputertreker00000000000000# Copyright 2015 SUSE Linux GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime as dt import io import os import sys from unittest.mock import patch from ddt import data from ddt import ddt from ddt import unpack from stestr import subunit_trace from stestr.tests import base @ddt class TestSubunitTrace(base.TestCase): def setUp(self): super(TestSubunitTrace, self).setUp() # NOTE(mtreinish): subunit-trace relies on a global to track results # with the expectation that it's run once per python interpreter # (like per stestr run or other command). Make sure to clear those on # each test to isolate the tests from each other. 
subunit_trace.RESULTS = {} subunit_trace.FAILS = [] @data(([dt(2015, 4, 17, 22, 23, 14, 111111), dt(2015, 4, 17, 22, 23, 14, 111111)], "0.000000s"), ([dt(2015, 4, 17, 22, 23, 14, 111111), dt(2015, 4, 17, 22, 23, 15, 111111)], "1.000000s"), ([dt(2015, 4, 17, 22, 23, 14, 111111), None], "")) @unpack def test_get_duration(self, timestamps, expected_result): self.assertEqual(subunit_trace.get_duration(timestamps), expected_result) @data(([dt(2015, 4, 17, 22, 23, 14, 111111), dt(2015, 4, 17, 22, 23, 14, 111111)], 0.0), ([dt(2015, 4, 17, 22, 23, 14, 111111), dt(2015, 4, 17, 22, 23, 15, 111111)], 1.0), ([dt(2015, 4, 17, 22, 23, 14, 111111), None], 0.0)) @unpack def test_run_time(self, timestamps, expected_result): patched_res = { 0: [ {'timestamps': timestamps} ] } with patch.dict(subunit_trace.RESULTS, patched_res, clear=True): self.assertEqual(subunit_trace.run_time(), expected_result) def test_trace(self): regular_stream = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'sample_streams/successful.subunit') bytes_ = io.BytesIO() with open(regular_stream, 'rb') as stream: bytes_.write(bytes(stream.read())) bytes_.seek(0) stdin = io.TextIOWrapper(io.BufferedReader(bytes_)) returncode = subunit_trace.trace(stdin, sys.stdout) self.assertEqual(0, returncode) def test_trace_with_all_skips(self): regular_stream = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'sample_streams/all_skips.subunit') bytes_ = io.BytesIO() with open(regular_stream, 'rb') as stream: bytes_.write(bytes(stream.read())) bytes_.seek(0) stdin = io.TextIOWrapper(io.BufferedReader(bytes_)) returncode = subunit_trace.trace(stdin, sys.stdout) self.assertEqual(1, returncode) def test_trace_with_failures(self): regular_stream = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'sample_streams/failure.subunit') bytes_ = io.BytesIO() with open(regular_stream, 'rb') as stream: bytes_.write(bytes(stream.read())) bytes_.seek(0) stdin = io.TextIOWrapper(io.BufferedReader(bytes_)) returncode = subunit_trace.trace(stdin, sys.stdout) self.assertEqual(1, returncode) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/stestr/tests/test_test_processor.py0000644000175000017500000000330100000000000026361 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
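# The tests below exercise TestProcessorFixture._start_process with
# subprocess.Popen and the test_processor module's sys attribute mocked out,
# verifying that the SIGPIPE-clearing preexec_fn is only passed to Popen on
# non-win32 platforms.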
import subprocess from unittest import mock from stestr import test_processor from stestr.tests import base class TestTestProcessorFixture(base.TestCase): def setUp(self): super(TestTestProcessorFixture, self).setUp() self._fixture = test_processor.TestProcessorFixture( mock.sentinel.test_ids, mock.sentinel.options, mock.sentinel.cmd_template, mock.sentinel.listopt, mock.sentinel.idoption, mock.sentinel.repository) @mock.patch.object(subprocess, 'Popen') @mock.patch.object(test_processor, 'sys') def _check_start_process(self, mock_sys, mock_Popen, platform='win32', expected_fn=None): mock_sys.platform = platform self._fixture._start_process(mock.sentinel.cmd) mock_Popen.assert_called_once_with( mock.sentinel.cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, preexec_fn=expected_fn) def test_start_process_win32(self): self._check_start_process() def test_start_process_linux(self): self._check_start_process( platform='linux2', expected_fn=self._fixture._clear_SIGPIPE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/stestr/tests/test_user_config.py0000644000175000017500000001377200000000000025623 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
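# The tests below cover config file discovery in get_user_config() (the
# ~/.config/stestr.yaml XDG path and the ~/.stestr.yaml fallback) and schema
# validation in UserConfig, with builtins.open and yaml.safe_load mocked so
# no real files are touched.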
import io import os from unittest import mock from stestr.tests import base from stestr import user_config FULL_YAML = """ run: concurrency: 42 # This can be any integer value random: True no-subunit-trace: True color: True abbreviate: True slowest: True suppress-attachments: True all-attachments: True failing: list: True last: no-subunit-trace: True color: True suppress-attachments: True all-attachments: True load: force-init: True subunit-trace: True color: True abbreviate: True suppress-attachments: True all-attachments: True """ INVALID_YAML_FIELD = """ run: color: True, """ YAML_NOT_INT = """ run: concurrency: Two """ class TestUserConfig(base.TestCase): def setUp(self): super(TestUserConfig, self).setUp() home_dir = os.path.expanduser("~") self.xdg_path = os.path.join(os.path.join(home_dir, '.config'), 'stestr.yaml') self.home_path = os.path.join(home_dir, '.stestr.yaml') @mock.patch('sys.exit') @mock.patch('stestr.user_config.UserConfig') def test_get_user_config_invalid_path(self, user_mock, exit_mock): user_config.get_user_config('/i_am_an_invalid_path') msg = 'The specified stestr user config is not a valid path' exit_mock.assert_called_once_with(msg) @mock.patch('os.path.isfile') @mock.patch('stestr.user_config.UserConfig') def test_get_user_config_xdg_file(self, user_mock, path_mock): def fake_isfile(path): if path == self.xdg_path: return True else: return False path_mock.side_effect = fake_isfile user_config.get_user_config() user_mock.assert_called_once_with(self.xdg_path) @mock.patch('os.path.isfile') @mock.patch('stestr.user_config.UserConfig') def test_get_default_user_config_file(self, user_mock, path_mock): def fake_isfile(path): if path == self.home_path: return True else: return False path_mock.side_effect = fake_isfile user_config.get_user_config() user_mock.assert_called_once_with(self.home_path) @mock.patch('yaml.safe_load', return_value={}) @mock.patch('builtins.open', mock.mock_open()) def test_user_config_empty_schema(self, yaml_mock): user_conf = user_config.UserConfig('/path') self.assertEqual({}, user_conf.config) @mock.patch('yaml.safe_load', return_value={'init': {'subunit-trace': True}}) @mock.patch('sys.exit') @mock.patch('builtins.open', mock.mock_open()) def test_user_config_invalid_command(self, exit_mock, yaml_mock): user_config.UserConfig('/path') error_string = ("Provided user config file /path is invalid because:\n" "extra keys not allowed @ data['init']") exit_mock.assert_called_once_with(error_string) @mock.patch('yaml.safe_load', return_value={'run': {'subunit-trace': True}}) @mock.patch('sys.exit') @mock.patch('builtins.open', mock.mock_open()) def test_user_config_invalid_option(self, exit_mock, yaml_mock): user_config.UserConfig('/path') error_string = ("Provided user config file /path is invalid because:\n" "extra keys not allowed @ " "data['run']['subunit-trace']") exit_mock.assert_called_once_with(error_string) @mock.patch('builtins.open', return_value=io.BytesIO(FULL_YAML.encode('utf-8'))) def test_user_config_full_config(self, open_mock): user_conf = user_config.UserConfig('/path') full_dict = { 'run': { 'concurrency': 42, 'random': True, 'no-subunit-trace': True, 'color': True, 'abbreviate': True, 'slowest': True, 'suppress-attachments': True, 'all-attachments': True}, 'failing': { 'list': True}, 'last': { 'no-subunit-trace': True, 'color': True, 'suppress-attachments': True, 'all-attachments': True}, 'load': { 'force-init': True, 'subunit-trace': True, 'color': True, 'abbreviate': True, 'suppress-attachments': True, 'all-attachments': 
True} } self.assertEqual(full_dict, user_conf.config) @mock.patch('sys.exit') @mock.patch('builtins.open', return_value=io.BytesIO(INVALID_YAML_FIELD.encode('utf-8'))) def test_user_config_invalid_value_type(self, open_mock, exit_mock): user_config.UserConfig('/path') error_string = ("Provided user config file /path is invalid because:\n" "expected bool for dictionary value @ " "data['run']['color']") exit_mock.assert_called_once_with(error_string) @mock.patch('sys.exit') @mock.patch('builtins.open', return_value=io.BytesIO(YAML_NOT_INT.encode('utf-8'))) def test_user_config_invalid_integer(self, open_mock, exit_mock): user_config.UserConfig('/path') error_string = ("Provided user config file /path is invalid because:\n" "expected int for dictionary value @ " "data['run']['concurrency']") exit_mock.assert_called_once_with(error_string) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/tests/test_user_config_return_codes.py0000644000175000017500000004160300000000000030371 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import io import os import shutil import subprocess import tempfile import subunit as subunit_lib import testtools import yaml from stestr.tests import base class TestReturnCodes(base.TestCase): def setUp(self): super(TestReturnCodes, self).setUp() # Setup test dirs self.directory = tempfile.mkdtemp(prefix='stestr-unit') self.addCleanup(shutil.rmtree, self.directory) self.test_dir = os.path.join(self.directory, 'tests') os.mkdir(self.test_dir) # Setup Test files self.testr_conf_file = os.path.join(self.directory, '.stestr.conf') self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg') self.passing_file = os.path.join(self.test_dir, 'test_passing.py') self.failing_file = os.path.join(self.test_dir, 'test_failing.py') self.init_file = os.path.join(self.test_dir, '__init__.py') self.setup_py = os.path.join(self.directory, 'setup.py') shutil.copy('stestr/tests/files/testr-conf', self.testr_conf_file) shutil.copy('stestr/tests/files/passing-tests', self.passing_file) shutil.copy('stestr/tests/files/failing-tests', self.failing_file) shutil.copy('setup.py', self.setup_py) shutil.copy('stestr/tests/files/setup.cfg', self.setup_cfg_file) shutil.copy('stestr/tests/files/__init__.py', self.init_file) self.stdout = io.StringIO() self.stderr = io.StringIO() # Change directory, run wrapper and check result self.addCleanup(os.chdir, os.path.abspath(os.curdir)) os.chdir(self.directory) subprocess.call('stestr init', shell=True) def _get_cmd_stdout(self, cmd): p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) out = p.communicate() self.assertEqual(0, p.returncode) return out def assertRunExit(self, cmd, expected, subunit=False, stdin=None): if stdin: p = subprocess.Popen( "%s" % cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate(stdin) else: p = subprocess.Popen( "%s" % cmd, shell=True, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if not subunit: self.assertEqual( p.returncode, expected, "Stdout: {}; Stderr: {}".format(out, err)) return (out, err) else: self.assertEqual(p.returncode, expected, "Expected return code: %s doesn't match actual " "return code of: %s" % (expected, p.returncode)) output_stream = io.BytesIO(out) stream = subunit_lib.ByteStreamToStreamResult(output_stream) starts = testtools.StreamResult() summary = testtools.StreamSummary() tests = [] def _add_dict(test): tests.append(test) outcomes = testtools.StreamToDict(functools.partial(_add_dict)) result = testtools.CopyStreamResult([starts, outcomes, summary]) result.startTestRun() try: stream.run(result) finally: result.stopTestRun() self.assertThat(len(tests), testtools.matchers.GreaterThan(0)) return (out, err) def test_empty_config_file_failing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) self.assertRunExit( 'stestr --user-config=%s run' % path, 1) def test_empty_config_file_passing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) self.assertRunExit( 'stestr --user-config=%s run passing' % path, 0) def test_no_subunit_trace_config_file_passing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'no-subunit-trace': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) out, err = self.assertRunExit( 'stestr --user-config=%s run passing' % path, 0) out = str(out) self.assertIn('PASSED (id=0)', out) self.assertNotIn('Totals', out) self.assertNotIn('Worker Balance', out) self.assertNotIn('Sum of execute time for each test:', out) self.assertNotIn('Runtime (s)', out) def test_no_subunit_trace_config_file_failing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'no-subunit-trace': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) out, err = self.assertRunExit( 'stestr --user-config=%s run' % path, 1) out = str(out) self.assertIn('FAILED (id=0, failures=2)', out) self.assertNotIn('Totals', out) self.assertNotIn('Worker Balance', out) self.assertNotIn('Sum of execute time for each test:', out) def test_no_subunit_trace_config_file_force_subunit_trace(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'no-subunit-trace': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) out, err = self.assertRunExit( 'stestr --user-config=%s run --force-subunit-trace passing' % path, 0) out = str(out) self.assertNotIn('PASSED (id=0)', out) self.assertIn('Totals', out) self.assertIn('Worker Balance', out) self.assertIn('Sum of execute time for each test:', out) def test_abbreviate_config_file_passing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'abbreviate': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) out, err = self.assertRunExit( 'stestr --user-config=%s run passing' % path, 0) out = str(out) 
self.assertIn('..', out) self.assertNotIn('PASSED (id=0)', out) self.assertIn('Totals', out) self.assertIn('Worker Balance', out) self.assertIn('Sum of execute time for each test:', out) def test_abbreviate_config_file_failing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'abbreviate': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) # NOTE(mtreinish): Running serially here to ensure a consistent # execution order for confirming the abbreviated output. out, err = self.assertRunExit( 'stestr --user-config=%s run --serial' % path, 1) out = str(out) self.assertIn('FF..', out) self.assertNotIn('FAILED (id=0, failures=2)', out) self.assertIn('Totals', out) self.assertIn('Worker Balance', out) self.assertIn('Sum of execute time for each test:', out) def test_no_subunit_trace_slowest_config_file_passing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'no-subunit-trace': True, 'slowest': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) out, err = self.assertRunExit( 'stestr --user-config=%s run passing' % path, 0) out = str(out) self.assertIn('PASSED (id=0)', out) self.assertNotIn('Totals', out) self.assertNotIn('Worker Balance', out) self.assertNotIn('Sum of execute time for each test:', out) self.assertIn('Runtime (s)', out) def test_failing_list_config_file(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'no-subunit-trace': True, 'slowest': True, }, 'failing': { 'list': True } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) self.assertRunExit('stestr --user-config=%s run' % path, 1) out, err = self.assertRunExit('stestr --user-config=%s failing' % path, 1) out = str(out) self.assertNotIn('FAILED (id=0, failures=2)', out) self.assertNotIn('FAIL:', out) self.assertIn('tests.test_failing.FakeTestClass.test_pass', out) self.assertIn('tests.test_failing.FakeTestClass.test_pass_list', out) def test_no_subunit_trace_last_config_file_passing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'slowest': True, }, 'failing': { 'list': True }, 'last': { 'no-subunit-trace': True, }, }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) run_out, run_err = self.assertRunExit( 'stestr --user-config=%s run passing' % path, 0) out, err = self.assertRunExit('stestr --user-config=%s last' % path, 0) run_out = str(run_out) out = str(out) self.assertIn('PASSED (id=0)', out) self.assertNotIn('Totals', out) self.assertNotIn('Worker Balance', out) self.assertNotIn('Sum of execute time for each test:', out) self.assertNotIn('Runtime (s)', out) self.assertIn('Totals', run_out) self.assertIn('Worker Balance', run_out) self.assertIn('Sum of execute time for each test:', run_out) self.assertIn('Runtime (s)', run_out) def test_subunit_trace_load_from_config_passing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'slowest': True, }, 'failing': { 'list': True }, 'last': { 'no-subunit-trace': True, }, 
'load': { 'subunit-trace': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) self.assertRunExit('stestr --user-config=%s run passing' % path, 0) stream = self._get_cmd_stdout( 'stestr --user-config=%s last --subunit' % path)[0] out, err = self.assertRunExit('stestr --user-config=%s load' % path, 0, stdin=stream) out = str(out) self.assertNotIn('PASSED (id=0)', out) self.assertIn('Totals', out) self.assertIn('Worker Balance', out) self.assertIn('Sum of execute time for each test:', out) def test_subunit_trace_load_from_config_failing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'slowest': True, }, 'failing': { 'list': True }, 'last': { 'no-subunit-trace': True, }, 'load': { 'subunit-trace': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) self.assertRunExit('stestr --user-config=%s run' % path, 1) stream = self._get_cmd_stdout( 'stestr --user-config=%s last --subunit' % path)[0] out, err = self.assertRunExit('stestr --user-config=%s load' % path, 0, stdin=stream) out = str(out) self.assertNotIn('FAILED (id=0, failures=2)', out) self.assertNotIn('FF..', out) self.assertIn('Totals', out) self.assertIn('Worker Balance', out) self.assertIn('Sum of execute time for each test:', out) @testtools.skip('Abbreviated output not displaying') def test_abbreviate_load_from_config_passing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'slowest': True, }, 'failing': { 'list': True }, 'last': { 'no-subunit-trace': True, }, 'load': { 'abbreviate': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) self.assertRunExit('stestr --user-config=%s run passing' % path, 0) stream = self._get_cmd_stdout( 'stestr --user-config=%s last --subunit' % path)[0] out, err = self.assertRunExit('stestr --user-config=%s load' % path, 0, stdin=stream) out = str(out) self.assertNotIn('PASSED (id=0)', out) self.assertIn('..', out) self.assertIn('Totals', out) self.assertIn('Worker Balance', out) self.assertIn('Sum of execute time for each test:', out) @testtools.skip('Abbreviated output not displaying') def test_abbreviate_load_from_config_failing(self): fd, path = tempfile.mkstemp() self.addCleanup(os.remove, path) conf_file = os.fdopen(fd, 'wb', 0) self.addCleanup(conf_file.close) contents = str( yaml.dump({ 'run': { 'slowest': True, }, 'failing': { 'list': True }, 'last': { 'no-subunit-trace': True, }, 'load': { 'abbreviate': True, } }, default_flow_style=False)) conf_file.write(contents.encode('utf-8')) # NOTE(mtreinish): Running serially here to ensure a consistent # execution order for confirming the abbreviated output. 
self.assertRunExit('stestr --user-config=%s run --serial' % path, 1) stream = self._get_cmd_stdout( 'stestr --user-config=%s last --subunit' % path)[0] out, err = self.assertRunExit('stestr --user-config=%s load' % path, 0, stdin=stream) out = str(out) self.assertNotIn('FAILED (id=0, failures=2)', out) self.assertIn('FF..', out) self.assertIn('Totals', out) self.assertIn('Worker Balance', out) self.assertIn('Sum of execute time for each test:', out) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1525878837.0 stestr-3.0.0/stestr/tests/test_utils.py0000644000175000017500000001274400000000000024456 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stestr.tests import base from stestr import utils class TestUtils(base.TestCase): def test_cleanup_test_name_defaults(self): test_id_no_attrs = 'test.TestThing.test_thing' test_id_with_attrs = 'test.TestThing.test_thing[attr1,attr2,att3]' test_id_with_scenario = 'test.TestThing.test_thing(mysql)' test_id_with_attrs_and_scenario = ('test.TestThing.test_thing[attr]' '(mysql)') result_no_attrs = utils.cleanup_test_name(test_id_no_attrs) self.assertEqual(test_id_no_attrs, result_no_attrs) result_with_attrs = utils.cleanup_test_name(test_id_with_attrs) self.assertEqual(test_id_no_attrs, result_with_attrs) result_with_scenario = utils.cleanup_test_name(test_id_with_scenario) self.assertEqual(test_id_with_scenario, result_with_scenario) result_with_attr_and_scenario = utils.cleanup_test_name( test_id_with_attrs_and_scenario) self.assertEqual(test_id_with_scenario, result_with_attr_and_scenario) def test_cleanup_test_name_leave_attrs(self): test_id_no_attrs = 'test.TestThing.test_thing' test_id_with_attrs = 'test.TestThing.test_thing[attr1,attr2,att3]' test_id_with_scenario = 'test.TestThing.test_thing(mysql)' test_id_with_attrs_and_scenario = ('test.TestThing.test_thing[attr]' '(mysql)') result_no_attrs = utils.cleanup_test_name(test_id_no_attrs, strip_tags=False) self.assertEqual(test_id_no_attrs, result_no_attrs) result_with_attrs = utils.cleanup_test_name(test_id_with_attrs, strip_tags=False) self.assertEqual(test_id_with_attrs, result_with_attrs) result_with_scenario = utils.cleanup_test_name(test_id_with_scenario, strip_tags=False) self.assertEqual(test_id_with_scenario, result_with_scenario) result_with_attr_and_scenario = utils.cleanup_test_name( test_id_with_attrs_and_scenario, strip_tags=False) self.assertEqual(test_id_with_attrs_and_scenario, result_with_attr_and_scenario) def test_cleanup_test_name_strip_scenario_and_attrs(self): test_id_no_attrs = 'test.TestThing.test_thing' test_id_with_attrs = 'test.TestThing.test_thing[attr1,attr2,att3]' test_id_with_scenario = 'test.TestThing.test_thing(mysql)' test_id_with_attrs_and_scenario = ('test.TestThing.test_thing[attr]' '(mysql)') result_no_attrs = utils.cleanup_test_name(test_id_no_attrs, strip_scenarios=True) self.assertEqual(test_id_no_attrs, result_no_attrs) result_with_attrs = 
utils.cleanup_test_name(test_id_with_attrs, strip_scenarios=True) self.assertEqual(test_id_no_attrs, result_with_attrs) result_with_scenario = utils.cleanup_test_name(test_id_with_scenario, strip_scenarios=True) self.assertEqual(test_id_no_attrs, result_with_scenario) result_with_attr_and_scenario = utils.cleanup_test_name( test_id_with_attrs_and_scenario, strip_scenarios=True) self.assertEqual(test_id_no_attrs, result_with_attr_and_scenario) def test_cleanup_test_name_strip_scenario(self): test_id_no_attrs = 'test.TestThing.test_thing' test_id_with_attrs = 'test.TestThing.test_thing[attr1,attr2,att3]' test_id_with_scenario = 'test.TestThing.test_thing(mysql)' test_id_with_attrs_and_scenario = ('test.TestThing.test_thing[attr]' '(mysql)') result_no_attrs = utils.cleanup_test_name(test_id_no_attrs, strip_scenarios=True, strip_tags=False) self.assertEqual(test_id_no_attrs, result_no_attrs) result_with_attrs = utils.cleanup_test_name(test_id_with_attrs, strip_scenarios=True, strip_tags=False) self.assertEqual(test_id_with_attrs, result_with_attrs) result_with_scenario = utils.cleanup_test_name(test_id_with_scenario, strip_scenarios=True, strip_tags=False) self.assertEqual(test_id_no_attrs, result_with_scenario) result_with_attr_and_scenario = utils.cleanup_test_name( test_id_with_attrs_and_scenario, strip_scenarios=True, strip_tags=False) self.assertEqual('test.TestThing.test_thing[attr]', result_with_attr_and_scenario) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/user_config.py0000644000175000017500000000610600000000000023413 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
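# get_user_config() looks for ~/.stestr.yaml first and then falls back to
# ~/.config/stestr.yaml; the loaded YAML must match the voluptuous schema
# defined in UserConfig below. For example, a minimal config enabling two of
# the supported options (the values here are illustrative only) would be:
#
#   run:
#     concurrency: 2
#     color: True
#   last:
#     no-subunit-trace: True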
import os import sys import voluptuous as vp import yaml def get_user_config(path=None): if not path: home_dir = os.path.expanduser("~") path = os.path.join(home_dir, '.stestr.yaml') if not os.path.isfile(path): path = os.path.join(os.path.join(home_dir, '.config'), 'stestr.yaml') if not os.path.isfile(path): path = None if not path: return None else: if not os.path.isfile(path): msg = 'The specified stestr user config is not a valid path' sys.exit(msg) return UserConfig(path) class UserConfig(object): def __init__(self, path): self.schema = vp.Schema({ vp.Optional('run'): { vp.Optional('concurrency'): int, vp.Optional('random'): bool, vp.Optional('no-subunit-trace'): bool, vp.Optional('color'): bool, vp.Optional('abbreviate'): bool, vp.Optional('slowest'): bool, vp.Optional('suppress-attachments'): bool, vp.Optional('all-attachments'): bool, }, vp.Optional('failing'): { vp.Optional('list'): bool, }, vp.Optional('last'): { vp.Optional('no-subunit-trace'): bool, vp.Optional('color'): bool, vp.Optional('suppress-attachments'): bool, vp.Optional('all-attachments'): bool, }, vp.Optional('load'): { vp.Optional('force-init'): bool, vp.Optional('subunit-trace'): bool, vp.Optional('color'): bool, vp.Optional('abbreviate'): bool, vp.Optional('suppress-attachments'): bool, vp.Optional('all-attachments'): bool, } }) with open(path, 'r') as fd: self.config = yaml.safe_load(fd.read()) if self.config is None: self.config = {} try: self.schema(self.config) except vp.MultipleInvalid as e: msg = ('Provided user config file {} is invalid ' 'because:\n{}'.format(path, str(e))) sys.exit(msg) @property def run(self): return self.config.get('run') @property def failing(self): return self.config.get('failing') @property def last(self): return self.config.get('last') @property def load(self): return self.config.get('load') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/stestr/utils.py0000644000175000017500000000760400000000000022254 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io from stestr import output class CallWhenProcFinishes(object): """Convert a process object to trigger a callback when returncode is set. This just wraps the entire object and when the returncode attribute access finds a set value, calls the callback. """ def __init__(self, process, callback): """Adapt process :param process: A subprocess.Popen object. :param callback: The callback to call when the process completes.
""" self._proc = process self._callback = callback self._done = False @property def stdin(self): return self._proc.stdin @property def stdout(self): return self._proc.stdout @property def stderr(self): return self._proc.stderr @property def returncode(self): result = self._proc.returncode if not self._done and result is not None: self._done = True self._callback() return result def wait(self): return self._proc.wait() def _iter_internal_streams(input_streams, stream_type): streams = [] for in_stream in input_streams: if in_stream[0] == stream_type: streams.append(in_stream[1]) for stream_value in streams: if isinstance(stream_value, output.ReturnCodeToSubunit): if getattr(stream_value.source, 'detach', None): yield stream_value.source.detach() else: yield stream_value.source elif getattr(stream_value, 'read', None): yield stream_value else: yield io.BytesIO(stream_value) def iter_streams(input_streams, stream_type): """Iterate over all the streams of type stream_type. :param stream_type: A simple string such as 'subunit' which matches one of the stream types defined for the cmd object this UI is being used with. :return: A generator of stream objects. stream objects have a read method and a close method which behave as for file objects. """ for stream_spec in input_streams: _stream_spec = stream_spec[0] if '*' in _stream_spec or '?' in _stream_spec or '+' in _stream_spec: found = stream_type == _stream_spec[:-1] else: found = stream_type == _stream_spec if found: return _iter_internal_streams(input_streams, stream_type) raise KeyError(stream_type) def cleanup_test_name(name, strip_tags=True, strip_scenarios=False): """Clean up the test name for display. By default we strip out the tags in the test because they don't help us in identifying the test that is run to it's result. Make it possible to strip out the testscenarios information (not to be confused with tempest scenarios) however that's often needed to identify generated negative tests. """ if strip_tags: tags_start = name.find('[') tags_end = name.find(']') if tags_start > 0 and tags_end > tags_start: newname = name[:tags_start] newname += name[tags_end + 1:] name = newname if strip_scenarios: tags_start = name.find('(') tags_end = name.find(')') if tags_start > 0 and tags_end > tags_start: newname = name[:tags_start] newname += name[tags_end + 1:] name = newname return name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1483216918.0 stestr-3.0.0/stestr/version.py0000644000175000017500000000114600000000000022574 0ustar00computertrekercomputertreker00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
import pbr.version version_info = pbr.version.VersionInfo('stestr') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5720925 stestr-3.0.0/stestr.egg-info/0000755000175000017500000000000000000000000022225 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585232236.0 stestr-3.0.0/stestr.egg-info/PKG-INFO0000644000175000017500000002026400000000000023326 0ustar00computertrekercomputertreker00000000000000Metadata-Version: 2.1 Name: stestr Version: 3.0.0 Summary: A parallel Python test runner built around subunit Home-page: http://stestr.readthedocs.io/en/latest/ Author: Matthew Treinish Author-email: mtreinish@kortar.org License: Apache-2.0 Project-URL: Documentation, https://stestr.readthedocs.io Project-URL: Source Code, https://github.com/mtreinish/stestr Project-URL: Bug Tracker, https://github.com/mtreinish/stestr/issues Description: stestr ====== .. image:: https://img.shields.io/travis/mtreinish/stestr/master.svg?style=flat-square :target: https://travis-ci.org/mtreinish/stestr :alt: Build status .. image:: https://dev.azure.com/stestr/stestr/_apis/build/status/mtreinish.stestr?branchName=master :target: https://dev.azure.com/stestr/stestr/_build/latest?definitionId=1&branchName=master :alt: Azure DevOps build status .. image:: https://img.shields.io/coveralls/github/mtreinish/stestr/master.svg?style=flat-square :target: https://coveralls.io/github/mtreinish/stestr?branch=master :alt: Code coverage .. image:: https://img.shields.io/pypi/v/stestr.svg?style=flat-square :target: https://pypi.python.org/pypi/stestr :alt: Latest Version * Read this in other languages: `English`_, `日本語`_ * You can see the full rendered docs at: http://stestr.readthedocs.io/en/latest/ * The code of the project is on Github: https://github.com/mtreinish/stestr .. _English: https://github.com/mtreinish/stestr/blob/master/README.rst .. _日本語: https://github.com/mtreinish/stestr/blob/master/README_ja.rst .. note:: The stestr v2.x.x release series is the last series that supports Python 2. Support for Python 2.7 was dropped in stestr release 3.0.0. Overview -------- stestr is a parallel Python test runner designed to execute `unittest`_ test suites using multiple processes to split up the execution of a test suite. It will also store a history of all test runs to help in debugging failures and optimizing the scheduler to improve speed. To accomplish this goal it uses the `subunit`_ protocol to facilitate streaming and storing results from multiple workers. .. _unittest: https://docs.python.org/3/library/unittest.html .. _subunit: https://github.com/testing-cabal/subunit stestr originally started as a fork of the `testrepository`_ project. However, instead of being an interface for any test runner that used subunit, like testrepository, stestr concentrated on being a dedicated test runner for Python projects. While stestr was originally forked from testrepository, it is not backwards compatible with testrepository. At a high level the basic concepts of operation are shared between the two projects, but the actual usage is not exactly the same. .. _testrepository: https://testrepository.readthedocs.org/en/latest Installing stestr ----------------- stestr is available via pypi, so all you need to do is run:: pip install -U stestr to get stestr on your system.
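As a quick sanity check that the install worked (assuming the ``stestr`` entry point landed on your ``PATH``), you can ask the runner for its version:: stestr --version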
If you need to use a development version of stestr you can clone the repo and install it locally with:: git clone https://github.com/mtreinish/stestr.git && pip install -e stestr which will install stestr in your Python environment in editable mode for local development. Using stestr ------------ After you install stestr, using it to run tests is pretty straightforward. The first thing you'll want to do is create a ``.stestr.conf`` file for your project. This file is used to tell stestr where to find tests and basic information about how tests are run. A minimal example of the contents of this file is:: [DEFAULT] test_path=./project_source_dir/tests which just tells stestr the relative path for the directory to use for test discovery. This is the same as ``--start-directory`` in the standard `unittest discovery`_. .. _unittest discovery: https://docs.python.org/3/library/unittest.html#test-discovery After this file is created you should be all set to start using stestr to run tests. To run tests just use:: stestr run it will first create a results repository at ``.stestr/`` in the current working directory and then execute all the tests found by test discovery. If you're just running a single test (or module) and want to avoid the overhead of doing test discovery, you can use the ``--no-discover``/``-n`` option to specify that test. For all the details on these commands and a more thorough explanation of the options, see the stestr manual: https://stestr.readthedocs.io/en/latest/MANUAL.html Migrating from testrepository ----------------------------- If you have a project that is already using testrepository, stestr's source repo contains a helper script for migrating your repo to use stestr. This script just creates a ``.stestr.conf`` file from a ``.testr.conf`` file (assuming it uses a standard subunit.run test command format). To run this from your project repo just call:: $STESTR_SOURCE_DIR/tools/testr_to_stestr.py and you'll have a ``.stestr.conf`` created. Building a manpage ------------------ The stestr manual has been formatted so that it renders well as html and as a manpage. The html output is autogenerated and published to: https://stestr.readthedocs.io/en/latest/MANUAL.html but the manpage has to be generated by hand. To do this you have to manually run sphinx-build with the manpage builder. This has been automated in a small script that should be run from the root of the stestr repository:: tools/build_manpage.sh which will generate the troff file in doc/build/man/stestr.1, which is ready to be packaged and/or put in your system's man pages. Contributing ------------ To browse the latest code, see: https://github.com/mtreinish/stestr To clone the latest code, use: ``git clone https://github.com/mtreinish/stestr.git`` Guidelines for contribution are documented at: http://stestr.readthedocs.io/en/latest/developer_guidelines.html Use `github pull requests`_ to submit patches. Before you submit a pull request, ensure that all the automated testing will pass by running ``tox`` locally. This will run the test suite and also the automated style rule checks just as they will in CI. If CI fails on your change, it will not be able to merge. .. _github pull requests: https://help.github.com/articles/about-pull-requests/ Community --------- Besides Github interactions, there is also a stestr IRC channel: #stestr on Freenode. Feel free to join to ask questions or just discuss stestr.
Platform: UNKNOWN Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: Software Development :: Testing Classifier: Topic :: Software Development :: Quality Assurance Provides-Extra: sql Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585232236.0 stestr-3.0.0/stestr.egg-info/SOURCES.txt0000644000175000017500000000644600000000000024123 0ustar00computertrekercomputertreker00000000000000.coveragerc .mailmap .stestr.conf .travis.yml AUTHORS CODE_OF_CONDUCT.md CONTRIBUTING.rst ChangeLog LICENSE README.rst README_ja.rst azure-pipelines.yml requirements.txt setup.cfg setup.py test-requirements.txt tox.ini .github/ISSUE_TEMPLATE/bug_report.md .github/ISSUE_TEMPLATE/feature_request.md .travis/coveralls.sh doc/source/CONTRIBUTING.rst doc/source/MANUAL.rst doc/source/README.rst doc/source/README_ja.rst doc/source/api.rst doc/source/conf.py doc/source/developer_guidelines.rst doc/source/index.rst doc/source/internal_arch.rst doc/source/api/config_file.rst doc/source/api/output.rst doc/source/api/scheduler.rst doc/source/api/selection.rst doc/source/api/subunit_trace.rst doc/source/api/test_processor.rst doc/source/api/commands/__init__.rst doc/source/api/commands/failing.rst doc/source/api/commands/init.rst doc/source/api/commands/last.rst doc/source/api/commands/list.rst doc/source/api/commands/load.rst doc/source/api/commands/run.rst doc/source/api/commands/slowest.rst doc/source/api/repository/abstract.rst doc/source/api/repository/file.rst doc/source/api/repository/memory.rst doc/source/api/repository/sql.rst stestr/__init__.py stestr/__main__.py stestr/bisect_tests.py stestr/cli.py stestr/colorizer.py stestr/config_file.py stestr/output.py stestr/results.py stestr/scheduler.py stestr/selection.py stestr/subunit_trace.py stestr/test_processor.py stestr/testlist.py stestr/user_config.py stestr/utils.py stestr/version.py stestr.egg-info/PKG-INFO stestr.egg-info/SOURCES.txt stestr.egg-info/dependency_links.txt stestr.egg-info/entry_points.txt stestr.egg-info/not-zip-safe stestr.egg-info/pbr.json stestr.egg-info/requires.txt stestr.egg-info/top_level.txt stestr/commands/__init__.py stestr/commands/failing.py stestr/commands/init.py stestr/commands/last.py stestr/commands/list.py stestr/commands/load.py stestr/commands/run.py stestr/commands/slowest.py stestr/repository/__init__.py stestr/repository/abstract.py stestr/repository/file.py stestr/repository/memory.py stestr/repository/sql.py stestr/repository/util.py stestr/repository/vcs/__init__.py stestr/repository/vcs/detect.py stestr/repository/vcs/git.py stestr/subunit_runner/__init__.py stestr/subunit_runner/program.py stestr/subunit_runner/run.py stestr/tests/__init__.py stestr/tests/base.py stestr/tests/test_bisect_return_codes.py stestr/tests/test_bisect_tests.py stestr/tests/test_config_file.py stestr/tests/test_load.py stestr/tests/test_output.py stestr/tests/test_return_codes.py stestr/tests/test_run.py stestr/tests/test_scheduler.py 
stestr/tests/test_selection.py stestr/tests/test_slowest.py stestr/tests/test_subunit_trace.py stestr/tests/test_test_processor.py stestr/tests/test_user_config.py stestr/tests/test_user_config_return_codes.py stestr/tests/test_utils.py stestr/tests/files/__init__.py stestr/tests/files/bisect-fail-serial-tests stestr/tests/files/failing-tests stestr/tests/files/passing-tests stestr/tests/files/setup.cfg stestr/tests/files/stestr.yaml stestr/tests/files/testr-conf stestr/tests/repository/__init__.py stestr/tests/repository/test_file.py stestr/tests/repository/test_sql.py stestr/tests/repository/test_util.py stestr/tests/sample_streams/all_skips.subunit stestr/tests/sample_streams/failure.subunit stestr/tests/sample_streams/successful.subunit tools/build_manpage.sh tools/find_and_rm.py tools/testr_to_stestr.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585232236.0 stestr-3.0.0/stestr.egg-info/dependency_links.txt0000644000175000017500000000000100000000000026273 0ustar00computertrekercomputertreker00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585232236.0 stestr-3.0.0/stestr.egg-info/entry_points.txt0000644000175000017500000000045700000000000025531 0ustar00computertrekercomputertreker00000000000000[console_scripts] stestr = stestr.cli:main [stestr.cm] failing = stestr.commands.failing:Failing init = stestr.commands.init:Init last = stestr.commands.last:Last list = stestr.commands.list:List load = stestr.commands.load:Load run = stestr.commands.run:Run slowest = stestr.commands.slowest:Slowest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585232236.0 stestr-3.0.0/stestr.egg-info/not-zip-safe0000644000175000017500000000000100000000000024453 0ustar00computertrekercomputertreker00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585232236.0 stestr-3.0.0/stestr.egg-info/pbr.json0000644000175000017500000000005600000000000023704 0ustar00computertrekercomputertreker00000000000000{"git_version": "6aeb323", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585232236.0 stestr-3.0.0/stestr.egg-info/requires.txt0000644000175000017500000000043400000000000024626 0ustar00computertrekercomputertreker00000000000000future pbr!=2.1.0,!=4.0.0,!=4.0.1,!=4.0.2,!=4.0.3,>=2.0.0 cliff>=2.8.0 python-subunit>=1.4.0 fixtures>=3.0.0 testtools>=2.2.0 PyYAML>=3.10.0 voluptuous>=0.8.9 [sql] subunit2sql>=1.8.0 [test] hacking<1.2.0,>=1.1.0 sphinx>2.1.0 subunit2sql>=1.8.0 coverage>=4.0 ddt>=1.0.1 doc8>=0.8.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585232236.0 stestr-3.0.0/stestr.egg-info/top_level.txt0000644000175000017500000000000700000000000024754 0ustar00computertrekercomputertreker00000000000000stestr ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/test-requirements.txt0000644000175000017500000000052400000000000023451 0ustar00computertrekercomputertreker00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
hacking<1.2.0,>=1.1.0 sphinx>2.1.0 # BSD subunit2sql>=1.8.0 coverage>=4.0 # Apache-2.0 ddt>=1.0.1 # MIT doc8>=0.8.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585232236.5754259 stestr-3.0.0/tools/0000755000175000017500000000000000000000000020347 5ustar00computertrekercomputertreker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1567975354.0 stestr-3.0.0/tools/build_manpage.sh0000755000175000017500000000013500000000000023474 0ustar00computertrekercomputertreker00000000000000#!/bin/sh mkdir -p doc/build/man tox -evenv -- sphinx-build -b man doc/source doc/build/man ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1518910744.0 stestr-3.0.0/tools/find_and_rm.py0000755000175000017500000000156400000000000023172 0ustar00computertrekercomputertreker00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os def find_and_remove(suffix='.pyc'): for root, dirs, files in os.walk('.'): for file in files: target = os.path.join(root, file) if os.path.isfile(target) and target.endswith(suffix): os.remove(target) if __name__ == '__main__': find_and_remove() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585149294.0 stestr-3.0.0/tools/testr_to_stestr.py0000755000175000017500000000346000000000000024176 0ustar00computertrekercomputertreker00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
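# The script below reads the legacy .testr.conf from the current directory,
# extracts the discovery path (and the optional -t top dir) from the
# "subunit.run discover" line of test_command along with any group_regex
# option, and writes the equivalent .stestr.conf.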
import configparser import os import sys if not os.path.isfile('.testr.conf'): sys.exit("Testr config file not found") with open('.testr.conf', 'r') as testr_conf_file: config = configparser.ConfigParser() config.read_file(testr_conf_file) test_command = config.get('DEFAULT', 'test_command') group_regex = None if config.has_option('DEFAULT', 'group_regex'): group_regex = config.get('DEFAULT', 'group_regex') top_dir = None test_dir = None for line in test_command.split('\n'): if 'subunit.run discover' in line: command_parts = line.split(' ') top_dir_present = '-t' in line for idx, val in enumerate(command_parts): if top_dir_present: if val == '-t': top_dir = command_parts[idx + 1] test_dir = command_parts[idx + 2] else: if val == 'discover': test_dir = command_parts[idx + 1] with open('.stestr.conf', 'w') as stestr_conf_file: stestr_conf_file.write('[DEFAULT]\n') stestr_conf_file.write('test_path=%s\n' % test_dir) if top_dir: stestr_conf_file.write('top_dir=%s\n' % top_dir) if group_regex: stestr_conf_file.write('group_regex=%s\n' % group_regex) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1584462487.0 stestr-3.0.0/tox.ini0000644000175000017500000000354000000000000020524 0ustar00computertrekercomputertreker00000000000000[tox] minversion = 1.6 envlist = py38,py37,py36,py35,pep8 skipsdist = True [testenv] usedevelop = True install_command = pip install -U --force-reinstall {opts} {packages} setenv = VIRTUAL_ENV={envdir} whitelist_externals = find deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = python tools/find_and_rm.py stestr run {posargs} [testenv:pep8] sitepackages = False commands = flake8 {posargs} [testenv:venv] commands = {posargs} [testenv:cover] setenv = VIRTUAL_ENV={envdir} PYTHON=coverage run --source stestr commands = coverage run stestr/cli.py run {posargs} coverage combine coverage html -d cover [testenv:docs] commands = doc8 -e .rst doc/source CONTRIBUTING.rst README.rst python setup.py build_sphinx [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [flake8] # E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126 # H402 skipped because some docstrings aren't sentences # E123 skipped because it is ignored by default in the default pep8 # E129 skipped because it is too limiting when combined with other rules # H305 skipped because it is inconsistent between python versions # E711 skipped because sqlalchemy filter() requires using == instead of is ignore = E125,H402,E123,E129,H305,E711 exclude = .venv,.git,.tox,dist,doc,*egg,build,releasenotes [testenv:pip-check-reqs] # Do not install test-requirements as that will pollute the virtualenv for # determining missing packages. # This also means that pip-check-reqs must be installed separately, outside # of the requirements.txt files deps = pip_check_reqs -r{toxinidir}/requirements.txt commands= # pip-extra-reqs -d --ignore-file=stestr/tests/* stestr pip-missing-reqs -d --ignore-file=stestr/tests/* stestr
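# The environments above can be run individually, e.g. "tox -e pep8" for the
# style checks or "tox -e cover" for a coverage report (assumes tox is
# installed locally).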