pytest-retry-1.6.2/.gitignore
*.egg*/
.D*
.N*
.idea/
.pytest_cache/
.tox/
__pycache__
artifacts/
build/
config.ini
credentials.ini
dist/
venv/

pytest-retry-1.6.2/LICENSE
MIT License

Copyright (c) 2022 Silas

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

pytest-retry-1.6.2/MANIFEST.in
include LICENSE
include .gitignore
include *.txt
include tox.ini
recursive-include tests *.py

pytest-retry-1.6.2/PKG-INFO
Metadata-Version: 2.1
Name: pytest-retry
Version: 1.6.2
Summary: Adds the ability to retry flaky tests in CI environments
Author: str0zzapreti
License: MIT License
Project-URL: Homepage, https://github.com/str0zzapreti/pytest-retry
Keywords: rerun,pytest,flaky
Classifier: License :: OSI Approved :: MIT License
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Framework :: Pytest
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: pytest>=7.0.0
Provides-Extra: dev
Requires-Dist: black; extra == "dev"
Requires-Dist: isort; extra == "dev"
Requires-Dist: mypy; extra == "dev"
Requires-Dist: flake8; extra == "dev"

pytest-retry-1.6.2/README.md
![Tests](https://github.com/str0zzapreti/pytest-retry/actions/workflows/tests.yaml/badge.svg)

# pytest-retry

pytest-retry is a plugin for Pytest which adds the ability to retry flaky tests, thereby improving the consistency of the test suite results.

## Requirements

pytest-retry is designed for the latest versions of Python and Pytest. Python 3.9+ and pytest 7.0.0+ are required.

## Installation

Use pip to install pytest-retry:
```
$ pip install pytest-retry
```

## Usage

There are two main ways to use pytest-retry:

### 1. Global settings

Once installed, pytest-retry adds new command line and ini config options for pytest. Run Pytest with the command line argument --retries in order to retry every test in the event of a failure.
The following example will retry each failed up to two times before proceeding to the next test: ``` $ python -m pytest --retries 2 ``` An optional delay can be specified using the --retry-delay argument. This will insert a fixed delay (in seconds) between each attempt when a test fails. This can be useful if the test failures are due to intermittent environment issues which clear up after a few seconds ``` $ python -m pytest --retries 2 --retry-delay 5 ``` #### Advanced Options: There are two custom hooks provided for the purpose of setting global exception filters for your entire Pytest suite. `pytest_set_filtered_exceptions` and `pytest_set_excluded_exceptions`. You can define either of them in your conftest.py file and return a list of exception types. Note: these hooks are mutually exclusive and cannot both be defined at the same time. Example: ``` def pytest_set_excluded_exceptions(): """ All tests will be retried unless they fail due to an AssertionError or CustomError """ return [AssertionError, CustomError] ``` There is a command line option to specify the test timing method, which can either be `overwrite` (default) or `cumulative`. With cumulative timing, the duration of each test attempt is summed for the reported overall test duration. The default behavior simply reports the timing of the final attempt. ``` $ python -m pytest --retries 2 --cumulative-timing 1 ``` If you're not sure which to use, stick with the default `overwrite` method. This generally plays nicer with time-based test splitting algorithms and will result in more even splits. Instead of command line arguments, you can set any of these config options in your pytest.ini, tox.ini, or pyproject.toml file. Any command line arguments will take precedence over options specified in one of these config files. Here are some sample configs that you can copy into your project to get started: _pyproject.toml_ ``` [tool.pytest.ini_options] retries = 2 retry_delay = 0.5 cumulative_timing = false ``` _config.ini/tox.ini_ ``` [pytest] retries = 2 retry_delay = 0.5 cumulative_timing = false ``` ### 2. Pytest flaky mark Mark individual tests as 'flaky' to retry them when they fail. If no command line arguments are passed, only the marked tests will be retried. The default values are 1 retry attempt with a 0-second delay ``` @pytest.mark.flaky def test_unreliable_service(): ... ``` The number of times each test will be retried and/or the delay can be manually specified as well ``` @pytest.mark.flaky(retries=3, delay=1) def test_unreliable_service(): # This test will be retried up to 3 times (4 attempts total) with a # one second delay between each attempt ... ``` If you want to control filtered or excluded exceptions per-test, the flaky mark provides the `only_on` and `exclude` arguments which both take a list of exception types, including any custom types you may have defined for your project. Note that only one of these arguments may be used at a time. A test with a list of `only_on` exceptions will only be retried if it fails with one of the listed exceptions. A test with a list of `exclude` exceptions will only be retried if it fails with an exception which does not match any of the listed exceptions. If the exception for a subsequent attempt changes and no longer matches the filter, no further attempts will be made and the test will immediately fail. 
``` @pytest.mark.flaky(retries=2, only_on=[ValueError, IndexError]) def test_unreliable_service(): # This test will only be retried if it fails due to raising a ValueError # or an IndexError. e.g., an AssertionError will fail without retrying ... ``` If you want some other generalized condition to control whether a test is retried, use the `condition` argument. Any statement which results in a bool can be used here to add granularity to your retries. The test will only be retried if `condition` is `True`. Note, there is no matching command line option for `condition`, but if you need to globally apply this type of logic to all of your tests, consider invoking the `pytest_collection_modifyitems` hook. ``` @pytest.mark.flaky(retries=2, condition=sys.platform.startswith('win32')) def test_only_flaky_on_some_systems(): # This test will only be retried if sys.platform.startswith('win32') evaluates to `True` ``` Finally, there is a flaky mark argument for the test timing method, which can either be `overwrite` (default) or `cumulative`. See **Command Line** > **Advanced Options** for more information ``` @pytest.mark.flaky(timing='overwrite') def test_unreliable_service(): ... ``` A flaky mark will override any command line options and exception filter hooks specified when running Pytest. ### Things to consider - **Currently, failing test fixtures are not retried.** In the future, flaky test setup may be retried, although given the undesirability of flaky tests in general, flaky setup should be avoided at all costs. Any failures during teardown will immediately halt further attempts so that they can be addressed immediately. Make sure your teardowns always work reliably regardless of the number of retries when using this plugin - When a flaky test is retried, the plugin runs teardown steps for the test as if it had passed. This is to ensure that any partial state created by the test is cleaned up before the next attempt so that subsequent attempts do not conflict with one another. Class and module fixtures are included in this teardown with the assumption that false test failures should be a rare occurrence and the performance hit from re-running these potentially expensive fixtures is worth it to ensure clean initial test state. With feedback, the option to not re-run class and module fixtures may be added, but in general, these types of fixtures should be avoided for known flaky tests. - Flaky tests are not sustainable. This plugin is designed as an easy short-term solution while a permanent fix is implemented. Use the reports generated by this plugin to identify issues with the tests or testing environment and resolve them. ## Reporting pytest-retry intercepts the standard Pytest report flow in order to retry tests and update the reports as required. When a test is retried at least once, an R is printed to the live test output and the counter of retried tests is incremented by 1. After the test session has completed, an additional report is generated below the standard output which lists all of the tests which were retried, along with the exceptions that occurred during each failed attempt. ``` plugins: retry-1.1.0 collected 1 item test_retry_passes_after_temporary_test_failure.py R. [100%] ======================= the following tests were retried ======================= test_eventually_passes failed on attempt 1! Retrying! 
Traceback (most recent call last):
  File "tests/test_example.py", line 4, in test_eventually_passes
    assert len(a) > 1
AssertionError: assert 1 > 1
 +  where 1 = len([1])

=========================== end of test retry report ===========================

========================= 1 passed, 1 retried in 0.01s =========================
```

Tests which have been retried but eventually pass are counted as both retried and passed, and tests which have been retried but eventually fail are counted as both retried and failed. Skipped, xfailed, and xpassed tests are never retried.

Three pytest stash keys are available to import from the pytest_retry plugin: `attempts_key`, `outcome_key`, and `duration_key`. These keys are used by the plugin to store the number of attempts each item has undergone, whether the test passed or failed, and the total duration from setup to teardown, respectively. (If any stage of setup, call, or teardown fails, a test is considered failed overall). These stash keys can be used to retrieve these reports for use in your own hooks or plugins.
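As a concrete illustration of those stash keys, here is a minimal `conftest.py` sketch (not part of this distribution; the summary wording and the use of `print` are assumptions) that reads `attempts_key`, `outcome_key`, and `duration_key` at the end of the session to summarize every test that needed more than one attempt:

```
# conftest.py -- illustrative sketch only; assumes pytest-retry is installed
import pytest

from pytest_retry import attempts_key, duration_key, outcome_key


def pytest_sessionfinish(session: pytest.Session) -> None:
    # Summarize every test that needed more than one attempt.
    for item in session.items:
        attempts = item.stash.get(attempts_key, 1)
        if attempts > 1:
            outcome = item.stash.get(outcome_key, "unknown")
            duration = item.stash.get(duration_key, 0.0)
            print(f"{item.nodeid}: {outcome} after {attempts} attempts ({duration:.2f}s total)")
```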
"CUMULATIVE_TIMING" class UnknownDefaultError(Exception): pass class _Defaults: _DEFAULT_CONFIG = { RETRIES: 1, # A flaky mark with 0 args should default to 1 retry. RETRY_DELAY: 0, CUMULATIVE_TIMING: False, } def __init__(self) -> None: object.__setattr__(self, "_opts", self._DEFAULT_CONFIG.copy()) def __getattr__(self, name: str) -> Any: if name in self._opts: return self._opts[name] raise UnknownDefaultError(f"{name} is not a valid default option!") def __setattr__(self, name: str, value: Any) -> None: raise ValueError("Defaults cannot be overwritten manually! Please use `configure()`") def add(self, name: str, value: Any) -> None: if name in self._opts: raise ValueError(f"{name} is already an existing default!") self._opts[name] = value def load_ini(self, config: pytest.Config) -> None: """ Pytest has separate methods for loading command line args and ini options. All ini values are stored as strings so must be converted to the proper type. """ self._opts[RETRIES] = int(config.getini(RETRIES.lower())) self._opts[RETRY_DELAY] = float(config.getini(RETRY_DELAY.lower())) self._opts[CUMULATIVE_TIMING] = config.getini(CUMULATIVE_TIMING.lower()) def configure(self, config: pytest.Config) -> None: if config.getini("retries"): self.load_ini(config) for key in self._opts: if (val := config.getoption(key.lower())) is not None: self._opts[key] = val Defaults = _Defaults() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1690790841.0 pytest-retry-1.6.2/pytest_retry/hooks.py0000644000076500000240000000224314461665671017716 0ustar00silasjstaffimport pytest @pytest.hookspec(firstresult=True) def pytest_set_filtered_exceptions() -> None: """ Return a collection of exception classes to be used as a filter when retrying tests. This pytest hook is called during setup to gather a collection of exception classes. Only tests that fail with one of the listed exceptions will be retried (individual flaky marks which specify their own exceptions will override this list). Example: # In your conftest.py file: def pytest_set_filtered_exceptions(): return (CustomError, ValueError) """ ... @pytest.hookspec(firstresult=True) def pytest_set_excluded_exceptions() -> None: """ Return a collection of exception classes to be excluded when retrying tests. This pytest hook is called during setup to gather a collection of exception classes. Tests that fail with one of the listed exceptions will NOT be retried (individual flaky marks which specify their own exceptions will override this list). Example: # In your conftest.py file: def pytest_set_filtered_exceptions(): return (CustomError, ValueError) """ ... 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1686172164.0 pytest-retry-1.6.2/pytest_retry/py.typed0000644000076500000240000000000014440171004017660 0ustar00silasjstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707084809.0 pytest-retry-1.6.2/pytest_retry/retry_plugin.py0000644000076500000240000003376314560006011021302 0ustar00silasjstaffimport pytest import bdb from time import sleep from logging import LogRecord from traceback import format_exception from typing import Any, Generator, Optional from collections.abc import Iterable from pytest_retry.configs import Defaults from pytest_retry.server import ReportHandler, OfflineReporter, ReportServer, ClientReporter from _pytest.terminal import TerminalReporter from _pytest.logging import caplog_records_key outcome_key = pytest.StashKey[str]() attempts_key = pytest.StashKey[int]() duration_key = pytest.StashKey[float]() server_port_key = pytest.StashKey[int]() stages = ("setup", "call", "teardown") RETRY = 0 FAIL = 1 EXIT = 2 PASS = 3 class ConfigurationError(Exception): pass class ExceptionFilter: """ Helper class which returns a bool when called based on the filter type (expected or excluded) and whether the exception exists within the list """ def __init__(self, expected_exceptions: Iterable, excluded_exceptions: Iterable): if expected_exceptions and excluded_exceptions: raise ConfigurationError( "filtered_exceptions and excluded_exceptions are exclusive and cannot " "be defined simultaneously." ) self.list_type = bool(expected_exceptions) self.filter = expected_exceptions or excluded_exceptions or [] def __call__(self, exception_type: Optional[type[BaseException]]) -> bool: try: return not self.filter or bool(self.list_type == bool(exception_type in self.filter)) except TypeError: raise ConfigurationError( "Filtered or excluded exceptions must be passed as a collection. If using the " "flaky mark, this means `only_on` or `exclude` args must be a collection too." ) def __bool__(self) -> bool: return bool(self.filter) class RetryManager: """ Stores statistics and reports for flaky tests and fixtures which have failed at least once during the test session and need to be retried """ def __init__(self) -> None: self.reporter: ReportHandler = OfflineReporter() self.trace_limit: Optional[int] = -1 self.node_stats: dict[str, dict] = {} self.messages = ( " failed on attempt {attempt}! Retrying!\n\t", " failed after {attempt} attempts!\n\t", " teardown failed on attempt {attempt}! 
Exiting immediately!\n\t", " passed on attempt {attempt}!\n\t", ) def log_attempt( self, attempt: int, name: str, exc: Optional[pytest.ExceptionInfo], result: int ) -> None: message = self.messages[result].format(attempt=attempt) formatted_trace = "" if exc: err = (exc.type, exc.value, exc.tb) formatted_trace = ( formatted_trace.join(format_exception(*err, limit=self.trace_limit)) .replace("\n", "\n\t") .rstrip() ) self.reporter.record_attempt([f"\t{name}", message, formatted_trace, "\n\n"]) def build_retry_report(self, terminal_reporter: TerminalReporter) -> None: contents = self.reporter.stream.getvalue() if not contents: return terminal_reporter.write("\n") terminal_reporter.section( "the following tests were retried", sep="=", bold=True, yellow=True ) terminal_reporter.write(contents) terminal_reporter.section("end of test retry report", sep="=", bold=True, yellow=True) terminal_reporter.write("\n") def record_node_stats(self, report: pytest.TestReport) -> None: self.node_stats[report.nodeid]["outcomes"][report.when].append(report.outcome) self.node_stats[report.nodeid]["durations"][report.when].append(report.duration) def simple_outcome(self, item: pytest.Item) -> str: """ Return failed if setup, teardown, or final call outcome is 'failed' Return skipped if test was skipped """ test_outcomes = self.node_stats[item.nodeid]["outcomes"] for outcome in ("skipped", "failed"): if outcome in test_outcomes["setup"]: return outcome if not test_outcomes["call"] or test_outcomes["call"][-1] == "failed": return "failed" # can probably just simplify this to return test_outcomes["teardown"] as a fallthrough if "failed" in test_outcomes["teardown"]: return "failed" return "passed" def simple_duration(self, item: pytest.Item) -> float: """ Return total duration for test summing setup, teardown, and final call """ return sum(self.node_stats[item.nodeid]["durations"][stage][-1] for stage in stages) def sum_attempts(self, item: pytest.Item) -> int: return len(self.node_stats[item.nodeid]["outcomes"]["call"]) retry_manager = RetryManager() def has_interactive_exception(call: pytest.CallInfo) -> bool: if call.excinfo is None: return False if isinstance(call.excinfo.value, bdb.BdbQuit): # Special control flow exception. return False return True def should_handle_retry(call: pytest.CallInfo) -> bool: if call.excinfo is None: return False # if teardown stage, don't retry # may handle fixture setup retries in v2 if requested. For now, this is fine. 
if call.when in {"setup", "teardown"}: return False # if test was skipped, don't retry if call.excinfo.typename == "Skipped": return False return True @pytest.hookimpl(hookwrapper=True) def pytest_runtest_protocol(item: pytest.Item) -> Optional[object]: retry_manager.node_stats[item.nodeid] = { "outcomes": {k: [] for k in stages}, "durations": {k: [0.0] for k in stages}, } yield item.stash[outcome_key] = retry_manager.simple_outcome(item) item.stash[duration_key] = retry_manager.simple_duration(item) # always overwrite, for now item.stash[attempts_key] = retry_manager.sum_attempts(item) @pytest.hookimpl(hookwrapper=True, tryfirst=True) def pytest_runtest_makereport( item: pytest.Item, call: pytest.CallInfo ) -> Generator[None, pytest.TestReport, None]: outcome = yield original_report: pytest.TestReport = outcome.get_result() retry_manager.record_node_stats(original_report) # Set dynamic outcome for each stage until runtest protocol has completed item.stash[outcome_key] = original_report.outcome if not should_handle_retry(call): return # xfail tests don't raise a Skipped exception if they fail, but are still marked as skipped if original_report.skipped is True: return flake_mark = item.get_closest_marker("flaky") if flake_mark is None: return condition = flake_mark.kwargs.get("condition") if condition is False: return exception_filter = ExceptionFilter( flake_mark.kwargs.get("only_on", []), flake_mark.kwargs.get("exclude", []), ) or ExceptionFilter(Defaults.FILTERED_EXCEPTIONS, Defaults.EXCLUDED_EXCEPTIONS) if not exception_filter(call.excinfo.type): # type: ignore return retries = flake_mark.kwargs.get("retries", Defaults.RETRIES) delay = flake_mark.kwargs.get("delay", Defaults.RETRY_DELAY) cumulative_timing = flake_mark.kwargs.get("cumulative_timing", Defaults.CUMULATIVE_TIMING) attempts = 1 hook = item.ihook while True: # Default teardowns are already excluded, so this must be the `call` stage # Try preliminary teardown using a fake class to ensure every local fixture (i.e. # excluding session) is torn down. Yes, including module and class fixtures t_call = pytest.CallInfo.from_call( lambda: hook.pytest_runtest_teardown( item=item, nextitem=pytest.Class.from_parent(item.session, name="Fakeboi"), ), when="teardown", ) # If teardown fails, break. Flaky teardowns are unacceptable and should raise immediately if t_call.excinfo: item.stash[outcome_key] = "failed" retry_manager.log_attempt( attempt=attempts, name=item.name, exc=t_call.excinfo, result=EXIT ) # Prevents a KeyError when an error during retry teardown causes a redundant teardown empty: dict[str, list[LogRecord]] = {} item.stash[caplog_records_key] = empty break # If teardown passes, send report that the test is being retried if attempts == 1: original_report.outcome = "retried" # type: ignore hook.pytest_runtest_logreport(report=original_report) original_report.outcome = "failed" retry_manager.log_attempt(attempt=attempts, name=item.name, exc=call.excinfo, result=RETRY) sleep(delay) # Calling _initrequest() is required to reset fixtures for a retry. Make public pls? 
item._initrequest() # type: ignore[attr-defined] pytest.CallInfo.from_call(lambda: hook.pytest_runtest_setup(item=item), when="setup") call = pytest.CallInfo.from_call(lambda: hook.pytest_runtest_call(item=item), when="call") retry_report = pytest.TestReport.from_item_and_call(item, call) retry_manager.record_node_stats(retry_report) # Do the exception interaction step # (may not bother to support this since this is designed for automated runs, not debugging) if has_interactive_exception(call): hook.pytest_exception_interact(node=item, call=call, report=retry_report) attempts += 1 should_keep_retrying = ( not retry_report.passed and attempts <= retries and exception_filter(call.excinfo.type) # type: ignore ) if not should_keep_retrying: original_report.outcome = retry_report.outcome original_report.longrepr = retry_report.longrepr if cumulative_timing is False: original_report.duration = retry_report.duration else: original_report.duration = sum( retry_manager.node_stats[original_report.nodeid]["durations"]["call"] ) retry_manager.log_attempt( attempt=attempts, name=item.name, exc=call.excinfo, result=FAIL if retry_report.failed else PASS, ) break def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: retry_manager.build_retry_report(terminalreporter) def pytest_report_teststatus( report: pytest.TestReport, ) -> Optional[tuple[str, str, tuple[str, dict]]]: if report.outcome == "retried": return "retried", "R", ("RETRY", {"yellow": True}) return None class XdistHook: @staticmethod def pytest_configure_node(node: Any) -> None: # Xdist WorkerController instance # Tells each worker node which port was randomly assigned to the retry server node.workerinput["server_port"] = node.config.stash[server_port_key] def pytest_configure(config: pytest.Config) -> None: config.addinivalue_line( "markers", "flaky(retries=1, delay=0, only_on=..., exclude=..., condition=...): indicate a flaky " "test which will be retried the number of times specified with an (optional) specified " "delay between each attempt. Collections of one or more exceptions can be passed so " "that the test is retried only on those exceptions, or excluding those exceptions. " "Any statement which returns a bool can be used as a condition", ) if config.getoption("verbose"): # if pytest config has -v enabled, then don't limit traceback length retry_manager.trace_limit = None Defaults.configure(config) Defaults.add("FILTERED_EXCEPTIONS", config.hook.pytest_set_filtered_exceptions() or []) Defaults.add("EXCLUDED_EXCEPTIONS", config.hook.pytest_set_excluded_exceptions() or []) if config.pluginmanager.has_plugin("xdist") and config.getoption("numprocesses", False): config.pluginmanager.register(XdistHook()) retry_manager.reporter = ReportServer() config.stash[server_port_key] = retry_manager.reporter.initialize_server() elif hasattr(config, "workerinput"): # pytest-xdist doesn't use the config stash, so have to ignore a type error here retry_manager.reporter = ClientReporter(config.workerinput["server_port"]) # type: ignore RETRIES_HELP_TEXT = "number of times to retry failed tests. Defaults to 0." DELAY_HELP_TEXT = "configure a delay (in seconds) between retries." 
TIMING_HELP_TEXT = "if True, retry duration will be included in overall reported test duration" def pytest_addoption(parser: pytest.Parser) -> None: group = parser.getgroup( "pytest-retry", "retry flaky tests to compensate for intermittent failures" ) group.addoption( "--retries", action="store", dest="retries", type=int, help=RETRIES_HELP_TEXT, ) group.addoption( "--retry-delay", action="store", dest="retry_delay", type=float, help=DELAY_HELP_TEXT, ) group.addoption( "--cumulative-timing", action="store", dest="cumulative_timing", type=bool, help=TIMING_HELP_TEXT, ) parser.addini("retries", RETRIES_HELP_TEXT, default=0, type="string") parser.addini("retry_delay", DELAY_HELP_TEXT, default=0, type="string") parser.addini("cumulative_timing", TIMING_HELP_TEXT, default=False, type="bool") def pytest_addhooks(pluginmanager: pytest.PytestPluginManager) -> None: """This example assumes the hooks are grouped in the 'sample_hook' module.""" from pytest_retry import hooks pluginmanager.add_hookspecs(hooks) def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Item]) -> None: if not (config.getoption("--retries") or config.getini("retries")): return flaky = pytest.mark.flaky(retries=Defaults.RETRIES) for item in items: if "flaky" not in item.keywords: item.add_marker(flaky) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1704401909.0 pytest-retry-1.6.2/pytest_retry/server.py0000644000076500000240000000360614545615765020107 0ustar00silasjstaffimport socket import threading from io import StringIO from _pytest.terminal import TerminalReporter class ReportHandler: def __init__(self) -> None: self.stream = StringIO() def build_retry_report(self, terminalreporter: TerminalReporter) -> None: pass def record_attempt(self, lines: list[str]) -> None: pass class OfflineReporter(ReportHandler): def __init__(self) -> None: super().__init__() def record_attempt(self, lines: list[str]) -> None: self.stream.writelines(lines) class ReportServer(ReportHandler): def __init__(self) -> None: super().__init__() self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.setblocking(True) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) def initialize_server(self) -> int: self.sock.bind(("localhost", 0)) t = threading.Thread(target=self.run_server, daemon=True) t.start() return self.sock.getsockname()[-1] def run_server(self) -> None: self.sock.listen() while True: conn, _ = self.sock.accept() while True: chunk = conn.recv(128) if not chunk: break self.stream.write(chunk.decode("utf-8")) class ClientReporter(ReportHandler): def __init__(self, port: int) -> None: super().__init__() self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.setblocking(True) self.sock.connect(("localhost", port)) def record_attempt(self, lines: list[str]) -> None: self.stream.writelines(lines) # Group reports for each item together before sending and resetting stream if not lines[1].endswith("Retrying!\n\t"): self.sock.sendall(self.stream.getvalue().encode("utf-8")) self.stream = StringIO() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1707085288.4329805 pytest-retry-1.6.2/pytest_retry.egg-info/0000755000076500000240000000000014560006750017675 5ustar00silasjstaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1707085288.0 pytest-retry-1.6.2/pytest_retry.egg-info/PKG-INFO0000644000076500000240000002515114560006750020776 0ustar00silasjstaffMetadata-Version: 2.1 Name: pytest-retry 
Version: 1.6.2
Summary: Adds the ability to retry flaky tests in CI environments
Author: str0zzapreti
License: MIT License
Project-URL: Homepage, https://github.com/str0zzapreti/pytest-retry
Keywords: rerun,pytest,flaky
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: pytest>=7.0.0

pytest-retry-1.6.2/pytest_retry.egg-info/SOURCES.txt
.gitignore
LICENSE
MANIFEST.in
README.md
dev-requirements.txt
pyproject.toml
requirements.txt
setup.cfg
tox.ini
pytest_retry/__init__.py
pytest_retry/configs.py
pytest_retry/hooks.py
pytest_retry/py.typed
pytest_retry/retry_plugin.py
pytest_retry/server.py
pytest_retry.egg-info/PKG-INFO
pytest_retry.egg-info/SOURCES.txt
pytest_retry.egg-info/dependency_links.txt
pytest_retry.egg-info/entry_points.txt
pytest_retry.egg-info/not-zip-safe
pytest_retry.egg-info/requires.txt
pytest_retry.egg-info/top_level.txt
tests/test_retry_plugin.py

pytest-retry-1.6.2/pytest_retry.egg-info/entry_points.txt
[pytest11]
pytest-retry = pytest_retry.retry_plugin

pytest-retry-1.6.2/pytest_retry.egg-info/requires.txt
pytest>=7.0.0

[dev]
black
isort
mypy
flake8

pytest-retry-1.6.2/pytest_retry.egg-info/top_level.txt
pytest_retry

pytest-retry-1.6.2/requirements.txt
pytest>=7.0.0
black>=23.3.0
mypy>=1.3.0

pytest-retry-1.6.2/setup.cfg
[metadata]
long_description = file: README.md
long_description_content_type = text/markdown

[options]
zip_safe = no
packages = pytest_retry

[options.package_data]
pytest_retry = py.typed

[flake8]
max-line-length = 99
extend-ignore = E203
exclude = venv,.venv

[egg_info]
tag_build =
tag_date = 0

pytest-retry-1.6.2/tests/test_retry_plugin.py
from pytest import mark

try:
    from xdist import __version__  # noqa: F401

    xdist_installed = True
except ImportError: xdist_installed = False pytest_plugins = ["pytester"] def check_outcome_field(outcomes, field_name, expected_value): field_value = outcomes.get(field_name, 0) assert field_value == expected_value, ( f"outcomes.{field_name} has unexpected value. " f"Expected '{expected_value}' but got '{field_value}'" ) def assert_outcomes( result, passed=1, skipped=0, failed=0, errors=0, xfailed=0, xpassed=0, retried=0, ): outcomes = result.parseoutcomes() check_outcome_field(outcomes, "passed", passed) check_outcome_field(outcomes, "skipped", skipped) check_outcome_field(outcomes, "failed", failed) check_outcome_field(outcomes, "errors", errors) check_outcome_field(outcomes, "xfailed", xfailed) check_outcome_field(outcomes, "xpassed", xpassed) check_outcome_field(outcomes, "retried", retried) def test_no_retry_on_pass(testdir): testdir.makepyfile("def test_success(): assert 1 == 1") result = testdir.runpytest("--retries", "1") assert_outcomes(result) def test_no_retry_on_fail_without_plugin(testdir): testdir.makepyfile("def test_failure(): assert False") result = testdir.runpytest() assert_outcomes(result, passed=0, failed=1, retried=0) def test_no_retry_on_skip_mark(testdir): testdir.makepyfile( """ import pytest @pytest.mark.skip(reason="do not run me") def test_skip(): assert 1 == 1 """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=0, skipped=1) def test_no_retry_on_skip_call(testdir): testdir.makepyfile( """ import pytest def test_skip(): pytest.skip(reason="Don't test me") """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=0, skipped=1) def test_no_retry_on_xfail_mark(testdir): testdir.makepyfile( """ import pytest @pytest.mark.xfail() def test_xfail(): assert False """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=0, xfailed=1, failed=0) def test_no_retry_on_xpass(testdir): testdir.makepyfile( """ import pytest @pytest.mark.xfail() def test_xpass(): assert 1 == 1 """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=0, xpassed=1, failed=0) def test_no_retry_on_strict_xpass(testdir): testdir.makepyfile( """ import pytest @pytest.mark.xfail(strict=True) def test_xpass(): assert 1 == 1 """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=0, xpassed=0, failed=1) def test_retry_fails_after_consistent_setup_failure(testdir): testdir.makepyfile("def test_pass(): pass") testdir.makeconftest( """ def pytest_runtest_setup(item): raise Exception("Setup failure") """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=0, errors=1, retried=0) @mark.skip(reason="Not worrying about setup failures for now, maybe later") def test_retry_passes_after_temporary_setup_failure(testdir): testdir.makepyfile("def test_pass(): pass") testdir.makeconftest( """ a = [] def pytest_runtest_setup(item): a.append(1) if len(a) < 2: raise ValueError("Setup failed!") """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=1, retried=1) def test_retry_exits_immediately_on_teardown_failure(testdir): testdir.makepyfile( """ import pytest @pytest.fixture() def bad_teardown(): yield raise ValueError a = [] def test_eventually_passes(bad_teardown): a.append(1) assert len(a) > 1 """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=0, failed=1, retried=0) def test_retry_fails_after_consistent_test_failure(testdir): testdir.makepyfile("def test_fail(): assert False") result = 
testdir.runpytest("--retries", "1") assert_outcomes(result, passed=0, failed=1, retried=1) def test_retry_passes_after_temporary_test_failure(testdir): testdir.makepyfile( """ a = [] def test_eventually_passes(): a.append(1) assert len(a) > 1 """ ) result = testdir.runpytest("--retries", "1") assert_outcomes(result, passed=1, retried=1) def test_retry_passes_after_temporary_test_failure_with_flaky_mark(testdir): testdir.makepyfile( """ import pytest a = [] @pytest.mark.flaky(retries=2) def test_eventually_passes(): a.append(1) assert len(a) > 2 """ ) result = testdir.runpytest() assert_outcomes(result, passed=1, retried=1) def test_retries_if_flaky_mark_is_applied_without_options(testdir): testdir.makepyfile( """ import pytest a = [] @pytest.mark.flaky() def test_eventually_passes(): a.append(1) assert len(a) > 1 """ ) result = testdir.runpytest() assert_outcomes(result, passed=1, retried=1) def test_fixtures_are_retried_with_test(testdir): testdir.makepyfile( """ import pytest a = [] setup = [] teardown = [] @pytest.fixture() def basic_setup_and_teardown(): setup.append(True) yield teardown.append(True) @pytest.mark.flaky(retries=2) def test_eventually_passes(basic_setup_and_teardown): a.append(1) assert len(a) > 2 def test_setup_and_teardown_reran(): assert len(setup) == 3 assert len(teardown) == 3 """ ) result = testdir.runpytest() assert_outcomes(result, passed=2, failed=0, retried=1) def test_retry_executes_class_scoped_fixture(testdir): testdir.makepyfile( """ import pytest a = [] setup = [] teardown = [] @pytest.fixture(scope="class") def basic_setup_and_teardown(): setup.append(True) yield teardown.append(True) @pytest.mark.usefixtures("basic_setup_and_teardown") class TestClassFixtures: @pytest.mark.flaky(retries=2) def test_eventually_passes(self): a.append(1) assert len(a) > 2 def test_setup_and_teardown_reran(): assert len(setup) == 3 assert len(teardown) == 3 """ ) result = testdir.runpytest() assert_outcomes(result, passed=2, failed=0, retried=1) def test_retry_executes_module_scoped_fixture(testdir): testdir.makepyfile( """ import pytest a = [] setup = [] teardown = [] @pytest.fixture(scope="module") def basic_setup_and_teardown(): setup.append(True) yield teardown.append(True) @pytest.mark.flaky(retries=2) def test_eventually_passes(basic_setup_and_teardown): a.append(1) assert len(a) > 2 def test_setup_and_teardown_reran(): assert len(setup) == 3 assert len(teardown) == 2 """ ) result = testdir.runpytest() assert_outcomes(result, passed=2, failed=0, retried=1) def test_retry_fails_if_temporary_failures_exceed_retry_limit(testdir): testdir.makepyfile( """ a = [] def test_eventually_passes(): a.append(1) assert len(a) > 3 """ ) result = testdir.runpytest("--retries", "2") assert_outcomes(result, passed=0, failed=1, retried=1) def test_retry_delay_from_mark_between_attempts(testdir): testdir.makepyfile( """ import pytest a = [] @pytest.mark.flaky(retries=2, delay=2) def test_eventually_passes(): a.append(1) assert len(a) > 2 """ ) result = testdir.runpytest() assert_outcomes(result, passed=1, retried=1) assert result.duration > 4 def test_retry_delay_from_command_line_between_attempts(testdir): testdir.makepyfile( """ import pytest a = [] def test_eventually_passes(): a.append(1) assert len(a) > 2 """ ) result = testdir.runpytest("--retries", "2", "--retry-delay", "0.2") assert_outcomes(result, passed=1, retried=1) assert result.duration > 0.4 assert result.duration < 0.8 def test_passing_outcome_is_available_from_item_stash(testdir): testdir.makepyfile("def test_success(): 
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import outcome_key

        @pytest.fixture(autouse=True)
        def report_check(request):
            yield
            assert request.node.stash[outcome_key] == "passed"
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=1)


def test_failed_outcome_is_available_from_item_stash(testdir):
    testdir.makepyfile("def test_success(): assert 1 == 2")
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import outcome_key

        @pytest.fixture(autouse=True)
        def report_check(request):
            yield
            assert request.node.stash[outcome_key] == "failed"
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=0, failed=1)


def test_skipped_outcome_is_available_from_item_stash(testdir):
    testdir.makepyfile(
        """
        import pytest

        @pytest.mark.skip
        def test_success():
            assert 1 == 2
        """
    )
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import outcome_key, attempts_key, duration_key

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                assert item.stash[outcome_key] == "skipped"
                assert item.stash[attempts_key] == 0
                assert item.stash[duration_key] < 0.1
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=0, skipped=1)


def test_duration_is_available_from_item_stash(testdir):
    testdir.makepyfile("""def test_success(): assert 1 == 1""")
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import duration_key

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                assert item.stash[duration_key] > 0
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=1)


def test_failed_outcome_after_successful_teardown(testdir):
    testdir.makepyfile("def test_success(): assert 1 == 2")
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import outcome_key

        @pytest.fixture(autouse=True)
        def successful_teardown(request):
            yield
            assert 1 == 1

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                assert item.stash[outcome_key] == "failed"
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=0, failed=1)


def test_failed_outcome_after_unsuccessful_setup(testdir):
    testdir.makepyfile("def test_success(): assert 1 == 1")
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import outcome_key

        @pytest.fixture(autouse=True)
        def failed_setup(request):
            assert 1 == 2

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                assert item.stash[outcome_key] == "failed"
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=0, errors=1)


def test_failed_outcome_after_unsuccessful_teardown(testdir):
    testdir.makepyfile("def test_success(): assert 1 == 1")
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import outcome_key

        @pytest.fixture(autouse=True)
        def failed_teardown(request):
            yield
            assert 1 == 2

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                assert item.stash[outcome_key] == "failed"
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=1, errors=1)


def test_attempts_are_always_available_from_item_stash(testdir):
    testdir.makepyfile("def test_success(): assert 1 == 1")
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import attempts_key

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                assert item.stash[attempts_key] == 1
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=1)
def test_global_filtered_exception_is_retried(testdir):
    testdir.makepyfile(
        """
        a = []

        def test_eventually_passes():
            a.append(1)
            if not len(a) > 1:
                raise AssertionError
        """
    )
    testdir.makeconftest(
        """
        import pytest

        def pytest_set_filtered_exceptions():
            return [AssertionError]
        """
    )
    result = testdir.runpytest("--retries", "1")
    assert_outcomes(result, passed=1, retried=1)


def test_temporary_filtered_exception_fails_when_attempts_exceeded(testdir):
    testdir.makepyfile(
        """
        a = []

        def test_eventually_passes():
            a.append(1)
            if not len(a) > 4:
                raise IndexError
        """
    )
    testdir.makeconftest(
        """
        import pytest

        def pytest_set_filtered_exceptions():
            return [IndexError]
        """
    )
    result = testdir.runpytest("--retries", "3")
    assert_outcomes(result, passed=0, failed=1, retried=1)


def test_temporary_exception_is_not_retried_if_filter_not_matched(testdir):
    testdir.makepyfile(
        """
        a = []

        def test_eventually_passes():
            a.append(1)
            if not len(a) > 1:
                raise ValueError
        """
    )
    testdir.makeconftest(
        """
        import pytest

        def pytest_set_filtered_exceptions():
            return [IndexError]
        """
    )
    result = testdir.runpytest("--retries", "1")
    assert_outcomes(result, passed=0, failed=1, retried=0)


def test_temporary_exception_is_retried_if_not_globally_excluded(testdir):
    testdir.makepyfile(
        """
        a = []

        def test_eventually_passes():
            a.append(1)
            if not len(a) > 1:
                raise ValueError
        """
    )
    testdir.makeconftest(
        """
        import pytest

        def pytest_set_excluded_exceptions():
            return [AssertionError]
        """
    )
    result = testdir.runpytest("--retries", "1")
    assert_outcomes(result, passed=1, retried=1)


def test_temporary_exception_fails_if_not_excluded_and_attempts_exceeded(testdir):
    testdir.makepyfile(
        """
        a = []

        def test_eventually_passes():
            a.append(1)
            if not len(a) > 4:
                raise ValueError
        """
    )
    testdir.makeconftest(
        """
        import pytest

        def pytest_set_excluded_exceptions():
            return [AssertionError]
        """
    )
    result = testdir.runpytest("--retries", "3")
    assert_outcomes(result, passed=0, failed=1, retried=1)


def test_temporary_exception_is_not_retried_if_excluded(testdir):
    testdir.makepyfile(
        """
        a = []

        def test_eventually_passes():
            a.append(1)
            if not len(a) > 1:
                raise ValueError
        """
    )
    testdir.makeconftest(
        """
        import pytest

        def pytest_set_excluded_exceptions():
            return [ValueError]
        """
    )
    result = testdir.runpytest("--retries", "1")
    assert_outcomes(result, passed=0, failed=1, retried=0)


def test_flaky_mark_exception_filter_param_overrides_global_filter(testdir):
    testdir.makepyfile(
        """
        import pytest

        a = []

        @pytest.mark.flaky(only_on=[IndexError])
        def test_eventually_passes():
            a.append(1)
            if not len(a) > 1:
                raise ValueError
        """
    )
    testdir.makeconftest(
        """
        import pytest

        def pytest_set_excluded_exceptions():
            return [IndexError]
        """
    )
    result = testdir.runpytest("--retries", "1")
    assert_outcomes(result, passed=0, failed=1, retried=0)


def test_attempt_count_is_correct(testdir):
    testdir.makepyfile(
        """
        import pytest

        a = []

        @pytest.mark.flaky(retries=2)
        def test_eventually_passes():
            a.append(1)
            assert len(a) > 2
        """
    )
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import attempts_key

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                assert item.stash[attempts_key] == 3
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=1, retried=1)
def test_flaky_mark_overrides_command_line_options(testdir):
    testdir.makepyfile(
        """
        import pytest

        a = []
        b = []

        @pytest.mark.flaky(retries=3, delay=0)
        def test_flaky_mark_options():
            a.append(1)
            assert len(a) > 3

        def test_default_commandline_options():
            b.append(1)
            assert len(b) > 3
        """
    )
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import attempts_key

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                if item.name == "test_flaky_mark_options":
                    assert item.stash[attempts_key] == 4
                if item.name == "test_default_commandline_options":
                    assert item.stash[attempts_key] == 3
        """
    )
    result = testdir.runpytest("--retries", "2", "--retry-delay", "1")
    assert_outcomes(result, passed=1, failed=1, retried=2)
    assert result.duration > 2
    assert result.duration < 3


def test_configuration_by_ini_file(testdir):
    testdir.makeini(
        """
        [pytest]
        retries = 2
        retry_delay = 0.5
        cumulative_timing = true
        """
    )
    testdir.makepyfile(
        """
        from time import sleep

        a = []

        def test_ini_settings():
            sleep(2 - len(a))
            a.append(1)
            assert len(a) > 2
        """
    )
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import attempts_key

        def pytest_sessionfinish(session: pytest.Session) -> None:
            for item in session.items:
                assert item.stash[attempts_key] == 3

        def pytest_report_teststatus(report: pytest.TestReport):
            if report.when == "call" and report.outcome != "retried":
                assert report.duration > 3
                assert report.duration < 4
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=1, retried=1)


def test_configuration_by_pyproject_toml_file(testdir):
    testdir.makepyprojecttoml(
        """
        [tool.pytest.ini_options]
        retries = 1
        retry_delay = 0.3
        """
    )
    testdir.makepyfile(
        """
        def test_toml_settings():
            assert False
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=0, failed=1, retried=1)
    assert result.duration > 0.3
    assert result.duration < 0.7


def test_duration_in_overwrite_timings_mode(testdir):
    testdir.makepyfile(
        """
        import pytest
        from time import sleep

        a = []

        @pytest.mark.flaky(retries=2)
        def test_eventually_passes():
            sleep(1.5 - len(a))
            a.append(1)
            assert len(a) > 1
        """
    )
    testdir.makeconftest(
        """
        import pytest
        from pytest_retry import attempts_key

        def pytest_report_teststatus(report: pytest.TestReport):
            if report.when == "call" and report.outcome != "retried":
                assert report.duration < 0.7
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=1, retried=1)


def test_duration_in_cumulative_timings_mode(testdir):
    testdir.makepyfile(
        """
        import pytest
        from time import sleep

        a = []

        def test_eventually_passes():
            sleep(2 - len(a))
            a.append(1)
            assert len(a) > 1
        """
    )
    testdir.makeconftest(
        """
        import pytest

        def pytest_report_teststatus(report: pytest.TestReport):
            if report.when == "call" and report.outcome != "retried":
                assert report.duration > 3
        """
    )
    result = testdir.runpytest("--retries", "2", "--cumulative-timing", "1")
    assert_outcomes(result, passed=1, retried=1)


def test_conditional_flaky_marks_evaluate_correctly(testdir):
    testdir.makepyfile(
        """
        import pytest

        a = []
        b = []
        c = []

        @pytest.mark.flaky(retries=2, condition=True)
        def test_eventually_passes():
            a.append(1)
            assert len(a) > 2

        @pytest.mark.flaky(retries=2, condition=True)
        def test_eventually_passes_again():
            b.append(1)
            assert len(b) > 2

        @pytest.mark.flaky(retries=2, condition=False)
        def test_eventually_passes_once_more():
            c.append(1)
            assert len(c) > 2
        """
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=2, failed=1, retried=2)
@mark.skipif(xdist_installed is False, reason="Only run if xdist is installed locally")
def test_xdist_reporting_compatability(testdir):
    testdir.makepyfile(
        """
        import pytest

        a = 0
        b = 0

        def test_flaky() -> None:
            global a
            a += 1
            assert a == 3

        def test_moar_flaky() -> None:
            global b
            b += 1
            assert b == 2
        """
    )
    result = testdir.runpytest("-n", "2", "--retries", "3")
    assert "\ttest_flaky failed on attempt 1! Retrying!" in result.outlines
    assert "\ttest_flaky failed on attempt 2! Retrying!" in result.outlines
    assert "\ttest_flaky passed on attempt 3!" in result.outlines
    assert "\ttest_moar_flaky failed on attempt 1! Retrying!" in result.outlines
    assert "\ttest_moar_flaky passed on attempt 2!" in result.outlines

pytest-retry-1.6.2/tox.ini

[tox]
minversion = 3.9.0
envlist = py39, py310, py311, flake8, mypy
isolated_build = true
toxworkdir = {toxinidir}/../.tox

[gh-actions]
python =
    3.9: py39
    3.10: py310, mypy, flake8
    3.11: py311

[testenv]
setenv =
    PYTHONPATH = {toxinidir}
deps =
    -r{toxinidir}/dev-requirements.txt
commands = pytest --basetemp={envtmpdir}

[testenv:flake8]
basepython = python3.10
deps = flake8
commands = flake8 pytest_retry tests

[testenv:mypy]
basepython = python3.10
deps =
    -r{toxinidir}/dev-requirements.txt
commands = mypy pytest_retry