pax_global_header00006660000000000000000000000064145756425770014540gustar00rootroot0000000000000052 comment=977bf615aefe58e39b907ed34e0ae9eff8cf4738 pytest-bdd-7.1.2/000077500000000000000000000000001457564257700136265ustar00rootroot00000000000000pytest-bdd-7.1.2/.editorconfig000066400000000000000000000004621457564257700163050ustar00rootroot00000000000000# EditorConfig is awesome: https://EditorConfig.org # top-most EditorConfig file root = true # Unix-style newlines with a newline ending every file [*] end_of_line = lf insert_final_newline = true charset = utf-8 [*.py] indent_style = space indent_size = 4 [*.yml] indent_style = space indent_size = 2 pytest-bdd-7.1.2/.github/000077500000000000000000000000001457564257700151665ustar00rootroot00000000000000pytest-bdd-7.1.2/.github/workflows/000077500000000000000000000000001457564257700172235ustar00rootroot00000000000000pytest-bdd-7.1.2/.github/workflows/main.yml000066400000000000000000000050231457564257700206720ustar00rootroot00000000000000name: Main testing workflow on: push: pull_request: workflow_dispatch: jobs: test-run: runs-on: ubuntu-latest strategy: matrix: include: - python-version: "3.8" toxfactor: py3.8 ignore-typecheck-outcome: true ignore-test-outcome: false - python-version: "3.9" toxfactor: py3.9 ignore-typecheck-outcome: true ignore-test-outcome: false - python-version: "3.10" toxfactor: py3.10 ignore-typecheck-outcome: true ignore-test-outcome: false - python-version: "3.11" toxfactor: py3.11 ignore-typecheck-outcome: true ignore-test-outcome: false - python-version: "3.12" toxfactor: py3.12 ignore-typecheck-outcome: true ignore-test-outcome: false steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 id: setup-python with: python-version: ${{ matrix.python-version }} - name: Install poetry run: | python -m pip install poetry==1.8.2 - name: Configure poetry run: | python -m poetry config virtualenvs.in-project true - name: Cache the 
virtualenv id: poetry-dependencies-cache uses: actions/cache@v3 with: path: ./.venv key: ${{ runner.os }}-venv-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} - name: Install dev dependencies if: steps.poetry-dependencies-cache.outputs.cache-hit != 'true' run: | python -m poetry install --only=dev - name: Type checking # Ignore errors for older pythons continue-on-error: ${{ matrix.ignore-typecheck-outcome }} run: | source .venv/bin/activate tox -e mypy - name: Test with tox continue-on-error: ${{ matrix.ignore-test-outcome }} run: | source .venv/bin/activate coverage erase tox run-parallel -f ${{ matrix.toxfactor }} --parallel-no-spinner --parallel-live coverage combine coverage xml - uses: codecov/codecov-action@v3 with: # Explicitly using the token to avoid Codecov rate limit errors # See https://community.codecov.com/t/upload-issues-unable-to-locate-build-via-github-actions-api/3954 token: ${{ secrets.CODECOV_TOKEN }} fail_ci_if_error: true verbose: true # optional (default = false) pytest-bdd-7.1.2/.gitignore000066400000000000000000000007451457564257700156240ustar00rootroot00000000000000*.rej *.py[cod] /.env *.orig # C extensions *.so # Packages *.egg *.egg-info dist build _build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 # Installer logs pip-log.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject .pytest_cache .ropeproject # Sublime /*.sublime-* #PyCharm /.idea pytest-bdd-7.1.2/.pre-commit-config.yaml000066400000000000000000000016541457564257700201150ustar00rootroot00000000000000# See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/psf/black # If you update the version here, also update it in tox.ini (py*-pytestlatest-linters) rev: 23.11.0 hooks: - 
id: black - repo: https://github.com/pycqa/isort rev: 5.12.0 hooks: - id: isort name: isort (python) - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: check-added-large-files - repo: https://github.com/asottile/pyupgrade rev: v3.15.0 hooks: - id: pyupgrade args: ["--py38-plus"] # TODO: Enable mypy checker when the checks succeed #- repo: https://github.com/pre-commit/mirrors-mypy # rev: v0.931 # hooks: # - id: mypy # additional_dependencies: [types-setuptools] pytest-bdd-7.1.2/.readthedocs.yaml000066400000000000000000000005051457564257700170550ustar00rootroot00000000000000# .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details version: 2 build: os: ubuntu-22.04 tools: python: "3" sphinx: configuration: docs/conf.py formats: - epub - pdf - htmlzip python: install: - method: pip path: . pytest-bdd-7.1.2/.sourcery.yaml000066400000000000000000000001041457564257700164360ustar00rootroot00000000000000rule_settings: disable: - "assign-if-exp" - "use-next" pytest-bdd-7.1.2/AUTHORS.rst000066400000000000000000000020071457564257700155040ustar00rootroot00000000000000Authors ======= `Oleg Pidsadnyi `_ original idea, initial implementation and further improvements `Anatoly Bubenkov `_ key implementation idea and realization, many new features and improvements These people have contributed to `pytest-bdd`, in alphabetical order: * `Adam Coddington `_ * `Albert-Jan Nijburg `_ * `Alessio Bogon `_ * `Andrey Makhnach `_ * `Aron Curzon `_ * `Dmitrijs Milajevs `_ * `Dmitry Kolyagin `_ * `Florian Bruhin `_ * `Floris Bruynooghe `_ * `Harro van der Klauw `_ * `Hugo van Kemenade `_ * `Laurence Rowe `_ * `Leonardo Santagada `_ * `Milosz Sliwinski `_ * `Michiel Holtkamp `_ * `Robin Pedersen `_ * `Sergey Kraynev `_ 
pytest-bdd-7.1.2/CHANGES.rst000066400000000000000000000377711457564257700154470ustar00rootroot00000000000000Changelog ========= Unreleased ---------- 7.1.2 ---------- - Address another compatibility issue with pytest 8.1 (fixture registration). `#680 `_ 7.1.1 ---------- - Address a bug introduced in pytest-bdd 7.1 caused by incorrect pytest version check. 7.1 ---------- - Address compatibility issue with pytest 8.1. `#666 `_ 7.0.1 ----- - Fix errors occurring if `pytest_unconfigure` is called before `pytest_configure`. `#362 `_ `#641 `_ 7.0.0 ---------- - ⚠️ Backwards incompatible: - ``parsers.re`` now does a `fullmatch `_ instead of a partial match. This is to make it work just like the other parsers, since they don't ignore non-matching characters at the end of the string. `#539 `_ - Drop python 3.7 compatibility, as it's no longer supported. `#627 `_ - Declare official support for python 3.12 `#628 `_ - Improve parser performance by 15% `#623 `_ by `@dcendents `_ - Add support for Scenarios and Scenario Outlines to have descriptions. `#600 `_ 6.1.1 ----- - Fix regression introduced in version 6.1.0 where the ``pytest_bdd_after_scenario`` hook would be called after every step instead of after the scenario. `#577 `_ 6.1.0 ----- - Fix bug where steps without parsers would take precedence over steps with parsers. `#534 `_ - Step functions can now be decorated multiple times with @given, @when, @then. Previously every decorator would override ``converters`` and ``target_fixture`` every at every application. `#534 `_ `#544 `_ `#525 `_ - Require pytest>=6.2 `#534 `_ - Using modern way to specify hook options to avoid deprecation warnings with pytest >=7.2. - Add generic ``step`` decorator that will be used for all kind of steps `#548 `_ - Add ``stacklevel`` param to ``given``, ``when``, ``then``, ``step`` decorators. This allows for programmatic step generation `#548 `_ - Hide pytest-bdd internal method in user tracebacks `#557 `_. 
- Make the package PEP 561-compatible `#559 `_ `#563 `_. - Configuration option ``bdd_features_base_dir`` is interpreted as relative to the `pytest root directory `_ (previously it was relative to the current working directory). `#573 `_ 6.0.1 ----- - Fix regression introduced in 6.0.0 where a step function decorated multiple using a parsers times would not be executed correctly. `#530 `_ `#528 `_ 6.0.0 ----- This release introduces breaking changes in order to be more in line with the official gherkin specification. - Cleanup of the documentation and tests related to parametrization (elchupanebrej) `#469 `_ - Removed feature level examples for the gherkin compatibility (olegpidsadnyi) `#490 `_ - Removed vertical examples for the gherkin compatibility (olegpidsadnyi) `#492 `_ - Step arguments are no longer fixtures (olegpidsadnyi) `#493 `_ - Drop support of python 3.6, pytest 4 (elchupanebrej) `#495 `_ `#504 `_ - Step definitions can have "yield" statements again (4.0 release broke it). They will be executed as normal fixtures: code after the yield is executed during teardown of the test. (youtux) `#503 `_ - Scenario outlines unused example parameter validation is removed (olegpidsadnyi) `#499 `_ - Add type annotations (youtux) `#505 `_ - ``pytest_bdd.parsers.StepParser`` now is an Abstract Base Class. Subclasses must make sure to implement the abstract methods. (youtux) `#505 `_ - Angular brackets in step definitions are only parsed in "Scenario Outline" (previously they were parsed also in normal "Scenario"s) (youtux) `#524 `_. 5.0.0 ----- This release introduces breaking changes, please refer to the :ref:`Migration from 4.x.x`. - Rewrite the logic to parse Examples for Scenario Outlines. Now the substitution of the examples is done during the parsing of Gherkin feature files. You won't need to define the steps twice like ``@given("there are cucumbers")`` and ``@given(parsers.parse("there are {start} cucumbers"))``. The latter will be enough. 
- Removed ``example_converters`` from ``scenario(...)`` signature. You should now use just the ``converters`` parameter for ``given``, ``when``, ``then``. - Removed ``--cucumberjson-expanded`` and ``--cucumber-json-expanded`` options. Now the JSON report is always expanded. - Removed ``--gherkin-terminal-reporter-expanded`` option. Now the terminal report is always expanded. 4.1.0 ----------- - `when` and `then` steps now can provide a `target_fixture`, just like `given` does. Discussion at https://github.com/pytest-dev/pytest-bdd/issues/402. - Drop compatibility for python 2 and officially support only python >= 3.6. - Fix error when using `--cucumber-json-expanded` in combination with `example_converters` (marcbrossaissogeti). - Fix `--generate-missing` not correctly recognizing steps with parsers 4.0.2 ----- - Fix a bug that prevents using comments in the ``Examples:`` section. (youtux) 4.0.1 ----- - Fixed performance regression introduced in 4.0.0 where collection time of tests would take way longer than before. (youtux) 4.0.0 ----- This release introduces breaking changes, please refer to the :ref:`Migration from 3.x.x`. - Strict Gherkin option is removed (``@scenario()`` does not accept the ``strict_gherkin`` parameter). (olegpidsadnyi) - ``@scenario()`` does not accept the undocumented parameter ``caller_module`` anymore. (youtux) - Given step is no longer a fixture. The scope parameter is also removed. (olegpidsadnyi) - Fixture parameter is removed from the given step declaration. (olegpidsadnyi) - ``pytest_bdd_step_validation_error`` hook is removed. (olegpidsadnyi) - Fix an error with pytest-pylint plugin #374. (toracle) - Fix pytest-xdist 2.0 compatibility #369. (olegpidsadnyi) - Fix compatibility with pytest 6 ``--import-mode=importlib`` option. (youtux) 3.4.0 ----- - Parse multiline steps according to the gherkin specification #365. 3.3.0 ----- - Drop support for pytest < 4.3. - Fix a Python 4.0 bug. 
- Fix ``pytest --generate-missing`` functionality being broken. - Fix problematic missing step definition from strings containing quotes. - Implement parsing escaped pipe characters in outline parameters (Mark90) #337. - Disable the strict Gherkin validation in the steps generation (v-buriak) #356. 3.2.1 ---------- - Fix regression introduced in 3.2.0 where pytest-bdd would break in presence of test items that are not functions. 3.2.0 ---------- - Fix Python 3.8 support - Remove code that rewrites code. This should help with the maintenance of this project and make debugging easier. 3.1.1 ---------- - Allow unicode string in ``@given()`` step names when using python2. This makes the transition of projects from python 2 to 3 easier. 3.1.0 ---------- - Drop support for pytest < 3.3.2. - Step definitions generated by ``$ pytest-bdd generate`` will now raise ``NotImplementedError`` by default. - ``@given(...)`` no longer accepts regex objects. It was deprecated long ago. - Improve project testing by treating warnings as exceptions. - ``pytest_bdd_step_validation_error`` will now always receive ``step_func_args`` as defined in the signature. 3.0.2 ------ - Add compatibility with pytest 4.2 (sliwinski-milosz) #288. 3.0.1 ------ - Minimal supported version of `pytest` is now 2.9.0 as lower versions do not support `bool` type ini options (sliwinski-milosz) #260 - Fix RemovedInPytest4Warning warnings (sliwinski-milosz) #261. 3.0.0 ------ - Fixtures `pytestbdd_feature_base_dir` and `pytestbdd_strict_gherkin` have been removed. 
Check the `Migration of your tests from versions 2.x.x `_ for more information (sliwinski-milosz) #255 - Fix step definitions not being found when using parsers or converters after a change in pytest (youtux) #257 2.21.0 ------ - Gherkin terminal reporter expanded format (pauk-slon) 2.20.0 ------ - Added support for But steps (olegpidsadnyi) - Fixed compatibility with pytest 3.3.2 (olegpidsadnyi) - MInimal required version of pytest is now 2.8.1 since it doesn't support earlier versions (olegpidsadnyi) 2.19.0 ------ - Added --cucumber-json-expanded option for explicit selection of expanded format (mjholtkamp) - Step names are filled in when --cucumber-json-expanded is used (mjholtkamp) 2.18.2 ------ - Fix check for out section steps definitions for no strict gherkin feature 2.18.1 ------ - Relay fixture results to recursive call of 'get_features' (coddingtonbear) 2.18.0 ------ - Add gherkin terminal reporter (spinus + thedrow) 2.17.2 ------ - Fix scenario lines containing an ``@`` being parsed as a tag. (The-Compiler) 2.17.1 ------ - Add support for pytest 3.0 2.17.0 ------ - Fix FixtureDef signature for newer pytest versions (The-Compiler) - Better error explanation for the steps defined outside of scenarios (olegpidsadnyi) - Add a ``pytest_bdd_apply_tag`` hook to customize handling of tags (The-Compiler) - Allow spaces in tag names. This can be useful when using the ``pytest_bdd_apply_tag`` hook with tags like ``@xfail: Some reason``. 
2.16.1 ------ - Cleaned up hooks of the plugin (olegpidsadnyi) - Fixed report serialization (olegpidsadnyi) 2.16.0 ------ - Fixed deprecation warnings with pytest 2.8 (The-Compiler) - Fixed deprecation warnings with Python 3.5 (The-Compiler) 2.15.0 ------ - Add examples data in the scenario report (bubenkoff) 2.14.5 ------ - Properly parse feature description (bubenkoff) 2.14.3 ------ - Avoid potentially random collection order for xdist compartibility (bubenkoff) 2.14.1 ------ - Pass additional arguments to parsers (bubenkoff) 2.14.0 ------ - Add validation check which prevents having multiple features in a single feature file (bubenkoff) 2.13.1 ------ - Allow mixing feature example table with scenario example table (bubenkoff, olegpidsadnyi) 2.13.0 ------ - Feature example table (bubenkoff, sureshvv) 2.12.2 ------ - Make it possible to relax strict Gherkin scenario validation (bubenkoff) 2.11.3 ------ - Fix minimal `six` version (bubenkoff, dustinfarris) 2.11.1 ------ - Mention step type on step definition not found errors and in code generation (bubenkoff, lrowe) 2.11.0 ------ - Prefix step definition fixture names to avoid name collisions (bubenkoff, lrowe) 2.10.0 ------ - Make feature and scenario tags to be fully compartible with pytest markers (bubenkoff, kevinastone) 2.9.1 ----- - Fixed FeatureError string representation to correctly support python3 (bubenkoff, lrowe) 2.9.0 ----- - Added possibility to inject fixtures from given keywords (bubenkoff) 2.8.0 ----- - Added hook before the step is executed with evaluated parameters (olegpidsadnyi) 2.7.2 ----- - Correct base feature path lookup for python3 (bubenkoff) 2.7.1 ----- - Allow to pass ``scope`` for ``given`` steps (bubenkoff, sureshvv) 2.7.0 ----- - Implemented `scenarios` shortcut to automatically bind scenarios to tests (bubenkoff) 2.6.2 ----- - Parse comments only in the beginning of words (santagada) 2.6.1 ----- - Correctly handle `pytest-bdd` command called without the subcommand under python3 
(bubenkoff, spinus) - Pluggable parsers for step definitions (bubenkoff, spinus) 2.5.3 ----- - Add after scenario hook, document both before and after scenario hooks (bubenkoff) 2.5.2 ----- - Fix code generation steps ordering (bubenkoff) 2.5.1 ----- - Fix error report serialization (olegpidsadnyi) 2.5.0 ----- - Fix multiline steps in the Background section (bubenkoff, arpe) - Code cleanup (olegpidsadnyi) 2.4.5 ----- - Fix unicode issue with scenario name (bubenkoff, aohontsev) 2.4.3 ----- - Fix unicode regex argumented steps issue (bubenkoff, aohontsev) - Fix steps timings in the json reporting (bubenkoff) 2.4.2 ----- - Recursion is fixed for the --generate-missing and the --feature parameters (bubenkoff) 2.4.1 ----- - Better reporting of a not found scenario (bubenkoff) - Simple test code generation implemented (bubenkoff) - Correct timing values for cucumber json reporting (bubenkoff) - Validation/generation helpers (bubenkoff) 2.4.0 ----- - Background support added (bubenkoff) - Fixed double collection of the conftest files if scenario decorator is used (ropez, bubenkoff) 2.3.3 ----- - Added timings to the cucumber json report (bubenkoff) 2.3.2 ----- - Fixed incorrect error message using e.argname instead of step.name (hvdklauw) 2.3.1 ----- - Implemented cucumber tags support (bubenkoff) - Implemented cucumber json formatter (bubenkoff, albertjan) - Added 'trace' keyword (bubenkoff) 2.1.2 ----- - Latest pytest compartibility fixes (bubenkoff) 2.1.1 ----- - Bugfixes (bubenkoff) 2.1.0 ----- - Implemented multiline steps (bubenkoff) 2.0.1 ----- - Allow more than one parameter per step (bubenkoff) - Allow empty example values (bubenkoff) 2.0.0 ----- - Pure pytest parametrization for scenario outlines (bubenkoff) - Argumented steps now support converters (transformations) (bubenkoff) - scenario supports only decorator form (bubenkoff) - Code generation refactoring and cleanup (bubenkoff) 1.0.0 ----- - Implemented scenario outlines (bubenkoff) 0.6.11 ------ - Fixed 
step arguments conflict with the fixtures having the same name (olegpidsadnyi) 0.6.9 ----- - Implemented support of Gherkin "Feature:" (olegpidsadnyi) 0.6.8 ----- - Implemented several hooks to allow reporting/error handling (bubenkoff) 0.6.6 ----- - Fixes to unnecessary mentioning of pytest-bdd package files in py.test log with -v (bubenkoff) 0.6.5 ----- - Compartibility with recent pytest (bubenkoff) 0.6.4 ----- - More unicode fixes (amakhnach) 0.6.3 ----- - Added unicode support for feature files. Removed buggy module replacement for scenario. (amakhnach) 0.6.2 ----- - Removed unnecessary mention of pytest-bdd package files in py.test log with -v (bubenkoff) 0.6.1 ----- - Step arguments in whens when there are no given arguments used. (amakhnach, bubenkoff) 0.6.0 ----- - Added step arguments support. (curzona, olegpidsadnyi, bubenkoff) - Added checking of the step type order. (markon, olegpidsadnyi) 0.5.2 ----- - Added extra info into output when FeatureError exception raises. (amakhnach) 0.5.0 ----- - Added parametrization to scenarios - Coveralls.io integration - Test coverage improvement/fixes - Correct wrapping of step functions to preserve function docstring 0.4.7 ----- - Fixed Python 3.3 support 0.4.6 ----- - Fixed a bug when py.test --fixtures showed incorrect filenames for the steps. 0.4.5 ----- - Fixed a bug with the reuse of the fixture by given steps being evaluated multiple times. 0.4.3 ----- - Update the license file and PYPI related documentation. 
pytest-bdd-7.1.2/CONTRIBUTING.md000066400000000000000000000010261457564257700160560ustar00rootroot00000000000000# How to setup development environment - Install poetry: https://python-poetry.org/docs/#installation - (Optional) Install pre-commit: https://pre-commit.com/#install - Run `poetry install` to install dependencies - Run `pre-commit install` to install pre-commit hooks # How to run tests - Run `poetry run pytest` - or run `tox` # How to make a release ```shell python -m pip install --upgrade build twine # cleanup the ./dist folder rm -rf ./dist # Build the distributions python -m build # Upload them twine upload dist/* ``` pytest-bdd-7.1.2/LICENSE.txt000066400000000000000000000021041457564257700154460ustar00rootroot00000000000000Copyright (C) 2013-2014 Oleg Pidsadnyi, Anatoly Bubenkov and others Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
pytest-bdd-7.1.2/README.rst000066400000000000000000001175241457564257700153270ustar00rootroot00000000000000BDD library for the pytest runner ================================= .. image:: https://img.shields.io/pypi/v/pytest-bdd.svg :target: https://pypi.python.org/pypi/pytest-bdd .. image:: https://codecov.io/gh/pytest-dev/pytest-bdd/branch/master/graph/badge.svg :target: https://codecov.io/gh/pytest-dev/pytest-bdd .. image:: https://github.com/pytest-dev/pytest-bdd/actions/workflows/main.yml/badge.svg :target: https://github.com/pytest-dev/pytest-bdd/actions/workflows/main.yml .. image:: https://readthedocs.org/projects/pytest-bdd/badge/?version=stable :target: https://readthedocs.org/projects/pytest-bdd/ :alt: Documentation Status pytest-bdd implements a subset of the Gherkin language to enable automating project requirements testing and to facilitate behavioral driven development. Unlike many other BDD tools, it does not require a separate runner and benefits from the power and flexibility of pytest. It enables unifying unit and functional tests, reduces the burden of continuous integration server configuration and allows the reuse of test setups. Pytest fixtures written for unit tests can be reused for setup and actions mentioned in feature steps with dependency injection. This allows a true BDD just-enough specification of the requirements without maintaining any context object containing the side effects of Gherkin imperative declarations. .. _behave: https://pypi.python.org/pypi/behave .. _pytest-splinter: https://github.com/pytest-dev/pytest-splinter Install pytest-bdd ------------------ :: pip install pytest-bdd Example ------- An example test for a blog hosting software could look like this. Note that pytest-splinter_ is used to get the browser fixture. .. code-block:: gherkin # content of publish_article.feature Feature: Blog A site where you can publish your articles. 
Scenario: Publishing the article Given I'm an author user And I have an article When I go to the article page And I press the publish button Then I should not see the error message And the article should be published # Note: will query the database Note that only one feature is allowed per feature file. .. code-block:: python # content of test_publish_article.py from pytest_bdd import scenario, given, when, then @scenario('publish_article.feature', 'Publishing the article') def test_publish(): pass @given("I'm an author user") def author_user(auth, author): auth['user'] = author.user @given("I have an article", target_fixture="article") def article(author): return create_test_article(author=author) @when("I go to the article page") def go_to_article(article, browser): browser.visit(urljoin(browser.url, '/manage/articles/{0}/'.format(article.id))) @when("I press the publish button") def publish_article(browser): browser.find_by_css('button[name=publish]').first.click() @then("I should not see the error message") def no_error_message(browser): with pytest.raises(ElementDoesNotExist): browser.find_by_css('.message.error').first @then("the article should be published") def article_is_published(article): article.refresh() # Refresh the object in the SQLAlchemy session assert article.is_published Scenario decorator ------------------ Functions decorated with the `scenario` decorator behave like a normal test function, and they will be executed after all scenario steps. .. code-block:: python from pytest_bdd import scenario, given, when, then @scenario('publish_article.feature', 'Publishing the article') def test_publish(browser): assert article.title in browser.html .. NOTE:: It is however encouraged to try as much as possible to have your logic only inside the Given, When, Then steps. Step aliases ------------ Sometimes, one has to declare the same fixtures or steps with different names for better readability. 
In order to use the same step function with multiple step names simply decorate it multiple times: .. code-block:: python @given("I have an article") @given("there's an article") def article(author, target_fixture="article"): return create_test_article(author=author) Note that the given step aliases are independent and will be executed when mentioned. For example if you associate your resource to some owner or not. Admin user can’t be an author of the article, but articles should have a default author. .. code-block:: gherkin Feature: Resource owner Scenario: I'm the author Given I'm an author And I have an article Scenario: I'm the admin Given I'm the admin And there's an article Step arguments -------------- Often it's possible to reuse steps giving them a parameter(s). This allows to have single implementation and multiple use, so less code. Also opens the possibility to use same step twice in single scenario and with different arguments! And even more, there are several types of step parameter parsers at your disposal (idea taken from behave_ implementation): .. _pypi_parse: http://pypi.python.org/pypi/parse .. _pypi_parse_type: http://pypi.python.org/pypi/parse_type **string** (the default) This is the default and can be considered as a `null` or `exact` parser. It parses no parameters and matches the step name by equality of strings. **parse** (based on: pypi_parse_) Provides a simple parser that replaces regular expressions for step parameters with a readable syntax like ``{param:Type}``. The syntax is inspired by the Python builtin ``string.format()`` function. Step parameters must use the named fields syntax of pypi_parse_ in step definitions. The named fields are extracted, optionally type converted and then used as step function arguments. Supports type conversions by using type converters passed via `extra_types` **cfparse** (extends: pypi_parse_, based on: pypi_parse_type_) Provides an extended parser with "Cardinality Field" (CF) support. 
Automatically creates missing type converters for related cardinality as long as a type converter for cardinality=1 is provided. Supports parse expressions like: * ``{values:Type+}`` (cardinality=1..N, many) * ``{values:Type*}`` (cardinality=0..N, many0) * ``{value:Type?}`` (cardinality=0..1, optional) Supports type conversions (as above). **re** This uses full regular expressions to parse the clause text. You will need to use named groups "(?P...)" to define the variables pulled from the text and passed to your ``step()`` function. Type conversion can only be done via `converters` step decorator argument (see example below). The default parser is `string`, so just plain one-to-one match to the keyword definition. Parsers except `string`, as well as their optional arguments are specified like: for `cfparse` parser .. code-block:: python from pytest_bdd import parsers @given( parsers.cfparse("there are {start:Number} cucumbers", extra_types={"Number": int}), target_fixture="cucumbers", ) def given_cucumbers(start): return {"start": start, "eat": 0} for `re` parser .. code-block:: python from pytest_bdd import parsers @given( parsers.re(r"there are (?P\d+) cucumbers"), converters={"start": int}, target_fixture="cucumbers", ) def given_cucumbers(start): return {"start": start, "eat": 0} Example: .. code-block:: gherkin Feature: Step arguments Scenario: Arguments for given, when, then Given there are 5 cucumbers When I eat 3 cucumbers And I eat 2 cucumbers Then I should have 0 cucumbers The code will look like: .. 
code-block:: python from pytest_bdd import scenarios, given, when, then, parsers scenarios("arguments.feature") @given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") def given_cucumbers(start): return {"start": start, "eat": 0} @when(parsers.parse("I eat {eat:d} cucumbers")) def eat_cucumbers(cucumbers, eat): cucumbers["eat"] += eat @then(parsers.parse("I should have {left:d} cucumbers")) def should_have_left_cucumbers(cucumbers, left): assert cucumbers["start"] - cucumbers["eat"] == left Example code also shows possibility to pass argument converters which may be useful if you need to postprocess step arguments after the parser. You can implement your own step parser. It's interface is quite simple. The code can look like: .. code-block:: python import re from pytest_bdd import given, parsers class MyParser(parsers.StepParser): """Custom parser.""" def __init__(self, name, **kwargs): """Compile regex.""" super().__init__(name) self.regex = re.compile(re.sub("%(.+)%", "(?P<\1>.+)", self.name), **kwargs) def parse_arguments(self, name): """Get step arguments. :return: `dict` of step arguments """ return self.regex.match(name).groupdict() def is_matching(self, name): """Match given name with the step name.""" return bool(self.regex.match(name)) @given(parsers.parse("there are %start% cucumbers"), target_fixture="cucumbers") def given_cucumbers(start): return {"start": start, "eat": 0} Override fixtures via given steps --------------------------------- Dependency injection is not a panacea if you have complex structure of your test setup data. Sometimes there's a need such a given step which would imperatively change the fixture only for certain test (scenario), while for other tests it will stay untouched. To allow this, special parameter `target_fixture` exists in the `given` decorator: .. 
code-block:: python from pytest_bdd import given @pytest.fixture def foo(): return "foo" @given("I have injecting given", target_fixture="foo") def injecting_given(): return "injected foo" @then('foo should be "injected foo"') def foo_is_foo(foo): assert foo == 'injected foo' .. code-block:: gherkin Feature: Target fixture Scenario: Test given fixture injection Given I have injecting given Then foo should be "injected foo" In this example, the existing fixture `foo` will be overridden by given step `I have injecting given` only for the scenario it's used in. Sometimes it is also useful to let `when` and `then` steps provide a fixture as well. A common use case is when we have to assert the outcome of an HTTP request: .. code-block:: python # content of test_blog.py from pytest_bdd import scenarios, given, when, then from my_app.models import Article scenarios("blog.feature") @given("there is an article", target_fixture="article") def there_is_an_article(): return Article() @when("I request the deletion of the article", target_fixture="request_result") def there_should_be_a_new_article(article, http_client): return http_client.delete(f"/articles/{article.uid}") @then("the request should be successful") def article_is_published(request_result): assert request_result.status_code == 200 .. code-block:: gherkin # content of blog.feature Feature: Blog Scenario: Deleting the article Given there is an article When I request the deletion of the article Then the request should be successful Multiline steps --------------- As Gherkin, pytest-bdd supports multiline steps (a.k.a. `Doc Strings `_). But in much cleaner and powerful way: .. code-block:: gherkin Feature: Multiline steps Scenario: Multiline step using sub indentation Given I have a step with: Some Extra Lines Then the text should be parsed with correct indentation A step is considered as a multiline one, if the **next** line(s) after it's first line is indented relatively to the first line. 
The step name is then simply extended by adding further lines with newlines. In the example above, the Given step name will be: .. code-block:: python 'I have a step with:\nSome\nExtra\nLines' You can of course register a step using the full name (including the newlines), but it seems more practical to use step arguments and capture lines after first line (or some subset of them) into the argument: .. code-block:: python from pytest_bdd import given, then, scenario, parsers scenarios("multiline.feature") @given(parsers.parse("I have a step with:\n{content}"), target_fixture="text") def given_text(content): return content @then("the text should be parsed with correct indentation") def text_should_be_correct(text): assert text == "Some\nExtra\nLines" Scenarios shortcut ------------------ If you have a relatively large set of feature files, it's boring to manually bind scenarios to the tests using the scenario decorator. Of course with the manual approach you get all the power to be able to additionally parametrize the test, give the test function a nice name, document it, etc, but in the majority of the cases you don't need that. Instead, you want to bind all the scenarios found in the ``features`` folder(s) recursively automatically, by using the ``scenarios`` helper. .. code-block:: python from pytest_bdd import scenarios # assume 'features' subfolder is in this file's directory scenarios('features') That's all you need to do to bind all scenarios found in the ``features`` folder! Note that you can pass multiple paths, and those paths can be either feature files or feature folders. .. code-block:: python from pytest_bdd import scenarios # pass multiple paths/files scenarios('features', 'other_features/some.feature', 'some_other_features') But what if you need to manually bind a certain scenario, leaving others to be automatically bound? Just write your scenario in a "normal" way, but ensure you do it **before** the call of ``scenarios`` helper. .. 
code-block:: python from pytest_bdd import scenario, scenarios @scenario('features/some.feature', 'Test something') def test_something(): pass # assume 'features' subfolder is in this file's directory scenarios('features') In the example above, the ``test_something`` scenario binding will be kept manual, other scenarios found in the ``features`` folder will be bound automatically. Scenario outlines ----------------- Scenarios can be parametrized to cover multiple cases. These are called `Scenario Outlines `_ in Gherkin, and the variable templates are written using angular brackets (e.g. ````). Example: .. code-block:: gherkin # content of scenario_outlines.feature Feature: Scenario outlines Scenario Outline: Outlined given, when, then Given there are cucumbers When I eat cucumbers Then I should have cucumbers Examples: | start | eat | left | | 12 | 5 | 7 | .. code-block:: python from pytest_bdd import scenarios, given, when, then, parsers scenarios("scenario_outlines.feature") @given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") def given_cucumbers(start): return {"start": start, "eat": 0} @when(parsers.parse("I eat {eat:d} cucumbers")) def eat_cucumbers(cucumbers, eat): cucumbers["eat"] += eat @then(parsers.parse("I should have {left:d} cucumbers")) def should_have_left_cucumbers(cucumbers, left): assert cucumbers["start"] - cucumbers["eat"] == left Organizing your scenarios ------------------------- The more features and scenarios you have, the more important the question of their organization becomes. The things you can do (and that is also a recommended way): * organize your feature files in the folders by semantic groups: :: features │ ├──frontend │ │ │ └──auth │ │ │ └──login.feature └──backend │ └──auth │ └──login.feature This looks fine, but how do you run tests only for a certain feature? As pytest-bdd uses pytest, and bdd scenarios are actually normal tests. 
But test files are separate from the feature files, the mapping is up to developers, so the test files structure can look completely different: :: tests │ └──functional │ └──test_auth.py │ └ """Authentication tests.""" from pytest_bdd import scenario @scenario('frontend/auth/login.feature') def test_logging_in_frontend(): pass @scenario('backend/auth/login.feature') def test_logging_in_backend(): pass For picking up tests to run we can use the `tests selection `_ technique. The problem is that you have to know how your tests are organized, knowing only the feature files organization is not enough. Cucumber uses `tags `_ as a way of categorizing your features and scenarios, which pytest-bdd supports. For example, we could have: .. code-block:: gherkin @login @backend Feature: Login @successful Scenario: Successful login pytest-bdd uses `pytest markers `_ as a `storage` of the tags for the given scenario test, so we can use standard test selection: .. code-block:: bash pytest -m "backend and login and successful" The feature and scenario markers are not different from standard pytest markers, and the ``@`` symbol is stripped out automatically to allow test selector expressions. If you want to have bdd-related tags to be distinguishable from the other test markers, use a prefix like ``bdd``. Note that if you use pytest with the ``--strict`` option, all bdd tags mentioned in the feature files should be also in the ``markers`` setting of the ``pytest.ini`` config. Also for tags please use names which are python-compatible variable names, i.e. start with a non-number, only underscores or alphanumeric characters, etc. That way you can safely use tags for tests filtering. You can customize how tags are converted to pytest marks by implementing the ``pytest_bdd_apply_tag`` hook and returning ``True`` from it: .. 
code-block:: python def pytest_bdd_apply_tag(tag, function): if tag == 'todo': marker = pytest.mark.skip(reason="Not implemented yet") marker(function) return True else: # Fall back to the default behavior of pytest-bdd return None Test setup ---------- Test setup is implemented within the Given section. Even though these steps are executed imperatively to apply possible side-effects, pytest-bdd is trying to benefit of the PyTest fixtures which is based on the dependency injection and makes the setup more declarative style. .. code-block:: python @given("I have a beautiful article", target_fixture="article") def article(): return Article(is_beautiful=True) The target PyTest fixture "article" gets the return value and any other step can depend on it. .. code-block:: gherkin Feature: The power of PyTest Scenario: Symbolic name across steps Given I have a beautiful article When I publish this article The When step is referencing the ``article`` to publish it. .. code-block:: python @when("I publish this article") def publish_article(article): article.publish() Many other BDD toolkits operate on a global context and put the side effects there. This makes it very difficult to implement the steps, because the dependencies appear only as the side-effects during run-time and not declared in the code. The "publish article" step has to trust that the article is already in the context, has to know the name of the attribute it is stored there, the type etc. In pytest-bdd you just declare an argument of the step function that it depends on and the PyTest will make sure to provide it. Still side effects can be applied in the imperative style by design of the BDD. .. code-block:: gherkin Feature: News website Scenario: Publishing an article Given I have a beautiful article And my article is published Functional tests can reuse your fixture libraries created for the unit-tests and upgrade them by applying the side effects. .. 
code-block:: python @pytest.fixture def article(): return Article(is_beautiful=True) @given("I have a beautiful article") def i_have_a_beautiful_article(article): pass @given("my article is published") def published_article(article): article.publish() return article This way side-effects were applied to our article and PyTest makes sure that all steps that require the "article" fixture will receive the same object. The value of the "published_article" and the "article" fixtures is the same object. Fixtures are evaluated **only once** within the PyTest scope and their values are cached. Backgrounds ----------- It's often the case that to cover certain feature, you'll need multiple scenarios. And it's logical that the setup for those scenarios will have some common parts (if not equal). For this, there are `backgrounds`. pytest-bdd implements `Gherkin backgrounds `_ for features. .. code-block:: gherkin Feature: Multiple site support Background: Given a global administrator named "Greg" And a blog named "Greg's anti-tax rants" And a customer named "Wilson" And a blog named "Expensive Therapy" owned by "Wilson" Scenario: Wilson posts to his own blog Given I am logged in as Wilson When I try to post to "Expensive Therapy" Then I should see "Your article was published." Scenario: Greg posts to a client's blog Given I am logged in as Greg When I try to post to "Expensive Therapy" Then I should see "Your article was published." In this example, all steps from the background will be executed before all the scenario's own given steps, adding a possibility to prepare some common setup for multiple scenarios in a single feature. About best practices for Background, please read Gherkin's `Tips for using Background `_. .. NOTE:: Only "Given" steps should be used in "Background" section. 
Steps "When" and "Then" are prohibited, because their purposes are related to actions and consuming outcomes; that is in conflict with the aim of "Background" - to prepare the system for tests or "put the system in a known state" as "Given" does it. The statement above applies to strict Gherkin mode, which is enabled by default. Reusing fixtures ---------------- Sometimes scenarios define new names for an existing fixture that can be inherited (reused). For example, if we have the pytest fixture: .. code-block:: python @pytest.fixture def article(): """Test article.""" return Article() Then this fixture can be reused with other names using given(): .. code-block:: python @given('I have a beautiful article') def i_have_an_article(article): """I have an article.""" Reusing steps ------------- It is possible to define some common steps in the parent ``conftest.py`` and simply expect them in the child test file. .. code-block:: gherkin # content of common_steps.feature Scenario: All steps are declared in the conftest Given I have a bar Then bar should have value "bar" .. code-block:: python # content of conftest.py from pytest_bdd import given, then @given("I have a bar", target_fixture="bar") def bar(): return "bar" @then('bar should have value "bar"') def bar_is_bar(bar): assert bar == "bar" .. code-block:: python # content of test_common.py @scenario("common_steps.feature", "All steps are declared in the conftest") def test_conftest(): pass There are no definitions of steps in the test file. They were collected from the parent conftest.py. 
Default steps ------------- Here is the list of steps that are implemented inside pytest-bdd: given * trace - enters the `pdb` debugger via `pytest.set_trace()` when * trace - enters the `pdb` debugger via `pytest.set_trace()` then * trace - enters the `pdb` debugger via `pytest.set_trace()` Feature file paths ------------------ By default, pytest-bdd will use the current module's path as the base path for finding feature files, but this behaviour can be changed in the pytest configuration file (i.e. `pytest.ini`, `tox.ini` or `setup.cfg`) by declaring the new base path in the `bdd_features_base_dir` key. The path is interpreted as relative to the `pytest root directory `__. You can also override the features base path on a per-scenario basis, in order to override the path for specific tests. pytest.ini: .. code-block:: ini [pytest] bdd_features_base_dir = features/ tests/test_publish_article.py: .. code-block:: python from pytest_bdd import scenario @scenario("foo.feature", "Foo feature in features/foo.feature") def test_foo(): pass @scenario( "foo.feature", "Foo feature in tests/local-features/foo.feature", features_base_dir="./local-features/", ) def test_foo_local(): pass The `features_base_dir` parameter can also be passed to the `@scenario` decorator. Avoid retyping the feature file name ------------------------------------ If you want to avoid retyping the feature file name when defining your scenarios in a test file, use ``functools.partial``. This will make your life much easier when defining multiple scenarios in a test file. For example: .. code-block:: python # content of test_publish_article.py from functools import partial import pytest_bdd scenario = partial(pytest_bdd.scenario, "/path/to/publish_article.feature") @scenario("Publishing the article") def test_publish(): pass @scenario("Publishing the article as unprivileged user") def test_publish_unprivileged(): pass You can learn more about `functools.partial `_ in the Python docs. 
Programmatic step generation ---------------------------- Sometimes you have step definitions that would be much easier to automate rather than writing them manually over and over again. This is common, for example, when using libraries like `pytest-factoryboy `_ that automatically creates fixtures. Writing step definitions for every model can become a tedious task. For this reason, pytest-bdd provides a way to generate step definitions automatically. The trick is to pass the ``stacklevel`` parameter to the ``given``, ``when``, ``then``, ``step`` decorators. This will instruct them to inject the step fixtures in the appropriate module, rather than just injecting them in the caller frame. Let's look at a concrete example; let's say you have a class ``Wallet`` that has some amount of each currency: .. code-block:: python # contents of wallet.py import dataclass @dataclass class Wallet: verified: bool amount_eur: int amount_usd: int amount_gbp: int amount_jpy: int You can use pytest-factoryboy to automatically create model fixtures for this class: .. code-block:: python # contents of wallet_factory.py from wallet import Wallet import factory from pytest_factoryboy import register class WalletFactory(factory.Factory): class Meta: model = Wallet amount_eur = 0 amount_usd = 0 amount_gbp = 0 amount_jpy = 0 register(Wallet) # creates the "wallet" fixture register(Wallet, "second_wallet") # creates the "second_wallet" fixture Now we can define a function ``generate_wallet_steps(...)`` that creates the steps for any wallet fixture (in our case, it will be ``wallet`` and ``second_wallet``): .. 
code-block:: python # contents of wallet_steps.py import re from dataclasses import fields import factory import pytest from pytest_bdd import given, when, then, scenarios, parsers def generate_wallet_steps(model_name="wallet", stacklevel=1): stacklevel += 1 human_name = model_name.replace("_", " ") # "second_wallet" -> "second wallet" @given(f"I have a {human_name}", target_fixture=model_name, stacklevel=stacklevel) def _(request): return request.getfixturevalue(model_name) # Generate steps for currency fields: for field in fields(Wallet): match = re.fullmatch(r"amount_(?P[a-z]{3})", field.name) if not match: continue currency = match["currency"] @given( parsers.parse(f"I have {{value:d}} {currency.upper()} in my {human_name}"), target_fixture=f"{model_name}__amount_{currency}", stacklevel=stacklevel, ) def _(value: int) -> int: return value @then( parsers.parse(f"I should have {{value:d}} {currency.upper()} in my {human_name}"), stacklevel=stacklevel, ) def _(value: int, _currency=currency, _model_name=model_name) -> None: wallet = request.getfixturevalue(_model_name) assert getattr(wallet, f"amount_{_currency}") == value # Inject the steps into the current module generate_wallet_steps("wallet") generate_wallet_steps("second_wallet") This last file, ``wallet_steps.py``, now contains all the step definitions for our "wallet" and "second_wallet" fixtures. We can now define a scenario like this: .. code-block:: gherkin # contents of wallet.feature Feature: A feature Scenario: Wallet EUR amount stays constant Given I have 10 EUR in my wallet And I have a wallet Then I should have 10 EUR in my wallet Scenario: Second wallet JPY amount stays constant Given I have 100 JPY in my second wallet And I have a second wallet Then I should have 100 JPY in my second wallet and finally a test file that puts it all together and run the scenarios: .. 
code-block:: python # contents of test_wallet.py from pytest_factoryboy import scenarios from wallet_factory import * # import the registered fixtures "wallet" and "second_wallet" from wallet_steps import * # import all the step definitions into this test file scenarios("wallet.feature") Hooks ----- pytest-bdd exposes several `pytest hooks `_ which might be helpful building useful reporting, visualization, etc. on top of it: * `pytest_bdd_before_scenario(request, feature, scenario)` - Called before scenario is executed * `pytest_bdd_after_scenario(request, feature, scenario)` - Called after scenario is executed (even if one of steps has failed) * `pytest_bdd_before_step(request, feature, scenario, step, step_func)` - Called before step function is executed and it's arguments evaluated * `pytest_bdd_before_step_call(request, feature, scenario, step, step_func, step_func_args)` - Called before step function is executed with evaluated arguments * `pytest_bdd_after_step(request, feature, scenario, step, step_func, step_func_args)` - Called after step function is successfully executed * `pytest_bdd_step_error(request, feature, scenario, step, step_func, step_func_args, exception)` - Called when step function failed to execute * `pytest_bdd_step_func_lookup_error(request, feature, scenario, step, exception)` - Called when step lookup failed Browser testing --------------- Tools recommended to use for browser testing: * pytest-splinter_ - pytest `splinter `_ integration for the real browser testing Reporting --------- It's important to have nice reporting out of your bdd tests. Cucumber introduced some kind of standard for `json format `_ which can be used for, for example, by `this `_ Jenkins plugin. To have an output in json format: :: pytest --cucumberjson= This will output an expanded (meaning scenario outlines will be expanded to several scenarios) Cucumber format. 
To enable gherkin-formatted output on terminal, use :: pytest --gherkin-terminal-reporter Test code generation helpers ---------------------------- For newcomers it's sometimes hard to write all needed test code without being frustrated. To simplify their life, a simple code generator was implemented. It allows to create fully functional (but of course empty) tests and step definitions for a given feature file. It's done as a separate console script provided by pytest-bdd package: :: pytest-bdd generate .. It will print the generated code to the standard output so you can easily redirect it to the file: :: pytest-bdd generate features/some.feature > tests/functional/test_some.py Advanced code generation ------------------------ For more experienced users, there's a smart code generation/suggestion feature. It will only generate the test code which is not yet there, checking existing tests and step definitions the same way it's done during the test execution. The code suggestion tool is called via passing additional pytest arguments: :: pytest --generate-missing --feature features tests/functional The output will be like: :: ============================= test session starts ============================== platform linux2 -- Python 2.7.6 -- py-1.4.24 -- pytest-2.6.2 plugins: xdist, pep8, cov, cache, bdd, bdd, bdd collected 2 items Scenario is not bound to any test: "Code is generated for scenarios which are not bound to any tests" in feature "Missing code generation" in /tmp/pytest-552/testdir/test_generate_missing0/tests/generation.feature -------------------------------------------------------------------------------- Step is not defined: "I have a custom bar" in scenario: "Code is generated for scenario steps which are not yet defined(implemented)" in feature "Missing code generation" in /tmp/pytest-552/testdir/test_generate_missing0/tests/generation.feature -------------------------------------------------------------------------------- Please place the code above 
to the test file(s): @scenario('tests/generation.feature', 'Code is generated for scenarios which are not bound to any tests') def test_Code_is_generated_for_scenarios_which_are_not_bound_to_any_tests(): """Code is generated for scenarios which are not bound to any tests.""" @given("I have a custom bar") def I_have_a_custom_bar(): """I have a custom bar.""" As as side effect, the tool will validate the files for format errors, also some of the logic bugs, for example the ordering of the types of the steps. .. _Migration from 5.x.x: Migration of your tests from versions 5.x.x ------------------------------------------- The primary focus of the pytest-bdd is the compatibility with the latest gherkin developments e.g. multiple scenario outline example tables with tags support etc. In order to provide the best compatibility, it is best to support the features described in the official gherkin reference. This means deprecation of some non-standard features that were implemented in pytest-bdd. Removal of the feature examples ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The example tables on the feature level are no longer supported. If you had examples on the feature level, you should copy them to each individual scenario. Removal of the vertical examples ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Vertical example tables are no longer supported since the official gherkin doesn't support them. The example tables should have horizontal orientation. Step arguments are no longer fixtures ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Step parsed arguments conflicted with the fixtures. Now they no longer define fixture. If the fixture has to be defined by the step, the target_fixture param should be used. Variable templates in steps are only parsed for Scenario Outlines ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In previous versions of pytest, steps containing ```` would be parsed both by ``Scenario`` and ``Scenario Outline``. Now they are only parsed within a ``Scenario Outline``. .. 
_Migration from 4.x.x: Migration of your tests from versions 4.x.x ------------------------------------------- Replace usage of inside step definitions with parsed {parameter} ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Templated steps (e.g. ``@given("there are cucumbers")``) should now the use step argument parsers in order to match the scenario outlines and get the values from the example tables. The values from the example tables are no longer passed as fixtures, although if you define your step to use a parser, the parameters will be still provided as fixtures. .. code-block:: python # Old step definition: @given("there are cucumbers") def given_cucumbers(start): pass # New step definition: @given(parsers.parse("there are {start} cucumbers")) def given_cucumbers(start): pass Scenario `example_converters` are removed in favor of the converters provided on the step level: .. code-block:: python # Old code: @given("there are cucumbers") def given_cucumbers(start): return {"start": start} @scenario("outline.feature", "Outlined", example_converters={"start": float}) def test_outline(): pass # New code: @given(parsers.parse("there are {start} cucumbers"), converters={"start": float}) def given_cucumbers(start): return {"start": start} @scenario("outline.feature", "Outlined") def test_outline(): pass Refuse combining scenario outline and pytest parametrization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The significant downside of combining scenario outline and pytest parametrization approach was an inability to see the test table from the feature file. .. _Migration from 3.x.x: Migration of your tests from versions 3.x.x ------------------------------------------- Given steps are no longer fixtures. In case it is needed to make given step setup a fixture, the target_fixture parameter should be used. .. 
code-block:: python @given("there's an article", target_fixture="article") def there_is_an_article(): return Article() Given steps no longer have the `fixture` parameter. In fact the step may depend on multiple fixtures. Just normal step declaration with the dependency injection should be used. .. code-block:: python @given("there's an article") def there_is_an_article(article): pass Strict gherkin option is removed, so the ``strict_gherkin`` parameter can be removed from the scenario decorators as well as ``bdd_strict_gherkin`` from the ini files. Step validation handlers for the hook ``pytest_bdd_step_validation_error`` should be removed. License ------- This software is licensed under the `MIT License `_. © 2013 Oleg Pidsadnyi, Anatoly Bubenkov and others pytest-bdd-7.1.2/docs/000077500000000000000000000000001457564257700145565ustar00rootroot00000000000000pytest-bdd-7.1.2/docs/Makefile000066400000000000000000000127141457564257700162230ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Pytest-BDD.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Pytest-BDD.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Pytest-BDD" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Pytest-BDD" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 
@echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." pytest-bdd-7.1.2/docs/conf.py000066400000000000000000000174221457564257700160630ustar00rootroot00000000000000# # Pytest-BDD documentation build configuration file, created by # sphinx-quickstart on Sun Apr 7 21:07:56 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
import os import sys from importlib import metadata sys.path.insert(0, os.path.abspath("..")) import pytest_bdd # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = "Pytest-BDD" copyright = "2013, Oleg Pidsadnyi" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = metadata.version("pytest-bdd") # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "default" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = "Pytest-BDDdoc" # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [("index", "Pytest-BDD.tex", "Pytest-BDD Documentation", "Oleg Pidsadnyi", "manual")] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. 
# latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [("index", "pytest-bdd", "Pytest-BDD Documentation", ["Oleg Pidsadnyi"], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( "index", "Pytest-BDD", "Pytest-BDD Documentation", "Oleg Pidsadnyi", "Pytest-BDD", "One line description of project.", "Miscellaneous", ) ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' pytest-bdd-7.1.2/docs/index.rst000066400000000000000000000002631457564257700164200ustar00rootroot00000000000000Welcome to Pytest-BDD's documentation! ====================================== .. contents:: .. include:: ../README.rst .. include:: ../AUTHORS.rst .. include:: ../CHANGES.rst pytest-bdd-7.1.2/poetry.lock000066400000000000000000001111671457564257700160310ustar00rootroot00000000000000# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "cachetools" version = "5.3.3" description = "Extensible memoizing collections and decorators" optional = false python-versions = ">=3.7" files = [ {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, ] [[package]] name = "chardet" version = "5.2.0" description = "Universal encoding detector for Python 3" optional = false python-versions = ">=3.7" files = [ {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, ] [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] [[package]] name = "coverage" version = "7.4.3" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ {file = "coverage-7.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6"}, {file = "coverage-7.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4"}, {file = "coverage-7.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524"}, {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d"}, {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb"}, {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0"}, {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc"}, {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2"}, {file = "coverage-7.4.3-cp310-cp310-win32.whl", hash = "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94"}, {file = "coverage-7.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0"}, {file = "coverage-7.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cbbe5e739d45a52f3200a771c6d2c7acf89eb2524890a4a3aa1a7fa0695d2a47"}, {file = "coverage-7.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:489763b2d037b164846ebac0cbd368b8a4ca56385c4090807ff9fad817de4113"}, {file = "coverage-7.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:451f433ad901b3bb00184d83fd83d135fb682d780b38af7944c9faeecb1e0bfe"}, {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcc66e222cf4c719fe7722a403888b1f5e1682d1679bd780e2b26c18bb648cdc"}, {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ec74cfef2d985e145baae90d9b1b32f85e1741b04cd967aaf9cfa84c1334f3"}, {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:abbbd8093c5229c72d4c2926afaee0e6e3140de69d5dcd918b2921f2f0c8baba"}, {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:35eb581efdacf7b7422af677b92170da4ef34500467381e805944a3201df2079"}, {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8249b1c7334be8f8c3abcaaa996e1e4927b0e5a23b65f5bf6cfe3180d8ca7840"}, {file = "coverage-7.4.3-cp311-cp311-win32.whl", hash = "sha256:cf30900aa1ba595312ae41978b95e256e419d8a823af79ce670835409fc02ad3"}, {file = "coverage-7.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:18c7320695c949de11a351742ee001849912fd57e62a706d83dfc1581897fa2e"}, {file = "coverage-7.4.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b51bfc348925e92a9bd9b2e48dad13431b57011fd1038f08316e6bf1df107d10"}, {file = "coverage-7.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d6cdecaedea1ea9e033d8adf6a0ab11107b49571bbb9737175444cea6eb72328"}, {file = "coverage-7.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b2eccb883368f9e972e216c7b4c7c06cabda925b5f06dde0650281cb7666a30"}, {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c00cdc8fa4e50e1cc1f941a7f2e3e0f26cb2a1233c9696f26963ff58445bac7"}, {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a4a8dd3dcf4cbd3165737358e4d7dfbd9d59902ad11e3b15eebb6393b0446e"}, {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:062b0a75d9261e2f9c6d071753f7eef0fc9caf3a2c82d36d76667ba7b6470003"}, {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ebe7c9e67a2d15fa97b77ea6571ce5e1e1f6b0db71d1d5e96f8d2bf134303c1d"}, {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0a120238dd71c68484f02562f6d446d736adcc6ca0993712289b102705a9a3a"}, {file = "coverage-7.4.3-cp312-cp312-win32.whl", hash = 
"sha256:37389611ba54fd6d278fde86eb2c013c8e50232e38f5c68235d09d0a3f8aa352"}, {file = "coverage-7.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:d25b937a5d9ffa857d41be042b4238dd61db888533b53bc76dc082cb5a15e914"}, {file = "coverage-7.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454"}, {file = "coverage-7.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e"}, {file = "coverage-7.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2"}, {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e"}, {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6"}, {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c"}, {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0"}, {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1"}, {file = "coverage-7.4.3-cp38-cp38-win32.whl", hash = "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f"}, {file = "coverage-7.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9"}, {file = "coverage-7.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f"}, {file = "coverage-7.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c"}, {file = "coverage-7.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e"}, {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765"}, {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee"}, {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501"}, {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f"}, {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45"}, {file = "coverage-7.4.3-cp39-cp39-win32.whl", hash = "sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9"}, {file = "coverage-7.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa"}, {file = "coverage-7.4.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51"}, {file = "coverage-7.4.3.tar.gz", hash = "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52"}, ] [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] toml = ["tomli"] [[package]] name = "distlib" version = "0.3.8" description = "Distribution utilities" optional = false python-versions = "*" files = [ {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = 
"sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, ] [[package]] name = "exceptiongroup" version = "1.2.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, ] [package.extras] test = ["pytest (>=6)"] [[package]] name = "execnet" version = "2.0.2" description = "execnet: rapid multi-Python deployment" optional = false python-versions = ">=3.7" files = [ {file = "execnet-2.0.2-py3-none-any.whl", hash = "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41"}, {file = "execnet-2.0.2.tar.gz", hash = "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af"}, ] [package.extras] testing = ["hatch", "pre-commit", "pytest", "tox"] [[package]] name = "filelock" version = "3.13.1" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] [[package]] name = "mako" version = "1.3.2" description = "A super-fast templating language that borrows the best ideas from the existing templating languages." optional = false python-versions = ">=3.8" files = [ {file = "Mako-1.3.2-py3-none-any.whl", hash = "sha256:32a99d70754dfce237019d17ffe4a282d2d3351b9c476e90d8a60e63f133b80c"}, {file = "Mako-1.3.2.tar.gz", hash = "sha256:2a0c8ad7f6274271b3bb7467dd37cf9cc6dab4bc19cb69a4ef10669402de698e"}, ] [package.dependencies] MarkupSafe = ">=0.9.2" [package.extras] babel = ["Babel"] lingua = ["lingua"] testing = ["pytest"] [[package]] name = "markupsafe" version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.7" files = [ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, 
{file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, {file = 
"MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, {file = 
"MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] [[package]] name = "mypy" version = "1.9.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, {file = 
"mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, {file = 
"mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typing-extensions = ">=4.1.0" [package.extras] dmypy = ["psutil (>=4.0)"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] [[package]] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] [[package]] name = "packaging" version = "24.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] [[package]] name = "parse" version = "1.20.1" description = "parse() is the opposite of format()" optional = false python-versions = "*" files = [ {file = "parse-1.20.1-py2.py3-none-any.whl", hash = "sha256:76ddd5214255ae711db4c512be636151fbabaa948c6f30115aecc440422ca82c"}, {file = "parse-1.20.1.tar.gz", hash = "sha256:09002ca350ad42e76629995f71f7b518670bcf93548bdde3684fd55d2be51975"}, ] [[package]] name = "parse-type" version = "0.6.2" description = "Simplifies to build parse types based on the parse module" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*" files = [ {file = "parse_type-0.6.2-py2.py3-none-any.whl", hash = "sha256:06d39a8b70fde873eb2a131141a0e79bb34a432941fb3d66fad247abafc9766c"}, {file = "parse_type-0.6.2.tar.gz", hash = "sha256:79b1f2497060d0928bc46016793f1fca1057c4aacdf15ef876aa48d75a73a355"}, ] [package.dependencies] parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""} six = ">=1.15" [package.extras] develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)"] docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"] testing = ["pytest (<5.0)", 
"pytest (>=5.0)", "pytest-html (>=1.19.0)"] [[package]] name = "platformdirs" version = "4.2.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." optional = false python-versions = ">=3.8" files = [ {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] [[package]] name = "pluggy" version = "1.4.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, ] [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] [[package]] name = "pygments" version = "2.17.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.7" files = [ {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, ] [package.extras] plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pyproject-api" version = "1.6.1" description = "API to interact with the python pyproject.toml based projects" optional = false python-versions = ">=3.8" files = [ {file = "pyproject_api-1.6.1-py3-none-any.whl", hash = "sha256:4c0116d60476b0786c88692cf4e325a9814965e2469c5998b830bba16b183675"}, {file = "pyproject_api-1.6.1.tar.gz", hash = "sha256:1817dc018adc0d1ff9ca1ed8c60e1623d5aaca40814b953af14a9cf9a5cae538"}, ] [package.dependencies] packaging = ">=23.1" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} [package.extras] docs = ["furo (>=2023.8.19)", "sphinx (<7.2)", "sphinx-autodoc-typehints (>=1.24)"] testing = ["covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "setuptools (>=68.1.2)", "wheel (>=0.41.2)"] [[package]] name = "pytest" version = "8.1.1" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, ] [package.dependencies] colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" pluggy = ">=1.4,<2.0" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", 
"requests", "setuptools", "xmlschema"] [[package]] name = "pytest-xdist" version = "3.5.0" description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" optional = false python-versions = ">=3.7" files = [ {file = "pytest-xdist-3.5.0.tar.gz", hash = "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a"}, {file = "pytest_xdist-3.5.0-py3-none-any.whl", hash = "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"}, ] [package.dependencies] execnet = ">=1.1" pytest = ">=6.2.0" [package.extras] psutil = ["psutil (>=3.0)"] setproctitle = ["setproctitle"] testing = ["filelock"] [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.7" files = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] [[package]] name = "tox" version = "4.14.1" description = "tox is a generic virtualenv management and test command line tool" optional = false python-versions = ">=3.8" files = [ {file = "tox-4.14.1-py3-none-any.whl", hash = "sha256:b03754b6ee6dadc70f2611da82b4ed8f625fcafd247e15d1d0cb056f90a06d3b"}, {file = "tox-4.14.1.tar.gz", hash = "sha256:f0ad758c3bbf7e237059c929d3595479363c3cdd5a06ac3e49d1dd020ffbee45"}, ] [package.dependencies] cachetools = ">=5.3.2" chardet = ">=5.2" colorama = ">=0.4.6" filelock = ">=3.13.1" packaging = ">=23.2" 
platformdirs = ">=4.1" pluggy = ">=1.3" pyproject-api = ">=1.6.1" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} virtualenv = ">=20.25" [package.extras] docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-argparse-cli (>=1.11.1)", "sphinx-autodoc-typehints (>=1.25.2)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.11)"] testing = ["build[virtualenv] (>=1.0.3)", "covdefaults (>=2.3)", "detect-test-pollution (>=1.2)", "devpi-process (>=1)", "diff-cover (>=8.0.2)", "distlib (>=0.3.8)", "flaky (>=3.7)", "hatch-vcs (>=0.4)", "hatchling (>=1.21)", "psutil (>=5.9.7)", "pytest (>=7.4.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-xdist (>=3.5)", "re-assert (>=1.1)", "time-machine (>=2.13)", "wheel (>=0.42)"] [[package]] name = "types-setuptools" version = "69.1.0.20240310" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ {file = "types-setuptools-69.1.0.20240310.tar.gz", hash = "sha256:fc0e1082f55c974611bce844b1e5beb2d1a895501f4a464e48305592a4268100"}, {file = "types_setuptools-69.1.0.20240310-py3-none-any.whl", hash = "sha256:7801245ecaf371d24f1154924c8f1f0efdc53977339bf79886b5b10890af6478"}, ] [[package]] name = "typing-extensions" version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, ] [[package]] name = "virtualenv" version = "20.25.1" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ {file = "virtualenv-20.25.1-py3-none-any.whl", hash = 
"sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a"}, {file = "virtualenv-20.25.1.tar.gz", hash = "sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197"}, ] [package.dependencies] distlib = ">=0.3.7,<1" filelock = ">=3.12.2,<4" platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.8" content-hash = "b40d47067f444deec4964404014795593f1b602f8a2f6376279bb5a27d5e18be" pytest-bdd-7.1.2/pyproject.toml000066400000000000000000000051551457564257700165500ustar00rootroot00000000000000[tool.poetry] name = "pytest-bdd" version = "7.1.2" description = "BDD for pytest" authors = ["Oleg Pidsadnyi ", "Anatoly Bubenkov "] maintainers = ["Alessio Bogon <778703+youtux@users.noreply.github.com>"] license = "MIT" readme = "README.rst" homepage = "https://pytest-bdd.readthedocs.io/" documentation = "https://pytest-bdd.readthedocs.io/" repository = "https://github.com/pytest-dev/pytest-bdd" classifiers = [ "Development Status :: 6 - Mature", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python 
:: 3.11", "Programming Language :: Python :: 3.12", ] [tool.poetry.plugins."pytest11"] "pytest-bdd" = "pytest_bdd.plugin" [tool.poetry.scripts] "pytest-bdd" = "pytest_bdd.scripts:main" [tool.poetry.dependencies] python = ">=3.8" Mako = "*" parse = "*" parse-type = "*" pytest = ">=6.2.0" typing-extensions = "*" packaging = "*" [tool.poetry.group.dev.dependencies] tox = ">=4.11.3" mypy = ">=1.6.0" types-setuptools = ">=68.2.0.0" pytest-xdist = ">=3.3.1" coverage = {extras = ["toml"], version = ">=6.5.0"} Pygments = ">=2.13.0" # for code-block highlighting [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.black] line-length = 120 target-version = ["py38", "py39", "py310", "py311", "py312"] [tool.isort] profile = "black" line_length = 120 multi_line_output = 3 [tool.coverage.report] exclude_lines = [ "if TYPE_CHECKING:", "if typing\\.TYPE_CHECKING:", ] [tool.coverage.html] show_contexts = true [tool.coverage.run] branch = true # `parallel` will cause each tox env to put data into a different file, so that we can combine them later parallel = true source = ["pytest_bdd", "tests"] dynamic_context = "test_function" [tool.coverage.paths] # treat these directories as the same when combining # the first item is going to be the canonical dir source = [ "src/pytest_bdd", ".tox/*/lib/python*/site-packages/pytest_bdd", ] [tool.mypy] python_version = "3.8" warn_return_any = true warn_unused_configs = true files = "src/pytest_bdd/**/*.py" [[tool.mypy.overrides]] module = ["parse", "parse_type"] ignore_missing_imports = true pytest-bdd-7.1.2/pytest.ini000066400000000000000000000000661457564257700156610ustar00rootroot00000000000000[pytest] testpaths = tests filterwarnings = error 
"""Version-compatibility shims for the supported range of pytest releases."""
from __future__ import annotations

from collections.abc import Sequence
from importlib.metadata import version
from typing import Any

from _pytest.fixtures import FixtureDef, FixtureManager, FixtureRequest
from _pytest.nodes import Node
from packaging.version import parse as parse_version

pytest_version = parse_version(version("pytest"))

__all__ = ["getfixturedefs", "inject_fixture"]

if pytest_version.release >= (8, 1):

    def getfixturedefs(fixturemanager: FixtureManager, fixturename: str, node: Node) -> Sequence[FixtureDef] | None:
        # pytest >= 8.1 takes the node object itself rather than its id.
        return fixturemanager.getfixturedefs(fixturename, node)

    def inject_fixture(request: FixtureRequest, arg: str, value: Any) -> None:
        """Inject fixture into pytest fixture request.

        :param request: pytest fixture request
        :param arg: argument name
        :param value: argument value
        """
        # pytest >= 8.1 exposes a (private) registration helper that handles
        # all of the bookkeeping for us.
        request._fixturemanager._register_fixture(
            name=arg,
            func=lambda: value,
            nodeid=request.node.nodeid,
        )

else:

    def getfixturedefs(fixturemanager: FixtureManager, fixturename: str, node: Node) -> Sequence[FixtureDef] | None:
        # Older pytest expects the node id, not the node.
        return fixturemanager.getfixturedefs(fixturename, node.nodeid)

    def inject_fixture(request: FixtureRequest, arg: str, value: Any) -> None:
        """Inject fixture into pytest fixture request.

        :param request: pytest fixture request
        :param arg: argument name
        :param value: argument value
        """
        fixture_def = FixtureDef(
            fixturemanager=request._fixturemanager,
            baseid=None,
            argname=arg,
            func=lambda: value,
            scope="function",
            params=None,
        )
        # Pretend the fixture has already run so its value is served from cache.
        fixture_def.cached_result = (value, 0, None)

        previous_def = request._fixture_defs.get(arg)
        name_is_new = arg not in request.fixturenames

        def restore() -> None:
            # Undo the injection once the requesting test finishes.
            request._fixturemanager._arg2fixturedefs[arg].remove(fixture_def)
            if previous_def is not None:
                request._fixture_defs[arg] = previous_def
            if name_is_new:
                request._pyfuncitem._fixtureinfo.names_closure.remove(arg)

        request.addfinalizer(restore)

        # Register the fixture definition.
        request._fixturemanager._arg2fixturedefs.setdefault(arg, []).append(fixture_def)

        # Prime the request cache with the injected value.
        request._fixture_defs[arg] = fixture_def
        if name_is_new:
            request._pyfuncitem._fixtureinfo.names_closure.append(arg)
class LogBDDCucumberJSON:
    """Logging plugin for cucumber like json output.

    Collects scenario results reported by pytest-bdd and writes them out as a
    cucumber-style JSON file at the end of the session.
    """

    def __init__(self, logfile: str) -> None:
        # Normalize the user-supplied path (env vars, ``~``, relative parts).
        expanded = os.path.expanduser(os.path.expandvars(logfile))
        self.logfile = os.path.normpath(os.path.abspath(expanded))
        # Maps feature filename -> serialized feature dict (with its elements).
        self.features: dict[str, dict] = {}

    def _get_result(self, step: dict[str, Any], report: TestReport, error_message: bool = False) -> dict[str, Any]:
        """Get scenario test run result.

        :param step: `Step` step we get result for
        :param report: pytest `Report` object
        :param error_message: whether to include the failure text in the result
        :return: `dict` in form {"status": "<status>", ["error_message": "<error_message>"]}
        """
        outcome: dict[str, Any] = {}
        if report.passed or not step["failed"]:  # ignore setup/teardown
            outcome = {"status": "passed"}
        elif report.failed:
            outcome = {"status": "failed", "error_message": str(report.longrepr) if error_message else ""}
        elif report.skipped:
            outcome = {"status": "skipped"}
        outcome["duration"] = int(math.floor((10**9) * step["duration"]))  # nanosec
        return outcome

    def _serialize_tags(self, item: dict[str, Any]) -> list[dict[str, Any]]:
        """Serialize item's tags.

        :param item: json-serialized `Scenario` or `Feature`.
        :return: `list` of `dict` in the form [{"name": "<tag>", "line": 2}, ...]
        """
        tag_line = item["line_number"] - 1
        return [{"name": tag, "line": tag_line} for tag in item["tags"]]

    def pytest_runtest_logreport(self, report: TestReport) -> None:
        try:
            scenario = report.scenario
        except AttributeError:
            # skip reporting for non-bdd tests
            return

        if not scenario["steps"] or report.when != "call":
            # skip if there isn't a result or scenario has no steps
            return

        def serialize_step(step: dict[str, Any]) -> dict[str, Any]:
            # Only the first failing step carries the full error message.
            include_error = False
            if step["failed"] and not scenario.setdefault("failed", False):
                scenario["failed"] = True
                include_error = True
            return {
                "keyword": step["keyword"],
                "name": step["name"],
                "line": step["line_number"],
                "match": {"location": ""},
                "result": self._get_result(step, report, include_error),
            }

        feature = scenario["feature"]
        if feature["filename"] not in self.features:
            self.features[feature["filename"]] = {
                "keyword": "Feature",
                "uri": feature["rel_filename"],
                "name": feature["name"] or feature["rel_filename"],
                "id": feature["rel_filename"].lower().replace(" ", "-"),
                "line": feature["line_number"],
                "description": feature["description"],
                "tags": self._serialize_tags(feature),
                "elements": [],
            }

        self.features[feature["filename"]]["elements"].append(
            {
                "keyword": "Scenario",
                "id": report.item["name"],
                "name": scenario["name"],
                "line": scenario["line_number"],
                "description": "",
                "tags": self._serialize_tags(scenario),
                "type": "scenario",
                "steps": [serialize_step(step) for step in scenario["steps"]],
            }
        )

    def pytest_sessionstart(self) -> None:
        self.suite_start_time = time.time()

    def pytest_sessionfinish(self) -> None:
        with open(self.logfile, "w", encoding="utf-8") as logfile:
            logfile.write(json.dumps(list(self.features.values())))

    def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
        terminalreporter.write_sep("-", f"generated json file: {self.logfile}")
"""pytest-bdd Exceptions."""
from __future__ import annotations


class ScenarioIsDecoratorOnly(Exception):
    """Scenario can be only used as decorator."""


class ScenarioValidationError(Exception):
    """Base class for scenario validation."""


class ScenarioNotFound(ScenarioValidationError):
    """Scenario Not Found."""


class ExamplesNotValidError(ScenarioValidationError):
    """Example table is not valid."""


class StepDefinitionNotFoundError(Exception):
    """Step definition not found."""


class NoScenariosFound(Exception):
    """No scenarios found."""


class FeatureError(Exception):
    """Feature parse error."""

    # Template interpolated with the positional args:
    # (message, line number, line text, filename).
    message: str = "{0}.\nLine number: {1}.\nLine: {2}.\nFile: {3}"

    def __str__(self) -> str:
        """String representation built from the constructor arguments."""
        return self.message.format(*self.args)
""" from __future__ import annotations import glob import os.path from .parser import Feature, parse_feature # Global features dictionary features: dict[str, Feature] = {} def get_feature(base_path: str, filename: str, encoding: str = "utf-8") -> Feature: """Get a feature by the filename. :param str base_path: Base feature directory. :param str filename: Filename of the feature file. :param str encoding: Feature file encoding. :return: `Feature` instance from the parsed feature cache. :note: The features are parsed on the execution of the test and stored in the global variable cache to improve the performance when multiple scenarios are referencing the same file. """ __tracebackhide__ = True full_name = os.path.abspath(os.path.join(base_path, filename)) feature = features.get(full_name) if not feature: feature = parse_feature(base_path, filename, encoding=encoding) features[full_name] = feature return feature def get_features(paths: list[str], **kwargs) -> list[Feature]: """Get features for given paths. :param list paths: `list` of paths (file or dirs) :return: `list` of `Feature` objects. 
""" seen_names = set() features = [] for path in paths: if path not in seen_names: seen_names.add(path) if os.path.isdir(path): features.extend( get_features(glob.iglob(os.path.join(path, "**", "*.feature"), recursive=True), **kwargs) ) else: base, name = os.path.split(path) feature = get_feature(base, name, **kwargs) features.append(feature) features.sort(key=lambda feature: feature.name or feature.filename) return features pytest-bdd-7.1.2/src/pytest_bdd/generation.py000066400000000000000000000154541457564257700212740ustar00rootroot00000000000000"""pytest-bdd missing test code generation.""" from __future__ import annotations import itertools import os.path from typing import TYPE_CHECKING, cast from _pytest._io import TerminalWriter from mako.lookup import TemplateLookup from .compat import getfixturedefs from .feature import get_features from .scenario import inject_fixturedefs_for_step, make_python_docstring, make_python_name, make_string_literal from .steps import get_step_fixture_name from .types import STEP_TYPES if TYPE_CHECKING: from typing import Any, Sequence from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureDef, FixtureManager from _pytest.main import Session from _pytest.python import Function from .parser import Feature, ScenarioTemplate, Step template_lookup = TemplateLookup(directories=[os.path.join(os.path.dirname(__file__), "templates")]) def add_options(parser: Parser) -> None: """Add pytest-bdd options.""" group = parser.getgroup("bdd", "Generation") group._addoption( "--generate-missing", action="store_true", dest="generate_missing", default=False, help="Generate missing bdd test code for given feature files and exit.", ) group._addoption( "--feature", metavar="FILE_OR_DIR", action="append", dest="features", help="Feature file or directory to generate missing code for. 
def cmdline_main(config: Config) -> int | None:
    """Check config option to show missing code.

    :return: exit status when ``--generate-missing`` was requested, otherwise ``None``.
    """
    if config.option.generate_missing:
        return show_missing_code(config)
    return None  # Make mypy happy


def generate_code(features: list[Feature], scenarios: list[ScenarioTemplate], steps: list[Step]) -> str:
    """Generate test code for the given filenames."""
    rendered = template_lookup.get_template("test.py.mak").render(
        features=features,
        scenarios=scenarios,
        steps=group_steps(steps),
        make_python_name=make_python_name,
        make_python_docstring=make_python_docstring,
        make_string_literal=make_string_literal,
    )
    return cast(str, rendered)


def show_missing_code(config: Config) -> int:
    """Wrap pytest session to show missing code."""
    from _pytest.main import wrap_session

    return wrap_session(config, _show_missing_code_main)


def print_missing_code(scenarios: list[ScenarioTemplate], steps: list[Step]) -> None:
    """Print missing code with TerminalWriter."""
    writer = TerminalWriter()
    # The loop variables double as "did we print anything" sentinels below.
    scenario = step = None
    for scenario in scenarios:
        writer.line()
        writer.line(
            f'Scenario "{scenario.name}" is not bound to any test in the feature "{scenario.feature.name}"'
            f" in the file {scenario.feature.filename}:{scenario.line_number}",
            red=True,
        )
    if scenario:
        writer.sep("-", red=True)
    for step in steps:
        writer.line()
        if step.scenario is not None:
            writer.line(
                f'Step {step} is not defined in the scenario "{step.scenario.name}" in the feature'
                f' "{step.scenario.feature.name}" in the file'
                f" {step.scenario.feature.filename}:{step.line_number}",
                red=True,
            )
        elif step.background is not None:
            writer.line(
                f"Step {step} is not defined in the background of the feature"
                f' "{step.background.feature.name}" in the file'
                f" {step.background.feature.filename}:{step.line_number}",
                red=True,
            )
    if step:
        writer.sep("-", red=True)
    writer.line("Please place the code above to the test file(s):")
    writer.line()
    features = sorted(
        (scenario.feature for scenario in scenarios), key=lambda feature: feature.name or feature.filename
    )
    writer.write(generate_code(features, scenarios, steps))


def _find_step_fixturedef(
    fixturemanager: FixtureManager, item: Function, step: Step
) -> Sequence[FixtureDef[Any]] | None:
    """Find step fixturedef."""
    with inject_fixturedefs_for_step(step=step, fixturemanager=fixturemanager, node=item):
        bdd_name = get_step_fixture_name(step=step)
        return getfixturedefs(fixturemanager, bdd_name, item)


def parse_feature_files(paths: list[str], **kwargs: Any) -> tuple[list[Feature], list[ScenarioTemplate], list[Step]]:
    """Parse feature files of given paths.

    :param paths: `list` of paths (file or dirs)
    :return: `tuple` in form:
        (`list` of `Feature` objects, `list` of `Scenario` objects, `list` of `Step` objects).
    """
    features = get_features(paths, **kwargs)
    all_scenarios = itertools.chain.from_iterable(feature.scenarios.values() for feature in features)
    scenarios = sorted(
        all_scenarios,
        key=lambda scenario: (scenario.feature.name or scenario.feature.filename, scenario.name),
    )
    steps = sorted((step for scenario in scenarios for step in scenario.steps), key=lambda step: step.name)
    return features, scenarios, steps


def group_steps(steps: list[Step]) -> list[Step]:
    """Group steps by type, deduplicating by step name."""
    by_type = sorted(steps, key=lambda step: step.type)
    seen: set[str] = set()
    grouped: list[Step] = []
    # Within each type group, iterate steps in name order and keep first occurrences.
    for step in itertools.chain.from_iterable(
        sorted(group, key=lambda step: step.name) for _, group in itertools.groupby(by_type, lambda step: step.type)
    ):
        if step.name not in seen:
            grouped.append(step)
            seen.add(step.name)
    grouped.sort(key=lambda step: STEP_TYPES.index(step.type))
    return grouped


def _show_missing_code_main(config: Config, session: Session) -> None:
    """Preparing fixture duplicates for output."""
    writer = TerminalWriter()
    session.perform_collect()
    fixturemanager = session._fixturemanager
    if config.option.features is None:
        writer.line("The --feature parameter is required.", red=True)
        session.exitstatus = 100
        return
    features, scenarios, steps = parse_feature_files(config.option.features)
    # Drop scenarios/steps that are already bound to collected tests.
    for item in session.items:
        if scenario := getattr(item.obj, "__scenario__", None):
            if scenario in scenarios:
                scenarios.remove(scenario)
            for step in scenario.steps:
                if _find_step_fixturedef(fixturemanager, item, step=step):
                    try:
                        steps.remove(step)
                    except ValueError:
                        pass
    # Steps of unbound scenarios are reported with the scenario, not individually
    # (background steps are kept).
    for scenario in scenarios:
        for step in scenario.steps:
            if step.background is None:
                steps.remove(step)
    grouped_steps = group_steps(steps)
    print_missing_code(scenarios, grouped_steps)
    if scenarios or steps:
        session.exitstatus = 100
"Please decide to use one by deactivating {0} or gherkin-terminal-reporter.".format( current_reporter.__class__ ) ) gherkin_reporter = GherkinTerminalReporter(config) config.pluginmanager.unregister(current_reporter) config.pluginmanager.register(gherkin_reporter, "terminalreporter") if config.pluginmanager.getplugin("dsession"): raise Exception("gherkin-terminal-reporter is not compatible with 'xdist' plugin.") class GherkinTerminalReporter(TerminalReporter): def __init__(self, config: Config) -> None: super().__init__(config) def pytest_runtest_logreport(self, report: TestReport) -> Any: rep = report res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config) cat, letter, word = res if not letter and not word: # probably passed setup/teardown return None if isinstance(word, tuple): word, word_markup = word elif rep.passed: word_markup = {"green": True} elif rep.failed: word_markup = {"red": True} elif rep.skipped: word_markup = {"yellow": True} feature_markup = {"blue": True} scenario_markup = word_markup if self.verbosity <= 0 or not hasattr(report, "scenario"): return super().pytest_runtest_logreport(rep) if self.verbosity == 1: self.ensure_newline() self._tw.write("Feature: ", **feature_markup) self._tw.write(report.scenario["feature"]["name"], **feature_markup) self._tw.write("\n") self._tw.write(" Scenario: ", **scenario_markup) self._tw.write(report.scenario["name"], **scenario_markup) self._tw.write(" ") self._tw.write(word, **word_markup) self._tw.write("\n") elif self.verbosity > 1: self.ensure_newline() self._tw.write("Feature: ", **feature_markup) self._tw.write(report.scenario["feature"]["name"], **feature_markup) self._tw.write("\n") self._tw.write(" Scenario: ", **scenario_markup) self._tw.write(report.scenario["name"], **scenario_markup) self._tw.write("\n") for step in report.scenario["steps"]: self._tw.write(f" {step['keyword']} {step['name']}\n", **scenario_markup) self._tw.write(f" {word}", **word_markup) 
self._tw.write("\n\n") self.stats.setdefault(cat, []).append(rep) return None pytest-bdd-7.1.2/src/pytest_bdd/hooks.py000066400000000000000000000025151457564257700202560ustar00rootroot00000000000000from __future__ import annotations import pytest """Pytest-bdd pytest hooks.""" def pytest_bdd_before_scenario(request, feature, scenario): """Called before scenario is executed.""" def pytest_bdd_after_scenario(request, feature, scenario): """Called after scenario is executed.""" def pytest_bdd_before_step(request, feature, scenario, step, step_func): """Called before step function is set up.""" def pytest_bdd_before_step_call(request, feature, scenario, step, step_func, step_func_args): """Called before step function is executed.""" def pytest_bdd_after_step(request, feature, scenario, step, step_func, step_func_args): """Called after step function is successfully executed.""" def pytest_bdd_step_error(request, feature, scenario, step, step_func, step_func_args, exception): """Called when step function failed to execute.""" def pytest_bdd_step_func_lookup_error(request, feature, scenario, step, exception): """Called when step lookup failed.""" @pytest.hookspec(firstresult=True) def pytest_bdd_apply_tag(tag, function): """Apply a tag (from a ``.feature`` file) to the given scenario. The default implementation does the equivalent of ``getattr(pytest.mark, tag)(function)``, but you can override this hook and return ``True`` to do more sophisticated handling of tags. """ pytest-bdd-7.1.2/src/pytest_bdd/parser.py000066400000000000000000000321671457564257700204350ustar00rootroot00000000000000from __future__ import annotations import os.path import re import textwrap import typing from collections import OrderedDict from dataclasses import dataclass, field from functools import cached_property from typing import cast from . 
import exceptions, types SPLIT_LINE_RE = re.compile(r"(?") COMMENT_RE = re.compile(r"(^|(?<=\s))#") STEP_PREFIXES = [ ("Feature: ", types.FEATURE), ("Scenario Outline: ", types.SCENARIO_OUTLINE), ("Examples:", types.EXAMPLES), ("Scenario: ", types.SCENARIO), ("Background:", types.BACKGROUND), ("Given ", types.GIVEN), ("When ", types.WHEN), ("Then ", types.THEN), ("@", types.TAG), # Continuation of the previously mentioned step type ("And ", None), ("But ", None), ] TYPES_WITH_DESCRIPTIONS = [types.FEATURE, types.SCENARIO, types.SCENARIO_OUTLINE] if typing.TYPE_CHECKING: from typing import Any, Iterable, Mapping, Match, Sequence def split_line(line: str) -> list[str]: """Split the given Examples line. :param str|unicode line: Feature file Examples line. :return: List of strings. """ return [cell.replace("\\|", "|").strip() for cell in SPLIT_LINE_RE.split(line)[1:-1]] def parse_line(line: str) -> tuple[str, str]: """Parse step line to get the step prefix (Scenario, Given, When, Then or And) and the actual step name. :param line: Line of the Feature file. :return: `tuple` in form ("", ""). """ for prefix, _ in STEP_PREFIXES: if line.startswith(prefix): return prefix.strip(), line[len(prefix) :].strip() return "", line def strip_comments(line: str) -> str: """Remove comments. :param str line: Line of the Feature file. :return: Stripped line. """ if res := COMMENT_RE.search(line): line = line[: res.start()] return line.strip() def get_step_type(line: str) -> str | None: """Detect step type by the beginning of the line. :param str line: Line of the Feature file. :return: SCENARIO, GIVEN, WHEN, THEN, or `None` if can't be detected. """ for prefix, _type in STEP_PREFIXES: if line.startswith(prefix): return _type return None def parse_feature(basedir: str, filename: str, encoding: str = "utf-8") -> Feature: """Parse the feature file. :param str basedir: Feature files base directory. :param str filename: Relative path to the feature file. 
:param str encoding: Feature file encoding (utf-8 by default). """ __tracebackhide__ = True abs_filename = os.path.abspath(os.path.join(basedir, filename)) rel_filename = os.path.join(os.path.basename(basedir), filename) feature = Feature( scenarios=OrderedDict(), filename=abs_filename, rel_filename=rel_filename, line_number=1, name=None, tags=set(), background=None, description="", ) scenario: ScenarioTemplate | None = None mode: str | None = None prev_mode = None description: list[str] = [] step = None multiline_step = False prev_line = None with open(abs_filename, encoding=encoding) as f: content = f.read() for line_number, line in enumerate(content.splitlines(), start=1): unindented_line = line.lstrip() line_indent = len(line) - len(unindented_line) if step and (step.indent < line_indent or ((not unindented_line) and multiline_step)): multiline_step = True # multiline step, so just add line and continue step.add_line(line) continue else: step = None multiline_step = False stripped_line = line.strip() clean_line = strip_comments(line) if not clean_line and (not prev_mode or prev_mode not in TYPES_WITH_DESCRIPTIONS): # Blank lines are included in feature and scenario descriptions continue mode = get_step_type(clean_line) or mode allowed_prev_mode = (types.BACKGROUND, types.GIVEN, types.WHEN) if not scenario and prev_mode not in allowed_prev_mode and mode in types.STEP_TYPES: raise exceptions.FeatureError( "Step definition outside of a Scenario or a Background", line_number, clean_line, filename ) if mode == types.FEATURE: if prev_mode is None or prev_mode == types.TAG: _, feature.name = parse_line(clean_line) feature.line_number = line_number feature.tags = get_tags(prev_line) elif prev_mode == types.FEATURE: # Do not include comments in descriptions if not stripped_line.startswith("#"): description.append(clean_line) else: raise exceptions.FeatureError( "Multiple features are not allowed in a single feature file", line_number, clean_line, filename, ) prev_mode = 
mode # Remove Feature, Given, When, Then, And keyword, parsed_line = parse_line(clean_line) if mode in [types.SCENARIO, types.SCENARIO_OUTLINE]: # Lines between the scenario declaration # and the scenario's first step line # are considered part of the scenario description. if scenario and not keyword: # Do not include comments in descriptions if not stripped_line.startswith("#"): scenario.add_description_line(clean_line) continue tags = get_tags(prev_line) scenario = ScenarioTemplate( feature=feature, name=parsed_line, line_number=line_number, tags=tags, templated=mode == types.SCENARIO_OUTLINE, ) feature.scenarios[parsed_line] = scenario elif mode == types.BACKGROUND: feature.background = Background(feature=feature, line_number=line_number) elif mode == types.EXAMPLES: mode = types.EXAMPLES_HEADERS scenario.examples.line_number = line_number elif mode == types.EXAMPLES_HEADERS: scenario.examples.set_param_names([l for l in split_line(parsed_line) if l]) mode = types.EXAMPLE_LINE elif mode == types.EXAMPLE_LINE: scenario.examples.add_example(list(split_line(stripped_line))) elif mode and mode not in (types.FEATURE, types.TAG): step = Step(name=parsed_line, type=mode, indent=line_indent, line_number=line_number, keyword=keyword) if feature.background and not scenario: feature.background.add_step(step) else: scenario = cast(ScenarioTemplate, scenario) scenario.add_step(step) prev_line = clean_line feature.description = "\n".join(description).strip() return feature @dataclass class Feature: scenarios: OrderedDict[str, ScenarioTemplate] filename: str rel_filename: str name: str | None tags: set[str] background: Background | None line_number: int description: str @dataclass class ScenarioTemplate: """A scenario template. Created when parsing the feature file, it will then be combined with the examples to create a Scenario. 
""" feature: Feature name: str line_number: int templated: bool tags: set[str] = field(default_factory=set) examples: Examples | None = field(default_factory=lambda: Examples()) _steps: list[Step] = field(init=False, default_factory=list) _description_lines: list[str] = field(init=False, default_factory=list) def add_step(self, step: Step) -> None: step.scenario = self self._steps.append(step) @property def steps(self) -> list[Step]: background = self.feature.background return (background.steps if background else []) + self._steps def render(self, context: Mapping[str, Any]) -> Scenario: background_steps = self.feature.background.steps if self.feature.background else [] if not self.templated: scenario_steps = self._steps else: scenario_steps = [ Step( name=step.render(context), type=step.type, indent=step.indent, line_number=step.line_number, keyword=step.keyword, ) for step in self._steps ] steps = background_steps + scenario_steps return Scenario( feature=self.feature, name=self.name, line_number=self.line_number, steps=steps, tags=self.tags, description=self._description_lines, ) def add_description_line(self, description_line): """Add a description line to the scenario. :param str description_line: """ self._description_lines.append(description_line) @property def description(self): """Get the scenario's description. 
:return: The scenario description """ return "\n".join(self._description_lines) @dataclass class Scenario: feature: Feature name: str line_number: int steps: list[Step] tags: set[str] = field(default_factory=set) description: list[str] = field(default_factory=list) @dataclass class Step: type: str _name: str line_number: int indent: int keyword: str failed: bool = field(init=False, default=False) scenario: ScenarioTemplate | None = field(init=False, default=None) background: Background | None = field(init=False, default=None) lines: list[str] = field(init=False, default_factory=list) def __init__(self, name: str, type: str, indent: int, line_number: int, keyword: str) -> None: self.name = name self.type = type self.indent = indent self.line_number = line_number self.keyword = keyword self.failed = False self.scenario = None self.background = None self.lines = [] def add_line(self, line: str) -> None: """Add line to the multiple step. :param str line: Line of text - the continuation of the step name. """ self.lines.append(line) self._invalidate_full_name_cache() @cached_property def full_name(self) -> str: multilines_content = textwrap.dedent("\n".join(self.lines)) if self.lines else "" # Remove the multiline quotes, if present. multilines_content = re.sub( pattern=r'^"""\n(?P.*)\n"""$', repl=r"\g", string=multilines_content, flags=re.DOTALL, # Needed to make the "." 
match also new lines ) lines = [self._name] + [multilines_content] return "\n".join(lines).strip() def _invalidate_full_name_cache(self) -> None: """Invalidate the full_name cache.""" if "full_name" in self.__dict__: del self.full_name @property def name(self) -> str: return self.full_name @name.setter def name(self, value: str) -> None: self._name = value self._invalidate_full_name_cache() def __str__(self) -> str: """Full step name including the type.""" return f'{self.type.capitalize()} "{self.name}"' @property def params(self) -> tuple[str, ...]: return tuple(frozenset(STEP_PARAM_RE.findall(self.name))) def render(self, context: Mapping[str, Any]) -> str: def replacer(m: Match): varname = m.group(1) return str(context[varname]) return STEP_PARAM_RE.sub(replacer, self.name) @dataclass class Background: feature: Feature line_number: int steps: list[Step] = field(init=False, default_factory=list) def add_step(self, step: Step) -> None: """Add step to the background.""" step.background = self self.steps.append(step) @dataclass class Examples: """Example table.""" line_number: int | None = field(default=None) name: str | None = field(default=None) example_params: list[str] = field(init=False, default_factory=list) examples: list[Sequence[str]] = field(init=False, default_factory=list) def set_param_names(self, keys: Iterable[str]) -> None: self.example_params = [str(key) for key in keys] def add_example(self, values: Sequence[str]) -> None: self.examples.append(values) def as_contexts(self) -> Iterable[dict[str, Any]]: if not self.examples: return header, rows = self.example_params, self.examples for row in rows: assert len(header) == len(row) yield dict(zip(header, row)) def __bool__(self) -> bool: return bool(self.examples) def get_tags(line: str | None) -> set[str]: """Get tags out of the given line. :param str line: Feature file text line. :return: List of tags. 
""" if not line or not line.strip().startswith("@"): return set() return {tag.lstrip("@") for tag in line.strip().split(" @") if len(tag) > 1} pytest-bdd-7.1.2/src/pytest_bdd/parsers.py000066400000000000000000000060621457564257700206130ustar00rootroot00000000000000"""Step parsers.""" from __future__ import annotations import abc import re as base_re from typing import Any, Dict, TypeVar, cast, overload import parse as base_parse from parse_type import cfparse as base_cfparse class StepParser(abc.ABC): """Parser of the individual step.""" def __init__(self, name: str) -> None: self.name = name @abc.abstractmethod def parse_arguments(self, name: str) -> dict[str, Any] | None: """Get step arguments from the given step name. :return: `dict` of step arguments """ ... @abc.abstractmethod def is_matching(self, name: str) -> bool: """Match given name with the step name.""" ... class re(StepParser): """Regex step parser.""" def __init__(self, name: str, *args: Any, **kwargs: Any) -> None: """Compile regex.""" super().__init__(name) self.regex = base_re.compile(self.name, *args, **kwargs) def parse_arguments(self, name: str) -> dict[str, str] | None: """Get step arguments. :return: `dict` of step arguments """ match = self.regex.fullmatch(name) if match is None: return None return match.groupdict() def is_matching(self, name: str) -> bool: """Match given name with the step name.""" return bool(self.regex.fullmatch(name)) class parse(StepParser): """parse step parser.""" def __init__(self, name: str, *args: Any, **kwargs: Any) -> None: """Compile parse expression.""" super().__init__(name) self.parser = base_parse.compile(self.name, *args, **kwargs) def parse_arguments(self, name: str) -> dict[str, Any]: """Get step arguments. 
:return: `dict` of step arguments """ return cast(Dict[str, Any], self.parser.parse(name).named) def is_matching(self, name: str) -> bool: """Match given name with the step name.""" try: return bool(self.parser.parse(name)) except ValueError: return False class cfparse(parse): """cfparse step parser.""" def __init__(self, name: str, *args: Any, **kwargs: Any) -> None: """Compile parse expression.""" super(parse, self).__init__(name) self.parser = base_cfparse.Parser(self.name, *args, **kwargs) class string(StepParser): """Exact string step parser.""" def parse_arguments(self, name: str) -> dict: """No parameters are available for simple string step. :return: `dict` of step arguments """ return {} def is_matching(self, name: str) -> bool: """Match given name with the step name.""" return self.name == name TStepParser = TypeVar("TStepParser", bound=StepParser) @overload def get_parser(step_name: str) -> string: ... @overload def get_parser(step_name: TStepParser) -> TStepParser: ... def get_parser(step_name: str | StepParser) -> StepParser: """Get parser by given name.""" if isinstance(step_name, StepParser): return step_name return string(step_name) pytest-bdd-7.1.2/src/pytest_bdd/plugin.py000066400000000000000000000073761457564257700204430ustar00rootroot00000000000000"""Pytest plugin entry point. Used for any fixtures needed.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Callable, Generator, TypeVar, cast import pytest from typing_extensions import ParamSpec from . 
import cucumber_json, generation, gherkin_terminal_reporter, given, reporting, then, when from .utils import CONFIG_STACK if TYPE_CHECKING: from _pytest.config import Config, PytestPluginManager from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureRequest from _pytest.nodes import Item from _pytest.runner import CallInfo from pluggy._result import _Result from .parser import Feature, Scenario, Step P = ParamSpec("P") T = TypeVar("T") def pytest_addhooks(pluginmanager: PytestPluginManager) -> None: """Register plugin hooks.""" from pytest_bdd import hooks pluginmanager.add_hookspecs(hooks) @given("trace") @when("trace") @then("trace") def _() -> None: """Enter pytest's pdb trace.""" pytest.set_trace() @pytest.fixture def _pytest_bdd_example() -> dict: """The current scenario outline parametrization. This is used internally by pytest_bdd. If no outline is used, we just return an empty dict to render the current template without any actual variable. Otherwise pytest_bdd will add all the context variables in this fixture from the example definitions in the feature file. 
""" return {} def pytest_addoption(parser: Parser) -> None: """Add pytest-bdd options.""" add_bdd_ini(parser) cucumber_json.add_options(parser) generation.add_options(parser) gherkin_terminal_reporter.add_options(parser) def add_bdd_ini(parser: Parser) -> None: parser.addini("bdd_features_base_dir", "Base features directory.") @pytest.hookimpl(trylast=True) def pytest_configure(config: Config) -> None: """Configure all subplugins.""" CONFIG_STACK.append(config) cucumber_json.configure(config) gherkin_terminal_reporter.configure(config) def pytest_unconfigure(config: Config) -> None: """Unconfigure all subplugins.""" if CONFIG_STACK: CONFIG_STACK.pop() cucumber_json.unconfigure(config) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item: Item, call: CallInfo) -> Generator[None, _Result, None]: outcome = yield reporting.runtest_makereport(item, call, outcome.get_result()) @pytest.hookimpl(tryfirst=True) def pytest_bdd_before_scenario(request: FixtureRequest, feature: Feature, scenario: Scenario) -> None: reporting.before_scenario(request, feature, scenario) @pytest.hookimpl(tryfirst=True) def pytest_bdd_step_error( request: FixtureRequest, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any], step_func_args: dict, exception: Exception, ) -> None: reporting.step_error(request, feature, scenario, step, step_func, step_func_args, exception) @pytest.hookimpl(tryfirst=True) def pytest_bdd_before_step( request: FixtureRequest, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any], ) -> None: reporting.before_step(request, feature, scenario, step, step_func) @pytest.hookimpl(tryfirst=True) def pytest_bdd_after_step( request: FixtureRequest, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any], step_func_args: dict[str, Any], ) -> None: reporting.after_step(request, feature, scenario, step, step_func, step_func_args) def pytest_cmdline_main(config: Config) -> int | None: return 
generation.cmdline_main(config) def pytest_bdd_apply_tag(tag: str, function: Callable[P, T]) -> Callable[P, T]: mark = getattr(pytest.mark, tag) marked = mark(function) return cast(Callable[P, T], marked) pytest-bdd-7.1.2/src/pytest_bdd/py.typed000066400000000000000000000000001457564257700202430ustar00rootroot00000000000000pytest-bdd-7.1.2/src/pytest_bdd/reporting.py000066400000000000000000000124021457564257700211400ustar00rootroot00000000000000"""Reporting functionality. Collection of the scenario execution statuses, timing and other information that enriches the pytest test reporting. """ from __future__ import annotations import time from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Any, Callable from _pytest.fixtures import FixtureRequest from _pytest.nodes import Item from _pytest.reports import TestReport from _pytest.runner import CallInfo from .parser import Feature, Scenario, Step class StepReport: """Step execution report.""" failed = False stopped = None def __init__(self, step: Step) -> None: """Step report constructor. :param pytest_bdd.parser.Step step: Step. """ self.step = step self.started = time.perf_counter() def serialize(self) -> dict[str, Any]: """Serialize the step execution report. :return: Serialized step execution report. :rtype: dict """ return { "name": self.step.name, "type": self.step.type, "keyword": self.step.keyword, "line_number": self.step.line_number, "failed": self.failed, "duration": self.duration, } def finalize(self, failed: bool) -> None: """Stop collecting information and finalize the report. :param bool failed: Whether the step execution is failed. """ self.stopped = time.perf_counter() self.failed = failed @property def duration(self) -> float: """Step execution duration. :return: Step execution duration. 
:rtype: float """ if self.stopped is None: return 0 return self.stopped - self.started class ScenarioReport: """Scenario execution report.""" def __init__(self, scenario: Scenario) -> None: """Scenario report constructor. :param pytest_bdd.parser.Scenario scenario: Scenario. :param node: pytest test node object """ self.scenario: Scenario = scenario self.step_reports: list[StepReport] = [] @property def current_step_report(self) -> StepReport: """Get current step report. :return: Last or current step report. :rtype: pytest_bdd.reporting.StepReport """ return self.step_reports[-1] def add_step_report(self, step_report: StepReport) -> None: """Add new step report. :param step_report: New current step report. :type step_report: pytest_bdd.reporting.StepReport """ self.step_reports.append(step_report) def serialize(self) -> dict[str, Any]: """Serialize scenario execution report in order to transfer reporting from nodes in the distributed mode. :return: Serialized report. :rtype: dict """ scenario = self.scenario feature = scenario.feature return { "steps": [step_report.serialize() for step_report in self.step_reports], "name": scenario.name, "line_number": scenario.line_number, "tags": sorted(scenario.tags), "feature": { "name": feature.name, "filename": feature.filename, "rel_filename": feature.rel_filename, "line_number": feature.line_number, "description": feature.description, "tags": sorted(feature.tags), }, } def fail(self) -> None: """Stop collecting information and finalize the report as failed.""" self.current_step_report.finalize(failed=True) remaining_steps = self.scenario.steps[len(self.step_reports) :] # Fail the rest of the steps and make reports. 
for step in remaining_steps: report = StepReport(step=step) report.finalize(failed=True) self.add_step_report(report) def runtest_makereport(item: Item, call: CallInfo, rep: TestReport) -> None: """Store item in the report object.""" try: scenario_report: ScenarioReport = item.__scenario_report__ except AttributeError: pass else: rep.scenario = scenario_report.serialize() rep.item = {"name": item.name} def before_scenario(request: FixtureRequest, feature: Feature, scenario: Scenario) -> None: """Create scenario report for the item.""" request.node.__scenario_report__ = ScenarioReport(scenario=scenario) def step_error( request: FixtureRequest, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any], step_func_args: dict, exception: Exception, ) -> None: """Finalize the step report as failed.""" request.node.__scenario_report__.fail() def before_step( request: FixtureRequest, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any], ) -> None: """Store step start time.""" request.node.__scenario_report__.add_step_report(StepReport(step=step)) def after_step( request: FixtureRequest, feature: Feature, scenario: Scenario, step: Step, step_func: Callable, step_func_args: dict, ) -> None: """Finalize the step report as successful.""" request.node.__scenario_report__.current_step_report.finalize(failed=False) pytest-bdd-7.1.2/src/pytest_bdd/scenario.py000066400000000000000000000362561457564257700207470ustar00rootroot00000000000000"""Scenario implementation. The pytest will collect the test case and the steps will be executed line by line. 
Example: test_publish_article = scenario( feature_name="publish_article.feature", scenario_name="Publishing the article", ) """ from __future__ import annotations import contextlib import logging import os import re from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, TypeVar, cast import pytest from _pytest.fixtures import FixtureDef, FixtureManager, FixtureRequest, call_fixture_func from typing_extensions import ParamSpec from . import exceptions from .compat import getfixturedefs, inject_fixture from .feature import get_feature, get_features from .steps import StepFunctionContext, get_step_fixture_name from .utils import CONFIG_STACK, get_args, get_caller_module_locals, get_caller_module_path if TYPE_CHECKING: from _pytest.mark.structures import ParameterSet from _pytest.nodes import Node from .parser import Feature, Scenario, ScenarioTemplate, Step P = ParamSpec("P") T = TypeVar("T") logger = logging.getLogger(__name__) PYTHON_REPLACE_REGEX = re.compile(r"\W") ALPHA_REGEX = re.compile(r"^\d+_*") def find_fixturedefs_for_step(step: Step, fixturemanager: FixtureManager, node: Node) -> Iterable[FixtureDef[Any]]: """Find the fixture defs that can parse a step.""" # happens to be that _arg2fixturedefs is changed during the iteration so we use a copy fixture_def_by_name = list(fixturemanager._arg2fixturedefs.items()) for fixturename, fixturedefs in fixture_def_by_name: for pos, fixturedef in enumerate(fixturedefs): step_func_context = getattr(fixturedef.func, "_pytest_bdd_step_context", None) if step_func_context is None: continue if step_func_context.type is not None and step_func_context.type != step.type: continue match = step_func_context.parser.is_matching(step.name) if not match: continue fixturedefs = getfixturedefs(fixturemanager, fixturename, node) if fixturedef not in (fixturedefs or []): continue yield fixturedef # Function copied from pytest 8.0 (removed in later versions). 
def iterparentnodeids(nodeid: str) -> Iterator[str]: """Return the parent node IDs of a given node ID, inclusive. For the node ID "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" the result would be "" "testing" "testing/code" "testing/code/test_excinfo.py" "testing/code/test_excinfo.py::TestFormattedExcinfo" "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" Note that / components are only considered until the first ::. """ SEP = "/" pos = 0 first_colons: Optional[int] = nodeid.find("::") if first_colons == -1: first_colons = None # The root Session node - always present. yield "" # Eagerly consume SEP parts until first colons. while True: at = nodeid.find(SEP, pos, first_colons) if at == -1: break if at > 0: yield nodeid[:at] pos = at + len(SEP) # Eagerly consume :: parts. while True: at = nodeid.find("::", pos) if at == -1: break if at > 0: yield nodeid[:at] pos = at + len("::") # The node ID itself. if nodeid: yield nodeid @contextlib.contextmanager def inject_fixturedefs_for_step(step: Step, fixturemanager: FixtureManager, node: Node) -> Iterator[None]: """Inject fixture definitions that can parse a step. We fist iterate over all the fixturedefs that can parse the step. Then we sort them by their "path" (list of parent IDs) so that we respect the fixture scoping rules. Finally, we inject them into the request. 
""" bdd_name = get_step_fixture_name(step=step) fixturedefs = list(find_fixturedefs_for_step(step=step, fixturemanager=fixturemanager, node=node)) # Sort the fixture definitions by their "path", so that the `bdd_name` fixture will # respect the fixture scope def get_fixture_path(fixture_def: FixtureDef) -> list[str]: return list(iterparentnodeids(fixture_def.baseid)) fixturedefs.sort(key=lambda x: get_fixture_path(x)) if not fixturedefs: yield return logger.debug("Adding providers for fixture %r: %r", bdd_name, fixturedefs) fixturemanager._arg2fixturedefs[bdd_name] = fixturedefs try: yield finally: del fixturemanager._arg2fixturedefs[bdd_name] def get_step_function(request, step: Step) -> StepFunctionContext | None: """Get the step function (context) for the given step. We first figure out what's the step fixture name that we have to inject. Then we let `patch_argumented_step_functions` find out what step definition fixtures can parse the current step, and it will inject them for the step fixture name. Finally we let request.getfixturevalue(...) fetch the step definition fixture. """ __tracebackhide__ = True bdd_name = get_step_fixture_name(step=step) with inject_fixturedefs_for_step(step=step, fixturemanager=request._fixturemanager, node=request.node): try: return cast(StepFunctionContext, request.getfixturevalue(bdd_name)) except pytest.FixtureLookupError: return None def _execute_step_function( request: FixtureRequest, scenario: Scenario, step: Step, context: StepFunctionContext ) -> None: """Execute step function.""" __tracebackhide__ = True kw = { "request": request, "feature": scenario.feature, "scenario": scenario, "step": step, "step_func": context.step_func, "step_func_args": {}, } request.config.hook.pytest_bdd_before_step(**kw) # Get the step argument values. 
converters = context.converters kwargs = {} args = get_args(context.step_func) try: parsed_args = context.parser.parse_arguments(step.name) assert parsed_args is not None, ( f"Unexpected `NoneType` returned from " f"parse_arguments(...) in parser: {context.parser!r}" ) for arg, value in parsed_args.items(): if arg in converters: value = converters[arg](value) kwargs[arg] = value kwargs = {arg: kwargs[arg] if arg in kwargs else request.getfixturevalue(arg) for arg in args} kw["step_func_args"] = kwargs request.config.hook.pytest_bdd_before_step_call(**kw) # Execute the step as if it was a pytest fixture, so that we can allow "yield" statements in it return_value = call_fixture_func(fixturefunc=context.step_func, request=request, kwargs=kwargs) except Exception as exception: request.config.hook.pytest_bdd_step_error(exception=exception, **kw) raise if context.target_fixture is not None: inject_fixture(request, context.target_fixture, return_value) request.config.hook.pytest_bdd_after_step(**kw) def _execute_scenario(feature: Feature, scenario: Scenario, request: FixtureRequest) -> None: """Execute the scenario. :param feature: Feature. :param scenario: Scenario. :param request: request. :param encoding: Encoding. """ __tracebackhide__ = True request.config.hook.pytest_bdd_before_scenario(request=request, feature=feature, scenario=scenario) try: for step in scenario.steps: step_func_context = get_step_function(request=request, step=step) if step_func_context is None: exc = exceptions.StepDefinitionNotFoundError( f"Step definition is not found: {step}. 
" f'Line {step.line_number} in scenario "{scenario.name}" in the feature "{scenario.feature.filename}"' ) request.config.hook.pytest_bdd_step_func_lookup_error( request=request, feature=feature, scenario=scenario, step=step, exception=exc ) raise exc _execute_step_function(request, scenario, step, step_func_context) finally: request.config.hook.pytest_bdd_after_scenario(request=request, feature=feature, scenario=scenario) def _get_scenario_decorator( feature: Feature, feature_name: str, templated_scenario: ScenarioTemplate, scenario_name: str ) -> Callable[[Callable[P, T]], Callable[P, T]]: # HACK: Ideally we would use `def decorator(fn)`, but we want to return a custom exception # when the decorator is misused. # Pytest inspect the signature to determine the required fixtures, and in that case it would look # for a fixture called "fn" that doesn't exist (if it exists then it's even worse). # It will error with a "fixture 'fn' not found" message instead. # We can avoid this hack by using a pytest hook and check for misuse instead. def decorator(*args: Callable[P, T]) -> Callable[P, T]: if not args: raise exceptions.ScenarioIsDecoratorOnly( "scenario function can only be used as a decorator. Refer to the documentation." ) [fn] = args func_args = get_args(fn) # We need to tell pytest that the original function requires its fixtures, # otherwise indirect fixtures would not work. 
@pytest.mark.usefixtures(*func_args) def scenario_wrapper(request: FixtureRequest, _pytest_bdd_example: dict[str, str]) -> Any: __tracebackhide__ = True scenario = templated_scenario.render(_pytest_bdd_example) _execute_scenario(feature, scenario, request) fixture_values = [request.getfixturevalue(arg) for arg in func_args] return fn(*fixture_values) example_parametrizations = collect_example_parametrizations(templated_scenario) if example_parametrizations is not None: # Parametrize the scenario outlines scenario_wrapper = pytest.mark.parametrize( "_pytest_bdd_example", example_parametrizations, )(scenario_wrapper) for tag in templated_scenario.tags.union(feature.tags): config = CONFIG_STACK[-1] config.hook.pytest_bdd_apply_tag(tag=tag, function=scenario_wrapper) scenario_wrapper.__doc__ = f"{feature_name}: {scenario_name}" scenario_wrapper.__scenario__ = templated_scenario return cast(Callable[P, T], scenario_wrapper) return decorator def collect_example_parametrizations( templated_scenario: ScenarioTemplate, ) -> list[ParameterSet] | None: if contexts := list(templated_scenario.examples.as_contexts()): return [pytest.param(context, id="-".join(context.values())) for context in contexts] else: return None def scenario( feature_name: str, scenario_name: str, encoding: str = "utf-8", features_base_dir: str | None = None, ) -> Callable[[Callable[P, T]], Callable[P, T]]: """Scenario decorator. :param str feature_name: Feature file name. Absolute or relative to the configured feature base path. :param str scenario_name: Scenario name. :param str encoding: Feature file encoding. 
""" __tracebackhide__ = True scenario_name = scenario_name caller_module_path = get_caller_module_path() # Get the feature if features_base_dir is None: features_base_dir = get_features_base_dir(caller_module_path) feature = get_feature(features_base_dir, feature_name, encoding=encoding) # Get the scenario try: scenario = feature.scenarios[scenario_name] except KeyError: feature_name = feature.name or "[Empty]" raise exceptions.ScenarioNotFound( f'Scenario "{scenario_name}" in feature "{feature_name}" in {feature.filename} is not found.' ) return _get_scenario_decorator( feature=feature, feature_name=feature_name, templated_scenario=scenario, scenario_name=scenario_name ) def get_features_base_dir(caller_module_path: str) -> str: d = get_from_ini("bdd_features_base_dir", None) if d is None: return os.path.dirname(caller_module_path) rootdir = CONFIG_STACK[-1].rootpath return os.path.join(rootdir, d) def get_from_ini(key: str, default: str) -> str: """Get value from ini config. Return default if value has not been set. Use if the default value is dynamic. Otherwise set default on addini call. 
""" config = CONFIG_STACK[-1] value = config.getini(key) if not isinstance(value, str): raise TypeError(f"Expected a string for configuration option {value!r}, got a {type(value)} instead") return value if value != "" else default def make_python_name(string: str) -> str: """Make python attribute name out of a given string.""" string = re.sub(PYTHON_REPLACE_REGEX, "", string.replace(" ", "_")) return re.sub(ALPHA_REGEX, "", string).lower() def make_python_docstring(string: str) -> str: """Make a python docstring literal out of a given string.""" return '"""{}."""'.format(string.replace('"""', '\\"\\"\\"')) def make_string_literal(string: str) -> str: """Make python string literal out of a given string.""" return "'{}'".format(string.replace("'", "\\'")) def get_python_name_generator(name: str) -> Iterable[str]: """Generate a sequence of suitable python names out of given arbitrary string name.""" python_name = make_python_name(name) suffix = "" index = 0 def get_name() -> str: return f"test_{python_name}{suffix}" while True: yield get_name() index += 1 suffix = f"_{index}" def scenarios(*feature_paths: str, **kwargs: Any) -> None: """Parse features from the paths and put all found scenarios in the caller module. 
:param *feature_paths: feature file paths to use for scenarios """ caller_locals = get_caller_module_locals() caller_path = get_caller_module_path() features_base_dir = kwargs.get("features_base_dir") if features_base_dir is None: features_base_dir = get_features_base_dir(caller_path) abs_feature_paths = [] for path in feature_paths: if not os.path.isabs(path): path = os.path.abspath(os.path.join(features_base_dir, path)) abs_feature_paths.append(path) found = False module_scenarios = frozenset( (attr.__scenario__.feature.filename, attr.__scenario__.name) for name, attr in caller_locals.items() if hasattr(attr, "__scenario__") ) for feature in get_features(abs_feature_paths): for scenario_name, scenario_object in feature.scenarios.items(): # skip already bound scenarios if (scenario_object.feature.filename, scenario_name) not in module_scenarios: @scenario(feature.filename, scenario_name, **kwargs) def _scenario() -> None: pass # pragma: no cover for test_name in get_python_name_generator(scenario_name): if test_name not in caller_locals: # found an unique test name caller_locals[test_name] = _scenario break found = True if not found: raise exceptions.NoScenariosFound(abs_feature_paths) pytest-bdd-7.1.2/src/pytest_bdd/scripts.py000066400000000000000000000051171457564257700206230ustar00rootroot00000000000000"""pytest-bdd scripts.""" from __future__ import annotations import argparse import glob import os.path import re from .generation import generate_code, parse_feature_files MIGRATE_REGEX = re.compile(r"\s?(\w+)\s=\sscenario\((.+)\)", flags=re.MULTILINE) def migrate_tests(args: argparse.Namespace) -> None: """Migrate outdated tests to the most recent form.""" path = args.path for file_path in glob.iglob(os.path.join(os.path.abspath(path), "**", "*.py"), recursive=True): migrate_tests_in_file(file_path) def migrate_tests_in_file(file_path: str) -> None: """Migrate all bdd-based tests in the given test file.""" try: with open(file_path, "r+") as fd: content = 
fd.read() new_content = MIGRATE_REGEX.sub(r"\n@scenario(\2)\ndef \1():\n pass\n", content) if new_content != content: # the regex above potentially causes the end of the file to # have an extra newline new_content = new_content.rstrip("\n") + "\n" fd.seek(0) fd.write(new_content) print(f"migrated: {file_path}") else: print(f"skipped: {file_path}") except OSError: pass def check_existense(file_name: str) -> str: """Check file or directory name for existence.""" if not os.path.exists(file_name): raise argparse.ArgumentTypeError(f"{file_name} is an invalid file or directory name") return file_name def print_generated_code(args: argparse.Namespace) -> None: """Print generated test code for the given filenames.""" features, scenarios, steps = parse_feature_files(args.files) code = generate_code(features, scenarios, steps) print(code) def main() -> None: """Main entry point.""" parser = argparse.ArgumentParser(prog="pytest-bdd") subparsers = parser.add_subparsers(help="sub-command help", dest="command") subparsers.required = True parser_generate = subparsers.add_parser("generate", help="generate help") parser_generate.add_argument( "files", metavar="FEATURE_FILE", type=check_existense, nargs="+", help="Feature files to generate test code with", ) parser_generate.set_defaults(func=print_generated_code) parser_migrate = subparsers.add_parser("migrate", help="migrate help") parser_migrate.add_argument("path", metavar="PATH", help="Migrate outdated tests to the most recent form") parser_migrate.set_defaults(func=migrate_tests) args = parser.parse_args() if hasattr(args, "func"): args.func(args) pytest-bdd-7.1.2/src/pytest_bdd/steps.py000066400000000000000000000144521457564257700202740ustar00rootroot00000000000000"""Step decorators. 
Example: @given("I have an article", target_fixture="article") def _(author): return create_test_article(author=author) @when("I go to the article page") def _(browser, article): browser.visit(urljoin(browser.url, "/articles/{0}/".format(article.id))) @then("I should not see the error message") def _(browser): with pytest.raises(ElementDoesNotExist): browser.find_by_css(".message.error").first Multiple names for the steps: @given("I have an article") @given("there is an article") def _(author): return create_test_article(author=author) Reusing existing fixtures for a different step name: @given("I have a beautiful article") def _(article): pass """ from __future__ import annotations import enum from dataclasses import dataclass, field from itertools import count from typing import Any, Callable, Iterable, Literal, TypeVar import pytest from _pytest.fixtures import FixtureRequest from typing_extensions import ParamSpec from . import compat from .parser import Step from .parsers import StepParser, get_parser from .types import GIVEN, THEN, WHEN from .utils import get_caller_module_locals P = ParamSpec("P") T = TypeVar("T") @enum.unique class StepNamePrefix(enum.Enum): step_def = "pytestbdd_stepdef" step_impl = "pytestbdd_stepimpl" @dataclass class StepFunctionContext: type: Literal["given", "when", "then"] | None step_func: Callable[..., Any] parser: StepParser converters: dict[str, Callable[[str], Any]] = field(default_factory=dict) target_fixture: str | None = None def get_step_fixture_name(step: Step) -> str: """Get step fixture name""" return f"{StepNamePrefix.step_impl.value}_{step.type}_{step.name}" def given( name: str | StepParser, converters: dict[str, Callable[[str], Any]] | None = None, target_fixture: str | None = None, stacklevel: int = 1, ) -> Callable[[Callable[P, T]], Callable[P, T]]: """Given step decorator. :param name: Step name or a parser object. :param converters: Optional `dict` of the argument or parameter converters in form {: }. 
:param target_fixture: Target fixture name to replace by steps definition function. :param stacklevel: Stack level to find the caller frame. This is used when injecting the step definition fixture. :return: Decorator function for the step. """ return step(name, GIVEN, converters=converters, target_fixture=target_fixture, stacklevel=stacklevel) def when( name: str | StepParser, converters: dict[str, Callable[[str], Any]] | None = None, target_fixture: str | None = None, stacklevel: int = 1, ) -> Callable[[Callable[P, T]], Callable[P, T]]: """When step decorator. :param name: Step name or a parser object. :param converters: Optional `dict` of the argument or parameter converters in form {: }. :param target_fixture: Target fixture name to replace by steps definition function. :param stacklevel: Stack level to find the caller frame. This is used when injecting the step definition fixture. :return: Decorator function for the step. """ return step(name, WHEN, converters=converters, target_fixture=target_fixture, stacklevel=stacklevel) def then( name: str | StepParser, converters: dict[str, Callable[[str], Any]] | None = None, target_fixture: str | None = None, stacklevel: int = 1, ) -> Callable[[Callable[P, T]], Callable[P, T]]: """Then step decorator. :param name: Step name or a parser object. :param converters: Optional `dict` of the argument or parameter converters in form {: }. :param target_fixture: Target fixture name to replace by steps definition function. :param stacklevel: Stack level to find the caller frame. This is used when injecting the step definition fixture. :return: Decorator function for the step. 
""" return step(name, THEN, converters=converters, target_fixture=target_fixture, stacklevel=stacklevel) def step( name: str | StepParser, type_: Literal["given", "when", "then"] | None = None, converters: dict[str, Callable[[str], Any]] | None = None, target_fixture: str | None = None, stacklevel: int = 1, ) -> Callable[[Callable[P, T]], Callable[P, T]]: """Generic step decorator. :param name: Step name as in the feature file. :param type_: Step type ("given", "when" or "then"). If None, this step will work for all the types. :param converters: Optional step arguments converters mapping. :param target_fixture: Optional fixture name to replace by step definition. :param stacklevel: Stack level to find the caller frame. This is used when injecting the step definition fixture. :return: Decorator function for the step. Example: >>> @step("there is an wallet", target_fixture="wallet") >>> def _() -> dict[str, int]: >>> return {"eur": 0, "usd": 0} """ if converters is None: converters = {} def decorator(func: Callable[P, T]) -> Callable[P, T]: parser = get_parser(name) context = StepFunctionContext( type=type_, step_func=func, parser=parser, converters=converters, target_fixture=target_fixture, ) def step_function_marker() -> StepFunctionContext: return context step_function_marker._pytest_bdd_step_context = context caller_locals = get_caller_module_locals(stacklevel=stacklevel) fixture_step_name = find_unique_name( f"{StepNamePrefix.step_def.value}_{type_ or '*'}_{parser.name}", seen=caller_locals.keys() ) caller_locals[fixture_step_name] = pytest.fixture(name=fixture_step_name)(step_function_marker) return func return decorator def find_unique_name(name: str, seen: Iterable[str]) -> str: """Find unique name among a set of strings. New names are generated by appending an increasing number at the end of the name. 
Example: >>> find_unique_name("foo", ["foo", "foo_1"]) 'foo_2' """ seen = set(seen) if name not in seen: return name for i in count(1): new_name = f"{name}_{i}" if new_name not in seen: return new_name pytest-bdd-7.1.2/src/pytest_bdd/templates/000077500000000000000000000000001457564257700205545ustar00rootroot00000000000000pytest-bdd-7.1.2/src/pytest_bdd/templates/test.py.mak000066400000000000000000000011541457564257700226550ustar00rootroot00000000000000% if features: """${ features[0].name or features[0].rel_filename } feature tests.""" from pytest_bdd import ( given, scenario, then, when, ) % endif % for scenario in sorted(scenarios, key=lambda scenario: scenario.name): @scenario('${scenario.feature.rel_filename}', ${ make_string_literal(scenario.name)}) def test_${ make_python_name(scenario.name)}(): ${make_python_docstring(scenario.name)} % endfor % for step in steps: @${step.type}(${ make_string_literal(step.name)}) def _(): ${make_python_docstring(step.name)} raise NotImplementedError % if not loop.last: % endif % endfor pytest-bdd-7.1.2/src/pytest_bdd/types.py000066400000000000000000000005401457564257700202730ustar00rootroot00000000000000"""Common type definitions.""" from __future__ import annotations FEATURE = "feature" SCENARIO_OUTLINE = "scenario outline" EXAMPLES = "examples" EXAMPLES_HEADERS = "example headers" EXAMPLE_LINE = "example line" SCENARIO = "scenario" BACKGROUND = "background" GIVEN = "given" WHEN = "when" THEN = "then" TAG = "tag" STEP_TYPES = (GIVEN, WHEN, THEN) pytest-bdd-7.1.2/src/pytest_bdd/utils.py000066400000000000000000000050121457564257700202660ustar00rootroot00000000000000"""Various utility functions.""" from __future__ import annotations import base64 import pickle import re from inspect import getframeinfo, signature from sys import _getframe from typing import TYPE_CHECKING, TypeVar if TYPE_CHECKING: from typing import Any, Callable from _pytest.config import Config from _pytest.pytester import RunResult T = TypeVar("T") 
CONFIG_STACK: list[Config] = [] def get_args(func: Callable[..., Any]) -> list[str]: """Get a list of argument names for a function. :param func: The function to inspect. :return: A list of argument names. :rtype: list """ params = signature(func).parameters.values() return [ param.name for param in params if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty ] def get_caller_module_locals(stacklevel: int = 1) -> dict[str, Any]: """Get the caller module locals dictionary. We use sys._getframe instead of inspect.stack(0) because the latter is way slower, since it iterates over all the frames in the stack. """ return _getframe(stacklevel + 1).f_locals def get_caller_module_path(depth: int = 2) -> str: """Get the caller module path. We use sys._getframe instead of inspect.stack(0) because the latter is way slower, since it iterates over all the frames in the stack. """ frame = _getframe(depth) return getframeinfo(frame, context=0).filename _DUMP_START = "_pytest_bdd_>>>" _DUMP_END = "<<<_pytest_bdd_" def dump_obj(*objects: Any) -> None: """Dump objects to stdout so that they can be inspected by the test suite.""" for obj in objects: dump = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL) encoded = base64.b64encode(dump).decode("ascii") print(f"{_DUMP_START}{encoded}{_DUMP_END}") def collect_dumped_objects(result: RunResult) -> list: """Parse all the objects dumped with `dump_object` from the result. Note: You must run the result with output to stdout enabled. For example, using ``pytester.runpytest("-s")``. 
""" stdout = result.stdout.str() # pytest < 6.2, otherwise we could just do str(result.stdout) payloads = re.findall(rf"{_DUMP_START}(.*?){_DUMP_END}", stdout) return [pickle.loads(base64.b64decode(payload)) for payload in payloads] def setdefault(obj: object, name: str, default: T) -> T: """Just like dict.setdefault, but for objects.""" try: return getattr(obj, name) except AttributeError: setattr(obj, name, default) return default pytest-bdd-7.1.2/tests/000077500000000000000000000000001457564257700147705ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/__init__.py000066400000000000000000000000001457564257700170670ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/000077500000000000000000000000001457564257700157245ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/__init__.py000066400000000000000000000000001457564257700200230ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/cfparse/000077500000000000000000000000001457564257700173475ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/cfparse/__init__.py000066400000000000000000000000001457564257700214460ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/cfparse/test_args.py000066400000000000000000000053771457564257700217300ustar00rootroot00000000000000"""Step arguments tests.""" import textwrap def test_every_step_takes_param_with_the_same_name(pytester): """Test every step takes param with the same name.""" pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Step arguments Scenario: Every step takes a parameter with the same name Given I have 1 Euro When I pay 2 Euro And I pay 1 Euro Then I should have 0 Euro And I should have 999999 Euro # In my dream... 
""" ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import parsers, given, when, then, scenario @scenario("arguments.feature", "Every step takes a parameter with the same name") def test_arguments(): pass @pytest.fixture def values(): return [1, 2, 1, 0, 999999] @given(parsers.cfparse("I have {euro:d} Euro")) def _(euro, values): assert euro == values.pop(0) @when(parsers.cfparse("I pay {euro:d} Euro")) def _(euro, values, request): assert euro == values.pop(0) @then(parsers.cfparse("I should have {euro:d} Euro")) def _(euro, values): assert euro == values.pop(0) """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_argument_in_when(pytester): """Test step arguments in when steps.""" pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Step arguments Scenario: Argument in when Given I have an argument 1 When I get argument 5 Then My argument should be 5 """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import parsers, given, when, then, scenario @scenario("arguments.feature", "Argument in when") def test_arguments(): pass @pytest.fixture def arguments(): return dict() @given(parsers.cfparse("I have an argument {arg:Number}", extra_types=dict(Number=int))) def _(arguments, arg): arguments["arg"] = arg @when(parsers.cfparse("I get argument {arg:d}")) def _(arguments, arg): arguments["arg"] = arg @then(parsers.cfparse("My argument should be {arg:d}")) def _(arguments, arg): assert arguments["arg"] == arg """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) pytest-bdd-7.1.2/tests/args/parse/000077500000000000000000000000001457564257700170365ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/parse/__init__.py000066400000000000000000000000001457564257700211350ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/parse/test_args.py000066400000000000000000000052241457564257700214060ustar00rootroot00000000000000"""Step arguments tests.""" 
import textwrap def test_every_steps_takes_param_with_the_same_name(pytester): pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Step arguments Scenario: Every step takes a parameter with the same name Given I have 1 Euro When I pay 2 Euro And I pay 1 Euro Then I should have 0 Euro And I should have 999999 Euro # In my dream... """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import parsers, given, when, then, scenario @scenario("arguments.feature", "Every step takes a parameter with the same name") def test_arguments(): pass @pytest.fixture def values(): return [1, 2, 1, 0, 999999] @given(parsers.parse("I have {euro:d} Euro")) def _(euro, values): assert euro == values.pop(0) @when(parsers.parse("I pay {euro:d} Euro")) def _(euro, values, request): assert euro == values.pop(0) @then(parsers.parse("I should have {euro:d} Euro")) def _(euro, values): assert euro == values.pop(0) """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_argument_in_when_step_1(pytester): pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Step arguments Scenario: Argument in when Given I have an argument 1 When I get argument 5 Then My argument should be 5 """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import parsers, given, when, then, scenario @pytest.fixture def arguments(): return dict() @scenario("arguments.feature", "Argument in when") def test_arguments(): pass @given(parsers.parse("I have an argument {arg:Number}", extra_types=dict(Number=int))) def _(arguments, arg): arguments["arg"] = arg @when(parsers.parse("I get argument {arg:d}")) def _(arguments, arg): arguments["arg"] = arg @then(parsers.parse("My argument should be {arg:d}")) def _(arguments, arg): assert arguments["arg"] == arg """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) 
pytest-bdd-7.1.2/tests/args/regex/000077500000000000000000000000001457564257700170365ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/regex/__init__.py000066400000000000000000000000001457564257700211350ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/args/regex/test_args.py000066400000000000000000000107131457564257700214050ustar00rootroot00000000000000"""Step arguments tests.""" import textwrap def test_every_steps_takes_param_with_the_same_name(pytester): pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Step arguments Scenario: Every step takes a parameter with the same name Given I have 1 Euro When I pay 2 Euro And I pay 1 Euro Then I should have 0 Euro And I should have 999999 Euro """ ), ) pytester.makepyfile( textwrap.dedent( r""" import pytest from pytest_bdd import parsers, given, when, then, scenario @scenario("arguments.feature", "Every step takes a parameter with the same name") def test_arguments(): pass @pytest.fixture def values(): return [1, 2, 1, 0, 999999] @given(parsers.re(r"I have (?P\d+) Euro"), converters=dict(euro=int)) def _(euro, values): assert euro == values.pop(0) @when(parsers.re(r"I pay (?P\d+) Euro"), converters=dict(euro=int)) def _(euro, values, request): assert euro == values.pop(0) @then(parsers.re(r"I should have (?P\d+) Euro"), converters=dict(euro=int)) def _(euro, values): assert euro == values.pop(0) """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_exact_match(pytester): """Test that parsers.re does an exact match (fullmatch) of the whole string. 
This tests exists because in the past we only used re.match, which only finds a match at the beginning of the string, so if there were any more characters not matching at the end, they were ignored""" pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Step arguments Scenario: Every step takes a parameter with the same name Given I have 2 Euro # Step that should not be found: When I pay 1 Euro by mistake Then I should have 1 Euro left """ ), ) pytester.makepyfile( textwrap.dedent( r""" import pytest from pytest_bdd import parsers, given, when, then, scenarios scenarios("arguments.feature") @given(parsers.re(r"I have (?P\d+) Euro"), converters={"amount": int}, target_fixture="wallet") def _(amount): return {"EUR": amount} # Purposefully using a re that will not match the step "When I pay 1 Euro and 50 cents" @when(parsers.re(r"I pay (?P\d+) Euro"), converters={"amount": int}) def _(amount, wallet): wallet["EUR"] -= amount @then(parsers.re(r"I should have (?P\d+) Euro left"), converters={"amount": int}) def _(amount, wallet): assert wallet["EUR"] == amount """ ) ) result = pytester.runpytest() result.assert_outcomes(failed=1) result.stdout.fnmatch_lines( '*StepDefinitionNotFoundError: Step definition is not found: When "I pay 1 Euro by mistake"*' ) def test_argument_in_when(pytester): pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Step arguments Scenario: Argument in when, step 1 Given I have an argument 1 When I get argument 5 Then My argument should be 5 """ ), ) pytester.makepyfile( textwrap.dedent( r""" import pytest from pytest_bdd import parsers, given, when, then, scenario @pytest.fixture def arguments(): return dict() @scenario("arguments.feature", "Argument in when, step 1") def test_arguments(): pass @given(parsers.re(r"I have an argument (?P\d+)")) def _(arguments, arg): arguments["arg"] = arg @when(parsers.re(r"I get argument (?P\d+)")) def _(arguments, arg): arguments["arg"] = arg @then(parsers.re(r"My 
argument should be (?P\d+)")) def _(arguments, arg): assert arguments["arg"] == arg """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) pytest-bdd-7.1.2/tests/args/test_common.py000066400000000000000000000061711457564257700206320ustar00rootroot00000000000000import textwrap from pytest_bdd.utils import collect_dumped_objects def test_reuse_same_step_different_converters(pytester): pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Reuse same step with different converters Scenario: Step function should be able to be decorated multiple times with different converters Given I have a foo with int value 42 And I have a foo with str value 42 And I have a foo with float value 42 When pass Then pass """ ), ) pytester.makepyfile( textwrap.dedent( r""" import pytest from pytest_bdd import parsers, given, when, then, scenarios from pytest_bdd.utils import dump_obj scenarios("arguments.feature") @given(parsers.re(r"^I have a foo with int value (?P.*?)$"), converters={"value": int}) @given(parsers.re(r"^I have a foo with str value (?P.*?)$"), converters={"value": str}) @given(parsers.re(r"^I have a foo with float value (?P.*?)$"), converters={"value": float}) def _(value): dump_obj(value) return value @then("pass") @when("pass") def _(): pass """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=1) [int_value, str_value, float_value] = collect_dumped_objects(result) assert type(int_value) is int assert int_value == 42 assert type(str_value) is str assert str_value == "42" assert type(float_value) is float assert float_value == 42.0 def test_string_steps_dont_take_precedence(pytester): """Test that normal steps don't take precedence over the other steps.""" pytester.makefile( ".feature", arguments=textwrap.dedent( """\ Feature: Step precedence Scenario: String steps don't take precedence over other steps Given I have a foo with value 42 When pass Then pass """ ), ) pytester.makeconftest( textwrap.dedent( """ from 
pytest_bdd import given, when, then, parsers from pytest_bdd.utils import dump_obj @given("I have a foo with value 42") def _(): dump_obj("str") return 42 @then("pass") @when("pass") def _(): pass """ ) ) pytester.makepyfile( textwrap.dedent( r""" import pytest from pytest_bdd import parsers, given, when, then, scenarios from pytest_bdd.utils import dump_obj scenarios("arguments.feature") @given(parsers.re(r"^I have a foo with value (?P.*?)$")) def _(value): dump_obj("re") return 42 """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=1) [which] = collect_dumped_objects(result) assert which == "re" pytest-bdd-7.1.2/tests/conftest.py000066400000000000000000000010221457564257700171620ustar00rootroot00000000000000import pytest pytest_plugins = "pytester" def pytest_generate_tests(metafunc): if "pytest_params" in metafunc.fixturenames: parametrizations = [ pytest.param([], id="no-import-mode"), pytest.param(["--import-mode=prepend"], id="--import-mode=prepend"), pytest.param(["--import-mode=append"], id="--import-mode=append"), pytest.param(["--import-mode=importlib"], id="--import-mode=importlib"), ] metafunc.parametrize("pytest_params", parametrizations) pytest-bdd-7.1.2/tests/feature/000077500000000000000000000000001457564257700164235ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/feature/__init__.py000066400000000000000000000000001457564257700205220ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/feature/test_alias.py000066400000000000000000000030611457564257700211250ustar00rootroot00000000000000"""Test step alias when decorated multiple times.""" import textwrap def test_step_alias(pytester): pytester.makefile( ".feature", alias=textwrap.dedent( """\ Feature: Step aliases Scenario: Multiple step aliases Given I have an empty list And I have foo (which is 1) in my list # Alias of the "I have foo (which is 1) in my list" And I have bar (alias of foo) in my list When I do crash (which is 2) And I do boom (alias of crash) Then my list should 
be [1, 1, 2, 2] """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import given, when, then, scenario @scenario("alias.feature", "Multiple step aliases") def test_alias(): pass @given("I have an empty list", target_fixture="results") def _(): return [] @given("I have foo (which is 1) in my list") @given("I have bar (alias of foo) in my list") def _(results): results.append(1) @when("I do crash (which is 2)") @when("I do boom (alias of crash)") def _(results): results.append(2) @then("my list should be [1, 1, 2, 2]") def _(results): assert results == [1, 1, 2, 2] """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) pytest-bdd-7.1.2/tests/feature/test_background.py000066400000000000000000000045571457564257700221660ustar00rootroot00000000000000"""Test feature background.""" import textwrap FEATURE = """\ Feature: Background support Background: Given foo has a value "bar" And a background step with multiple lines: one two Scenario: Basic usage Then foo should have value "bar" Scenario: Background steps are executed first Given foo has no value "bar" And foo has a value "dummy" Then foo should have value "dummy" And foo should not have value "bar" """ STEPS = r"""\ import re import pytest from pytest_bdd import given, then, parsers @pytest.fixture def foo(): return {} @given(parsers.re(r"a background step with multiple lines:\n(?P.+)", flags=re.DOTALL)) def _(foo, data): assert data == "one\ntwo" @given('foo has a value "bar"') def _(foo): foo["bar"] = "bar" return foo["bar"] @given('foo has a value "dummy"') def _(foo): foo["dummy"] = "dummy" return foo["dummy"] @given('foo has no value "bar"') def _(foo): assert foo["bar"] del foo["bar"] @then('foo should have value "bar"') def _(foo): assert foo["bar"] == "bar" @then('foo should have value "dummy"') def _(foo): assert foo["dummy"] == "dummy" @then('foo should not have value "bar"') def _(foo): assert "bar" not in foo """ def test_background_basic(pytester): """Test 
feature background.""" pytester.makefile(".feature", background=textwrap.dedent(FEATURE)) pytester.makeconftest(textwrap.dedent(STEPS)) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import scenario @scenario("background.feature", "Basic usage") def test_background(): pass """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_background_check_order(pytester): """Test feature background to ensure that background steps are executed first.""" pytester.makefile(".feature", background=textwrap.dedent(FEATURE)) pytester.makeconftest(textwrap.dedent(STEPS)) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import scenario @scenario("background.feature", "Background steps are executed first") def test_background(): pass """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) pytest-bdd-7.1.2/tests/feature/test_cucumber_json.py000066400000000000000000000172431457564257700227010ustar00rootroot00000000000000"""Test cucumber json output.""" from __future__ import annotations import json import os.path import textwrap from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from _pytest.pytester import Pytester, RunResult def runandparse(pytester: Pytester, *args: Any) -> tuple[RunResult, list[dict[str, Any]]]: """Run tests in testdir and parse json output.""" resultpath = pytester.path.joinpath("cucumber.json") result = pytester.runpytest(f"--cucumberjson={resultpath}", "-s", *args) with resultpath.open() as f: jsonobject = json.load(f) return result, jsonobject class OfType: """Helper object to help compare object type to initialization type""" def __init__(self, type: type = None) -> None: self.type = type def __eq__(self, other: object) -> bool: return isinstance(other, self.type) if self.type else True def test_step_trace(pytester): """Test step trace.""" pytester.makefile( ".ini", pytest=textwrap.dedent( """ [pytest] markers = scenario-passing-tag scenario-failing-tag scenario-outline-passing-tag feature-tag """ 
), ) pytester.makefile( ".feature", test=textwrap.dedent( """ @feature-tag Feature: One passing scenario, one failing scenario @scenario-passing-tag Scenario: Passing Given a passing step And some other passing step @scenario-failing-tag Scenario: Failing Given a passing step And a failing step @scenario-outline-passing-tag Scenario Outline: Passing outline Given type and value Examples: example1 | type | value | | str | hello | | int | 42 | | float | 1.0 | """ ), ) pytester.makepyfile( textwrap.dedent( """ import pytest from pytest_bdd import given, when, scenario, parsers @given('a passing step') def _(): return 'pass' @given('some other passing step') def _(): return 'pass' @given('a failing step') def _(): raise Exception('Error') @given(parsers.parse('type {type} and value {value}')) def _(): return 'pass' @scenario('test.feature', 'Passing') def test_passing(): pass @scenario('test.feature', 'Failing') def test_failing(): pass @scenario('test.feature', 'Passing outline') def test_passing_outline(): pass """ ) ) result, jsonobject = runandparse(pytester) result.assert_outcomes(passed=4, failed=1) assert result.ret expected = [ { "description": "", "elements": [ { "description": "", "id": "test_passing", "keyword": "Scenario", "line": 5, "name": "Passing", "steps": [ { "keyword": "Given", "line": 6, "match": {"location": ""}, "name": "a passing step", "result": {"status": "passed", "duration": OfType(int)}, }, { "keyword": "And", "line": 7, "match": {"location": ""}, "name": "some other passing step", "result": {"status": "passed", "duration": OfType(int)}, }, ], "tags": [{"name": "scenario-passing-tag", "line": 4}], "type": "scenario", }, { "description": "", "id": "test_failing", "keyword": "Scenario", "line": 10, "name": "Failing", "steps": [ { "keyword": "Given", "line": 11, "match": {"location": ""}, "name": "a passing step", "result": {"status": "passed", "duration": OfType(int)}, }, { "keyword": "And", "line": 12, "match": {"location": ""}, "name": "a 
failing step", "result": {"error_message": OfType(str), "status": "failed", "duration": OfType(int)}, }, ], "tags": [{"name": "scenario-failing-tag", "line": 9}], "type": "scenario", }, { "description": "", "keyword": "Scenario", "tags": [{"line": 14, "name": "scenario-outline-passing-tag"}], "steps": [ { "line": 16, "match": {"location": ""}, "result": {"status": "passed", "duration": OfType(int)}, "keyword": "Given", "name": "type str and value hello", } ], "line": 15, "type": "scenario", "id": "test_passing_outline[str-hello]", "name": "Passing outline", }, { "description": "", "keyword": "Scenario", "tags": [{"line": 14, "name": "scenario-outline-passing-tag"}], "steps": [ { "line": 16, "match": {"location": ""}, "result": {"status": "passed", "duration": OfType(int)}, "keyword": "Given", "name": "type int and value 42", } ], "line": 15, "type": "scenario", "id": "test_passing_outline[int-42]", "name": "Passing outline", }, { "description": "", "keyword": "Scenario", "tags": [{"line": 14, "name": "scenario-outline-passing-tag"}], "steps": [ { "line": 16, "match": {"location": ""}, "result": {"status": "passed", "duration": OfType(int)}, "keyword": "Given", "name": "type float and value 1.0", } ], "line": 15, "type": "scenario", "id": "test_passing_outline[float-1.0]", "name": "Passing outline", }, ], "id": os.path.join("test_step_trace0", "test.feature"), "keyword": "Feature", "line": 2, "name": "One passing scenario, one failing scenario", "tags": [{"name": "feature-tag", "line": 1}], "uri": os.path.join(pytester.path.name, "test.feature"), } ] assert jsonobject == expected pytest-bdd-7.1.2/tests/feature/test_description.py000066400000000000000000000032751457564257700223660ustar00rootroot00000000000000"""Test descriptions.""" import textwrap def test_description(pytester): """Test description for the feature.""" pytester.makefile( ".feature", description=textwrap.dedent( """\ Feature: Description In order to achieve something I want something Because it will 
# ---- tests/feature/test_feature_base_dir.py ----
"""Test feature base dir."""
import os

import pytest

# Paths guaranteed not to contain the generated feature files.
NOT_EXISTING_FEATURE_PATHS = [".", "/does/not/exist/"]


@pytest.mark.parametrize("base_dir", NOT_EXISTING_FEATURE_PATHS)
def test_feature_path_not_found(pytester, base_dir):
    """Test feature base dir."""
    prepare_testdir(pytester, base_dir)

    result = pytester.runpytest("-k", "test_not_found_by_ini")
    result.assert_outcomes(passed=2)


def test_feature_path_ok(pytester):
    """Features are found when the ini base dir is correct."""
    base_dir = "features"
    prepare_testdir(pytester, base_dir)

    result = pytester.runpytest("-k", "test_ok_by_ini")
    result.assert_outcomes(passed=2)


def test_feature_path_ok_running_outside_rootdir(pytester):
    """Feature lookup is rootdir-relative, not cwd-relative."""
    base_dir = "features"
    prepare_testdir(pytester, base_dir)

    old_dir = os.getcwd()
    os.chdir("/")
    try:
        result = pytester.runpytest(pytester.path, "-k", "test_ok_by_ini")
        result.assert_outcomes(passed=2)
    finally:
        # Always restore the cwd so later tests are unaffected.
        os.chdir(old_dir)


def test_feature_path_by_param_not_found(pytester):
    """As param takes precedence even if ini config is correct it should fail
    if passed param is incorrect"""
    base_dir = "features"
    prepare_testdir(pytester, base_dir)

    # 2 bad param paths x 2 (scenarios/scenario) = 4 expected passes.
    result = pytester.runpytest("-k", "test_not_found_by_param")
    result.assert_outcomes(passed=4)


@pytest.mark.parametrize("base_dir", NOT_EXISTING_FEATURE_PATHS)
def test_feature_path_by_param_ok(pytester, base_dir):
    """If ini config is incorrect but param path is fine it should
    be able to find features"""
    prepare_testdir(pytester, base_dir)

    result = pytester.runpytest("-k", "test_ok_by_param")
    result.assert_outcomes(passed=2)


def prepare_testdir(pytester, ini_base_dir):
    """Create the ini file, a feature under features/ and the test module."""
    pytester.makeini(
        """
        [pytest]
        bdd_features_base_dir={}
        """.format(
            ini_base_dir
        )
    )

    feature_file = pytester.mkdir("features").joinpath("steps.feature")
    # Explicit encoding so the write does not depend on the platform locale
    # default (the rest of the suite already passes encoding explicitly).
    feature_file.write_text(
        """
        Feature: Feature path
            Scenario: When scenario found
                Given found
        """,
        encoding="utf-8",
    )

    pytester.makepyfile(
        """
    import os.path

    import pytest
    from pytest_bdd import scenario, scenarios

    FEATURE = 'steps.feature'


    @pytest.fixture(params=[
        'When scenario found',
    ])
    def scenario_name(request):
        return request.param


    @pytest.mark.parametrize(
        'multiple', [True, False]
    )
    def test_not_found_by_ini(scenario_name, multiple):
        with pytest.raises(IOError) as exc:
            if multiple:
                scenarios(FEATURE)
            else:
                scenario(FEATURE, scenario_name)
        assert os.path.abspath(os.path.join('{}', FEATURE)) in str(exc.value)


    @pytest.mark.parametrize(
        'multiple', [True, False]
    )
    def test_ok_by_ini(scenario_name, multiple):
        # Shouldn't raise any exception
        if multiple:
            scenarios(FEATURE)
        else:
            scenario(FEATURE, scenario_name)


    @pytest.mark.parametrize(
        'multiple', [True, False]
    )
    @pytest.mark.parametrize(
        'param_base_dir', [
            '.',
            '/does/not/exist/',
        ]
    )
    def test_not_found_by_param(scenario_name, param_base_dir, multiple):
        with pytest.raises(IOError) as exc:
            if multiple:
                scenarios(FEATURE, features_base_dir=param_base_dir)
            else:
                scenario(FEATURE, scenario_name, features_base_dir=param_base_dir)
        assert os.path.abspath(os.path.join(param_base_dir, FEATURE)) in str(exc.value)


    @pytest.mark.parametrize(
        'multiple', [True, False]
    )
    def test_ok_by_param(scenario_name, multiple):
        # Shouldn't raise any exception no matter of bdd_features_base_dir in ini
        if multiple:
            scenarios(FEATURE, features_base_dir='features')
        else:
            scenario(FEATURE, scenario_name, features_base_dir='features')

    """.format(
            ini_base_dir
        )
    )
test_verbose_mode_should_preserve_displaying_regular_tests_as_usual(pytester): pytester.makepyfile( textwrap.dedent( """\ def test_1(): pass """ ) ) regular = pytester.runpytest() gherkin = pytester.runpytest("--gherkin-terminal-reporter", "-v") regular.assert_outcomes(passed=1, failed=0) gherkin.assert_outcomes(passed=1, failed=0) regular.stdout.re_match_lines( r"test_verbose_mode_should_preserve_displaying_regular_tests_as_usual\.py \.\s+\[100%\]" ) gherkin.stdout.re_match_lines( r"test_verbose_mode_should_preserve_displaying_regular_tests_as_usual\.py::test_1 PASSED\s+\[100%\]" ) def test_double_verbose_mode_should_display_full_scenario_description(pytester): pytester.makefile(".feature", test=FEATURE) pytester.makepyfile(TEST) result = pytester.runpytest("--gherkin-terminal-reporter", "-vv") result.assert_outcomes(passed=1, failed=0) result.stdout.fnmatch_lines("*Scenario: Scenario example 1") result.stdout.fnmatch_lines("*Given there is a bar") result.stdout.fnmatch_lines("*When the bar is accessed") result.stdout.fnmatch_lines("*Then world explodes") result.stdout.fnmatch_lines("*PASSED") @pytest.mark.parametrize("verbosity", ["", "-v", "-vv"]) def test_error_message_for_missing_steps(pytester, verbosity): pytester.makefile(".feature", test=FEATURE) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import scenarios scenarios('.') """ ) ) result = pytester.runpytest("--gherkin-terminal-reporter", verbosity) result.assert_outcomes(passed=0, failed=1) result.stdout.fnmatch_lines( """*StepDefinitionNotFoundError: Step definition is not found: Given "there is a bar". 
""" """Line 3 in scenario "Scenario example 1"*""" ) @pytest.mark.parametrize("verbosity", ["", "-v", "-vv"]) def test_error_message_should_be_displayed(pytester, verbosity): pytester.makefile(".feature", test=FEATURE) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import given, when, then, scenario @given('there is a bar') def _(): return 'bar' @when('the bar is accessed') def _(): pass @then('world explodes') def _(): raise Exception("BIGBADABOOM") @scenario('test.feature', 'Scenario example 1') def test_scenario_1(): pass """ ) ) result = pytester.runpytest("--gherkin-terminal-reporter", verbosity) result.assert_outcomes(passed=0, failed=1) result.stdout.fnmatch_lines("E Exception: BIGBADABOOM") result.stdout.fnmatch_lines("test_error_message_should_be_displayed.py:15: Exception") def test_local_variables_should_be_displayed_when_showlocals_option_is_used(pytester): pytester.makefile(".feature", test=FEATURE) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import given, when, then, scenario @given('there is a bar') def _(): return 'bar' @when('the bar is accessed') def _(): pass @then('world explodes') def _(): local_var = "MULTIPASS" raise Exception("BIGBADABOOM") @scenario('test.feature', 'Scenario example 1') def test_scenario_1(): pass """ ) ) result = pytester.runpytest("--gherkin-terminal-reporter", "--showlocals") result.assert_outcomes(passed=0, failed=1) result.stdout.fnmatch_lines("""request*=* cucumbers When I eat cucumbers Then I should have cucumbers Examples: | start | eat | left | |{start}|{eat}|{left}| """.format( **example ) ), ) pytester.makepyfile( test_gherkin=textwrap.dedent( """\ from pytest_bdd import given, when, scenario, then, parsers @given(parsers.parse('there are {start} cucumbers'), target_fixture="start_cucumbers") def _(start): return start @when(parsers.parse('I eat {eat} cucumbers')) def _(start_cucumbers, eat): pass @then(parsers.parse('I should have {left} cucumbers')) def _(start_cucumbers, left): pass 
# ---- tests/feature/test_multiline.py ----
"""Multiline steps tests."""
import textwrap

import pytest


# NOTE(review): feature-file whitespace below is reconstructed from a
# collapsed dump — the relative indentation of the docstring lines is the
# thing under test; verify against the upstream file.
@pytest.mark.parametrize(
    ["feature_text", "expected_text"],
    [
        (
            textwrap.dedent(
                '''\
                Feature: Multiline
                    Scenario: Multiline step using sub indentation
                        Given I have a step with:
                            """
                            Some

                            Extra
                            Lines
                            """
                        Then the text should be parsed with correct indentation
                '''
            ),
            "Some\n\nExtra\nLines",
        ),
        (
            textwrap.dedent(
                """\
                Feature: Multiline
                    Scenario: Multiline step using sub indentation
                        Given I have a step with:
                            Some

                            Extra
                            Lines
                        Then the text should be parsed with correct indentation
                """
            ),
            "Some\n\nExtra\nLines",
        ),
        (
            textwrap.dedent(
                """\
                Feature: Multiline
                    Scenario: Multiline step using sub indentation
                        Given I have a step with:
                                Some

                                Extra
                            Lines
                        Then the text should be parsed with correct indentation
                """
            ),
            "    Some\n\n    Extra\nLines",
        ),
        (
            textwrap.dedent(
                """\
                Feature: Multiline
                    Scenario: Multiline step using sub indentation
                        Given I have a step with:
                            Some
                            Extra
                            Lines
                """
            ),
            "Some\nExtra\nLines",
        ),
    ],
)
def test_multiline(pytester, feature_text, expected_text):
    """A multiline step argument keeps its relative indentation."""
    pytester.makefile(".feature", multiline=feature_text)

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import parsers, given, then, scenario

            expected_text = '''{expected_text}'''


            @scenario("multiline.feature", "Multiline step using sub indentation")
            def test_multiline(request):
                assert request.getfixturevalue("text") == expected_text


            @given(parsers.parse("I have a step with:\\n{{text}}"), target_fixture="text")
            def _(text):
                return text


            @then("the text should be parsed with correct indentation")
            def _(text):
                assert text == expected_text

            """.format(
                expected_text=expected_text.encode("unicode_escape").decode("utf-8"),
            )
        )
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)


def test_multiline_wrong_indent(pytester):
    """Multiline step using sub indentation wrong indent."""
    pytester.makefile(
        ".feature",
        multiline=textwrap.dedent(
            """\
            Feature: Multiline
                Scenario: Multiline step using sub indentation wrong indent
                    Given I have a step with:
                        Some

                    Extra
                    Lines
                    Then the text should be parsed with correct indentation
            """
        ),
    )
    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import parsers, given, then, scenario


            @scenario("multiline.feature", "Multiline step using sub indentation wrong indent")
            def test_multiline(request):
                pass


            @given(parsers.parse("I have a step with:\\n{{text}}"), target_fixture="text")
            def _(text):
                return text


            @then("the text should be parsed with correct indentation")
            def _(text):
                # This step is never reached: the Given step fails to match,
                # so the scenario errors out first.  (Fix: the original body
                # asserted against an undefined name ``expected_text``.)
                pass

            """
        )
    )
    result = pytester.runpytest()
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines("*StepDefinitionNotFoundError: Step definition is not found:*")


# ---- tests/feature/test_no_scenario.py ----
"""Test no scenarios defined in the feature file."""


def test_no_scenarios(pytester):
    """Test no scenarios defined in the feature file."""
    features = pytester.mkdir("features")
    features.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Given foo
            When bar
            Then baz
            """
        ),
        encoding="utf-8",
    )
    pytester.makepyfile(
        textwrap.dedent(
            """
            from pytest_bdd import scenarios

            scenarios('features')
            """
        )
    )
    result = pytester.runpytest()
    result.stdout.fnmatch_lines(["*FeatureError: Step definition outside of a Scenario or a Background.*"])


# ---- tests/feature/test_no_sctrict_gherkin.py ----
"""Test no strict gherkin for sections."""


def test_background_no_strict_gherkin(pytester):
    """Test background no strict gherkin."""
    pytester.makepyfile(
        test_gherkin="""
        import pytest

        from pytest_bdd import when, scenario


        @scenario(
            "no_strict_gherkin_background.feature",
            "Test background",
        )
        def test_background():
            pass


        @pytest.fixture
        def foo():
            return {}


        @when('foo has a value "bar"')
        def _(foo):
            foo["bar"] = "bar"
            return foo["bar"]


        @when('foo is not boolean')
        def _(foo):
            assert foo is not bool


        @when('foo has not a value "baz"')
        def _(foo):
            assert "baz" not in foo
        """
    )

    pytester.makefile(
        ".feature",
        no_strict_gherkin_background="""
        Feature: No strict Gherkin Background support

            Background:
                When foo has a value "bar"
                And foo is not boolean
                And foo has not a value "baz"

            Scenario: Test background
        """,
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)


def test_scenario_no_strict_gherkin(pytester):
    """Test scenario no strict gherkin."""
    pytester.makepyfile(
        test_gherkin="""
        import pytest

        from pytest_bdd import when, scenario


        @scenario(
            "no_strict_gherkin_scenario.feature",
            "Test scenario",
        )
        def test_scenario():
            pass


        @pytest.fixture
        def foo():
            return {}


        @when('foo has a value "bar"')
        def _(foo):
            foo["bar"] = "bar"
            return foo["bar"]


        @when('foo is not boolean')
        def _(foo):
            assert foo is not bool


        @when('foo has not a value "baz"')
        def _(foo):
            assert "baz" not in foo
        """
    )

    pytester.makefile(
        ".feature",
        no_strict_gherkin_scenario="""
        Feature: No strict Gherkin Scenario support

            Scenario: Test scenario
                When foo has a value "bar"
                And foo is not boolean
                And foo has not a value "baz"
        """,
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)
# ---- tests/feature/test_outline.py ----
"""Scenario Outline tests."""
import textwrap

from pytest_bdd.utils import collect_dumped_objects

# Step definitions shared by the outline tests; each step dumps the value it
# received so the parametrized runs can be asserted from the captured output.
STEPS = """\
from pytest_bdd import parsers, given, when, then
from pytest_bdd.utils import dump_obj


@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers")
def _(start):
    assert isinstance(start, int)
    dump_obj(start)
    return {"start": start}


@when(parsers.parse("I eat {eat:g} cucumbers"))
def _(cucumbers, eat):
    assert isinstance(eat, float)
    dump_obj(eat)
    cucumbers["eat"] = eat


@then(parsers.parse("I should have {left} cucumbers"))
def _(cucumbers, left):
    assert isinstance(left, str)
    dump_obj(left)
    assert cucumbers["start"] - cucumbers["eat"] == int(left)

"""


# NOTE(review): the <...> example placeholders below were stripped by the
# dump and are restored from the step parsers and expected dumped values.
def test_outlined(pytester):
    """Each example row produces one test with converted step arguments."""
    pytester.makefile(
        ".feature",
        outline=textwrap.dedent(
            """\
            Feature: Outline
                Scenario Outline: Outlined given, when, thens
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    Then I should have <left> cucumbers

                    Examples:
                    | start | eat | left |
                    |  12   |  5  |  7   | # a comment
                    |  5    |  4  |  1   |
            """
        ),
    )
    pytester.makeconftest(textwrap.dedent(STEPS))

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenario

            @scenario(
                "outline.feature",
                "Outlined given, when, thens",
            )
            def test_outline(request):
                pass
            """
        )
    )
    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=2)
    # fmt: off
    assert collect_dumped_objects(result) == [
        12, 5.0, "7",
        5, 4.0, "1",
    ]
    # fmt: on


def test_unused_params(pytester):
    """Test parametrized scenario when the test function lacks parameters."""
    pytester.makefile(
        ".feature",
        outline=textwrap.dedent(
            """\
            Feature: Outline
                Scenario Outline: Outlined with unused params
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    # And commented out step with <unused_param>
                    Then I should have <left> cucumbers

                    Examples:
                    | start | eat | left | unused_param |
                    | 12    | 5   | 7    | value        |
            """
        ),
    )
    pytester.makeconftest(textwrap.dedent(STEPS))

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenario

            @scenario("outline.feature", "Outlined with unused params")
            def test_outline(request):
                pass
            """
        )
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)


def test_outlined_with_other_fixtures(pytester):
    """Test outlined scenario also using other parametrized fixture."""
    pytester.makefile(
        ".feature",
        outline=textwrap.dedent(
            """\
            Feature: Outline
                Scenario Outline: Outlined given, when, thens
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    Then I should have <left> cucumbers

                    Examples:
                    | start | eat | left |
                    |  12   |  5  |  7   |
                    |  5    |  4  |  1   |
            """
        ),
    )
    pytester.makeconftest(textwrap.dedent(STEPS))

    pytester.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import scenario


            @pytest.fixture(params=[1, 2, 3])
            def other_fixture(request):
                return request.param


            @scenario(
                "outline.feature",
                "Outlined given, when, thens",
            )
            def test_outline(other_fixture):
                pass

            """
        )
    )
    result = pytester.runpytest()
    # 2 example rows x 3 fixture params.
    result.assert_outcomes(passed=6)


def test_outline_with_escaped_pipes(pytester):
    """Test parametrized feature example table with escaped pipe characters in input."""
    pytester.makefile(
        ".feature",
        outline=textwrap.dedent(
            r"""\
            Feature: Outline With Special characters

                Scenario Outline: Outline with escaped pipe character
                    # Just print the string so that we can assert later what it was by reading the output
                    Given I print the <string>

                    Examples:
                    | string       |
                    | bork         |
                    | \|bork       |
                    | bork \|      |
                    | bork\|\|bork |
                    | \|           |
                    | bork \\      |
                    | bork \\\|    |
            """
        ),
    )
    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenario, given, parsers
            from pytest_bdd.utils import dump_obj


            @scenario("outline.feature", "Outline with escaped pipe character")
            def test_outline_with_escaped_pipe_character(request):
                pass


            @given(parsers.parse("I print the {string}"))
            def _(string):
                dump_obj(string)
            """
        )
    )
    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=7)
    assert collect_dumped_objects(result) == [
        r"bork",
        r"|bork",
        r"bork |",
        r"bork||bork",
        r"|",
        r"bork \\",
        r"bork \\|",
    ]


# ---- tests/feature/test_outline_empty_values.py ----
"""Scenario Outline with empty example values tests."""

# Fix: the named-group syntax ``(?P<name>...)`` was stripped to ``(?P...)``
# by the dump; restored from the parameter names of the step functions.
EMPTY_VALUE_STEPS = """\
from pytest_bdd import given, when, then, parsers
from pytest_bdd.utils import dump_obj


# Using `parsers.re` so that we can match empty values

@given(parsers.re("there are (?P<start>.*?) cucumbers"))
def _(start):
    dump_obj(start)


@when(parsers.re("I eat (?P<eat>.*?) cucumbers"))
def _(eat):
    dump_obj(eat)


@then(parsers.re("I should have (?P<left>.*?) cucumbers"))
def _(left):
    dump_obj(left)
"""


def test_scenario_with_empty_example_values(pytester):
    """Empty example cells reach the steps as empty strings ('#' stays literal)."""
    pytester.makefile(
        ".feature",
        outline=textwrap.dedent(
            """\
            Feature: Outline
                Scenario Outline: Outlined with empty example values
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    Then I should have <left> cucumbers

                    Examples:
                    | start | eat | left |
                    | #     |     |      |
            """
        ),
    )
    pytester.makeconftest(textwrap.dedent(EMPTY_VALUE_STEPS))

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd.utils import dump_obj
            from pytest_bdd import scenario
            import json


            @scenario("outline.feature", "Outlined with empty example values")
            def test_outline():
                pass
            """
        )
    )
    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=1)
    assert collect_dumped_objects(result) == ["#", "", ""]


# ---- tests/feature/test_report.py (header) ----
"""Test scenario reporting."""
import pytest
from typing import Optional


class OfType:
    """Helper object comparison to which is always 'equal'."""

    def __init__(self, type: Optional[type] = None) -> None:
        # Annotation corrected to Optional: the default is None.  The
        # builtin-shadowing parameter name is kept for call compatibility.
        self.type = type

    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.type) if self.type else True
pytester.makefile( ".feature", test=textwrap.dedent( """ @feature-tag Feature: One passing scenario, one failing scenario @scenario-passing-tag Scenario: Passing Given a passing step And some other passing step @scenario-failing-tag Scenario: Failing Given a passing step And a failing step Scenario Outline: Outlined Given there are cucumbers When I eat cucumbers Then I should have cucumbers Examples: | start | eat | left | | 12 | 5 | 7 | | 5 | 4 | 1 | """ ), ) relpath = feature.relative_to(pytester.path.parent) pytester.makepyfile( textwrap.dedent( """ import pytest from pytest_bdd import given, when, then, scenarios, parsers @given('a passing step') def _(): return 'pass' @given('some other passing step') def _(): return 'pass' @given('a failing step') def _(): raise Exception('Error') @given(parsers.parse('there are {start:d} cucumbers'), target_fixture="cucumbers") def _(start): assert isinstance(start, int) return {"start": start} @when(parsers.parse('I eat {eat:g} cucumbers')) def _(cucumbers, eat): assert isinstance(eat, float) cucumbers['eat'] = eat @then(parsers.parse('I should have {left} cucumbers')) def _(cucumbers, left): assert isinstance(left, str) assert cucumbers['start'] - cucumbers['eat'] == int(left) scenarios('test.feature') """ ) ) result = pytester.inline_run("-vvl") assert result.ret report = result.matchreport("test_passing", when="call").scenario expected = { "feature": { "description": "", "filename": str(feature), "line_number": 2, "name": "One passing scenario, one failing scenario", "rel_filename": str(relpath), "tags": ["feature-tag"], }, "line_number": 5, "name": "Passing", "steps": [ { "duration": OfType(float), "failed": False, "keyword": "Given", "line_number": 6, "name": "a passing step", "type": "given", }, { "duration": OfType(float), "failed": False, "keyword": "And", "line_number": 7, "name": "some other passing step", "type": "given", }, ], "tags": ["scenario-passing-tag"], } assert report == expected report = 
result.matchreport("test_failing", when="call").scenario expected = { "feature": { "description": "", "filename": str(feature), "line_number": 2, "name": "One passing scenario, one failing scenario", "rel_filename": str(relpath), "tags": ["feature-tag"], }, "line_number": 10, "name": "Failing", "steps": [ { "duration": OfType(float), "failed": False, "keyword": "Given", "line_number": 11, "name": "a passing step", "type": "given", }, { "duration": OfType(float), "failed": True, "keyword": "And", "line_number": 12, "name": "a failing step", "type": "given", }, ], "tags": ["scenario-failing-tag"], } assert report == expected report = result.matchreport("test_outlined[12-5-7]", when="call").scenario expected = { "feature": { "description": "", "filename": str(feature), "line_number": 2, "name": "One passing scenario, one failing scenario", "rel_filename": str(relpath), "tags": ["feature-tag"], }, "line_number": 14, "name": "Outlined", "steps": [ { "duration": OfType(float), "failed": False, "keyword": "Given", "line_number": 15, "name": "there are 12 cucumbers", "type": "given", }, { "duration": OfType(float), "failed": False, "keyword": "When", "line_number": 16, "name": "I eat 5 cucumbers", "type": "when", }, { "duration": OfType(float), "failed": False, "keyword": "Then", "line_number": 17, "name": "I should have 7 cucumbers", "type": "then", }, ], "tags": [], } assert report == expected report = result.matchreport("test_outlined[5-4-1]", when="call").scenario expected = { "feature": { "description": "", "filename": str(feature), "line_number": 2, "name": "One passing scenario, one failing scenario", "rel_filename": str(relpath), "tags": ["feature-tag"], }, "line_number": 14, "name": "Outlined", "steps": [ { "duration": OfType(float), "failed": False, "keyword": "Given", "line_number": 15, "name": "there are 5 cucumbers", "type": "given", }, { "duration": OfType(float), "failed": False, "keyword": "When", "line_number": 16, "name": "I eat 4 cucumbers", "type": 
def test_complex_types(pytester, pytestconfig):
    """Test serialization of the complex types."""
    # xdist's execnet serializer is what actually round-trips the report;
    # skip when it is not installed.
    if not pytestconfig.pluginmanager.has_plugin("xdist"):
        pytest.skip("Execnet not installed")

    import execnet.gateway_base

    # NOTE(review): the ``<point>`` placeholder was stripped by the dump and
    # is restored from the ``{point}`` step parser below.
    pytester.makefile(
        ".feature",
        test=textwrap.dedent(
            """
            Feature: Report serialization containing parameters of complex types

                Scenario Outline: Complex
                    Given there is a coordinate <point>

                    Examples:
                    | point |
                    | 10,20 |
            """
        ),
    )
    pytester.makepyfile(
        textwrap.dedent(
            """
            import pytest
            from pytest_bdd import given, when, then, scenario, parsers


            class Point:
                def __init__(self, x, y):
                    self.x = x
                    self.y = y

                @classmethod
                def parse(cls, value):
                    return cls(*(int(x) for x in value.split(',')))


            class Alien(object):
                pass


            @given(
                parsers.parse('there is a coordinate {point}'),
                target_fixture="point",
                converters={"point": Point.parse},
            )
            def given_there_is_a_point(point):
                assert isinstance(point, Point)
                return point


            @pytest.mark.parametrize('alien', [Alien()])
            @scenario('test.feature', 'Complex')
            def test_complex(alien):
                pass

            """
        )
    )
    result = pytester.inline_run("-vvl")
    report = result.matchreport("test_complex[10,20-alien0]", when="call")
    assert report.passed
    # Both the item and the scenario report must survive execnet dumping.
    assert execnet.gateway_base.dumps(report.item)
    assert execnet.gateway_base.dumps(report.scenario)
# ---- tests/feature/test_scenario.py ----
"""Test scenario decorator."""
import textwrap


def test_scenario_not_found(pytester, pytest_params):
    """Test the situation when scenario is not found."""
    pytester.makefile(
        ".feature",
        not_found=textwrap.dedent(
            """\
            Feature: Scenario is not found

            """
        ),
    )
    pytester.makepyfile(
        textwrap.dedent(
            """\
            import re
            import pytest
            from pytest_bdd import parsers, given, then, scenario


            @scenario("not_found.feature", "NOT FOUND")
            def test_not_found():
                pass

            """
        )
    )
    result = pytester.runpytest_subprocess(*pytest_params)

    result.assert_outcomes(errors=1)
    result.stdout.fnmatch_lines('*Scenario "NOT FOUND" in feature "Scenario is not found" in*')


def test_scenario_comments(pytester):
    """Test comments inside scenario."""
    pytester.makefile(
        ".feature",
        comments=textwrap.dedent(
            """\
            Feature: Comments
                Scenario: Comments
                    # Comment
                    Given I have a bar

                Scenario: Strings that are not comments
                    Given comments should be at the start of words
                    Then this is not a#comment
                    And this is not "#acomment"

            """
        ),
    )

    pytester.makepyfile(
        textwrap.dedent(
            """\
            import re
            import pytest
            from pytest_bdd import parsers, given, then, scenario


            @scenario("comments.feature", "Comments")
            def test_1():
                pass


            @scenario("comments.feature", "Strings that are not comments")
            def test_2():
                pass


            @given("I have a bar")
            def _():
                return "bar"


            @given("comments should be at the start of words")
            def _():
                pass


            @then(parsers.parse("this is not {acomment}"))
            def _(acomment):
                assert re.search("a.*comment", acomment)

            """
        )
    )
    result = pytester.runpytest()

    result.assert_outcomes(passed=2)


def test_scenario_not_decorator(pytester, pytest_params):
    """Test scenario function is used not as decorator."""
    pytester.makefile(
        ".feature",
        foo="""
        Feature: Test function is not a decorator
            Scenario: Foo
                Given I have a bar
        """,
    )
    pytester.makepyfile(
        """
        from pytest_bdd import scenario

        test_foo = scenario('foo.feature', 'Foo')
        """
    )

    result = pytester.runpytest_subprocess(*pytest_params)

    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines("*ScenarioIsDecoratorOnly: scenario function can only be used as a decorator*")


def test_simple(pytester, pytest_params):
    """Test scenario decorator with a standard usage."""
    pytester.makefile(
        ".feature",
        simple="""
        Feature: Simple feature
            Scenario: Simple scenario
                Given I have a bar
        """,
    )
    pytester.makepyfile(
        """
        from pytest_bdd import scenario, given, then


        @scenario("simple.feature", "Simple scenario")
        def test_simple():
            pass


        @given("I have a bar")
        def _():
            return "bar"


        @then("pass")
        def _():
            pass
        """
    )
    result = pytester.runpytest_subprocess(*pytest_params)
    result.assert_outcomes(passed=1)


# NOTE(review): the ``<tag>``/``<foo>`` placeholders below were stripped by
# the dump and are restored from the step definitions in the test body.
def test_angular_brakets_are_not_parsed(pytester):
    """Test that angular brackets are not parsed for "Scenario"s.

    (They should be parsed only when used in "Scenario Outline")
    """
    pytester.makefile(
        ".feature",
        simple="""
        Feature: Simple feature
            Scenario: Simple scenario
                Given I have a <tag>
                Then pass

            Scenario Outline: Outlined scenario
                Given I have a templated <foo>
                Then pass

                Examples:
                | foo |
                | bar |
        """,
    )
    pytester.makepyfile(
        """
        from pytest_bdd import scenarios, given, then, parsers

        scenarios("simple.feature")


        @given("I have a <tag>")
        def _():
            return "tag"


        @given(parsers.parse("I have a templated {foo}"))
        def _(foo):
            return "foo"


        @then("pass")
        def _():
            pass
        """
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=2)


# ---- tests/feature/test_scenarios.py ----
"""Test scenarios shortcut."""


def test_scenarios(pytester, pytest_params):
    """Test scenarios shortcut (used together with @scenario for individual test override)."""
    pytester.makeini(
        """
        [pytest]
        console_output_style=classic
        """
    )
    pytester.makeconftest(
        """
        import pytest
        from pytest_bdd import given

        @given('I have a bar')
        def _():
            print('bar!')
            return 'bar'
        """
    )
    features = pytester.mkdir("features")
    features.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Scenario: Test scenario
                Given I have a bar
            """
        ),
        "utf-8",
    )
    subfolder = features.joinpath("subfolder")
    subfolder.mkdir()
    subfolder.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Scenario: Test subfolder scenario
                Given I have a bar

            Scenario: Test failing subfolder scenario
                Given I have a failing bar

            Scenario: Test already bound scenario
                Given I have a bar

            Scenario: Test scenario
                Given I have a bar
            """
        ),
        "utf-8",
    )
    pytester.makepyfile(
        """
        import pytest
        from pytest_bdd import scenarios, scenario

        @scenario('features/subfolder/test.feature', 'Test already bound scenario')
        def test_already_bound():
            pass

        scenarios('features')
        """
    )
    result = pytester.runpytest_subprocess("-v", "-s", *pytest_params)
    result.assert_outcomes(passed=4, failed=1)
    # NOTE(review): the SOURCE chunk ends here; the upstream test may
    # continue with stdout assertions — confirm against the full file.
result.stdout.fnmatch_lines(["*collected 5 items"]) result.stdout.fnmatch_lines(["*test_test_subfolder_scenario *bar!", "PASSED"]) result.stdout.fnmatch_lines(["*test_test_scenario *bar!", "PASSED"]) result.stdout.fnmatch_lines(["*test_test_failing_subfolder_scenario *FAILED"]) result.stdout.fnmatch_lines(["*test_already_bound *bar!", "PASSED"]) result.stdout.fnmatch_lines(["*test_test_scenario_1 *bar!", "PASSED"]) def test_scenarios_none_found(pytester, pytest_params): """Test scenarios shortcut when no scenarios found.""" testpath = pytester.makepyfile( """ import pytest from pytest_bdd import scenarios scenarios('.') """ ) result = pytester.runpytest_subprocess(testpath, *pytest_params) result.assert_outcomes(errors=1) result.stdout.fnmatch_lines(["*NoScenariosFound*"]) pytest-bdd-7.1.2/tests/feature/test_steps.py000066400000000000000000000354041457564257700212000ustar00rootroot00000000000000import textwrap def test_steps(pytester): pytester.makefile( ".feature", steps=textwrap.dedent( """\ Feature: Steps are executed one by one Steps are executed one by one. Given and When sections are not mandatory in some cases. 
Scenario: Executed step by step Given I have a foo fixture with value "foo" And there is a list When I append 1 to the list And I append 2 to the list And I append 3 to the list Then foo should have value "foo" But the list should be [1, 2, 3] """ ), ) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import given, when, then, scenario @scenario("steps.feature", "Executed step by step") def test_steps(): pass @given('I have a foo fixture with value "foo"', target_fixture="foo") def _(): return "foo" @given("there is a list", target_fixture="results") def _(): return [] @when("I append 1 to the list") def _(results): results.append(1) @when("I append 2 to the list") def _(results): results.append(2) @when("I append 3 to the list") def _(results): results.append(3) @then('foo should have value "foo"') def _(foo): assert foo == "foo" @then("the list should be [1, 2, 3]") def _(results): assert results == [1, 2, 3] """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1, failed=0) def test_step_function_can_be_decorated_multiple_times(pytester): pytester.makefile( ".feature", steps=textwrap.dedent( """\ Feature: Steps decoration Scenario: Step function can be decorated multiple times Given there is a foo with value 42 And there is a second foo with value 43 When I do nothing And I do nothing again Then I make no mistakes And I make no mistakes again """ ), ) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import given, when, then, scenario, parsers @scenario("steps.feature", "Step function can be decorated multiple times") def test_steps(): pass @given(parsers.parse("there is a foo with value {value}"), target_fixture="foo") @given(parsers.parse("there is a second foo with value {value}"), target_fixture="second_foo") def _(value): return value @when("I do nothing") @when("I do nothing again") def _(): pass @then("I make no mistakes") @then("I make no mistakes again") def _(): assert True """ ) ) result = pytester.runpytest() 
result.assert_outcomes(passed=1, failed=0) def test_all_steps_can_provide_fixtures(pytester): """Test that given/when/then can all provide fixtures.""" pytester.makefile( ".feature", steps=textwrap.dedent( """\ Feature: Step fixture Scenario: Given steps can provide fixture Given Foo is "bar" Then foo should be "bar" Scenario: When steps can provide fixture When Foo is "baz" Then foo should be "baz" Scenario: Then steps can provide fixture Then foo is "qux" And foo should be "qux" """ ), ) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import given, when, then, parsers, scenarios scenarios("steps.feature") @given(parsers.parse('Foo is "{value}"'), target_fixture="foo") def _(value): return value @when(parsers.parse('Foo is "{value}"'), target_fixture="foo") def _(value): return value @then(parsers.parse('Foo is "{value}"'), target_fixture="foo") def _(value): return value @then(parsers.parse('foo should be "{value}"')) def _(foo, value): assert foo == value """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=3, failed=0) def test_when_first(pytester): pytester.makefile( ".feature", steps=textwrap.dedent( """\ Feature: Steps are executed one by one Steps are executed one by one. Given and When sections are not mandatory in some cases. Scenario: When step can be the first When I do nothing Then I make no mistakes """ ), ) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import when, then, scenario @scenario("steps.feature", "When step can be the first") def test_steps(): pass @when("I do nothing") def _(): pass @then("I make no mistakes") def _(): assert True """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1, failed=0) def test_then_after_given(pytester): pytester.makefile( ".feature", steps=textwrap.dedent( """\ Feature: Steps are executed one by one Steps are executed one by one. Given and When sections are not mandatory in some cases. 
Scenario: Then step can follow Given step Given I have a foo fixture with value "foo" Then foo should have value "foo" """ ), ) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import given, then, scenario @scenario("steps.feature", "Then step can follow Given step") def test_steps(): pass @given('I have a foo fixture with value "foo"', target_fixture="foo") def _(): return "foo" @then('foo should have value "foo"') def _(foo): assert foo == "foo" """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1, failed=0) def test_conftest(pytester): pytester.makefile( ".feature", steps=textwrap.dedent( """\ Feature: Steps are executed one by one Steps are executed one by one. Given and When sections are not mandatory in some cases. Scenario: All steps are declared in the conftest Given I have a bar Then bar should have value "bar" """ ), ) pytester.makeconftest( textwrap.dedent( """\ from pytest_bdd import given, then @given("I have a bar", target_fixture="bar") def _(): return "bar" @then('bar should have value "bar"') def _(bar): assert bar == "bar" """ ) ) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import scenario @scenario("steps.feature", "All steps are declared in the conftest") def test_steps(): pass """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1, failed=0) def test_multiple_given(pytester): """Using the same given fixture raises an error.""" pytester.makefile( ".feature", steps=textwrap.dedent( """\ Feature: Steps are executed one by one Scenario: Using the same given twice Given foo is "foo" And foo is "bar" Then foo should be "bar" """ ), ) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import parsers, given, then, scenario @given(parsers.parse("foo is {value}"), target_fixture="foo") def _(value): return value @then(parsers.parse("foo should be {value}")) def _(foo, value): assert foo == value @scenario("steps.feature", "Using the same given twice") def test_given_twice(): pass """ ) ) 
result = pytester.runpytest() result.assert_outcomes(passed=1, failed=0) def test_step_hooks(pytester): """When step fails.""" pytester.makefile( ".feature", test=""" Scenario: When step has hook on failure Given I have a bar When it fails Scenario: When step's dependency a has failure Given I have a bar When it's dependency fails Scenario: When step is not found Given not found Scenario: When step validation error happens Given foo And foo """, ) pytester.makepyfile( """ import pytest from pytest_bdd import given, when, scenario @given('I have a bar') def _(): return 'bar' @when('it fails') def _(): raise Exception('when fails') @given('I have a bar') def _(): return 'bar' @pytest.fixture def dependency(): raise Exception('dependency fails') @when("it's dependency fails") def _(dependency): pass @scenario('test.feature', "When step's dependency a has failure") def test_when_dependency_fails(): pass @scenario('test.feature', 'When step has hook on failure') def test_when_fails(): pass @scenario('test.feature', 'When step is not found') def test_when_not_found(): pass @when('foo') def _(): return 'foo' @scenario('test.feature', 'When step validation error happens') def test_when_step_validation_error(): pass """ ) reprec = pytester.inline_run("-k test_when_fails") reprec.assertoutcome(failed=1) calls = reprec.getcalls("pytest_bdd_before_scenario") assert calls[0].request calls = reprec.getcalls("pytest_bdd_after_scenario") assert calls[0].request calls = reprec.getcalls("pytest_bdd_before_step") assert calls[0].request calls = reprec.getcalls("pytest_bdd_before_step_call") assert calls[0].request calls = reprec.getcalls("pytest_bdd_after_step") assert calls[0].request calls = reprec.getcalls("pytest_bdd_step_error") assert calls[0].request reprec = pytester.inline_run("-k test_when_not_found") reprec.assertoutcome(failed=1) calls = reprec.getcalls("pytest_bdd_step_func_lookup_error") assert calls[0].request reprec = pytester.inline_run("-k 
test_when_step_validation_error") reprec.assertoutcome(failed=1) reprec = pytester.inline_run("-k test_when_dependency_fails", "-vv") reprec.assertoutcome(failed=1) calls = reprec.getcalls("pytest_bdd_before_step") assert len(calls) == 2 calls = reprec.getcalls("pytest_bdd_before_step_call") assert len(calls) == 1 calls = reprec.getcalls("pytest_bdd_step_error") assert calls[0].request def test_step_trace(pytester): """Test step trace.""" pytester.makeini( """ [pytest] console_output_style=classic """ ) pytester.makefile( ".feature", test=""" Scenario: When step has failure Given I have a bar When it fails Scenario: When step is not found Given not found Scenario: When step validation error happens Given foo And foo """, ) pytester.makepyfile( """ import pytest from pytest_bdd import given, when, scenario @given('I have a bar') def _(): return 'bar' @when('it fails') def _(): raise Exception('when fails') @scenario('test.feature', 'When step has failure') def test_when_fails_inline(): pass @scenario('test.feature', 'When step has failure') def test_when_fails_decorated(): pass @scenario('test.feature', 'When step is not found') def test_when_not_found(): pass @when('foo') def _(): return 'foo' @scenario('test.feature', 'When step validation error happens') def test_when_step_validation_error(): pass """ ) result = pytester.runpytest("-k test_when_fails_inline", "-vv") result.assert_outcomes(failed=1) result.stdout.fnmatch_lines(["*test_when_fails_inline*FAILED"]) assert "INTERNALERROR" not in result.stdout.str() result = pytester.runpytest("-k test_when_fails_decorated", "-vv") result.assert_outcomes(failed=1) result.stdout.fnmatch_lines(["*test_when_fails_decorated*FAILED"]) assert "INTERNALERROR" not in result.stdout.str() result = pytester.runpytest("-k test_when_not_found", "-vv") result.assert_outcomes(failed=1) result.stdout.fnmatch_lines(["*test_when_not_found*FAILED"]) assert "INTERNALERROR" not in result.stdout.str() result = pytester.runpytest("-k 
test_when_step_validation_error", "-vv") result.assert_outcomes(failed=1) result.stdout.fnmatch_lines(["*test_when_step_validation_error*FAILED"]) assert "INTERNALERROR" not in result.stdout.str() def test_steps_with_yield(pytester): """Test that steps definition containing a yield statement work the same way as pytest fixture do, that is the code after the yield is executed during teardown.""" pytester.makefile( ".feature", a="""\ Feature: A feature Scenario: A scenario When I setup stuff Then stuff should be 42 """, ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import given, when, then, scenarios scenarios("a.feature") @when("I setup stuff", target_fixture="stuff") def _(): print("Setting up...") yield 42 print("Tearing down...") @then("stuff should be 42") def _(stuff): assert stuff == 42 print("Asserted stuff is 42") """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=1) result.stdout.fnmatch_lines( [ "*Setting up...*", "*Asserted stuff is 42*", "*Tearing down...*", ] ) pytest-bdd-7.1.2/tests/feature/test_tags.py000066400000000000000000000130401457564257700207700ustar00rootroot00000000000000"""Test tags.""" import textwrap import pytest from pytest_bdd.parser import get_tags def test_tags_selector(pytester): """Test tests selection by tags.""" pytester.makefile( ".ini", pytest=textwrap.dedent( """ [pytest] markers = feature_tag_1 feature_tag_2 scenario_tag_01 scenario_tag_02 scenario_tag_10 scenario_tag_20 """ ), ) pytester.makefile( ".feature", test=""" @feature_tag_1 @feature_tag_2 Feature: Tags @scenario_tag_01 @scenario_tag_02 Scenario: Tags Given I have a bar @scenario_tag_10 @scenario_tag_20 Scenario: Tags 2 Given I have a bar """, ) pytester.makepyfile( """ import pytest from pytest_bdd import given, scenarios @given('I have a bar') def _(): return 'bar' scenarios('test.feature') """ ) result = pytester.runpytest("-m", "scenario_tag_10 and not scenario_tag_01", "-vv") outcomes = result.parseoutcomes() 
assert outcomes["passed"] == 1 assert outcomes["deselected"] == 1 result = pytester.runpytest("-m", "scenario_tag_01 and not scenario_tag_10", "-vv").parseoutcomes() assert result["passed"] == 1 assert result["deselected"] == 1 result = pytester.runpytest("-m", "feature_tag_1", "-vv").parseoutcomes() assert result["passed"] == 2 result = pytester.runpytest("-m", "feature_tag_10", "-vv").parseoutcomes() assert result["deselected"] == 2 def test_tags_after_background_issue_160(pytester): """Make sure using a tag after background works.""" pytester.makefile( ".ini", pytest=textwrap.dedent( """ [pytest] markers = tag """ ), ) pytester.makefile( ".feature", test=""" Feature: Tags after background Background: Given I have a bar @tag Scenario: Tags Given I have a baz Scenario: Tags 2 Given I have a baz """, ) pytester.makepyfile( """ import pytest from pytest_bdd import given, scenarios @given('I have a bar') def _(): return 'bar' @given('I have a baz') def _(): return 'baz' scenarios('test.feature') """ ) result = pytester.runpytest("-m", "tag", "-vv").parseoutcomes() assert result["passed"] == 1 assert result["deselected"] == 1 def test_apply_tag_hook(pytester): pytester.makeconftest( """ import pytest @pytest.hookimpl(tryfirst=True) def pytest_bdd_apply_tag(tag, function): if tag == 'todo': marker = pytest.mark.skipif(True, reason="Not implemented yet") marker(function) return True else: # Fall back to pytest-bdd's default behavior return None """ ) pytester.makefile( ".feature", test=""" Feature: Customizing tag handling @todo Scenario: Tags Given I have a bar @xfail Scenario: Tags 2 Given I have a bar """, ) pytester.makepyfile( """ from pytest_bdd import given, scenarios @given('I have a bar') def _(): return 'bar' scenarios('test.feature') """ ) result = pytester.runpytest("-rsx") result.stdout.fnmatch_lines(["SKIP*: Not implemented yet"]) result.stdout.fnmatch_lines(["*= 1 skipped, 1 xpassed * =*"]) def test_tag_with_spaces(pytester): pytester.makefile( ".ini", 
pytest=textwrap.dedent( """ [pytest] markers = test with spaces """ ), ) pytester.makeconftest( """ import pytest @pytest.hookimpl(tryfirst=True) def pytest_bdd_apply_tag(tag, function): assert tag == 'test with spaces' """ ) pytester.makefile( ".feature", test=""" Feature: Tag with spaces @test with spaces Scenario: Tags Given I have a bar """, ) pytester.makepyfile( """ from pytest_bdd import given, scenarios @given('I have a bar') def _(): return 'bar' scenarios('test.feature') """ ) result = pytester.runpytest_subprocess() result.stdout.fnmatch_lines(["*= 1 passed * =*"]) def test_at_in_scenario(pytester): pytester.makefile( ".feature", test=""" Feature: At sign in a scenario Scenario: Tags Given I have a foo@bar Scenario: Second Given I have a baz """, ) pytester.makepyfile( """ from pytest_bdd import given, scenarios @given('I have a foo@bar') def _(): return 'foo@bar' @given('I have a baz') def _(): return 'baz' scenarios('test.feature') """ ) strict_option = "--strict-markers" result = pytester.runpytest_subprocess(strict_option) result.stdout.fnmatch_lines(["*= 2 passed * =*"]) @pytest.mark.parametrize( "line, expected", [ ("@foo @bar", {"foo", "bar"}), ("@with spaces @bar", {"with spaces", "bar"}), ("@double @double", {"double"}), (" @indented", {"indented"}), (None, set()), ("foobar", set()), ("", set()), ], ) def test_get_tags(line, expected): assert get_tags(line) == expected pytest-bdd-7.1.2/tests/feature/test_wrong.py000066400000000000000000000026151457564257700211740ustar00rootroot00000000000000"""Test wrong feature syntax.""" import textwrap def test_multiple_features_single_file(pytester): """Test validation error when multiple features are placed in a single file.""" pytester.makefile( ".feature", wrong=textwrap.dedent( """\ Feature: Feature One Background: Given I have A And I have B Scenario: Do something with A When I do something with A Then something about B Feature: Feature Two Background: Given I have A Scenario: Something that just needs 
A When I do something else with A Then something else about B Scenario: Something that needs B again Given I have B When I do something else with B Then something else about A and B """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import then, scenario @scenario("wrong.feature", "Do something with A") def test_wrong(): pass """ ) ) result = pytester.runpytest() result.assert_outcomes(errors=1) result.stdout.fnmatch_lines("*FeatureError: Multiple features are not allowed in a single feature file.*") pytest-bdd-7.1.2/tests/generation/000077500000000000000000000000001457564257700171235ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/generation/__init__.py000066400000000000000000000000001457564257700212220ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/generation/test_generate_missing.py000066400000000000000000000104021457564257700240540ustar00rootroot00000000000000"""Code generation and assertion tests.""" import itertools import textwrap from pytest_bdd.scenario import get_python_name_generator def test_python_name_generator(): """Test python name generator function.""" assert list(itertools.islice(get_python_name_generator("Some name"), 3)) == [ "test_some_name", "test_some_name_1", "test_some_name_2", ] def test_generate_missing(pytester): """Test generate missing command.""" pytester.makefile( ".feature", generation=textwrap.dedent( """\ Feature: Missing code generation Background: Given I have a foobar Scenario: Scenario tests which are already bound to the tests stay as is Given I have a bar Scenario: Code is generated for scenarios which are not bound to any tests Given I have a bar Scenario: Code is generated for scenario steps which are not yet defined(implemented) Given I have a custom bar """ ), ) pytester.makepyfile( textwrap.dedent( """\ import functools from pytest_bdd import scenario, given scenario = functools.partial(scenario, "generation.feature") @given("I have a bar") def _(): return "bar" @scenario("Scenario 
tests which are already bound to the tests stay as is") def test_foo(): pass @scenario("Code is generated for scenario steps which are not yet defined(implemented)") def test_missing_steps(): pass """ ) ) result = pytester.runpytest("--generate-missing", "--feature", "generation.feature") result.assert_outcomes(passed=0, failed=0, errors=0) assert not result.stderr.str() assert result.ret == 0 result.stdout.fnmatch_lines( ['Scenario "Code is generated for scenarios which are not bound to any tests" is not bound to any test *'] ) result.stdout.fnmatch_lines( [ 'Step Given "I have a custom bar" is not defined in the scenario ' '"Code is generated for scenario steps which are not yet defined(implemented)" *' ] ) result.stdout.fnmatch_lines( ['Step Given "I have a foobar" is not defined in the background of the feature "Missing code generation" *'] ) result.stdout.fnmatch_lines(["Please place the code above to the test file(s):"]) def test_generate_missing_with_step_parsers(pytester): """Test that step parsers are correctly discovered and won't be part of the missing steps.""" pytester.makefile( ".feature", generation=textwrap.dedent( """\ Feature: Missing code generation with step parsers Scenario: Step parsers are correctly discovered Given I use the string parser without parameter And I use parsers.parse with parameter 1 And I use parsers.re with parameter 2 And I use parsers.cfparse with parameter 3 """ ), ) pytester.makepyfile( textwrap.dedent( """\ import functools from pytest_bdd import scenarios, given, parsers scenarios("generation.feature") @given("I use the string parser without parameter") def _(): return None @given(parsers.parse("I use parsers.parse with parameter {param}")) def _(param): return param @given(parsers.re(r"^I use parsers.re with parameter (?P.*?)$")) def _(param): return param @given(parsers.cfparse("I use parsers.cfparse with parameter {param:d}")) def _(param): return param """ ) ) result = pytester.runpytest("--generate-missing", 
"--feature", "generation.feature") result.assert_outcomes(passed=0, failed=0, errors=0) assert not result.stderr.str() assert result.ret == 0 output = result.stdout.str() assert "I use the string parser" not in output assert "I use parsers.parse" not in output assert "I use parsers.re" not in output assert "I use parsers.cfparse" not in output pytest-bdd-7.1.2/tests/library/000077500000000000000000000000001457564257700164345ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/library/__init__.py000066400000000000000000000000001457564257700205330ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/library/test_parent.py000066400000000000000000000262711457564257700213460ustar00rootroot00000000000000"""Test givens declared in the parent conftest and plugin files. Check the parent givens are collected and overridden in the local conftest. """ import textwrap from pytest_bdd.utils import collect_dumped_objects def test_parent(pytester): """Test parent given is collected. Both fixtures come from the parent conftest. 
""" pytester.makefile( ".feature", parent=textwrap.dedent( """\ Feature: Parent Scenario: Parenting is easy Given I have a parent fixture And I have an overridable fixture """ ), ) pytester.makeconftest( textwrap.dedent( """\ from pytest_bdd import given @given("I have a parent fixture", target_fixture="parent") def _(): return "parent" @given("I have an overridable fixture", target_fixture="overridable") def _(): return "parent" """ ) ) pytester.makepyfile( textwrap.dedent( """\ from pytest_bdd import scenario @scenario("parent.feature", "Parenting is easy") def test_parent(request): assert request.getfixturevalue("parent") == "parent" assert request.getfixturevalue("overridable") == "parent" """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_global_when_step(pytester): """Test when step defined in the parent conftest.""" pytester.makefile( ".feature", global_when=textwrap.dedent( """\ Feature: Global when Scenario: Global when step defined in parent conftest When I use a when step from the parent conftest """ ), ) pytester.makeconftest( textwrap.dedent( """\ from pytest_bdd import when from pytest_bdd.utils import dump_obj @when("I use a when step from the parent conftest") def _(): dump_obj("global when step") """ ) ) pytester.mkpydir("subdir").joinpath("test_global_when.py").write_text( textwrap.dedent( """\ from pytest_bdd import scenarios scenarios("../global_when.feature") """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=1) [collected_object] = collect_dumped_objects(result) assert collected_object == "global when step" def test_child(pytester): """Test the child conftest overriding the fixture.""" pytester.makeconftest( textwrap.dedent( """\ from pytest_bdd import given @given("I have a parent fixture", target_fixture="parent") def _(): return "parent" @given("I have an overridable fixture", target_fixture="overridable") def main_conftest(): return "parent" """ ) ) subdir = pytester.mkpydir("subdir") 
subdir.joinpath("conftest.py").write_text( textwrap.dedent( """\ from pytest_bdd import given @given("I have an overridable fixture", target_fixture="overridable") def subdir_conftest(): return "child" """ ) ) subdir.joinpath("child.feature").write_text( textwrap.dedent( """\ Feature: Child Scenario: Happy childhood Given I have a parent fixture And I have an overridable fixture """ ), ) subdir.joinpath("test_library.py").write_text( textwrap.dedent( """\ from pytest_bdd import scenario @scenario("child.feature", "Happy childhood") def test_override(request): assert request.getfixturevalue("parent") == "parent" assert request.getfixturevalue("overridable") == "child" """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_local(pytester): """Test locally overridden fixtures.""" pytester.makeconftest( textwrap.dedent( """\ from pytest_bdd import given @given("I have a parent fixture", target_fixture="parent") def _(): return "parent" @given("I have an overridable fixture", target_fixture="overridable") def _(): return "parent" """ ) ) subdir = pytester.mkpydir("subdir") subdir.joinpath("local.feature").write_text( textwrap.dedent( """\ Feature: Local Scenario: Local override Given I have a parent fixture And I have an overridable fixture """ ), ) subdir.joinpath("test_library.py").write_text( textwrap.dedent( """\ from pytest_bdd import given, scenario @given("I have an overridable fixture", target_fixture="overridable") def _(): return "local" @given("I have a parent fixture", target_fixture="parent") def _(): return "local" @scenario("local.feature", "Local override") def test_local(request): assert request.getfixturevalue("parent") == "local" assert request.getfixturevalue("overridable") == "local" """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_uses_correct_step_in_the_hierarchy(pytester): """ Test regression found in issue #524, where we couldn't find the correct step implemntation in the hierarchy of 
files/folder as expected. This test uses many files and folders that act as decoy, while the real step implementation is defined in the last file (test_b/test_b.py). """ pytester.makefile( ".feature", specific=textwrap.dedent( """\ Feature: Specificity of steps Scenario: Overlapping steps Given I have a specific thing Then pass """ ), ) pytester.makeconftest( textwrap.dedent( """\ from pytest_bdd import parsers, given, then from pytest_bdd.utils import dump_obj import pytest @given(parsers.re("(?P.*)")) def root_conftest_catchall(thing): dump_obj(thing + " (catchall) root_conftest") @given(parsers.parse("I have a {thing} thing")) def root_conftest(thing): dump_obj(thing + " root_conftest") @given("I have a specific thing") def root_conftest_specific(): dump_obj("specific" + "(specific) root_conftest") @then("pass") def _(): pass """ ) ) # Adding deceiving @when steps around the real test, so that we can check if the right one is used # the right one is the one in test_b/test_b.py # We purposefully use test_a and test_c as decoys (while test_b/test_b is "good one"), so that we can test that # we pick the right one. 
pytester.makepyfile( test_a="""\ from pytest_bdd import given, parsers from pytest_bdd.utils import dump_obj @given(parsers.re("(?P.*)")) def in_root_test_a_catch_all(thing): dump_obj(thing + " (catchall) test_a") @given(parsers.parse("I have a specific thing")) def in_root_test_a_specific(): dump_obj("specific" + " (specific) test_a") @given(parsers.parse("I have a {thing} thing")) def in_root_test_a(thing): dump_obj(thing + " root_test_a") """ ) pytester.makepyfile( test_c="""\ from pytest_bdd import given, parsers from pytest_bdd.utils import dump_obj @given(parsers.re("(?P.*)")) def in_root_test_c_catch_all(thing): dump_obj(thing + " (catchall) test_c") @given(parsers.parse("I have a specific thing")) def in_root_test_c_specific(): dump_obj("specific" + " (specific) test_c") @given(parsers.parse("I have a {thing} thing")) def in_root_test_c(thing): dump_obj(thing + " root_test_b") """ ) test_b_folder = pytester.mkpydir("test_b") # More decoys: test_b/test_a.py and test_b/test_c.py test_b_folder.joinpath("test_a.py").write_text( textwrap.dedent( """\ from pytest_bdd import given, parsers from pytest_bdd.utils import dump_obj @given(parsers.re("(?P.*)")) def in_root_test_b_test_a_catch_all(thing): dump_obj(thing + " (catchall) test_b_test_a") @given(parsers.parse("I have a specific thing")) def in_test_b_test_a_specific(): dump_obj("specific" + " (specific) test_b_test_a") @given(parsers.parse("I have a {thing} thing")) def in_test_b_test_a(thing): dump_obj(thing + " test_b_test_a") """ ) ) test_b_folder.joinpath("test_c.py").write_text( textwrap.dedent( """\ from pytest_bdd import given, parsers from pytest_bdd.utils import dump_obj @given(parsers.re("(?P.*)")) def in_root_test_b_test_c_catch_all(thing): dump_obj(thing + " (catchall) test_b_test_c") @given(parsers.parse("I have a specific thing")) def in_test_b_test_c_specific(): dump_obj("specific" + " (specific) test_a_test_c") @given(parsers.parse("I have a {thing} thing")) def in_test_b_test_c(thing): 
dump_obj(thing + " test_c_test_a") """ ) ) # Finally, the file with the actual step definition that should be used test_b_folder.joinpath("test_b.py").write_text( textwrap.dedent( """\ from pytest_bdd import scenarios, given, parsers from pytest_bdd.utils import dump_obj scenarios("../specific.feature") @given(parsers.parse("I have a {thing} thing")) def in_test_b_test_b(thing): dump_obj(f"{thing} test_b_test_b") """ ) ) test_b_folder.joinpath("test_b_alternative.py").write_text( textwrap.dedent( """\ from pytest_bdd import scenarios, given, parsers from pytest_bdd.utils import dump_obj scenarios("../specific.feature") # Here we try to use an argument different from the others, # to make sure it doesn't matter if a new step parser string is encountered. @given(parsers.parse("I have a {t} thing")) def in_test_b_test_b(t): dump_obj(f"{t} test_b_test_b") """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=2) [thing1, thing2] = collect_dumped_objects(result) assert thing1 == thing2 == "specific test_b_test_b" pytest-bdd-7.1.2/tests/scripts/000077500000000000000000000000001457564257700164575ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/scripts/__init__.py000066400000000000000000000000001457564257700205560ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/scripts/test_generate.py000066400000000000000000000135331457564257700216670ustar00rootroot00000000000000"""Test code generation command.""" import os import sys import textwrap from pytest_bdd.scripts import main PATH = os.path.dirname(__file__) def test_generate(pytester, monkeypatch, capsys): """Test if the code is generated by a given feature.""" features = pytester.mkdir("scripts") feature = features.joinpath("generate.feature") feature.write_text( textwrap.dedent( """\ Feature: Code generation Scenario: Given and when using the same fixture should not evaluate it twice Given I have an empty list And 1 have a fixture (appends 1 to a list) in reuse syntax When I use this fixture Then my 
list should be [1] """ ), "utf-8", ) monkeypatch.setattr(sys, "argv", ["", "generate", str(feature)]) main() out, err = capsys.readouterr() assert out == textwrap.dedent( '''\ """Code generation feature tests.""" from pytest_bdd import ( given, scenario, then, when, ) @scenario('scripts/generate.feature', 'Given and when using the same fixture should not evaluate it twice') def test_given_and_when_using_the_same_fixture_should_not_evaluate_it_twice(): """Given and when using the same fixture should not evaluate it twice.""" @given('1 have a fixture (appends 1 to a list) in reuse syntax') def _(): """1 have a fixture (appends 1 to a list) in reuse syntax.""" raise NotImplementedError @given('I have an empty list') def _(): """I have an empty list.""" raise NotImplementedError @when('I use this fixture') def _(): """I use this fixture.""" raise NotImplementedError @then('my list should be [1]') def _(): """my list should be [1].""" raise NotImplementedError ''' ) def test_generate_with_quotes(pytester): """Test that code generation escapes quote characters properly.""" pytester.makefile( ".feature", generate_with_quotes=textwrap.dedent( '''\ Feature: Handling quotes in code generation Scenario: A step definition with quotes should be escaped as needed Given I have a fixture with 'single' quotes And I have a fixture with "double" quotes And I have a fixture with single-quote \'\'\'triple\'\'\' quotes And I have a fixture with double-quote """triple""" quotes When I generate the code Then The generated string should be written ''' ), ) result = pytester.run("pytest-bdd", "generate", "generate_with_quotes.feature") assert result.stdout.str() == textwrap.dedent( '''\ """Handling quotes in code generation feature tests.""" from pytest_bdd import ( given, scenario, then, when, ) @scenario('generate_with_quotes.feature', 'A step definition with quotes should be escaped as needed') def test_a_step_definition_with_quotes_should_be_escaped_as_needed(): """A step definition 
with quotes should be escaped as needed.""" @given('I have a fixture with "double" quotes') def _(): """I have a fixture with "double" quotes.""" raise NotImplementedError @given('I have a fixture with \\'single\\' quotes') def _(): """I have a fixture with 'single' quotes.""" raise NotImplementedError @given('I have a fixture with double-quote """triple""" quotes') def _(): """I have a fixture with double-quote \\"\\"\\"triple\\"\\"\\" quotes.""" raise NotImplementedError @given('I have a fixture with single-quote \\'\\'\\'triple\\'\\'\\' quotes') def _(): """I have a fixture with single-quote \'\'\'triple\'\'\' quotes.""" raise NotImplementedError @when('I generate the code') def _(): """I generate the code.""" raise NotImplementedError @then('The generated string should be written') def _(): """The generated string should be written.""" raise NotImplementedError ''' ) def test_unicode_characters(pytester, monkeypatch): """Test generating code with unicode characters. Primary purpose is to ensure compatibility with Python2. 
""" pytester.makefile( ".feature", unicode_characters=textwrap.dedent( """\ Feature: Generating unicode characters Scenario: Calculating the circumference of a circle Given We have a circle When We want to know its circumference Then We calculate 2 * ℼ * 𝑟 """ ), ) result = pytester.run("pytest-bdd", "generate", "unicode_characters.feature") expected_output = textwrap.dedent( '''\ """Generating unicode characters feature tests.""" from pytest_bdd import ( given, scenario, then, when, ) @scenario('unicode_characters.feature', 'Calculating the circumference of a circle') def test_calculating_the_circumference_of_a_circle(): """Calculating the circumference of a circle.""" @given('We have a circle') def _(): """We have a circle.""" raise NotImplementedError @when('We want to know its circumference') def _(): """We want to know its circumference.""" raise NotImplementedError @then('We calculate 2 * ℼ * 𝑟') def _(): """We calculate 2 * ℼ * 𝑟.""" raise NotImplementedError ''' ) assert result.stdout.str() == expected_output pytest-bdd-7.1.2/tests/scripts/test_main.py000066400000000000000000000007141457564257700210160ustar00rootroot00000000000000"""Main command.""" import os import sys from pytest_bdd.scripts import main PATH = os.path.dirname(__file__) def test_main(monkeypatch, capsys): """Test if main command shows help when called without the subcommand.""" monkeypatch.setattr(sys, "argv", ["pytest-bdd"]) monkeypatch.setattr(sys, "exit", lambda x: x) main() out, err = capsys.readouterr() assert "usage: pytest-bdd [-h]" in err assert "pytest-bdd: error:" in err pytest-bdd-7.1.2/tests/scripts/test_migrate.py000066400000000000000000000021621457564257700215210ustar00rootroot00000000000000"""Test code generation command.""" import os import sys import textwrap from pytest_bdd.scripts import main PATH = os.path.dirname(__file__) def test_migrate(monkeypatch, capsys, pytester): """Test if the code is migrated by a given file mask.""" tests = pytester.mkpydir("tests") 
tests.joinpath("test_foo.py").write_text( textwrap.dedent( ''' """Foo bar tests.""" from pytest_bdd import scenario test_foo = scenario('foo_bar.feature', 'Foo bar') ''' ) ) monkeypatch.setattr(sys, "argv", ["", "migrate", str(tests)]) main() out, err = capsys.readouterr() out = "\n".join(sorted(out.splitlines())) expected = textwrap.dedent( """ migrated: {0}/test_foo.py skipped: {0}/__init__.py""".format( str(tests) )[ 1: ] ) assert out == expected assert tests.joinpath("test_foo.py").read_text() == textwrap.dedent( ''' """Foo bar tests.""" from pytest_bdd import scenario @scenario('foo_bar.feature', 'Foo bar') def test_foo(): pass ''' ) pytest-bdd-7.1.2/tests/steps/000077500000000000000000000000001457564257700161265ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/steps/__init__.py000066400000000000000000000000001457564257700202250ustar00rootroot00000000000000pytest-bdd-7.1.2/tests/steps/test_common.py000066400000000000000000000255761457564257700210460ustar00rootroot00000000000000import textwrap from typing import Any, Callable from unittest import mock import pytest from pytest_bdd import given, parser, parsers, then, when from pytest_bdd.utils import collect_dumped_objects @pytest.mark.parametrize("step_fn, step_type", [(given, "given"), (when, "when"), (then, "then")]) def test_given_when_then_delegate_to_step(step_fn: Callable[..., Any], step_type: str) -> None: """Test that @given, @when, @then just delegate the work to @step(...). This way we don't have to repeat integration tests for each step decorator. """ # Simple usage, just the step name with mock.patch("pytest_bdd.steps.step", autospec=True) as step_mock: step_fn("foo") step_mock.assert_called_once_with("foo", type_=step_type, converters=None, target_fixture=None, stacklevel=1) # Advanced usage: step parser, converters, target_fixture, ... 
with mock.patch("pytest_bdd.steps.step", autospec=True) as step_mock: parser = parsers.re(r"foo (?P\d+)") step_fn(parser, converters={"n": int}, target_fixture="foo_n", stacklevel=3) step_mock.assert_called_once_with( name=parser, type_=step_type, converters={"n": int}, target_fixture="foo_n", stacklevel=3 ) def test_step_function_multiple_target_fixtures(pytester): pytester.makefile( ".feature", target_fixture=textwrap.dedent( """\ Feature: Multiple target fixtures for step function Scenario: A step can be decorated multiple times with different target fixtures Given there is a foo with value "test foo" And there is a bar with value "test bar" Then foo should be "test foo" And bar should be "test bar" """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import given, when, then, scenarios, parsers from pytest_bdd.utils import dump_obj scenarios("target_fixture.feature") @given(parsers.parse('there is a foo with value "{value}"'), target_fixture="foo") @given(parsers.parse('there is a bar with value "{value}"'), target_fixture="bar") def _(value): return value @then(parsers.parse('foo should be "{expected_value}"')) def _(foo, expected_value): dump_obj(foo) assert foo == expected_value @then(parsers.parse('bar should be "{expected_value}"')) def _(bar, expected_value): dump_obj(bar) assert bar == expected_value """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=1) [foo, bar] = collect_dumped_objects(result) assert foo == "test foo" assert bar == "test bar" def test_step_functions_same_parser(pytester): pytester.makefile( ".feature", target_fixture=textwrap.dedent( """\ Feature: A feature Scenario: A scenario Given there is a foo with value "(?P\\w+)" And there is a foo with value "testfoo" When pass Then pass """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import given, when, then, scenarios, parsers from pytest_bdd.utils import dump_obj scenarios("target_fixture.feature") STEP = 
r'there is a foo with value "(?P\\w+)"' @given(STEP) def _(): dump_obj(('str',)) @given(parsers.re(STEP)) def _(value): dump_obj(('re', value)) @when("pass") @then("pass") def _(): pass """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=1) [first_given, second_given] = collect_dumped_objects(result) assert first_given == ("str",) assert second_given == ("re", "testfoo") def test_user_implements_a_step_generator(pytester): """Test advanced use cases, like the implementation of custom step generators.""" pytester.makefile( ".feature", user_step_generator=textwrap.dedent( """\ Feature: A feature Scenario: A scenario Given I have 10 EUR And the wallet is verified And I have a wallet When I pay 1 EUR Then I should have 9 EUR in my wallet """ ), ) pytester.makepyfile( textwrap.dedent( """\ import re from dataclasses import dataclass, fields import pytest from pytest_bdd import given, when, then, scenarios, parsers from pytest_bdd.utils import dump_obj @dataclass class Wallet: verified: bool amount_eur: int amount_usd: int amount_gbp: int amount_jpy: int def pay(self, amount: int, currency: str) -> None: if not self.verified: raise ValueError("Wallet account is not verified") currency = currency.lower() field = f"amount_{currency}" setattr(self, field, getattr(self, field) - amount) @pytest.fixture def wallet__verified(): return False @pytest.fixture def wallet__amount_eur(): return 0 @pytest.fixture def wallet__amount_usd(): return 0 @pytest.fixture def wallet__amount_gbp(): return 0 @pytest.fixture def wallet__amount_jpy(): return 0 @pytest.fixture() def wallet( wallet__verified, wallet__amount_eur, wallet__amount_usd, wallet__amount_gbp, wallet__amount_jpy, ): return Wallet( verified=wallet__verified, amount_eur=wallet__amount_eur, amount_usd=wallet__amount_usd, amount_gbp=wallet__amount_gbp, amount_jpy=wallet__amount_jpy, ) def generate_wallet_steps(model_name="wallet", stacklevel=1): stacklevel += 1 @given("I have a wallet", 
target_fixture=model_name, stacklevel=stacklevel) def _(wallet): return wallet @given( parsers.re(r"the wallet is (?Pnot)?verified"), target_fixture=f"{model_name}__verified", stacklevel=2, ) def _(negation: str): if negation: return False return True # Generate steps for currency fields: for field in fields(Wallet): match = re.fullmatch(r"amount_(?P[a-z]{3})", field.name) if not match: continue currency = match["currency"] @given( parsers.parse(f"I have {{value:d}} {currency.upper()}"), target_fixture=f"{model_name}__amount_{currency}", stacklevel=2, ) def _(value: int, _currency=currency) -> int: dump_obj(f"given {value} {_currency.upper()}") return value @when( parsers.parse(f"I pay {{value:d}} {currency.upper()}"), stacklevel=2, ) def _(wallet: Wallet, value: int, _currency=currency) -> None: dump_obj(f"pay {value} {_currency.upper()}") wallet.pay(value, _currency) @then( parsers.parse(f"I should have {{value:d}} {currency.upper()} in my wallet"), stacklevel=2, ) def _(wallet: Wallet, value: int, _currency=currency) -> None: dump_obj(f"assert {value} {_currency.upper()}") assert getattr(wallet, f"amount_{_currency}") == value generate_wallet_steps() scenarios("user_step_generator.feature") """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=1) [given, pay, assert_] = collect_dumped_objects(result) assert given == "given 10 EUR" assert pay == "pay 1 EUR" assert assert_ == "assert 9 EUR" def test_step_catches_all(pytester): """Test that the @step(...) 
decorator works for all kind of steps.""" pytester.makefile( ".feature", step_catches_all=textwrap.dedent( """\ Feature: A feature Scenario: A scenario Given foo And foo parametrized 1 When foo And foo parametrized 2 Then foo And foo parametrized 3 """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import step, scenarios, parsers from pytest_bdd.utils import dump_obj scenarios("step_catches_all.feature") @step("foo") def _(): dump_obj("foo") @step(parsers.parse("foo parametrized {n:d}")) def _(n): dump_obj(("foo parametrized", n)) """ ) ) result = pytester.runpytest("-s") result.assert_outcomes(passed=1) objects = collect_dumped_objects(result) assert objects == ["foo", ("foo parametrized", 1), "foo", ("foo parametrized", 2), "foo", ("foo parametrized", 3)] def test_step_name_is_cached(): """Test that the step name is cached and not re-computed eache time.""" step = parser.Step(name="step name", type="given", indent=8, line_number=3, keyword="Given") assert step.name == "step name" # manipulate the step name directly and validate the cache value is still returned step._name = "incorrect step name" assert step.name == "step name" # change the step name using the property and validate the cache has been invalidated step.name = "new step name" assert step.name == "new step name" # manipulate the step lines and validate the cache value is still returned step.lines.append("step line 1") assert step.name == "new step name" # add a step line and validate the cache has been invalidated step.add_line("step line 2") assert step.name == "new step name\nstep line 1\nstep line 2" pytest-bdd-7.1.2/tests/steps/test_given.py000066400000000000000000000016771457564257700206620ustar00rootroot00000000000000"""Given tests.""" import textwrap def test_given_injection(pytester): pytester.makefile( ".feature", given=textwrap.dedent( """\ Feature: Given Scenario: Test given fixture injection Given I have injecting given Then foo should be "injected foo" """ 
), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import given, then, scenario @scenario("given.feature", "Test given fixture injection") def test_given(): pass @given("I have injecting given", target_fixture="foo") def _(): return "injected foo" @then('foo should be "injected foo"') def _(foo): assert foo == "injected foo" """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) pytest-bdd-7.1.2/tests/steps/test_unicode.py000066400000000000000000000057521457564257700211760ustar00rootroot00000000000000"""Tests for testing cases when we have unicode in feature file.""" import textwrap def test_steps_in_feature_file_have_unicode(pytester): pytester.makefile( ".feature", unicode=textwrap.dedent( """\ Feature: Юнікодні символи Scenario: Кроки в .feature файлі містять юнікод Given у мене є рядок який містить 'якийсь контент' Then I should see that the string equals to content 'якийсь контент' Scenario: Given names have unicode types Given I have an alias with a unicode type for foo Then foo should be "foo" """ ), ) pytester.makepyfile( textwrap.dedent( """\ import sys import pytest from pytest_bdd import parsers, given, then, scenario @scenario("unicode.feature", "Кроки в .feature файлі містять юнікод") def test_unicode(): pass @pytest.fixture def string(): return {"content": ""} @given(parsers.parse(u"у мене є рядок який містить '{content}'")) def _(content, string): string["content"] = content given(u"I have an alias with a unicode type for foo", target_fixture="foo") @then(parsers.parse("I should see that the string equals to content '{content}'")) def _(content, string): assert string["content"] == content """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_steps_in_py_file_have_unicode(pytester): pytester.makefile( ".feature", unicode=textwrap.dedent( """\ Feature: Юнікодні символи Scenario: Steps in .py file have unicode Given there is an other string with content 'якийсь контент' Then I should 
see that the other string equals to content 'якийсь контент' """ ), ) pytester.makepyfile( textwrap.dedent( """\ import pytest from pytest_bdd import given, then, scenario @scenario("unicode.feature", "Steps in .py file have unicode") def test_unicode(): pass @pytest.fixture def string(): return {"content": ""} @given("there is an other string with content 'якийсь контент'") def _(string): string["content"] = u"с каким-то контентом" @then("I should see that the other string equals to content 'якийсь контент'") def _(string): assert string["content"] == u"с каким-то контентом" """ ) ) result = pytester.runpytest() result.assert_outcomes(passed=1) pytest-bdd-7.1.2/tests/test_hooks.py000066400000000000000000000073411457564257700175310ustar00rootroot00000000000000import textwrap from pytest_bdd.utils import collect_dumped_objects def test_conftest_module_evaluated_twice(pytester): """Regression test for https://github.com/pytest-dev/pytest-bdd/issues/62""" pytester.makeconftest("") subdir = pytester.mkpydir("subdir") subdir.joinpath("conftest.py").write_text( textwrap.dedent( r""" def pytest_pyfunc_call(pyfuncitem): print('\npytest_pyfunc_call hook') def pytest_generate_tests(metafunc): print('\npytest_generate_tests hook') """ ) ) subdir.joinpath("test_foo.py").write_text( textwrap.dedent( r""" from pytest_bdd import scenario @scenario('foo.feature', 'Some scenario') def test_foo(): pass """ ) ) subdir.joinpath("foo.feature").write_text( textwrap.dedent( r""" Feature: The feature Scenario: Some scenario """ ) ) result = pytester.runpytest("-s") assert result.stdout.lines.count("pytest_pyfunc_call hook") == 1 assert result.stdout.lines.count("pytest_generate_tests hook") == 1 def test_item_collection_does_not_break_on_non_function_items(pytester): """Regression test for https://github.com/pytest-dev/pytest-bdd/issues/317""" pytester.makeconftest( """ import pytest @pytest.mark.tryfirst def pytest_collection_modifyitems(session, config, items): try: item_creator = 
CustomItem.from_parent # Only available in pytest >= 5.4.0 except AttributeError: item_creator = CustomItem items[:] = [item_creator(name=item.name, parent=item.parent) for item in items] class CustomItem(pytest.Item): def runtest(self): assert True """ ) pytester.makepyfile( """ def test_convert_me_to_custom_item_and_assert_true(): assert False """ ) result = pytester.runpytest() result.assert_outcomes(passed=1) def test_pytest_bdd_after_scenario_called_after_scenario(pytester): """Regression test for https://github.com/pytest-dev/pytest-bdd/pull/577""" pytester.makefile( ".feature", foo=textwrap.dedent( """\ Feature: A feature Scenario: Scenario 1 Given foo When bar Then baz Scenario: Scenario 2 When bar Then baz """ ), ) pytester.makepyfile( """ import pytest from pytest_bdd import given, when, then, scenarios scenarios("foo.feature") @given("foo") @when("bar") @then("baz") def _(): pass """ ) pytester.makeconftest( """ from pytest_bdd.utils import dump_obj def pytest_bdd_after_scenario(request, feature, scenario): dump_obj([feature, scenario]) """ ) result = pytester.runpytest("-s") result.assert_outcomes(passed=2) hook_calls = collect_dumped_objects(result) assert len(hook_calls) == 2 [(feature, scenario_1), (feature_2, scenario_2)] = hook_calls assert feature.name == feature_2.name == "A feature" assert scenario_1.name == "Scenario 1" assert scenario_2.name == "Scenario 2" def test_pytest_unconfigure_without_configure(pytester): """ Simulate a plugin forcing an exit during configuration before bdd is configured https://github.com/pytest-dev/pytest-bdd/issues/362 """ pytester.makeconftest( """ import pytest def pytest_configure(config): pytest.exit("Exit during configure", 0) """ ) result = pytester.runpytest() assert result.ret == 0 pytest-bdd-7.1.2/tests/utils.py000066400000000000000000000003241457564257700165010ustar00rootroot00000000000000from __future__ import annotations import pytest from packaging.utils import Version # We leave this here for the 
future as an easy way to do feature-based testing. PYTEST_VERSION = Version(pytest.__version__) pytest-bdd-7.1.2/tox.ini000066400000000000000000000016201457564257700151400ustar00rootroot00000000000000[tox] distshare = {homedir}/.tox/distshare envlist = py{3.8,3.9,3.10,3.11}-pytest{6.2,7.0,7.1,7.2,7.3,7.4,8.0,8.1,latest}-coverage py{3.12,3.13}-pytest{7.3,7.4,8.0,8.1,latest}-coverage py3.12-pytestlatest-xdist-coverage mypy [testenv] parallel_show_output = true setenv = coverage: _PYTEST_CMD=coverage run -m pytest xdist: _PYTEST_MORE_ARGS=-n3 -rfsxX deps = pytestlatest: pytest pytest8.1: pytest~=8.1.0 pytest8.0: pytest~=8.0.0 pytest7.4: pytest~=7.4.0 pytest7.3: pytest~=7.3.0 pytest7.2: pytest~=7.2.0 pytest7.1: pytest~=7.1.0 pytest7.0: pytest~=7.0.0 pytest6.2: pytest~=6.2.0 coverage: coverage[toml] xdist: pytest-xdist commands = {env:_PYTEST_CMD:pytest} {env:_PYTEST_MORE_ARGS:} {posargs:-vvl} [testenv:mypy] skip_install = true allowlist_externals = poetry commands_pre = poetry install --with=dev commands = mypy