pytest-console-scripts-1.4.1/.github/workflows/test.yml

name: Python package

on:
  push:
    branches: "*"
    tags: "*.*.*"
  pull_request:

jobs:
  tests:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: ["3.8", "3.9", "3.10", "3.11", "pypy3.9"]
        os: ["ubuntu-latest", "windows-latest"]
    steps:
    - uses: actions/checkout@v3
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v4
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install tox
    - name: Test with tox
      run: tox run -e py,report_ci,lint
    - uses: codecov/codecov-action@v3
      with:
        token: ${{ secrets.CODECOV_TOKEN }}

  package:
    runs-on: ubuntu-latest
    steps:
    - name: Checkout code
      uses: actions/checkout@v3
    - name: Install build dependencies
      run: pip install --upgrade build
    - name: Build distributions
      run: python -m build
    - name: Upload packages
      uses: actions/upload-artifact@v3
      with:
        name: python-dist
        path: dist/*
        retention-days: 1

  publish:
    needs: [tests, package]
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/')
    environment:
      name: release
      url: https://pypi.org/p/pytest-console-scripts
    permissions:
      id-token: write
    steps:
    - name: Download packages
      uses: actions/download-artifact@v3
      with:
        name: python-dist
        path: dist/
    - name: Publish package distributions to PyPI
      uses: pypa/gh-action-pypi-publish@release/v1

pytest-console-scripts-1.4.1/.gitignore

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
.pytest_cache

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask instance folder
instance/

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# IPython Notebook
.ipynb_checkpoints

# pyenv
.python-version

# PyCharm
.idea

pytest-console-scripts-1.4.1/CHANGELOG.md

# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [1.4.1] - 2023-05-29

### Removed

- Dropped support for Python 3.7
  [#72](https://github.com/kvas-it/pytest-console-scripts/pull/72)

### Fixed

- Fix loading scripts with non-UTF-8 encodings.
  [#77](https://github.com/kvas-it/pytest-console-scripts/pull/77)
- Fix missing output when a subprocess runner with `check=True` fails.
  [#78](https://github.com/kvas-it/pytest-console-scripts/pull/78)

## [1.4.0] - 2023-05-22

### Added

- Added type-hinting for all types, `pytest_console_scripts.ScriptRunner`
  can now be used to hint the `script_runner` fixture.
  [#62](https://github.com/kvas-it/pytest-console-scripts/pull/62)
- Added support for the `shell` and `check` keywords for in-process mode.
  These behave as closely to `subprocess.run` as possible.
- Script runners now take command arguments similar to `subprocess.run`,
  including support for PathLike objects.
  [#69](https://github.com/kvas-it/pytest-console-scripts/pull/69)

### Deprecated

- Passing command arguments in `*args` is now deprecated and will raise
  warnings. These should be wrapped in a list or tuple from now on, similar
  to `subprocess.run`.
  [#69](https://github.com/kvas-it/pytest-console-scripts/pull/69)

### Removed

- Dropped support for Python 3.6
  [#61](https://github.com/kvas-it/pytest-console-scripts/pull/61)

### Fixed

- Install-time dependencies have been fixed.
  [#56](https://github.com/kvas-it/pytest-console-scripts/issues/56)

## [1.3.1] - 2022-03-18

### Changed

- Removed `mock` dependency.
  [#53](https://github.com/kvas-it/pytest-console-scripts/pull/53)

## [1.3.0] - 2022-02-23

### Changed

- Added `python_requires` to the project.
  [#51](https://github.com/kvas-it/pytest-console-scripts/issues/51)

## [1.2.2] - 2022-01-06

### Added

- Add `print` method to allow results to be manually printed.
  [#49](https://github.com/kvas-it/pytest-console-scripts/issues/49)

### Fixed

- Avoid overwriting the global logging config of tested scripts.
  [#48](https://github.com/kvas-it/pytest-console-scripts/pull/48)

## [1.2.1] - 2021-09-28

### Removed

- Drop support for Python 3.5

## [1.2.0] - 2021-04-26

### Changed

- Locate the Python interpreter through `sys.executable`

### Fixed

- Do not rely on the Python interpreter being called `python`, as that
  command does not exist in certain environments.

## [1.1.0] - 2020-11-20

### Added

- Add option to suppress printing script run results.
  [#41](https://github.com/kvas-it/pytest-console-scripts/issues/41)

## [1.0.0] - 2020-10-06

### Added

- Support scripts that are not in `console_scripts`.
  [#17](https://github.com/kvas-it/pytest-console-scripts/issues/17)

pytest-console-scripts-1.4.1/LICENSE

The MIT License (MIT)

Copyright (c) 2016 Vasily Kuznetsov

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

pytest-console-scripts-1.4.1/MANIFEST.in

graft tests
include tox.ini
include LICENSE
include *.md
global-exclude *.py[cod] __pycache__

pytest-console-scripts-1.4.1/README.md

pytest-console-scripts
======================

[![PyPI](https://img.shields.io/pypi/v/pytest-console-scripts)](https://pypi.org/project/pytest-console-scripts/)
[![PyPI - License](https://img.shields.io/pypi/l/pytest-console-scripts)](https://github.com/kvas-it/pytest-console-scripts/blob/master/LICENSE)
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/kvas-it/pytest-console-scripts/test.yml)](https://github.com/kvas-it/pytest-console-scripts/actions)
[![codecov](https://codecov.io/gh/kvas-it/pytest-console-scripts/branch/master/graph/badge.svg?token=RfELxcqvpF)](https://codecov.io/gh/kvas-it/pytest-console-scripts)
[![GitHub issues](https://img.shields.io/github/issues/kvas-it/pytest-console-scripts)](https://github.com/kvas-it/pytest-console-scripts/issues)
[![GitHub pull requests](https://img.shields.io/github/issues-pr/kvas-it/pytest-console-scripts)](https://github.com/kvas-it/pytest-console-scripts/pulls)
[![GitHub commits since latest release (by date)](https://img.shields.io/github/commits-since/kvas-it/pytest-console-scripts/latest)](https://github.com/kvas-it/pytest-console-scripts/blob/master/CHANGELOG.md)

Pytest-console-scripts is a [pytest][1] plugin for running python scripts from
within tests. It's quite similar to `subprocess.run()`, but it also has an
in-process mode, where the scripts are executed by the interpreter that's
running `pytest` (using some amount of sandboxing). In-process mode
significantly reduces the run time of test suites that run many external
scripts. This speeds up development. In the CI environment subprocess mode can
be used to make sure the scripts also work (and behave the same) when run by a
fresh interpreter.

Requirements
------------

- Python 3.8+ or PyPy3,
- Pytest 4.0 or newer.

Installation
------------

You can install "pytest-console-scripts" via [pip][2] from [PyPI][3]:

```sh
$ pip install pytest-console-scripts
```

Normally you would add it as a test dependency in `tox.ini` (see [tox
documentation][9]).

Usage
-----

This plugin will run scripts installed via the `console_scripts` entry point
in `setup.py`, Python files in the current directory (or anywhere else, if
given the path), and Python scripts elsewhere on the path. It will also run
executables that are not Python scripts, but only in subprocess mode (there's
no benefit in using `pytest-console-scripts` for this; you should just use
`subprocess.run`).

Here's an example with a `console_scripts` entry point. Imagine we have a
python package `foo` with the following `setup.py`:

```py
setup(
    name='foo',
    version='0.0.1',
    py_modules=['foo'],
    entry_points={
        'console_scripts': ['foobar=foo:bar']
    },
)
```

We could use pytest-console-scripts to test the `foobar` script:

```py
def test_foo_bar(script_runner):
    result = script_runner.run(['foobar', '--version'])
    assert result.returncode == 0
    assert result.stdout == '3.2.1\n'
    assert result.stderr == ''

    script_runner.run('foobar --version', shell=True, check=True)
```

This would use the `script_runner` fixture provided by the plugin to run the
script and capture its output. The arguments of `script_runner.run` are the
command name of the script and any command line arguments that should be
passed to it. Additionally, the following keyword arguments can be used (a
combined example follows the list):

- `cwd` - set the working directory of the script under test.
- `env` - a dictionary with environment variables to use instead of the
  current environment.
- `stdin` - a file-like object that will be piped to standard input of the
  script.
- `check` - raises an exception if `returncode != 0`, defaults to False.
- `shell` - mimic shell execution, this should work well for simple cases,
  defaults to False.
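Here is a sketch that exercises several of these keyword arguments at once
(the `--greet` flag, the `GREETING` variable, and the script's behaviour are
hypothetical; `foobar` is the entry point from the example above):

```py
import io
import os

def test_foobar_with_options(script_runner):
    # Run the script with a copy of the current environment, feed it two
    # lines on standard input, and let check=True raise
    # subprocess.CalledProcessError on a non-zero exit code.
    env = os.environ.copy()
    env['GREETING'] = 'hello'  # hypothetical variable read by the script
    result = script_runner.run(
        ['foobar', '--greet'],
        env=env,
        stdin=io.StringIO('alice\nbob\n'),
        check=True,
    )
    assert result.returncode == 0
```
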
Type-hinting is also supported. You may type-hint the fixture with the
following code:

```py
from pytest_console_scripts import ScriptRunner

def test_foo_bar(script_runner: ScriptRunner) -> None:
    ...
```

Configuring script execution mode
---------------------------------

In the example above the `foobar` script would run in in-process mode (which
is the default). This is fast and good for quick iteration during
development. After we're happy with the functionality, it's time to run the
script in subprocess mode to simulate real invocation more closely.

There are several ways to do this. We can configure it via pytest
configuration (for example in `tox.ini`):

```ini
[pytest]
script_launch_mode = subprocess
```

We can give a command line option to pytest (this will override the
configuration file):

```sh
$ pytest --script-launch-mode=subprocess test_foobar.py
```

We can also mark individual tests to run in a specific mode:

```py
@pytest.mark.script_launch_mode('subprocess')
def test_foobar(script_runner):
    ...
```

Between these three methods, the test marker takes priority over the command
line option, which in turn overrides the configuration setting. All three can
take three possible values: "inprocess", "subprocess", and "both" (which will
cause the test to be run twice: in in-process and in subprocess modes).

Interaction with mocking
------------------------

It is possible to mock objects and functions inside of console scripts when
they are run using `pytest-console-scripts`, but only in in-process mode. When
the script is run in subprocess mode, it is executed by a separate Python
interpreter and the test can't mock anything inside of it.

Another limitation of mocking is that with simple Python scripts that are not
installed via the [`console_scripts` entry point][14], mocking of objects
inside of the main script will not work. The reason for that is that when we
run `myscript.py` with `$ python myscript.py`, the script gets imported into
the `__main__` namespace instead of the `myscript` namespace. Our patching of
`myscript.myfunction` will have no effect on what the code in the `__main__`
namespace sees when it's calling `myfunction` defined in the same file. See
[this stackoverflow answer](https://stackoverflow.com/a/66693954/1595738)
for some ideas of how to get around this.
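As a sketch of the case that does work: in in-process mode a test can patch a
function in a module that the script imports (this mirrors the plugin's own
test suite, which patches `os.path.basename`; the `foobar` script here is
assumed to print the basename of some path):

```py
import os.path

import pytest

@pytest.mark.script_launch_mode('inprocess')
def test_foobar_with_mocking(script_runner, monkeypatch):
    # The script and the test share the os.path module in-process, so the
    # patched function is what the script ends up calling.
    monkeypatch.setattr(os.path, 'basename', lambda path: 'mocked')
    result = script_runner.run(['foobar'])
    assert result.success
    # Holds under the assumption that foobar prints a basename.
    assert 'mocked' in result.stdout
```
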
Suppressing the printing of script run results
----------------------------------------------

When tests involving `pytest-console-scripts` fail, it tends to be quite
useful to see the output of the scripts that were executed in them. We try to
be helpful and print it out just before returning the result from
`script_runner.run()`.

Normally PyTest [captures][12] all the output during a test run and it's not
shown to the user unless some tests fail. This is exactly what we want.
However, in some cases it might be useful to disable the output capturing and
PyTest provides [ways to do it][13]. When capturing is disabled, all test run
results will be printed out and this might make it harder to inspect the
other output of the tests.

To deal with this, `pytest-console-scripts` has an option to disable the
printing of script run results:

```sh
$ pytest --hide-run-results test_foobar.py
```

It's also possible to disable it just for one script run:

```py
result = script_runner.run('foobar', print_result=False)
```

When printing of script run results is disabled, script output won't be
visible even when the test fails. Unfortunately there's no automatic way to
print it only if the test fails, because by the time a script run completes
we don't know whether the test will fail or not. It's possible to do it
manually from the test by using:

```py
result.print()
```

This, combined with `--hide-run-results` or `print_result=False`, can be used
to only print interesting run results when capturing is off.

Package installation and testing during development
---------------------------------------------------

Since `pytest-console-scripts` relies on the scripts being located in the
path, it can only run the console scripts from packages that have been
installed (if you are interested in working on removing this limitation, take
a look at
[this ticket](https://github.com/kvas-it/pytest-console-scripts/issues/34)
and in particular
[this comment](https://github.com/kvas-it/pytest-console-scripts/issues/34#issuecomment-649497564)).
If you want to run the tests quickly during development, the additional
installation step would add a significant overhead and slow you down.

There's a way around this: install your package in [development mode][10]
using `pip install -e .`. If you use [tox][9], you can take one of its
existing virtualenvs (they live in `.tox/`). Otherwise create a
[virtualenv][11] just for development, activate it, and run `pip install -e .`
to install your package in development mode. You will need to re-install
every time you add a new console script, but otherwise all the changes to
your code will be immediately picked up by the tests.

Contributing
------------

Contributions are very welcome. Tests can be run with `tox`; please ensure
the coverage at least stays the same before you submit a pull request.

License
-------

Distributed under the terms of the [MIT][8] license,
"pytest-console-scripts" is free and open source software.

Issues
------

If you encounter any problems, please [file an issue][7] along with a
detailed description.

----

Pytest-console-scripts was initially generated with [Cookiecutter][4] along
with [@hackebrot][5]'s [Cookiecutter-pytest-plugin][6] template.
[1]: https://github.com/pytest-dev/pytest
[2]: https://pypi.python.org/pypi/pip/
[3]: https://pypi.python.org/pypi
[4]: https://github.com/audreyr/cookiecutter
[5]: https://github.com/hackebrot
[6]: https://github.com/pytest-dev/cookiecutter-pytest-plugin
[7]: https://github.com/kvas-it/pytest-console-scripts/issues
[8]: http://opensource.org/licenses/MIT
[9]: https://tox.readthedocs.org/en/latest/
[10]: https://setuptools.pypa.io/en/latest/userguide/development_mode.html
[11]: https://docs.python.org/3/library/venv.html
[12]: https://docs.pytest.org/en/stable/capture.html
[13]: https://docs.pytest.org/en/stable/capture.html#setting-capturing-methods-or-disabling-capturing
[14]: https://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point

pytest-console-scripts-1.4.1/pytest_console_scripts/__init__.py

from __future__ import annotations

import contextlib
import io
import logging
import os
import shlex
import shutil
import subprocess
import sys
import traceback
import warnings
from pathlib import Path
from typing import Any, Callable, Iterator, Sequence, Union
from unittest import mock

import pytest

if sys.version_info < (3, 10):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata

_StrOrPath = Union[str, os.PathLike]
"""A command line argument type as a str or path."""

_Command = Union[_StrOrPath, Sequence[_StrOrPath]]
"""A command-like type compatible with subprocess.run."""

StreamMock = io.StringIO


def pytest_addoption(parser: pytest.Parser) -> None:
    group = parser.getgroup('console-scripts')
    group.addoption(
        '--script-launch-mode',
        metavar='inprocess|subprocess|both',
        action='store',
        dest='script_launch_mode',
        default=None,
        help='how to run python scripts under test (default: inprocess)'
    )
    group.addoption(
        '--hide-run-results',
        action='store_true',
        dest='hide_run_results',
        default=False,
        help="don't print out script run results on failures or when "
             'output capturing is disabled'
    )
    parser.addini(
        'script_launch_mode',
        'how to run python scripts under test (inprocess|subprocess|both)'
    )


def pytest_configure(config: pytest.Config) -> None:
    config.addinivalue_line(
        'markers',
        'script_launch_mode: how to run python scripts under test '
        '(inprocess|subprocess|both)',
    )


def _get_mark_mode(metafunc: pytest.Metafunc) -> str | None:
    """Return launch mode as indicated by test function marker or None."""
    marker = metafunc.definition.get_closest_marker('script_launch_mode')
    if marker:
        return str(marker.args[0])
    return None


def _is_nonexecutable_python_file(command: _StrOrPath) -> bool:
    """Check if `command` is a Python file with no executable mode set."""
    command = Path(command)
    mode = command.stat().st_mode
    if mode & os.X_OK:
        return False
    return command.suffix == '.py'


def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """Parametrize script_launch_mode fixture.

    Checks the configuration sources in this order:

    - `script_launch_mode` mark on the test,
    - `--script-launch-mode` option,
    - `script_launch_mode` configuration option in [pytest] section of the
      pytest config file.

    This process yields a value that can be one of:

    - "inprocess" -- The script will be run via loading its main function
      into the test runner process and mocking the environment.
    - "subprocess" -- The script will be run via `subprocess` module.
- "both" -- The test will be run twice: once in inprocess mode and once in subprocess mode. - None -- Same as "inprocess". """ if 'script_launch_mode' not in metafunc.fixturenames: return mark_mode = _get_mark_mode(metafunc) option_mode = metafunc.config.option.script_launch_mode config_mode = metafunc.config.getini('script_launch_mode') mode = mark_mode or option_mode or config_mode or 'inprocess' if mode in {'inprocess', 'subprocess'}: metafunc.parametrize('script_launch_mode', [mode], indirect=True) elif mode == 'both': metafunc.parametrize('script_launch_mode', ['inprocess', 'subprocess'], indirect=True) else: raise ValueError(f'Invalid script launch mode: {mode}') class RunResult: """Result of running a script.""" def __init__( self, returncode: int, stdout: str, stderr: str, print_result: bool ) -> None: self.success = returncode == 0 self.returncode = returncode self.stdout = stdout self.stderr = stderr if print_result: self.print() def print(self) -> None: print('# Script return code:', self.returncode) print('# Script stdout:', self.stdout, sep='\n') print('# Script stderr:', self.stderr, sep='\n') def _handle_command_args( command: _Command, *args: _StrOrPath, shell: bool = False, stacklevel: int = 1, ) -> Sequence[_StrOrPath]: """Return command arguments in a consistent list format. If shell=True then this function tries to mimic local shell execution. """ if shell: if args or not isinstance(command, (str, os.PathLike)): command = subprocess.list2cmdline( str(arg) for arg in _handle_command_args( command, *args, shell=False, stacklevel=stacklevel + 1 ) ) command = shlex.split(str(command), posix=os.name == 'posix') args = () if args: warnings.warn( 'script_runner commands should be passed as a single sequence,' ' not as multiple arguments.' '\nReplace `script_runner.run(a, b, c)` calls with' ' `script_runner.run([a, b, c])`', DeprecationWarning, stacklevel=stacklevel + 1, ) if not isinstance(command, (str, os.PathLike)): return [*command, *args] return [command, *args] if isinstance(command, (str, os.PathLike)): return [command] return command @contextlib.contextmanager def _patch_environ(new_environ: dict[str, str] | None) -> Iterator[None]: """Replace the environment for the duration of a context.""" if new_environ is None: yield return old_environ = os.environ.copy() os.environ.clear() os.environ.update(new_environ) yield os.environ.clear() os.environ.update(old_environ) @contextlib.contextmanager def _chdir_context(new_dir: _StrOrPath | None) -> Iterator[None]: """Replace the current directory for the duration of a context.""" if new_dir is None: yield return old_cwd = os.getcwd() os.chdir(new_dir) yield os.chdir(old_cwd) @contextlib.contextmanager def _push_and_reset_logger() -> Iterator[None]: """Do a very basic reset of the root logger and restore its config on exit. This allows scripts to call logging.basicConfig(...) and have it work as expected. It might not work for more sophisticated logging setups but it's simple and covers the basic usage whereas implementing a comprehensive fix is impossible in a compatible way. 
""" logger = logging.getLogger() old_handlers = logger.handlers old_disabled = logger.disabled old_level = logger.level logger.handlers = [] logger.disabled = False logger.setLevel(logging.NOTSET) yield # Restore logger to previous configuration logger.handlers = old_handlers logger.disabled = old_disabled logger.setLevel(old_level) class ScriptRunner: """Fixture for running python scripts under test.""" def __init__( self, launch_mode: str, rootdir: _StrOrPath, print_result: bool = True ) -> None: assert launch_mode in {'inprocess', 'subprocess'} self.launch_mode = launch_mode self.print_result = print_result self.rootdir = rootdir def __repr__(self) -> str: return f'' def run( self, command: _Command, *arguments: _StrOrPath, print_result: bool | None = None, shell: bool = False, cwd: _StrOrPath | None = None, env: dict[str, str] | None = None, stdin: io.IOBase | None = None, check: bool = False, **options: Any, ) -> RunResult: if print_result is None: print_result = self.print_result if print_result: print('# Running console script:', command, *arguments) if self.launch_mode == 'inprocess': run_function = self.run_inprocess else: run_function = self.run_subprocess return run_function( command, *arguments, print_result=print_result, shell=shell, cwd=cwd, env=env, stdin=stdin, check=check, _stacklevel=2, **options, ) @staticmethod def _locate_script( command: _StrOrPath, *, cwd: _StrOrPath | None, env: dict[str, str] | None, ) -> Path: """Locate script in PATH or in current directory.""" script_path = shutil.which( command, path=env.get('PATH', None) if env is not None else None, ) if script_path is not None: return Path(script_path) cwd = cwd if cwd is not None else os.getcwd() return Path(cwd, command).resolve(strict=True) @classmethod def _load_script( cls, command: _StrOrPath, *, cwd: _StrOrPath | None, env: dict[str, str] | None, ) -> Callable[[], int | None]: """Load target script via entry points or compile/exec.""" if isinstance(command, str): entry_points = tuple( importlib_metadata.entry_points( group='console_scripts', name=command ) ) if entry_points: def console_script() -> int | None: s: Callable[[], int | None] = entry_points[0].load() return s() return console_script script_path = cls._locate_script(command, cwd=cwd, env=env) def exec_script() -> int: compiled = compile( script_path.read_bytes(), str(script_path), 'exec', flags=0 ) exec(compiled, {'__name__': '__main__'}) return 0 return exec_script @classmethod def run_inprocess( cls, command: _Command, *arguments: _StrOrPath, shell: bool = False, cwd: _StrOrPath | None = None, env: dict[str, str] | None = None, print_result: bool = True, stdin: io.IOBase | None = None, check: bool = False, _stacklevel: int = 1, **options: Any, ) -> RunResult: for key in options: warnings.warn( f'Keyword argument {key!r} was ignored.' 
                '\nConsider using subprocess mode or raising an issue.',
                stacklevel=_stacklevel + 1,
            )

        cmd_args = _handle_command_args(
            command, *arguments, shell=shell, stacklevel=_stacklevel + 1
        )
        script = cls._load_script(cmd_args[0], cwd=cwd, env=env)
        cmd_args = [str(cmd) for cmd in cmd_args]
        stdin_stream = stdin if stdin is not None else StreamMock()
        stdout_stream = StreamMock()
        stderr_stream = StreamMock()
        with contextlib.ExitStack() as stack:
            stack.enter_context(mock.patch('sys.stdin', new=stdin_stream))
            stack.enter_context(contextlib.redirect_stdout(stdout_stream))
            stack.enter_context(contextlib.redirect_stderr(stderr_stream))
            stack.enter_context(mock.patch('sys.argv', new=cmd_args))
            stack.enter_context(_push_and_reset_logger())
            stack.enter_context(_patch_environ(env))
            stack.enter_context(_chdir_context(cwd))

            try:
                returncode = script()
            except SystemExit as exc:
                if isinstance(exc.code, str):
                    stderr_stream.write(f'{exc}\n')
                    returncode = 1
                else:
                    returncode = exc.code
            except Exception:
                returncode = 1
                try:
                    et, ev, tb = sys.exc_info()
                    assert tb
                    # Hide current frame from the stack trace.
                    traceback.print_exception(et, ev, tb.tb_next)
                finally:
                    del tb

        result = RunResult(
            returncode or 0,  # None also means success
            stdout_stream.getvalue(),
            stderr_stream.getvalue(),
            print_result,
        )
        if check and returncode:
            raise subprocess.CalledProcessError(
                returncode, cmd_args, result.stdout, result.stderr,
            )
        return result

    @classmethod
    def run_subprocess(
        cls,
        command: _Command,
        *arguments: _StrOrPath,
        print_result: bool = True,
        shell: bool = False,
        cwd: _StrOrPath | None = None,
        env: dict[str, str] | None = None,
        stdin: io.IOBase | None = None,
        check: bool = False,
        universal_newlines: bool = True,
        _stacklevel: int = 1,
        **options: Any,
    ) -> RunResult:
        stdin_input: str | bytes | None = None
        if stdin is not None:
            stdin_input = stdin.read()

        script_path = cls._locate_script(
            _handle_command_args(
                command, *arguments, shell=shell, stacklevel=_stacklevel + 1
            )[0],
            cwd=cwd,
            env=env,
        )
        if arguments:
            command = _handle_command_args(
                command, *arguments, shell=shell, stacklevel=_stacklevel + 1
            )
        if _is_nonexecutable_python_file(script_path):
            command = _handle_command_args(
                command, shell=shell, stacklevel=_stacklevel + 1
            )
            command = [sys.executable or 'python', *command]

        try:
            cp = subprocess.run(
                command,
                input=stdin_input,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=shell,
                cwd=cwd,
                env=env,
                check=check,
                universal_newlines=universal_newlines,
                **options,
            )
        except subprocess.CalledProcessError as exc:
            RunResult(exc.returncode, exc.stdout, exc.stderr, print_result)
            raise

        return RunResult(cp.returncode, cp.stdout, cp.stderr, print_result)


@pytest.fixture
def script_launch_mode(request: pytest.FixtureRequest) -> str:
    return str(request.param)


@pytest.fixture
def script_cwd(tmp_path: Path) -> Path:
    work_dir = tmp_path / 'script-cwd'
    work_dir.mkdir()
    return work_dir


@pytest.fixture
def script_runner(
    request: pytest.FixtureRequest, script_cwd: Path, script_launch_mode: str
) -> ScriptRunner:
    print_result = not request.config.getoption('--hide-run-results')
    return ScriptRunner(script_launch_mode, script_cwd, print_result)

pytest-console-scripts-1.4.1/pytest_console_scripts/py.typed

pytest-console-scripts-1.4.1/setup.cfg

[aliases]
test=pytest

pytest-console-scripts-1.4.1/setup.py
from pathlib import Path

from setuptools import setup

THIS_DIR = Path(__file__).parent
README_TEXT = (THIS_DIR / 'README.md').read_text(encoding='utf-8')

setup(
    name='pytest-console-scripts',
    use_scm_version=True,
    author='Vasily Kuznetsov',
    author_email='kvas.it@gmail.com',
    maintainer='Vasily Kuznetsov, Kyle Benesch',
    maintainer_email='kvas.it@gmail.com, 4b796c65+github@gmail.com',
    license='MIT',
    url='https://github.com/kvas-it/pytest-console-scripts',
    description='Pytest plugin for testing console scripts',
    long_description=README_TEXT,
    long_description_content_type='text/markdown',
    packages=['pytest_console_scripts'],
    package_data={'pytest_console_scripts': ['py.typed']},
    install_requires=[
        'pytest >=4.0.0',
        "importlib_metadata >=3.6; python_version < '3.10'",
    ],
    python_requires='>=3.8',
    setup_requires=['setuptools-scm'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Pytest',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
    ],
    entry_points={
        'pytest11': [
            'console-scripts = pytest_console_scripts',
        ],
    },
)

pytest-console-scripts-1.4.1/tests/conftest.py

pytest_plugins = 'pytester'

pytest-console-scripts-1.4.1/tests/test_console_scripts.py

from __future__ import annotations

import pytest


@pytest.fixture(params=[None, 'inprocess', 'subprocess', 'both'])
def launch_mode_conf(request: pytest.FixtureRequest) -> str | None:
    """Configured launch mode (None|'inprocess'|'subprocess'|'both')."""
    assert request.param is None or isinstance(request.param, str)
    return request.param


@pytest.fixture
def launch_modes(launch_mode_conf: str | None) -> set[str]:
    """Set of launch modes in which the tests will actually be run.
    The value of this fixture depends on the value of `launch_mode_conf`:

    - 'inprocess'  -> {'inprocess'}
    - 'subprocess' -> {'subprocess'}
    - 'both'       -> {'inprocess', 'subprocess'}
    - None         -> {'inprocess'}
    """
    if launch_mode_conf == 'both':
        return {'inprocess', 'subprocess'}
    if launch_mode_conf is not None:
        return {launch_mode_conf}
    return {'inprocess'}


class RunTest:
    def __init__(self, testdir: pytest.Testdir) -> None:
        self.testdir = testdir

    def __call__(
        self,
        script: str,
        passed: int = 1,
        skipped: int = 0,
        failed: int = 0,
        launch_mode_conf: str | None = None
    ) -> pytest.RunResult:
        self.testdir.makepyfile(script)
        args = []
        if launch_mode_conf is not None:
            args.append('--script-launch-mode=' + launch_mode_conf)
        result = self.testdir.runpytest(*args)
        print('\n'.join(['pytest stdout:'] + result.outlines +
                        ['pytest stderr:'] + result.errlines))
        result.assert_outcomes(passed=passed, skipped=skipped, failed=failed)
        return result


@pytest.fixture
def run_test(testdir: pytest.Testdir) -> RunTest:
    return RunTest(testdir)


CHECK_LAUNCH_MODE = """
def test_both(script_runner, accumulator=set()):
    assert script_runner.launch_mode in {}
    assert script_runner.launch_mode not in accumulator
    accumulator.add(script_runner.launch_mode)
"""


def test_command_line_option(
    run_test: RunTest, launch_mode_conf: str | None, launch_modes: set[str]
) -> None:
    run_test(
        CHECK_LAUNCH_MODE.format(launch_modes),
        passed=len(launch_modes),
        launch_mode_conf=launch_mode_conf
    )


def test_config_option(
    run_test: RunTest,
    testdir: pytest.Testdir,
    launch_mode_conf: str | None,
    launch_modes: set[str],
) -> None:
    if launch_mode_conf is not None:
        testdir.makeini(f"""
            [pytest]
            script_launch_mode = {launch_mode_conf}
        """)
    run_test(
        CHECK_LAUNCH_MODE.format(launch_modes),
        passed=len(launch_modes)
    )


def test_override_launch_mode_with_mark(
    run_test: RunTest, launch_mode_conf: str | None
) -> None:
    run_test(
        """
import pytest

@pytest.mark.script_launch_mode('inprocess')
def test_inprocess(script_runner):
    assert script_runner.launch_mode == 'inprocess'

@pytest.mark.script_launch_mode('subprocess')
def test_subprocess(script_runner):
    assert script_runner.launch_mode == 'subprocess'

@pytest.mark.script_launch_mode('both')
def test_both(script_runner, accumulator=set()):
    assert script_runner.launch_mode not in accumulator
    accumulator.add(script_runner.launch_mode)
        """,
        passed=4,
        launch_mode_conf=launch_mode_conf
    )


def test_help_message(testdir: pytest.Testdir) -> None:
    result = testdir.runpytest(
        '--help',
    )
    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        'console-scripts:',
        '*--script-launch-mode=*',
        '*--hide-run-results*',
    ])

pytest-console-scripts-1.4.1/tests/test_run_scripts.py

"""Test running of scripts with various modes and options."""
from __future__ import annotations

import contextlib
import importlib
import io
import os
import sys
from pathlib import Path
from subprocess import CalledProcessError
from types import ModuleType
from typing import Any, ContextManager
from unittest import mock

import pytest

from pytest_console_scripts import ScriptRunner


@pytest.fixture(params=['inprocess', 'subprocess'])
def launch_mode(request: pytest.FixtureRequest) -> str:
    """Launch mode: inprocess|subprocess."""
    return str(request.param)


@pytest.fixture()
def console_script(tmp_path: Path) -> Path:
    """Python script to use in tests."""
    script = tmp_path / 'script.py'
    script.write_text('#!/usr/bin/env python\nprint("foo")')
    return script

@pytest.mark.script_launch_mode('both') def test_not_installed( console_script: Path, script_runner: ScriptRunner ) -> None: result = script_runner.run(str(console_script)) assert result.success assert result.stdout == 'foo\n' assert result.stderr == '' @pytest.mark.xfail( sys.platform == "win32", reason="Windows does not treat Python scripts as executables." ) @pytest.mark.script_launch_mode('both') def test_elsewhere_in_the_path( console_script: Path, script_runner: ScriptRunner ) -> None: console_script.chmod(0o777) env = os.environ.copy() env["PATH"] = f"{console_script.parent}{os.pathsep}{env['PATH']}" result = script_runner.run(console_script.name, env=env) assert result.success assert result.stdout == 'foo\n' assert result.stderr == '' @pytest.mark.script_launch_mode('both') def test_run_pytest( tmp_path: Path, console_script: Path, script_runner: ScriptRunner, launch_mode: str ) -> None: console_script.write_text('import os;print(os.getpid())') test = tmp_path / f'test_{launch_mode}.py' compare = '==' if launch_mode == 'inprocess' else '!=' test.write_text( f""" import os def test_script(script_runner): result = script_runner.run(R'''{console_script}''') assert result.success assert result.stdout {compare} str(os.getpid()) + '\\n' assert result.stderr == '' """ ) # Here we're testing two things: # # - pytest is a Python script that's installed in the test environment, so # we'll use `script_runner` fixture to run it -- this tests execution of # installed scripts from the path. # - The pytest that we run will run a test that uses `script_runner` # fixture to run another script. We're going to pass --script-launch-mode # option to pytest and will check that the execution of the inner script # is performed in accordance with its value. # # We're also testing all 4 combinations of inprocess/subprocess modes for # inner and outer script runners. result = script_runner.run( ['pytest', test, f'--script-launch-mode={launch_mode}'] ) assert result.success @pytest.mark.script_launch_mode('inprocess') def test_return_None( console_script: Path, script_runner: ScriptRunner ) -> None: """Check that entry point function returning None is counted as success.""" # Many console_scripts entry point functions return 0 on success but not # all of them do. Returning `None` is also allowed and would be translated # to return code 0 when run normally via wrapper. This test checks that we # handle this case properly in inprocess mode. 
console_script.write_text( """ import sys print("Foo") sys.exit(None) """ ) result = script_runner.run(str(console_script)) assert result.success assert 'Foo' in result.stdout @pytest.mark.script_launch_mode('inprocess') def test_return_code_uncommon( console_script: Path, script_runner: ScriptRunner ) -> None: """Check uncommon return codes.""" console_script.write_text( """ import sys sys.exit(2) """ ) assert script_runner.run(str(console_script)).returncode == 2 @pytest.mark.script_launch_mode('both') def test_abnormal_exit( console_script: Path, script_runner: ScriptRunner ) -> None: console_script.write_text('import sys;sys.exit("boom")') result = script_runner.run(str(console_script)) assert not result.success assert result.stdout == '' assert result.stderr == 'boom\n' @pytest.mark.script_launch_mode('both') def test_exception(console_script: Path, script_runner: ScriptRunner) -> None: console_script.write_text('raise TypeError("boom")') result = script_runner.run(str(console_script)) assert not result.success assert result.stdout == '' assert 'TypeError: boom' in result.stderr def test_cwd( console_script: Path, script_runner: ScriptRunner, tmp_path: Path, ) -> None: """Script starts in dir given by cwd arg and cwd changes are contained.""" dir1 = tmp_path / 'dir1' dir1.mkdir() dir2 = tmp_path / 'dir2' dir2.mkdir() console_script.write_text( f""" import os print(os.getcwd()) os.chdir(R'''{dir2}''') print(os.getcwd()) """ ) mydir = os.getcwd() result = script_runner.run(str(console_script), cwd=str(dir1)) assert result.success assert result.stdout == f'{dir1}\n{dir2}\n' assert os.getcwd() == mydir @pytest.mark.script_launch_mode('both') def test_env(console_script: Path, script_runner: ScriptRunner) -> None: """Script receives environment and env changes don't escape to test.""" console_script.write_text( """ import os print(os.environ['FOO']) os.environ['FOO'] = 'baz' """ ) env = os.environ.copy() env['FOO'] = 'bar' result = script_runner.run(str(console_script), env=env) assert result.success assert result.stdout == 'bar\n' assert 'FOO' not in os.environ @pytest.mark.script_launch_mode('both') def test_stdin(console_script: Path, script_runner: ScriptRunner) -> None: console_script.write_text( """ import sys for line in sys.stdin: sys.stdout.write('simon says ' + line) sys.stderr.write('error says ' + line) """ ) stdin = io.StringIO('foo\nbar') result = script_runner.run(str(console_script), stdin=stdin) assert result.success assert result.stdout == 'simon says foo\nsimon says bar' assert result.stderr == 'error says foo\nerror says bar' def test_logging(console_script: Path, script_runner: ScriptRunner) -> None: """Test that the script can perform logging initialization.""" console_script.write_text( """ import logging, sys logging.basicConfig(stream=sys.stderr, level=logging.INFO) logging.debug('hidden') logging.info('shown') """ ) result = script_runner.run(str(console_script)) assert result.success assert result.stderr == 'INFO:root:shown\n' @pytest.mark.parametrize('fail', [True, False]) @pytest.mark.parametrize('check', [True, False]) def test_print_stdio_on_error( console_script: Path, script_runner: ScriptRunner, tmp_path: Path, fail: bool, check: bool, launch_mode: str, ) -> None: """Output of the script is printed when the test fails.""" console_script.write_text('print("12345")\nraise Exception("54321")') test = tmp_path / f'test_{fail}_{check}_{launch_mode}.py' command = [str(console_script), 'arg'] test.write_text( f""" import subprocess def test_fail(script_runner): 
try: ret = script_runner.run({command}, check={check}) except subprocess.CalledProcessError as exc: assert (exc.returncode == 0) is {fail} else: assert ret.success is {fail} """ ) result = script_runner.run( ['pytest', test, f'--script-launch-mode={launch_mode}'] ) assert result.success != fail if fail: assert (f'# Running console script: {command}\n' in result.stdout) assert '# Script return code: 1\n' in result.stdout assert '# Script stdout:\n12345\n' in result.stdout assert '# Script stderr:\nTraceback' in result.stdout assert 'Exception: 54321' in result.stdout else: assert '# Running console script' not in result.stdout assert '12345' not in result.stdout assert '54321' not in result.stdout @pytest.mark.script_launch_mode('inprocess') def test_mocking( console_script: Path, script_runner: ScriptRunner, monkeypatch: pytest.MonkeyPatch ) -> None: """Test mocking in of console scripts (in-process mode only). Note: we can't mock objects in the script itself because it will not be imported via normal import system but we can mock anything in the modules that the script imports. """ console_script.write_text( """ import os print(os.path.basename('foo')) """ ) monkeypatch.setattr(os.path, 'basename', lambda foo: 'bar') result = script_runner.run(str(console_script)) assert result.success assert result.stdout == 'bar\n' def test_hide_run_result_arg( tmp_path: Path, console_script: Path, script_runner: ScriptRunner ) -> None: """Disable printing of the RunResult to stdout with print_result=False.""" console_script.write_text('print("the answer is 42")') test = tmp_path / 'test_hrra.py' test.write_text( f""" import pytest @pytest.mark.script_launch_mode('both') def test_script(script_runner): script_runner.run(R'''{console_script}''', print_result=False) """ ) result = script_runner.run(['pytest', '-s', test]) assert result.success assert 'the answer is 42' not in result.stdout assert 'Running console script' not in result.stdout def test_hide_run_result_opt( tmp_path: Path, console_script: Path, script_runner: ScriptRunner ) -> None: """Disable printing of the RunResult to stdout with print_result=False.""" console_script.write_text('print("the answer is 42")') test = tmp_path / 'test_hrro.py' test.write_text( f""" import pytest @pytest.mark.script_launch_mode('both') def test_script(script_runner): script_runner.run(R'''{console_script}''') """ ) result = script_runner.run(['pytest', '-s', '--hide-run-results', test]) assert result.success assert 'the answer is 42' not in result.stdout assert 'Running console script' not in result.stdout class MockEntryPoint: module: ModuleType def __init__(self, exec_path: str | Path): self.exec_path = exec_path def load(self) -> Any: base, module = os.path.split(self.exec_path) module_name, _ = os.path.splitext(module) sys.path.append(base) self.module = importlib.import_module(module_name) sys.path.pop(-1) return self.module.run @pytest.mark.script_launch_mode('inprocess') def test_global_logging( tmp_path: Path, console_script: Path, script_runner: ScriptRunner ) -> None: """Load global values when executing from importlib.metadata""" test = tmp_path / 'test_entry_point.py' test.write_text( """ import logging logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) def run() -> None: LOGGER.debug('DEBUG') LOGGER.info('INFO') LOGGER.warning('WARNING') """ ) if sys.version_info < (3, 10): patched_func = 'importlib_metadata.entry_points' else: patched_func = 'importlib.metadata.entry_points' with mock.patch( patched_func, 
        mock.MagicMock(return_value=[MockEntryPoint(str(test))]),
    ):
        result = script_runner.run(str(console_script))

    assert result.success
    assert 'INFO:test_entry_point:INFO\n' in result.stderr
    assert 'DEBUG\n' not in result.stderr


@pytest.mark.script_launch_mode('both')
def test_shell(
    console_script: Path, script_runner: ScriptRunner
) -> None:
    console_script.chmod(0o777)

    result = script_runner.run(
        f"{console_script} --test", shell=True, check=True
    )
    assert result.stdout == 'foo\n'
    assert result.stderr == ''

    result = script_runner.run(
        [str(console_script), "--test"], shell=True, check=True
    )
    assert result.stdout == 'foo\n'
    assert result.stderr == ''


@pytest.mark.script_launch_mode('both')
def test_deprecated_args(
    console_script: Path, script_runner: ScriptRunner
) -> None:
    console_script.write_text(
        """
import sys
print(sys.argv[1:])
"""
    )

    with pytest.warns(match=r".*multiple arguments."):
        result = script_runner.run(console_script, 'A', 'B', check=True)
    assert result.stdout == "['A', 'B']\n"

    with pytest.warns(match=r".*multiple arguments."):
        result = script_runner.run([console_script, 'C'], 'D', check=True)
    assert result.stdout == "['C', 'D']\n"


@pytest.mark.script_launch_mode('both')
def test_check(
    console_script: Path, script_runner: ScriptRunner
) -> None:
    console_script.write_text("""import sys; sys.exit(1)""")

    with pytest.raises(CalledProcessError, match='.*non-zero exit status 1'):
        script_runner.run(str(console_script), check=True)


@pytest.mark.script_launch_mode('both')
def test_ignore_universal_newlines(
    console_script: Path, script_runner: ScriptRunner
) -> None:
    expectation: dict[str, ContextManager[Any]] = {
        'inprocess': pytest.warns(match=r"Keyword argument .* was ignored"),
        'subprocess': contextlib.nullcontext(),
    }
    with expectation[script_runner.launch_mode]:
        result = script_runner.run(
            str(console_script), check=True, universal_newlines=True
        )
    assert result.stdout == 'foo\n'
    assert result.stderr == ''


@pytest.mark.script_launch_mode('subprocess')
def test_disable_universal_newlines(
    console_script: Path, script_runner: ScriptRunner
) -> None:
    result = script_runner.run(
        str(console_script), check=True, universal_newlines=False
    )
    assert isinstance(result.stdout, bytes)
    assert isinstance(result.stderr, bytes)
    assert result.stdout.strip() == b'foo'
    assert result.stderr == b''


@pytest.mark.script_launch_mode('both')
def test_run_path(
    console_script: Path, script_runner: ScriptRunner
) -> None:
    result = script_runner.run(console_script, check=True)
    assert result.stdout == 'foo\n'
    assert result.stderr == ''

    console_script.chmod(0o777)
    result = script_runner.run(console_script, check=True)
    assert result.stdout == 'foo\n'
    assert result.stderr == ''


@pytest.mark.script_launch_mode('both')
def test_run_script_codecs(
    console_script: Path, script_runner: ScriptRunner
) -> None:
    """Check that non-UTF-8 scripts can load"""
    console_script.write_text(
        """\
# -*- coding: cp437 -*-
import sys
# Non UTF-8 characters -> ≡≡≡
print('foo')
""",
        encoding="cp437",
    )
    result = script_runner.run(console_script, check=True)
    assert result.stdout == 'foo\n'
    assert result.stderr == ''

pytest-console-scripts-1.4.1/tox.ini

# For more information about tox, see https://tox.readthedocs.org/en/latest/
[tox]
envlist = clean,lint,py38,py39,py310,py311,pypy3,report

[testenv]
deps =
    pytest
    pytest-cov
usedevelop = true
commands =
    pytest tests --cov=pytest_console_scripts --cov-append --cov-report=term-missing {posargs}
depends =
{py38,py39,py310,py311,pypy3}: clean report: py38,py39,py310,py311,pypy3 [testenv:clean] deps = coverage skip_install = true commands = coverage erase [testenv:report] deps = coverage skip_install = true commands = coverage report coverage html [testenv:report_ci] deps = coverage skip_install = true commands = coverage xml [testenv:lint] basepython = python usedevelop = true deps = check-manifest readme_renderer[md] flake8 flake8-docstrings flake8-commas pep8-naming mypy types-setuptools commands = check-manifest --ignore *.ini,tests*,.*.yml,demo* flake8 pytest_console_scripts setup.py tests mypy pytest_console_scripts setup.py tests [flake8] exclude = .tox,*.egg,build select = E,W,F ignore = W503,W504