pax_global_header00006660000000000000000000000064142165752310014520gustar00rootroot0000000000000052 comment=f15aaa5bb8748693498c25aeca19e3362083b2c7 python-procrunner-2.3.3/000077500000000000000000000000001421657523100152415ustar00rootroot00000000000000python-procrunner-2.3.3/.azure-pipelines.yml000066400000000000000000000060261421657523100211620ustar00rootroot00000000000000trigger: branches: include: - '*' tags: include: - '*' stages: - stage: static displayName: Static Analysis jobs: - job: checks displayName: static code analysis pool: vmImage: ubuntu-latest steps: # Run syntax validation using oldest and latest Python - task: UsePythonVersion@0 displayName: Set up python inputs: versionSpec: 3.6 - bash: python .azure-pipelines/syntax-validation.py displayName: Syntax validation (3.6) - task: UsePythonVersion@0 displayName: Set up python inputs: versionSpec: 3.10 - bash: python .azure-pipelines/syntax-validation.py displayName: Syntax validation (3.10) # Run flake8 validation - bash: | pip install --disable-pip-version-check flake8 && \ python .azure-pipelines/flake8-validation.py displayName: Flake8 validation - stage: tests displayName: Run unit tests jobs: - job: linux pool: vmImage: ubuntu-latest strategy: matrix: python36: PYTHON_VERSION: 3.6 python37: PYTHON_VERSION: 3.7 python38: PYTHON_VERSION: 3.8 python39: PYTHON_VERSION: 3.9 python310: PYTHON_VERSION: 3.10 steps: - template: .azure-pipelines/ci.yml - job: macOS pool: vmImage: macOS-latest strategy: matrix: python37: PYTHON_VERSION: 3.7 python38: PYTHON_VERSION: 3.8 python39: PYTHON_VERSION: 3.9 python310: PYTHON_VERSION: 3.10 steps: - template: .azure-pipelines/ci.yml - job: windows pool: vmImage: windows-latest strategy: matrix: python37: PYTHON_VERSION: 3.7 python38: PYTHON_VERSION: 3.8 python39: PYTHON_VERSION: 3.9 python310: PYTHON_VERSION: 3.10 steps: - template: .azure-pipelines/ci.yml - stage: deploy displayName: Publish release dependsOn: - tests - static condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/')) jobs: - job: pypi displayName: Publish pypi release pool: vmImage: ubuntu-latest steps: - task: UsePythonVersion@0 displayName: Set up python inputs: versionSpec: 3.9 - bash: | python -m pip install -r requirements_dev.txt displayName: Install dependencies - bash: | python setup.py sdist bdist_wheel ls -la dist displayName: Build python package - task: PublishBuildArtifacts@1 inputs: pathToPublish: dist/ artifactName: python-release - task: TwineAuthenticate@1 displayName: Set up credentials inputs: pythonUploadServiceConnection: pypi-procrunner - bash: | python -m twine upload -r pypi-procrunner --config-file $(PYPIRC_PATH) dist/*.tar.gz dist/*.whl displayName: Publish package python-procrunner-2.3.3/.azure-pipelines/000077500000000000000000000000001421657523100204335ustar00rootroot00000000000000python-procrunner-2.3.3/.azure-pipelines/ci.yml000066400000000000000000000007221421657523100215520ustar00rootroot00000000000000steps: - task: UsePythonVersion@0 inputs: versionSpec: '$(PYTHON_VERSION)' displayName: 'Use Python $(PYTHON_VERSION)' - script: | python -m pip install --upgrade pip pip install tox displayName: "Set up tox" - script: | tox -e azure displayName: "Run tests" - task: PublishTestResults@2 condition: succeededOrFailed() inputs: testResultsFiles: '**/test-*.xml' testRunTitle: 'Publish test results for Python $(PYTHON_VERSION)' 
python-procrunner-2.3.3/.azure-pipelines/flake8-validation.py000066400000000000000000000022571421657523100243150ustar00rootroot00000000000000import os import subprocess # Flake8 validation failures = 0 try: flake8 = subprocess.run( [ "flake8", "--exit-zero", ], capture_output=True, check=True, encoding="latin-1", timeout=300, ) except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e: print( "##vso[task.logissue type=error;]flake8 validation failed with", str(e.__class__.__name__), ) print(e.stdout) print(e.stderr) print("##vso[task.complete result=Failed;]flake8 validation failed") exit() for line in flake8.stdout.split("\n"): if ":" not in line: continue filename, lineno, column, error = line.split(":", maxsplit=3) errcode, error = error.strip().split(" ", maxsplit=1) filename = os.path.normpath(filename) failures += 1 print( f"##vso[task.logissue type=error;sourcepath={filename};" f"linenumber={lineno};columnnumber={column};code={errcode};]" + error ) if failures: print(f"##vso[task.logissue type=warning]Found {failures} flake8 violation(s)") print(f"##vso[task.complete result=Failed;]Found {failures} flake8 violation(s)") python-procrunner-2.3.3/.azure-pipelines/syntax-validation.py000066400000000000000000000016621421657523100244700ustar00rootroot00000000000000import ast import os import sys print("Python", sys.version, "\n") failures = 0 for base, _, files in os.walk("."): for f in files: if not f.endswith(".py"): continue filename = os.path.normpath(os.path.join(base, f)) try: with open(filename) as fh: ast.parse(fh.read()) except SyntaxError as se: failures += 1 print( f"##vso[task.logissue type=error;sourcepath={filename};" f"linenumber={se.lineno};columnnumber={se.offset};]" f"SyntaxError: {se.msg}" ) print(" " + se.text + " " * se.offset + "^") print(f"SyntaxError: {se.msg} in {filename} line {se.lineno}") print() if failures: print(f"##vso[task.logissue type=warning]Found {failures} syntax error(s)") print(f"##vso[task.complete result=Failed;]Found {failures} syntax error(s)") python-procrunner-2.3.3/.bumpversion.cfg000066400000000000000000000004421421657523100203510ustar00rootroot00000000000000[bumpversion] current_version = 2.3.3 commit = True tag = True [bumpversion:file:setup.cfg] search = version = {current_version} replace = version = {new_version} [bumpversion:file:procrunner/__init__.py] search = __version__ = "{current_version}" replace = __version__ = "{new_version}" python-procrunner-2.3.3/.editorconfig000066400000000000000000000004441421657523100177200ustar00rootroot00000000000000# http://editorconfig.org root = true [*] indent_style = space indent_size = 4 trim_trailing_whitespace = true insert_final_newline = true charset = utf-8 end_of_line = lf [*.bat] indent_style = tab end_of_line = crlf [LICENSE] insert_final_newline = false [Makefile] indent_style = tab python-procrunner-2.3.3/.github/000077500000000000000000000000001421657523100166015ustar00rootroot00000000000000python-procrunner-2.3.3/.github/ISSUE_TEMPLATE.md000066400000000000000000000005011421657523100213020ustar00rootroot00000000000000* ProcRunner version: * Python version: * Operating System: ### Description Describe what you were trying to get done. Tell us what happened, what went wrong, and what you expected to happen. ### What I Did ``` Paste the command(s) you ran and the output. If there was a crash, please include the traceback here. 
``` python-procrunner-2.3.3/.github/renovate.json000066400000000000000000000013171421657523100213210ustar00rootroot00000000000000{ "extends": [ "config:base" ], "labels": [ "dependencies" ], "pip_requirements": { "fileMatch": [ "^requirements.*\\.txt$" ], "groupName": "all dependencies", "groupSlug": "all", "packageRules": [ { "groupName": "all dependencies", "groupSlug": "all", "matchPackagePatterns": [ "*" ] } ] }, "prCreation": "not-pending", "prHourlyLimit": 2, "pre-commit": { "schedule": [ "after 10am and before 4pm every 3 months on the first day of the month" ], "stabilityDays": 10 }, "schedule": [ "after 7am and before 4pm every monday" ], "stabilityDays": 2, "timezone": "Europe/London" } python-procrunner-2.3.3/.gitignore000066400000000000000000000022621421657523100172330ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Temporary files *.sw[op] *~ # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # dotenv .env # virtualenv .venv venv/ ENV/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ python-procrunner-2.3.3/.pre-commit-config.yaml000066400000000000000000000012371421657523100215250ustar00rootroot00000000000000repos: # Automatically sort imports - repo: https://github.com/PyCQA/isort rev: 5.9.3 hooks: - id: isort # Automatic source code formatting - repo: https://github.com/psf/black rev: 21.6b0 hooks: - id: black args: [--safe, --quiet] # Linting - repo: https://github.com/PyCQA/flake8 rev: 3.9.2 hooks: - id: flake8 additional_dependencies: ['flake8-comprehensions==3.5.0'] # Syntax validation and some basic sanity checks - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.0.1 hooks: - id: check-merge-conflict - id: check-ast - id: check-json - id: check-added-large-files args: ['--maxkb=200'] - id: check-yaml python-procrunner-2.3.3/AUTHORS.rst000066400000000000000000000001461421657523100171210ustar00rootroot00000000000000======= Credits ======= * Markus Gerstel Contributors ------------ None yet. Why not be the first? python-procrunner-2.3.3/CONTRIBUTING.rst000066400000000000000000000064651421657523100177150ustar00rootroot00000000000000.. highlight:: shell ============ Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: Types of Contributions ---------------------- Report Bugs ~~~~~~~~~~~ Report bugs at https://github.com/DiamondLightSource/python-procrunner/issues. If you are reporting a bug, please include: * Your operating system name and version. 
* Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. Fix Bugs ~~~~~~~~ Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. Implement Features ~~~~~~~~~~~~~~~~~~ Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. Write Documentation ~~~~~~~~~~~~~~~~~~~ ProcRunner could always use more documentation, whether as part of the official ProcRunner docs, in docstrings, or even on the web in blog posts, articles, and such. Submit Feedback ~~~~~~~~~~~~~~~ The best way to send feedback is to file an issue at https://github.com/DiamondLightSource/python-procrunner/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up `procrunner` for local development. 1. Fork the `procrunner` repo on GitHub. 2. Clone your fork locally:: $ git clone git@github.com:your_name_here/python-procrunner.git 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: $ mkvirtualenv procrunner $ cd procrunner/ $ python setup.py develop 4. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:: $ flake8 procrunner tests $ python setup.py test or py.test $ tox To get flake8 and tox, just pip install them into your virtualenv. 6. Commit your changes and push your branch to GitHub:: $ git add . $ git commit -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature 7. Submit a pull request through the GitHub website. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in HISTORY.rst/README.rst. Tips ---- To run a subset of tests:: $ py.test tests.test_procrunner Deploying --------- A reminder for the maintainers on how to deploy. Make sure all your changes are committed (including an entry in HISTORY.rst). Then run:: $ bumpversion patch # possible: major / minor / patch $ git push $ git push --tags Travis will then deploy to PyPI if tests pass. 
python-procrunner-2.3.3/HISTORY.rst000066400000000000000000000116061421657523100171400ustar00rootroot00000000000000======= History ======= 2.3.3 (2022-03-23) ------------------ * Allow specifying 'preexec_fn' and 'creationflags' keywords, which will be passed through to the subprocess call 2.3.2 (2022-01-28) ------------------ * The run() function now understands stdin=subprocess.DEVNULL to close the subprocess stdin, rather than to connect through the existing stdin, which is the current default 2.3.1 (2021-10-25) ------------------ * Add Python 3.10 support 2.3.0 (2020-10-29) ------------------ * Add Python 3.9 support, drop Python 3.5 support * Fix a file descriptor leak on subprocess execution 2.2.0 (2020-09-07) ------------------ * Calling the run() function with unnamed arguments (other than the command list as the first argument) is now deprecated. As a number of arguments will be removed in a future version the use of unnamed arguments will cause future confusion. `Use explicit keyword arguments instead (#62). `_ * `The run() function debug argument has been deprecated (#63). `_ This is only used to debug the NonBlockingStream* classes. Those are due to be replaced in a future release, so the argument will no longer serve a purpose. Debugging information remains available via standard logging mechanisms. * Final version supporting Python 3.5 2.1.0 (2020-09-05) ------------------ * `Deprecated array access on the return object (#60). `_ The return object will become a subprocess.CompletedProcess in a future release, which no longer allows array-based access. For a translation table of array elements to attributes please see the pull request linked above. * Add a `new parameter 'raise_timeout_exception' (#61). `_ When set to 'True' a subprocess.TimeoutExpired exception is raised when the process runtime exceeds the timeout threshold. This defaults to 'False' and will be set to 'True' in a future release. 2.0.0 (2020-06-24) ------------------ * Python 3.5+ only, support for Python 2.7 has been dropped * Deprecated function alias run_process() has been removed * Fixed a stability issue on Windows 1.1.0 (2019-11-04) ------------------ * Add Python 3.8 support, drop Python 3.4 support 1.0.2 (2019-05-20) ------------------ * Stop environment override variables leaking into the process environment 1.0.1 (2019-04-16) ------------------ * Minor fixes on the return object (implement equality, mark as unhashable) 1.0.0 (2019-03-25) ------------------ * Support file system path objects (PEP-519) in arguments * Change the return object to make it similar to subprocess.CompletedProcess, introduced with Python 3.5+ 0.9.1 (2019-02-22) ------------------ * Have deprecation warnings point to correct code locations 0.9.0 (2018-12-07) ------------------ * Trap UnicodeEncodeError when printing output. Offending characters are replaced and a warning is logged once. Hints at incorrectly set PYTHONIOENCODING. 0.8.1 (2018-12-04) ------------------ * Fix a few deprecation warnings 0.8.0 (2018-10-09) ------------------ * Add parameter working_directory to set the working directory of the subprocess 0.7.2 (2018-10-05) ------------------ * Officially support Python 3.7 0.7.1 (2018-09-03) ------------------ * Accept environment variable overriding with numeric values. 0.7.0 (2018-05-13) ------------------ * Unicode fixes. Fix crash on invalid UTF-8 input. * Clarify that stdout/stderr values are returned as bytestrings. 
* Callbacks receive the data decoded as UTF-8 unicode strings with unknown characters replaced by \ufffd (unicode replacement character). Same applies to printing of output. * Mark stdin broken on Windows. 0.6.1 (2018-05-02) ------------------ * Maintenance release to add some tests for executable resolution. 0.6.0 (2018-05-02) ------------------ * Fix Win32 API executable resolution for commands containing a dot ('.') in addition to a file extension (say '.bat'). 0.5.1 (2018-04-27) ------------------ * Fix Win32API dependency installation on Windows. 0.5.0 (2018-04-26) ------------------ * New keyword 'win32resolve' which only takes effect on Windows and is enabled by default. This causes procrunner to call the Win32 API FindExecutable() function to try and lookup non-.exe files with the corresponding name. This means .bat/.cmd/etc.. files can now be run without explicitly specifying their extension. Only supported on Python 2.7 and 3.5+. 0.4.0 (2018-04-23) ------------------ * Python 2.7 support on Windows. Python3 not yet supported on Windows. 0.3.0 (2018-04-17) ------------------ * run_process() renamed to run() * Python3 compatibility fixes 0.2.0 (2018-03-12) ------------------ * Procrunner is now Python3 3.3-3.6 compatible. 0.1.0 (2018-03-12) ------------------ * First release on PyPI. python-procrunner-2.3.3/LICENSE000066400000000000000000000027161421657523100162540ustar00rootroot00000000000000Copyright (c) 2018-2021 Diamond Light Source. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
python-procrunner-2.3.3/MANIFEST.in000066400000000000000000000004061421657523100167770ustar00rootroot00000000000000include AUTHORS.rst include CONTRIBUTING.rst include HISTORY.rst include LICENSE include README.rst recursive-include tests * recursive-exclude * __pycache__ recursive-exclude * *.py[co] recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif python-procrunner-2.3.3/Makefile000066400000000000000000000042741421657523100167100ustar00rootroot00000000000000.PHONY: clean clean-test clean-pyc clean-build docs help .DEFAULT_GOAL := help define BROWSER_PYSCRIPT import os, webbrowser, sys try: from urllib import pathname2url except: from urllib.request import pathname2url webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1]))) endef export BROWSER_PYSCRIPT define PRINT_HELP_PYSCRIPT import re, sys for line in sys.stdin: match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) if match: target, help = match.groups() print("%-20s %s" % (target, help)) endef export PRINT_HELP_PYSCRIPT BROWSER := python -c "$$BROWSER_PYSCRIPT" help: @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts clean-build: ## remove build artifacts rm -fr build/ rm -fr dist/ rm -fr .eggs/ find . -name '*.egg-info' -exec rm -fr {} + find . -name '*.egg' -exec rm -f {} + clean-pyc: ## remove Python file artifacts find . -name '*.pyc' -exec rm -f {} + find . -name '*.pyo' -exec rm -f {} + find . -name '*~' -exec rm -f {} + find . -name '__pycache__' -exec rm -fr {} + clean-test: ## remove test and coverage artifacts rm -fr .tox/ rm -f .coverage rm -fr htmlcov/ lint: ## check style with flake8 flake8 procrunner tests test: ## run tests quickly with the default Python py.test test-all: ## run tests on every Python version with tox tox coverage: ## check code coverage quickly with the default Python coverage run --source procrunner -m pytest coverage report -m coverage html $(BROWSER) htmlcov/index.html docs: ## generate Sphinx HTML documentation, including API docs rm -f docs/procrunner.rst rm -f docs/modules.rst sphinx-apidoc -o docs/ procrunner $(MAKE) -C docs clean $(MAKE) -C docs html $(BROWSER) docs/_build/html/index.html servedocs: docs ## compile the docs watching for changes watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D . release: clean ## package and upload a release twine upload dist/* dist: clean ## builds source and wheel package python setup.py sdist python setup.py bdist_wheel ls -l dist install: clean ## install the package to the active Python's site-packages python setup.py install python-procrunner-2.3.3/README.rst000066400000000000000000000045101421657523100167300ustar00rootroot00000000000000========== ProcRunner ========== .. image:: https://img.shields.io/pypi/v/procrunner.svg :target: https://pypi.python.org/pypi/procrunner :alt: PyPI release .. image:: https://img.shields.io/conda/vn/conda-forge/procrunner.svg :target: https://anaconda.org/conda-forge/procrunner :alt: Conda Version .. image:: https://dev.azure.com/DLS-tooling/procrunner/_apis/build/status/CI?branchName=master :target: https://github.com/DiamondLightSource/python-procrunner/commits/master :alt: Build status .. image:: https://ci.appveyor.com/api/projects/status/jtq4brwri5q18d0u/branch/master :target: https://ci.appveyor.com/project/Anthchirp/python-procrunner :alt: Build status .. 
image:: https://readthedocs.org/projects/procrunner/badge/?version=latest :target: https://procrunner.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status .. image:: https://img.shields.io/pypi/pyversions/procrunner.svg :target: https://pypi.python.org/pypi/procrunner :alt: Supported Python versions .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/ambv/black :alt: Code style: black Versatile utility function to run external processes * Free software: BSD license * Documentation: https://procrunner.readthedocs.io. Features -------- * runs an external process and waits for it to finish * does not deadlock, no matter the process stdout/stderr output behaviour * returns the exit code, stdout, stderr (separately, both as bytestrings), as a subprocess.CompletedProcess object * process can run in a custom environment, either as a modification of the current environment or in a new environment from scratch * stdin can be fed to the process * stdout and stderr is printed by default, can be disabled * stdout and stderr can be passed to any arbitrary function for live processing (separately, both as unicode strings) * optionally enforces a time limit on the process, raising a subprocess.TimeoutExpired exception if it is exceeded. Credits ------- This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template. .. _Cookiecutter: https://github.com/audreyr/cookiecutter .. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage python-procrunner-2.3.3/appveyor.yml000066400000000000000000000023011421657523100176250ustar00rootroot00000000000000environment: matrix: # For Python versions available on Appveyor, see # http://www.appveyor.com/docs/installed-software#python - PYTHON: "C:\\Python36" - PYTHON: "C:\\Python37" - PYTHON: "C:\\Python38" - PYTHON: "C:\\Python36-x64" - PYTHON: "C:\\Python37-x64" - PYTHON: "C:\\Python38-x64" matrix: allow_failures: - UNSTABLE: 1 install: # Upgrade to the latest pip. - '%PYTHON%\\python.exe -m pip install -U pip setuptools wheel' - '%PYTHON%\\python.exe -m pip install -r requirements_dev.txt' build: off test_script: # Note that you must use the environment variable %PYTHON% to refer to # the interpreter you're using - Appveyor does not do anything special # to put the Python version you want to use on PATH. - "%PYTHON%\\python.exe -m pytest" after_test: # This step builds your wheels. - "%PYTHON%\\python.exe setup.py bdist_wheel" artifacts: # bdist_wheel puts your built wheel in the dist directory - path: dist\* #on_success: # You can use this step to upload your artifacts to a public website. # See Appveyor's documentation for more details. Or you can simply # access your wheels from the Appveyor "artifacts" tab for your build. python-procrunner-2.3.3/docs/000077500000000000000000000000001421657523100161715ustar00rootroot00000000000000python-procrunner-2.3.3/docs/.gitignore000066400000000000000000000000571421657523100201630ustar00rootroot00000000000000/procrunner.rst /procrunner.*.rst /modules.rst python-procrunner-2.3.3/docs/Makefile000066400000000000000000000011431421657523100176300ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = python -msphinx SPHINXPROJ = procrunner SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". 
help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) python-procrunner-2.3.3/docs/api.rst000066400000000000000000000001161421657523100174720ustar00rootroot00000000000000=== API === .. automodule:: procrunner :members: :show-inheritance: python-procrunner-2.3.3/docs/authors.rst000066400000000000000000000000341421657523100204050ustar00rootroot00000000000000.. include:: ../AUTHORS.rst python-procrunner-2.3.3/docs/conf.py000077500000000000000000000115351421657523100175000ustar00rootroot00000000000000#!/usr/bin/env python # # procrunner documentation build configuration file, created by # sphinx-quickstart on Fri Jun 9 13:47:02 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath("..")) import procrunner # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = "ProcRunner" copyright = "2020, Diamond Light Source" author = "Diamond Light Source - Scientific Software" # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. version = procrunner.__version__ # The full version, including alpha/beta/rc tags. release = procrunner.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. 
See the documentation for # a list of builtin themes. # html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # -- Options for HTMLHelp output --------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = "procrunnerdoc" # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto, manual, or own class]). latex_documents = [ ( master_doc, "procrunner.tex", "procrunner Documentation", "Diamond Light Source - Scientific Software", "manual", ) ] # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "procrunner", "procrunner Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "procrunner", "procrunner Documentation", author, "procrunner", "Versatile utility function to run external processes", "Miscellaneous", ) ] python-procrunner-2.3.3/docs/contributing.rst000066400000000000000000000000411421657523100214250ustar00rootroot00000000000000.. include:: ../CONTRIBUTING.rst python-procrunner-2.3.3/docs/history.rst000066400000000000000000000000341421657523100204210ustar00rootroot00000000000000.. include:: ../HISTORY.rst python-procrunner-2.3.3/docs/index.rst000066400000000000000000000004131421657523100200300ustar00rootroot00000000000000Welcome to ProcRunner's documentation! ====================================== .. toctree:: :maxdepth: 2 :caption: Contents: readme installation usage api contributing authors history Indices and tables ================== * :ref:`search` python-procrunner-2.3.3/docs/installation.rst000066400000000000000000000022661421657523100214320ustar00rootroot00000000000000.. highlight:: shell ============ Installation ============ Stable release -------------- To install ProcRunner, run this command in your terminal: .. code-block:: console $ pip install procrunner This is the preferred method to install ProcRunner, as it will always install the most recent stable release. If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process. .. _pip: https://pip.pypa.io .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ From sources ------------ The sources for ProcRunner can be downloaded from the `Github repo`_. You can either clone the public repository: .. 
code-block:: console $ git clone git://github.com/DiamondLightSource/python-procrunner Or download the `tarball`_: .. code-block:: console $ curl -OL https://github.com/DiamondLightSource/python-procrunner/tarball/master Once you have a copy of the source, you can install it with: .. code-block:: console $ python setup.py install .. _Github repo: https://github.com/DiamondLightSource/python-procrunner .. _tarball: https://github.com/DiamondLightSource/python-procrunner/tarball/master python-procrunner-2.3.3/docs/make.bat000066400000000000000000000014041421657523100175750ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=python -msphinx ) set SOURCEDIR=. set BUILDDIR=_build set SPHINXPROJ=procrunner if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The Sphinx module was not found. Make sure you have Sphinx installed, echo.then set the SPHINXBUILD environment variable to point to the full echo.path of the 'sphinx-build' executable. Alternatively you may add the echo.Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd python-procrunner-2.3.3/docs/readme.rst000066400000000000000000000000331421657523100201540ustar00rootroot00000000000000.. include:: ../README.rst python-procrunner-2.3.3/docs/usage.rst000066400000000000000000000014311421657523100200260ustar00rootroot00000000000000===== Usage ===== To use ProcRunner in a project:: import procrunner result = procrunner.run(['/bin/ls', '/some/path/containing spaces']) To test for successful completion:: assert not result.returncode assert result.returncode == 0 # alternatively result.check_returncode() # raises subprocess.CalledProcessError() To test for no STDERR output:: assert not result.stderr assert result.stderr == b'' # alternatively To run with a specific environment variable set:: result = procrunner.run(..., environment_override={ 'VARIABLE': 'value' }) To run with a specific environment:: result = procrunner.run(..., environment={ 'VARIABLE': 'value' }) To run in a specific directory:: result = procrunner.run(..., working_directory='/some/path') python-procrunner-2.3.3/procrunner/000077500000000000000000000000001421657523100174365ustar00rootroot00000000000000python-procrunner-2.3.3/procrunner/__init__.py000066400000000000000000000572121421657523100215560ustar00rootroot00000000000000import codecs import functools import io import logging import os import select import shutil import subprocess import sys import time import timeit import warnings from multiprocessing import Pipe from threading import Thread # # run() - A function to synchronously run an external process, supporting # the following features: # # - runs an external process and waits for it to finish # - does not deadlock, no matter the process stdout/stderr output behaviour # - returns the exit code, stdout, stderr (separately) as a # subprocess.CompletedProcess object # - process can run in a custom environment, either as a modification of # the current environment or in a new environment from scratch # - stdin can be fed to the process # - stdout and stderr is printed by default, can be disabled # - stdout and stderr can be passed to any arbitrary function for # live processing # - optionally enforces a time limit on the process # # # Usage example: # 
# import procrunner # result = procrunner.run(['/bin/ls', '/some/path/containing spaces']) # # Returns: # # ReturnObject( # args=('/bin/ls', '/some/path/containing spaces'), # returncode=2, # stdout=b'', # stderr=b'/bin/ls: cannot access /some/path/containing spaces: No such file or directory\n' # ) # # which also offers (albeit deprecated) # # result.runtime == 0.12990689277648926 # result.time_end == '2017-11-12 19:54:49 GMT' # result.time_start == '2017-11-12 19:54:49 GMT' # result.timeout == False __author__ = """Markus Gerstel""" __email__ = "scientificsoftware@diamond.ac.uk" __version__ = "2.3.3" logger = logging.getLogger("procrunner") logger.addHandler(logging.NullHandler()) class _LineAggregator: """ Buffer that can be filled with stream data and will aggregate complete lines. Lines can be printed or passed to an arbitrary callback function. The lines passed to the callback function are UTF-8 decoded and do not contain a trailing newline character. """ def __init__(self, print_line=False, callback=None): """Create aggregator object.""" self._buffer = "" self._print = print_line self._callback = callback self._decoder = codecs.getincrementaldecoder("utf-8")("replace") def add(self, data): """ Add a single character to buffer. If one or more full lines are found, print them (if desired) and pass to callback function. """ data = self._decoder.decode(data) if not data: return self._buffer += data if "\n" in data: to_print, remainder = self._buffer.rsplit("\n") if self._print: try: print(to_print) except UnicodeEncodeError: print(to_print.encode(sys.getdefaultencoding(), errors="replace")) if not hasattr(self, "_warned"): logger.warning("output encoding error, characters replaced") setattr(self, "_warned", True) if self._callback: self._callback(to_print) self._buffer = remainder def flush(self): """Print/send any remaining data to callback function.""" self._buffer += self._decoder.decode(b"", final=True) if self._buffer: if self._print: print(self._buffer) if self._callback: self._callback(self._buffer) self._buffer = "" class _NonBlockingStreamReader: """Reads a stream in a thread to avoid blocking/deadlocks""" def __init__(self, stream, output=True, debug=False, notify=None, callback=None): """Creates and starts a thread which reads from a stream.""" self._buffer = io.BytesIO() self._closed = False self._closing = False self._debug = debug self._stream = stream self._terminated = False def _thread_write_stream_to_buffer(): la = _LineAggregator(print_line=output, callback=callback) char = True while char: if select.select([self._stream], [], [], 0.1)[0]: char = self._stream.read(1) if char: self._buffer.write(char) la.add(char) else: if self._closing: break self._stream.close() self._terminated = True la.flush() if self._debug: logger.debug("Stream reader terminated") if notify: notify() def _thread_write_stream_to_buffer_windows(): line = True while line: line = self._stream.readline() if line: self._buffer.write(line) if output or callback: linedecode = line.decode("utf-8", "replace") if output: print(linedecode) if callback: callback(linedecode) self._stream.close() self._terminated = True if self._debug: logger.debug("Stream reader terminated") if notify: notify() if os.name == "nt": self._thread = Thread(target=_thread_write_stream_to_buffer_windows) else: self._thread = Thread(target=_thread_write_stream_to_buffer) self._thread.daemon = True self._thread.start() def has_finished(self): """ Returns whether the thread reading from the stream is still alive. 
""" return self._terminated def get_output(self): """ Retrieve the stored data in full. This call may block if the reading thread has not yet terminated. """ self._closing = True if not self.has_finished(): if self._debug: # Main thread overtook stream reading thread. underrun_debug_timer = timeit.default_timer() logger.warning("NBSR underrun") self._thread.join() if not self.has_finished(): if self._debug: logger.debug( "NBSR join after %f seconds, underrun not resolved", timeit.default_timer() - underrun_debug_timer, ) raise Exception("thread did not terminate") if self._debug: logger.debug( "NBSR underrun resolved after %f seconds", timeit.default_timer() - underrun_debug_timer, ) if self._closed: raise Exception("streamreader double-closed") self._closed = True data = self._buffer.getvalue() self._buffer.close() return data class _NonBlockingStreamWriter: """Writes to a stream in a thread to avoid blocking/deadlocks""" def __init__(self, stream, data, debug=False, notify=None): """Creates and starts a thread which writes data to stream.""" self._buffer = data self._buffer_len = len(data) self._buffer_pos = 0 self._max_block_len = 4096 self._stream = stream self._terminated = False def _thread_write_buffer_to_stream(): while self._buffer_pos < self._buffer_len: if (self._buffer_len - self._buffer_pos) > self._max_block_len: block = self._buffer[ self._buffer_pos : (self._buffer_pos + self._max_block_len) ] else: block = self._buffer[self._buffer_pos :] try: self._stream.write(block) except OSError as e: if ( e.errno == 32 ): # broken pipe, ie. process terminated without reading entire stdin self._stream.close() self._terminated = True if notify: notify() return raise self._buffer_pos += len(block) if debug: logger.debug("wrote %d bytes to stream", len(block)) self._stream.close() self._terminated = True if notify: notify() self._thread = Thread(target=_thread_write_buffer_to_stream) self._thread.daemon = True self._thread.start() def has_finished(self): """Returns whether the thread writing to the stream is still alive.""" return self._terminated def bytes_sent(self): """Return the number of bytes written so far.""" return self._buffer_pos def bytes_remaining(self): """Return the number of bytes still to be written.""" return self._buffer_len - self._buffer_pos def _path_resolve(obj): """ Resolve file system path (PEP-519) objects to strings. :param obj: A file system path object or something else. :return: A string representation of a file system path object or, for anything that was not a file system path object, the original object. """ if obj and hasattr(obj, "__fspath__"): return obj.__fspath__() return obj def _windows_resolve(command, path=None): """ Try and find the full path and file extension of the executable to run. This is so that e.g. calls to 'somescript' will point at 'somescript.cmd' without the need to set shell=True in the subprocess. :param command: The command array to be run, with the first element being the command with or w/o path, with or w/o extension. :return: Returns the command array with the executable resolved with the correct extension. If the executable cannot be resolved for any reason the original command array is returned. """ if not command or not isinstance(command[0], str): return command found_executable = shutil.which(command[0], path=path) if found_executable: logger.debug("Resolved %s as %s", command[0], found_executable) return (found_executable, *command[1:]) if "\\" in command[0]: # Special case. 
shutil.which may not detect file extensions if a full # path is given, so try to resolve the executable explicitly for extension in os.getenv("PATHEXT").split(os.pathsep): found_executable = shutil.which(command[0] + extension, path=path) if found_executable: return (found_executable, *command[1:]) logger.warning("Error trying to resolve the executable: %s", command[0]) return command class ReturnObject(subprocess.CompletedProcess): """ A subprocess.CompletedProcess-like object containing the executed command, stdout and stderr (both as bytestrings), and the exitcode. The check_returncode() function raises an exception if the process exited with a non-zero exit code. """ def __init__(self, exitcode=None, command=None, stdout=None, stderr=None, **kw): super().__init__( args=command, returncode=exitcode, stdout=stdout, stderr=stderr ) self._extras = { "timeout": kw.get("timeout"), "runtime": kw.get("runtime"), "time_start": kw.get("time_start"), "time_end": kw.get("time_end"), } def __getitem__(self, key): warnings.warn( "dictionary access to a procrunner return object is deprecated", DeprecationWarning, stacklevel=2, ) if key in self._extras: return self._extras[key] if not hasattr(self, key): raise KeyError(f"Unknown attribute {key}") return getattr(self, key) def __eq__(self, other): """Override equality operator to account for added fields""" if type(other) is type(self): return self.__dict__ == other.__dict__ return False def __hash__(self): """This object is not immutable, so mark it as unhashable""" return None @property def cmd(self): warnings.warn( "procrunner return object .cmd is deprecated, use .args", DeprecationWarning, stacklevel=2, ) return self.args @property def command(self): warnings.warn( "procrunner return object .command is deprecated, use .args", DeprecationWarning, stacklevel=2, ) return self.args @property def exitcode(self): warnings.warn( "procrunner return object .exitcode is deprecated, use .returncode", DeprecationWarning, stacklevel=2, ) return self.returncode @property def timeout(self): warnings.warn( "procrunner return object .timeout is deprecated", DeprecationWarning, stacklevel=2, ) return self._extras["timeout"] @property def runtime(self): warnings.warn( "procrunner return object .runtime is deprecated", DeprecationWarning, stacklevel=2, ) return self._extras["runtime"] @property def time_start(self): warnings.warn( "procrunner return object .time_start is deprecated", DeprecationWarning, stacklevel=2, ) return self._extras["time_start"] @property def time_end(self): warnings.warn( "procrunner return object .time_end is deprecated", DeprecationWarning, stacklevel=2, ) return self._extras["time_end"] def update(self, dictionary): self._extras.update(dictionary) def _deprecate_argument_calling(f): @functools.wraps(f) def wrapper(*args, **kwargs): if len(args) > 1: warnings.warn( "Calling procrunner.run() with unnamed arguments (apart from " "the command) is deprecated. Use keyword arguments instead.", DeprecationWarning, stacklevel=2, ) return f(*args, **kwargs) return wrapper @_deprecate_argument_calling def run( command, timeout=None, debug=None, stdin=None, print_stdout=True, print_stderr=True, callback_stdout=None, callback_stderr=None, environment=None, environment_override=None, win32resolve=True, working_directory=None, raise_timeout_exception=False, creationflags=0, preexec_fn=None, ): """ Run an external process. File system path objects (PEP-519) are accepted in the command, environment, and working directory arguments. 
:param array command: Command line to be run, specified as array. :param timeout: Terminate program execution after this many seconds. :param boolean debug: Enable further debug messages. (deprecated) :param stdin: Optional bytestring that is passed to command stdin, or subprocess.DEVNULL to disable stdin. :param boolean print_stdout: Pass stdout through to sys.stdout. :param boolean print_stderr: Pass stderr through to sys.stderr. :param callback_stdout: Optional function which is called for each stdout line. :param callback_stderr: Optional function which is called for each stderr line. :param dict environment: The full execution environment for the command. :param dict environment_override: Change environment variables from the current values for command execution. :param boolean win32resolve: If on Windows, find the appropriate executable first. This allows running of .bat, .cmd, etc. files without explicitly specifying their extension. :param string working_directory: If specified, run the executable from within this working directory. :param boolean raise_timeout_exception: Forward compatibility flag. If set then a subprocess.TimeoutExpired exception is raised instead of returning an object that can be checked for a timeout condition. Defaults to False, will be changed to True in a future release. :param preexec_fn: pre-execution function, will be passed to subprocess call :param creationflags: flags that will be passed to subprocess call :return: The exit code, stdout, stderr (separately, as byte strings) as a subprocess.CompletedProcess object. """ time_start = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime()) logger.debug("Starting external process: %s", command) if stdin is None: stdin_pipe = None elif isinstance(stdin, int): assert ( stdin == subprocess.DEVNULL ), "stdin argument only allows subprocess.DEVNULL as numeric argument" stdin_pipe = subprocess.DEVNULL stdin = None else: assert sys.platform != "win32", "stdin argument not supported on Windows" stdin_pipe = subprocess.PIPE if debug is not None: warnings.warn( "Use of the debug parameter is deprecated", DeprecationWarning, stacklevel=3 ) start_time = timeit.default_timer() if timeout is not None: max_time = start_time + timeout if not raise_timeout_exception: warnings.warn( "Using procrunner with timeout and without raise_timeout_exception set is deprecated", DeprecationWarning, stacklevel=3, ) if environment is not None: env = {key: _path_resolve(environment[key]) for key in environment} else: env = {key: value for key, value in os.environ.items()} if environment_override: env.update( { key: str(_path_resolve(environment_override[key])) for key in environment_override } ) command = tuple(_path_resolve(part) for part in command) if win32resolve and sys.platform == "win32": command = _windows_resolve(command) if working_directory and sys.version_info < (3, 7): working_directory = os.fspath(working_directory) p = subprocess.Popen( command, shell=False, cwd=working_directory, env=env, stdin=stdin_pipe, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=creationflags, preexec_fn=preexec_fn, ) thread_pipe_pool = [] notifyee, notifier = Pipe(False) thread_pipe_pool.append(notifyee) stdout = _NonBlockingStreamReader( p.stdout, output=print_stdout, debug=debug, notify=notifier.close, callback=callback_stdout, ) notifyee, notifier = Pipe(False) thread_pipe_pool.append(notifyee) stderr = _NonBlockingStreamReader( p.stderr, output=print_stderr, debug=debug, notify=notifier.close, callback=callback_stderr, ) if stdin 
is not None: notifyee, notifier = Pipe(False) thread_pipe_pool.append(notifyee) stdin = _NonBlockingStreamWriter( p.stdin, data=stdin, debug=debug, notify=notifier.close ) timeout_encountered = False while (p.returncode is None) and ( (timeout is None) or (timeit.default_timer() < max_time) ): if debug and timeout is not None: logger.debug("still running (T%.2fs)", timeit.default_timer() - max_time) # wait for some time or until a stream is closed try: if thread_pipe_pool: # Wait for up to 0.5 seconds or for a signal on a remaining stream, # which could indicate that the process has terminated. try: event = thread_pipe_pool[0].poll(0.5) except BrokenPipeError as e: # on Windows this raises "BrokenPipeError: [Errno 109] The pipe has been ended" # which is for all intents and purposes equivalent to a True return value. if e.winerror != 109: raise event = True if event: # One-shot, so remove stream and watch remaining streams thread_pipe_pool.pop(0) if debug: logger.debug("Event received from stream thread") else: time.sleep(0.5) except KeyboardInterrupt: p.kill() # if user pressed Ctrl+C we won't be able to produce a proper report anyway # but at least make sure the child process dies with us raise # check if process is still running p.poll() if p.returncode is None: # timeout condition timeout_encountered = True if debug: logger.debug("timeout (T%.2fs)", timeit.default_timer() - max_time) # send terminate signal and wait some time for buffers to be read p.terminate() if thread_pipe_pool: try: thread_pipe_pool[0].poll(0.5) except BrokenPipeError as e: # on Windows this raises "BrokenPipeError: [Errno 109] The pipe has been ended" # which is for all intents and purposes equivalent to a True return value. if e.winerror != 109: raise thread_pipe_pool.pop(0) if not stdout.has_finished() or not stderr.has_finished(): time.sleep(2) p.poll() if p.returncode is None: # thread still alive # send kill signal and wait some more time for buffers to be read p.kill() if thread_pipe_pool: try: thread_pipe_pool[0].poll(0.5) except BrokenPipeError as e: # on Windows this raises "BrokenPipeError: [Errno 109] The pipe has been ended" # which is for all intents and purposes equivalent to a True return value. 
if e.winerror != 109: raise thread_pipe_pool.pop(0) if not stdout.has_finished() or not stderr.has_finished(): time.sleep(5) p.poll() if p.returncode is None: raise RuntimeError("Process won't terminate") runtime = timeit.default_timer() - start_time if timeout is not None: logger.debug( "Process ended after %.1f seconds with exit code %d (T%.2fs)", runtime, p.returncode, timeit.default_timer() - max_time, ) else: logger.debug( "Process ended after %.1f seconds with exit code %d", runtime, p.returncode ) stdout = stdout.get_output() stderr = stderr.get_output() if timeout_encountered and raise_timeout_exception: raise subprocess.TimeoutExpired( cmd=command, timeout=timeout, output=stdout, stderr=stderr ) time_end = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime()) result = ReturnObject( exitcode=p.returncode, command=command, stdout=stdout, stderr=stderr, timeout=timeout_encountered, runtime=runtime, time_start=time_start, time_end=time_end, ) if stdin is not None: result.update( { "stdin_bytes_sent": stdin.bytes_sent(), "stdin_bytes_remain": stdin.bytes_remaining(), } ) return result python-procrunner-2.3.3/pyproject.toml000066400000000000000000000002021421657523100201470ustar00rootroot00000000000000[build-system] requires = ["setuptools >= 40.6.0", "wheel"] build-backend = "setuptools.build_meta" [tool.isort] profile="black" python-procrunner-2.3.3/pytest.ini000066400000000000000000000000531421657523100172700ustar00rootroot00000000000000[pytest] addopts = -ra junit_family=xunit2 python-procrunner-2.3.3/requirements.txt000066400000000000000000000000001421657523100205130ustar00rootroot00000000000000python-procrunner-2.3.3/requirements_dev.txt000066400000000000000000000001631421657523100213630ustar00rootroot00000000000000bump2version==1.0.1 coverage==6.0.2 pip==21.3.1 pytest==6.2.5 Sphinx==4.2.0 tox==3.24.4 twine==3.4.2 wheel==0.37.0 python-procrunner-2.3.3/setup.cfg000066400000000000000000000030471421657523100170660ustar00rootroot00000000000000[metadata] name = procrunner description = Versatile utility function to run external processes version = 2.3.3 classifiers = Development Status :: 5 - Production/Stable Intended Audience :: Developers License :: OSI Approved :: BSD License Natural Language :: English Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Operating System :: OS Independent Topic :: Software Development :: Libraries :: Python Modules license = BSD license_file = LICENSE project-urls = Download = https://github.com/DiamondLightSource/python-procrunner/tags Documentation = https://procrunner.readthedocs.io/ GitHub = https://github.com/DiamondLightSource/python-procrunner Bug-Tracker = https://github.com/DiamondLightSource/python-procrunner/issues [flake8] # Black disagrees with flake8 on a few points. Ignore those. 
ignore = E203, E266, E501, W503 # E203 whitespace before ':' # E266 too many leading '#' for block comment # E501 line too long # W503 line break before binary operator max-line-length = 88 select = E401,E711,E712,E713,E714,E721,E722,E901, F401,F402,F403,F405,F541,F631,F632,F633,F811,F812,F821,F822,F841,F901, W191,W291,W292,W293,W602,W603,W604,W605,W606, # flake8-comprehensions, https://github.com/adamchainz/flake8-comprehensions C4, [aliases] test = pytest [tool:pytest] collect_ignore = ['setup.py'] python-procrunner-2.3.3/setup.py000066400000000000000000000014621421657523100167560ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import find_packages, setup with open("README.rst") as readme_file: readme = readme_file.read() with open("HISTORY.rst") as history_file: history = history_file.read() requirements = [] setup_requirements = [] test_requirements = ["pytest"] setup( author="Markus Gerstel", author_email="scientificsoftware@diamond.ac.uk", install_requires=requirements, long_description=readme + "\n\n" + history, include_package_data=True, keywords="procrunner", packages=find_packages(include=["procrunner"]), python_requires=">=3.6", setup_requires=setup_requirements, test_suite="tests", tests_require=test_requirements, url="https://github.com/DiamondLightSource/python-procrunner", zip_safe=False, ) python-procrunner-2.3.3/tests/000077500000000000000000000000001421657523100164035ustar00rootroot00000000000000python-procrunner-2.3.3/tests/test_procrunner.py000066400000000000000000000272611421657523100222210ustar00rootroot00000000000000import copy import os import pathlib import sys from unittest import mock import pytest import procrunner @mock.patch("procrunner._NonBlockingStreamReader") @mock.patch("procrunner.time") @mock.patch("procrunner.subprocess") @mock.patch("procrunner.Pipe") def test_run_command_aborts_after_timeout_legacy( mock_pipe, mock_subprocess, mock_time, mock_streamreader ): mock_pipe.return_value = mock.Mock(), mock.Mock() mock_process = mock.Mock() mock_process.returncode = None mock_subprocess.Popen.return_value = mock_process task = ["___"] with pytest.raises(RuntimeError): with pytest.warns(DeprecationWarning, match="timeout"): procrunner.run(task, timeout=-1, debug=False) assert mock_subprocess.Popen.called assert mock_process.terminate.called assert mock_process.kill.called @mock.patch("procrunner._NonBlockingStreamReader") @mock.patch("procrunner.time") @mock.patch("procrunner.subprocess") @mock.patch("procrunner.Pipe") def test_run_command_aborts_after_timeout( mock_pipe, mock_subprocess, mock_time, mock_streamreader ): mock_pipe.return_value = mock.Mock(), mock.Mock() mock_process = mock.Mock() mock_process.returncode = None mock_subprocess.Popen.return_value = mock_process task = ["___"] with pytest.raises(RuntimeError): procrunner.run(task, timeout=-1, raise_timeout_exception=True) assert mock_subprocess.Popen.called assert mock_process.terminate.called assert mock_process.kill.called @mock.patch("procrunner._NonBlockingStreamReader") @mock.patch("procrunner.subprocess") def test_run_command_runs_command_and_directs_pipelines( mock_subprocess, mock_streamreader ): (mock_stdout, mock_stderr) = (mock.Mock(), mock.Mock()) mock_stdout.get_output.return_value = mock.sentinel.proc_stdout mock_stderr.get_output.return_value = mock.sentinel.proc_stderr (stream_stdout, stream_stderr) = (mock.sentinel.stdout, mock.sentinel.stderr) mock_process = mock.Mock() mock_process.stdout = stream_stdout mock_process.stderr = 
stream_stderr mock_process.returncode = 99 command = ["___"] def streamreader_processing(*args, **kwargs): return {(stream_stdout,): mock_stdout, (stream_stderr,): mock_stderr}[args] mock_streamreader.side_effect = streamreader_processing mock_subprocess.Popen.return_value = mock_process expected = { "stderr": mock.sentinel.proc_stderr, "stdout": mock.sentinel.proc_stdout, "exitcode": mock_process.returncode, "command": tuple(command), "runtime": mock.ANY, "timeout": False, "time_start": mock.ANY, "time_end": mock.ANY, } actual = procrunner.run( command, timeout=0.5, callback_stdout=mock.sentinel.callback_stdout, callback_stderr=mock.sentinel.callback_stderr, working_directory=pathlib.Path("somecwd"), raise_timeout_exception=True, ) assert mock_subprocess.Popen.called assert mock_subprocess.Popen.call_args[1]["env"] == os.environ assert mock_subprocess.Popen.call_args[1]["cwd"] in ( pathlib.Path("somecwd"), "somecwd", ) mock_streamreader.assert_has_calls( [ mock.call( stream_stdout, output=mock.ANY, debug=None, notify=mock.ANY, callback=mock.sentinel.callback_stdout, ), mock.call( stream_stderr, output=mock.ANY, debug=None, notify=mock.ANY, callback=mock.sentinel.callback_stderr, ), ], any_order=True, ) assert not mock_process.terminate.called assert not mock_process.kill.called for key in expected: with pytest.warns(DeprecationWarning): assert actual[key] == expected[key] assert actual.args == tuple(command) assert actual.returncode == mock_process.returncode assert actual.stdout == mock.sentinel.proc_stdout assert actual.stderr == mock.sentinel.proc_stderr @mock.patch("procrunner.subprocess") def test_default_process_environment_is_parent_environment(mock_subprocess): mock_subprocess.Popen.side_effect = NotImplementedError() # cut calls short with pytest.raises(NotImplementedError): procrunner.run([mock.Mock()], timeout=-1, raise_timeout_exception=True) assert mock_subprocess.Popen.call_args[1]["env"] == os.environ @mock.patch("procrunner.subprocess") def test_using_debug_parameter_raises_warning(mock_subprocess): mock_subprocess.Popen.side_effect = NotImplementedError() # cut calls short with pytest.warns(DeprecationWarning, match="debug"): with pytest.raises(NotImplementedError): procrunner.run([mock.Mock()], debug=True) with pytest.warns(DeprecationWarning, match="debug"): with pytest.raises(NotImplementedError): procrunner.run([mock.Mock()], debug=False) @mock.patch("procrunner.subprocess") def test_pass_custom_environment_to_process(mock_subprocess): mock_subprocess.Popen.side_effect = NotImplementedError() # cut calls short mock_env = {"key": mock.sentinel.key} # Pass an environment dictionary with pytest.raises(NotImplementedError): procrunner.run( [mock.Mock()], timeout=-1, environment=copy.copy(mock_env), raise_timeout_exception=True, ) assert mock_subprocess.Popen.call_args[1]["env"] == mock_env @mock.patch("procrunner.subprocess") def test_pass_custom_environment_to_process_and_add_another_value(mock_subprocess): mock_subprocess.Popen.side_effect = NotImplementedError() # cut calls short mock_env1 = {"keyA": mock.sentinel.keyA} mock_env2 = {"keyB": mock.sentinel.keyB, "number": 5} # Pass an environment dictionary with pytest.raises(NotImplementedError): procrunner.run( [mock.Mock()], timeout=-1, environment=copy.copy(mock_env1), environment_override=copy.copy(mock_env2), raise_timeout_exception=True, ) mock_env_sum = copy.copy(mock_env1) mock_env_sum.update({key: str(mock_env2[key]) for key in mock_env2}) assert mock_subprocess.Popen.call_args[1]["env"] == mock_env_sum 
@mock.patch("procrunner.subprocess") def test_use_default_process_environment_and_add_another_value(mock_subprocess): mock_subprocess.Popen.side_effect = NotImplementedError() # cut calls short mock_env2 = {"keyB": str(mock.sentinel.keyB)} with pytest.raises(NotImplementedError): procrunner.run( [mock.Mock()], timeout=-1, environment_override=copy.copy(mock_env2), raise_timeout_exception=True, ) random_environment_variable = list(os.environ)[0] if random_environment_variable == list(mock_env2)[0]: random_environment_variable = list(os.environ)[1] assert ( random_environment_variable and random_environment_variable != list(mock_env2)[0] ) assert ( mock_subprocess.Popen.call_args[1]["env"][list(mock_env2)[0]] == mock_env2[list(mock_env2)[0]] ) assert mock_subprocess.Popen.call_args[1]["env"][ random_environment_variable ] == os.getenv(random_environment_variable) @mock.patch("procrunner.subprocess") def test_use_default_process_environment_and_override_a_value(mock_subprocess): mock_subprocess.Popen.side_effect = NotImplementedError() # cut calls short random_environment_variable = list(os.environ)[0] random_environment_value = os.getenv(random_environment_variable) with pytest.raises(NotImplementedError): procrunner.run( [mock.Mock()], timeout=-1, environment_override={ random_environment_variable: "X" + random_environment_value }, raise_timeout_exception=True, ) assert ( mock_subprocess.Popen.call_args[1]["env"][random_environment_variable] == "X" + random_environment_value ) @mock.patch("procrunner.select") @pytest.mark.skipif( sys.platform == "win32", reason="test only relevant on platforms supporting select()", ) def test_nonblockingstreamreader_can_read(mock_select): import time class _stream: def __init__(self): self.data = b"" self.closed = False def write(self, string): self.data = self.data + string def read(self, n): if self.closed: return b"" if self.data == b"": time.sleep(0.01) return b"" if len(self.data) < n: data = self.data self.data = b"" else: data = self.data[:n] self.data = self.data[n:] return data def close(self): self.closed = True teststream = _stream() def select_replacement(rlist, wlist, xlist, timeout): assert teststream in rlist if teststream.closed: return ([teststream], [], []) if teststream.data == b"": return ([], [], []) return ([teststream], [], []) mock_select.select = select_replacement streamreader = procrunner._NonBlockingStreamReader(teststream, output=False) assert not streamreader.has_finished() time.sleep(0.1) testdata = b"abc\n" * 1024 teststream.write(testdata) time.sleep(0.2) teststream.close() time.sleep(0.1) assert streamreader.has_finished() output = streamreader.get_output() assert len(output) == len(testdata) assert output == testdata def test_lineaggregator_aggregates_data(): callback = mock.Mock() aggregator = procrunner._LineAggregator(callback=callback) aggregator.add(b"some") aggregator.add(b"string") callback.assert_not_called() aggregator.add(b"\n") callback.assert_called_once_with("somestring") callback.reset_mock() aggregator.add(b"more") aggregator.add(b"stuff") callback.assert_not_called() aggregator.flush() callback.assert_called_once_with("morestuff") def test_return_object_semantics(): ro = procrunner.ReturnObject( command=mock.sentinel.command, exitcode=0, stdout=mock.sentinel.stdout, stderr=mock.sentinel.stderr, ) with pytest.warns(DeprecationWarning): assert ro["command"] == mock.sentinel.command assert ro.args == mock.sentinel.command with pytest.warns(DeprecationWarning): assert ro["exitcode"] == 0 assert ro.returncode == 
0 with pytest.warns(DeprecationWarning): assert ro["stdout"] == mock.sentinel.stdout assert ro.stdout == mock.sentinel.stdout with pytest.warns(DeprecationWarning): assert ro["stderr"] == mock.sentinel.stderr assert ro.stderr == mock.sentinel.stderr with pytest.raises(KeyError): with pytest.warns(DeprecationWarning): ro["unknownkey"] ro.update({"unknownkey": mock.sentinel.key}) with pytest.warns(DeprecationWarning): assert ro["unknownkey"] == mock.sentinel.key def test_return_object_check_function_passes_on_success(): ro = procrunner.ReturnObject( command=mock.sentinel.command, exitcode=0, stdout=mock.sentinel.stdout, stderr=mock.sentinel.stderr, ) ro.check_returncode() def test_return_object_check_function_raises_on_error(): ro = procrunner.ReturnObject( command=mock.sentinel.command, exitcode=1, stdout=mock.sentinel.stdout, stderr=mock.sentinel.stderr, ) with pytest.raises(Exception) as e: ro.check_returncode() assert repr(mock.sentinel.command) in str(e.value) assert "1" in str(e.value) python-procrunner-2.3.3/tests/test_procrunner_resolution.py000066400000000000000000000044341421657523100245010ustar00rootroot00000000000000import os import sys import pytest import procrunner def PEP519(path): class MockObject: @staticmethod def __fspath__(): return path def __repr__(self): return "" % path return MockObject() @pytest.mark.parametrize( "obj", ( None, True, False, 1, 1.0, ["thing"], {}, {1}, {"thing": "thing"}, "string", b"bytes", RuntimeError(), ["thing", PEP519("thing")], # no recursive resolution ), ) def test_path_object_resolution_for_non_path_objs_does_not_modify_objects(obj): assert procrunner._path_resolve(obj) is obj def test_path_object_resolution_of_path_objects(): assert procrunner._path_resolve(PEP519("thing")) == "thing" @pytest.mark.skipif(sys.platform != "win32", reason="windows specific test only") def test_name_resolution_for_simple_exe(): command = ["cmd.exe", "/c", "echo", "hello"] resolved = procrunner._windows_resolve(command) # command should be replaced with full path to cmd.exe assert resolved[0].lower().endswith("\\cmd.exe") assert os.path.exists(resolved[0]) # parameters are unchanged assert resolved[1:] == tuple(command[1:]) @pytest.mark.skipif(sys.platform != "win32", reason="windows specific test only") def test_name_resolution_for_complex_cases(tmp_path): bat = "simple_bat_extension" cmd = "simple_cmd_extension" exe = "simple_exe_extension" dotshort = "more_complex_filename_with_a.dot" dotlong = "more_complex_filename.withadot" (tmp_path / (bat + ".bat")).touch() (tmp_path / (cmd + ".cmd")).touch() (tmp_path / (exe + ".exe")).touch() (tmp_path / (dotshort + ".bat")).touch() (tmp_path / (dotlong + ".cmd")).touch() def is_valid(command): assert len(command) == 1 assert os.path.exists(tmp_path / command[0]) is_valid(procrunner._windows_resolve([bat], path=os.fspath(tmp_path))) is_valid(procrunner._windows_resolve([cmd], path=os.fspath(tmp_path))) is_valid(procrunner._windows_resolve([exe], path=os.fspath(tmp_path))) is_valid(procrunner._windows_resolve([dotshort], path=os.fspath(tmp_path))) is_valid(procrunner._windows_resolve([dotlong], path=os.fspath(tmp_path))) python-procrunner-2.3.3/tests/test_procrunner_system.py000066400000000000000000000106441421657523100236220ustar00rootroot00000000000000import os import subprocess import sys import timeit import pytest import procrunner def test_simple_command_invocation(): if os.name == "nt": command = ["cmd.exe", "/c", "echo", "hello"] else: command = ["echo", "hello"] result = procrunner.run(command) assert 
result.returncode == 0 assert result.stdout == b"hello" + os.linesep.encode("utf-8") assert result.stderr == b"" def test_simple_command_invocation_with_closed_stdin(): if os.name == "nt": command = ["cmd.exe", "/c", "echo", "hello"] else: command = ["echo", "hello"] result = procrunner.run(command, stdin=subprocess.DEVNULL) assert result.returncode == 0 assert result.stdout == b"hello" + os.linesep.encode("utf-8") assert result.stderr == b"" def test_decode_invalid_utf8_input(capsys): test_string = b"test\xa0string\n" if os.name == "nt": pytest.xfail("Test requires stdin feature which does not work on Windows") command = ["cmd.exe", "/c", "type", "CON"] else: command = ["cat"] result = procrunner.run(command, stdin=test_string) assert result.returncode == 0 assert not result.stderr if os.name == "nt": # Windows modifies line endings assert result.stdout == test_string[:-1] + b"\r\n" else: assert result.stdout == test_string out, err = capsys.readouterr() assert out == "test\ufffdstring\n" assert err == "" def test_running_wget(tmp_path): command = ["wget", "https://www.google.com", "-O", "-"] try: result = procrunner.run(command, working_directory=tmp_path) except OSError as e: if e.errno == 2: pytest.skip("wget not available") raise assert result.returncode == 0 assert b"http" in result.stderr assert b"google" in result.stdout def test_path_object_resolution(tmp_path): sentinel_value = b"sentinel" tmp_path.joinpath("tempfile").write_bytes(sentinel_value) tmp_path.joinpath("reader.py").write_text( "with open('tempfile') as fh:\n print(fh.read())" ) assert "LEAK_DETECTOR" not in os.environ result = procrunner.run( [sys.executable, tmp_path / "reader.py"], environment_override={"PYTHONHASHSEED": "random", "LEAK_DETECTOR": "1"}, working_directory=tmp_path, ) assert result.returncode == 0 assert not result.stderr assert sentinel_value == result.stdout.strip() assert ( "LEAK_DETECTOR" not in os.environ ), "overridden environment variable leaked into parent process" def test_timeout_behaviour_legacy(tmp_path): start = timeit.default_timer() try: with pytest.warns(DeprecationWarning, match="timeout"): result = procrunner.run( [sys.executable, "-c", "import time; time.sleep(5)"], timeout=0.1, working_directory=tmp_path, raise_timeout_exception=False, ) except RuntimeError: # This test sometimes fails with a RuntimeError. runtime = timeit.default_timer() - start assert runtime < 3 return runtime = timeit.default_timer() - start with pytest.warns(DeprecationWarning, match="\\.timeout"): assert result.timeout assert runtime < 3 assert not result.stdout assert not result.stderr assert result.returncode def test_timeout_behaviour(tmp_path): command = (sys.executable, "-c", "import time; time.sleep(5)") start = timeit.default_timer() try: with pytest.raises(subprocess.TimeoutExpired) as te: procrunner.run( command, timeout=0.1, working_directory=tmp_path, raise_timeout_exception=True, ) except RuntimeError: # This test sometimes fails with a RuntimeError. 
        runtime = timeit.default_timer() - start
        assert runtime < 3
        return
    runtime = timeit.default_timer() - start
    assert runtime < 3
    assert te.value.stdout == b""
    assert te.value.stderr == b""
    assert te.value.timeout == 0.1
    assert te.value.cmd == command


def test_argument_deprecation(tmp_path):
    with pytest.warns(DeprecationWarning, match="keyword arguments"):
        result = procrunner.run(
            [sys.executable, "-V"],
            None,
            working_directory=tmp_path,
        )
    assert not result.returncode
    assert result.stderr or result.stdout

python-procrunner-2.3.3/tox.ini000066400000000000000000000013511421657523100165540ustar00rootroot00000000000000
[tox]
envlist = py36, py37, py38, flake8

[testenv:azure]
basepython = python
deps =
    pytest-azurepipelines
    pytest-cov
    -r{toxinidir}/requirements_dev.txt
setenv =
    PYTHONDEVMODE = 1
commands =
    pytest -ra --basetemp={envtmpdir} --cov=procrunner --cov-report=html --cov-report=xml --cov-branch

[testenv:flake8]
basepython = python
deps = flake8
commands = flake8 procrunner

[testenv]
setenv =
    PYTHONPATH = {toxinidir}
    PYTHONDEVMODE = 1
deps =
    -r{toxinidir}/requirements_dev.txt
; If you want to make tox run the tests with the same versions, create a
; requirements.txt with the pinned versions and uncomment the following line:
; -r{toxinidir}/requirements.txt
commands =
    pytest -ra --basetemp={envtmpdir}
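
As a quick orientation, the following is a minimal usage sketch of the procrunner API exercised by the test files in this archive. It is not part of the package sources; the echo and sleep commands, the 10-second timeout, the /tmp working directory and the MY_VAR override are illustrative values only.

import subprocess

import procrunner

# Run a command and capture stdout/stderr as bytes (illustrative command,
# POSIX form; the tests use cmd.exe equivalents on Windows).
result = procrunner.run(["echo", "hello"])
result.check_returncode()  # raises an exception if the exit code is non-zero
print(result.returncode, result.stdout, result.stderr)

# Keyword arguments demonstrated by the test suite (all values illustrative):
try:
    procrunner.run(
        ["sleep", "60"],  # hypothetical long-running command
        timeout=10,  # seconds before the process is terminated
        raise_timeout_exception=True,  # raise instead of flagging the result
        working_directory="/tmp",  # hypothetical working directory
        environment_override={"MY_VAR": "1"},  # hypothetical extra variable
    )
except subprocess.TimeoutExpired as e:
    print("timed out:", e.cmd)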