pax_global_header00006660000000000000000000000064144535655100014522gustar00rootroot0000000000000052 comment=07b836f2663bb073a7bcef3d6c454e1dc6b867ae invoke-2.2.0/000077500000000000000000000000001445356551000130165ustar00rootroot00000000000000invoke-2.2.0/.circleci/000077500000000000000000000000001445356551000146515ustar00rootroot00000000000000invoke-2.2.0/.circleci/config.yml000066400000000000000000000025231445356551000166430ustar00rootroot00000000000000version: 2.1 orbs: orb: invocations/orb@1.3.1 jobs: # Unit+integration tests, with coverage coverage: executor: name: orb/default version: "3.6" steps: - orb/setup - run: inv ci.make-sudouser - orb/sudo-coverage - orb/debug regression: executor: name: orb/default version: "3.6" steps: - orb/setup - run: inv regression - orb/debug doctests: executor: name: orb/default version: "3.6" steps: - orb/setup - run: inv www.doctest - orb/debug typecheck: executor: name: orb/default version: "3.6" steps: - orb/setup - run: mypy . - orb/debug workflows: main: jobs: - orb/lint: name: Lint - orb/format: name: Style check - typecheck: name: Types check - coverage: name: Test - regression: name: Regression tests - orb/test-release: name: Release test - orb/test: name: Test << matrix.version >> requires: ["Test"] matrix: parameters: version: ["3.7", "3.8", "3.9", "3.10", "3.11"] - orb/docs: name: "Docs" requires: ["Test"] - doctests: name: "Doctests" requires: ["Docs"] invoke-2.2.0/.codecov.yml000066400000000000000000000000501445356551000152340ustar00rootroot00000000000000comment: false coverage: precision: 0 invoke-2.2.0/.coveragerc000066400000000000000000000001161445356551000151350ustar00rootroot00000000000000[run] branch = True include = invoke/* tests/* omit = invoke/vendor/* invoke-2.2.0/.flake8000066400000000000000000000002301445356551000141640ustar00rootroot00000000000000[flake8] exclude = invoke/vendor,sites,.git,build,dist,alt_env,appveyor ignore = E124,E125,E128,E261,E301,E302,E303,E306,W503,E731 max-line-length = 79 invoke-2.2.0/.gitignore000066400000000000000000000001461445356551000150070ustar00rootroot00000000000000_build build/ dist/ .coverage .tox *.egg-info *.py[cod] src/ htmlcov coverage.xml .cache .mypy_cache/ invoke-2.2.0/LICENSE000066400000000000000000000024421445356551000140250ustar00rootroot00000000000000Copyright (c) 2020 Jeff Forcier. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. invoke-2.2.0/MANIFEST.in000066400000000000000000000004561445356551000145600ustar00rootroot00000000000000include LICENSE include README.rst include tasks.py recursive-include invoke/completion * recursive-include sites * recursive-exclude sites/*/_build * include dev-requirements.txt recursive-include * py.typed recursive-include tests * recursive-exclude * *.pyc *.pyo recursive-exclude **/__pycache__ * invoke-2.2.0/README.rst000066400000000000000000000024601445356551000145070ustar00rootroot00000000000000|version| |python| |license| |ci| |coverage| .. |version| image:: https://img.shields.io/pypi/v/invoke :target: https://pypi.org/project/invoke/ :alt: PyPI - Package Version .. |python| image:: https://img.shields.io/pypi/pyversions/invoke :target: https://pypi.org/project/invoke/ :alt: PyPI - Python Version .. |license| image:: https://img.shields.io/pypi/l/invoke :target: https://github.com/pyinvoke/invoke/blob/main/LICENSE :alt: PyPI - License .. |ci| image:: https://img.shields.io/circleci/build/github/pyinvoke/invoke/main :target: https://app.circleci.com/pipelines/github/pyinvoke/invoke :alt: CircleCI .. |coverage| image:: https://img.shields.io/codecov/c/gh/pyinvoke/invoke :target: https://app.codecov.io/gh/pyinvoke/invoke :alt: Codecov Welcome to Invoke! ================== Invoke is a Python (3.6+) library for managing shell-oriented subprocesses and organizing executable Python code into CLI-invokable tasks. It draws inspiration from various sources (``make``/``rake``, Fabric 1.x, etc) to arrive at a powerful & clean feature set. To find out what's new in this version of Invoke, please see `the changelog `_. The project maintainer keeps a `roadmap `_ on his website. invoke-2.2.0/THOUGHTS.rst000066400000000000000000000147351445356551000150270ustar00rootroot00000000000000============================================== Random thoughts unsuitable for public docs yet ============================================== CLI type mapping ================ Some loose thoughts on bridging the "shell is strings, Python wants lists/dicts/integers/bools/etc" problem. Methodologies ------------- * Explicit mapping, as with ``argparse``: this particular flag turns into a list/boolean/int/whatever. Because we're specifically mapping to function keyword arguments, a little of that complexity can be removed, but generally it'll look very similar. E.g.:: @args(foo=int) def mytask(foo): ... would turn this:: $ invoke mytask --foo 7 into ``7``, not ``"7"``. * Introspection-based mapping, i.e. introspecting the default values of a function signature and automatically transforming the CLI input. E.g.:: def mytask(foo=5): ... invoked as:: $ invoke mytask --foo 7 results in the Python value ``7`` instead of ``"7"``, just as with the explicit example above. * Formatting-based mapping, i.e. having (optional) conventions in the string format of an incoming flag argument that cause transformations to occur. E.g.
we could say that commas in an argument automatically trigger transformation into a list of strings; thus the invocation:: $ invoke mytask --items a,b,c would on the Python end turn into a call like this:: mytask(items=['a', 'b', 'c']) What to do? ~~~~~~~~~~~ We haven't decided exactly how many of these to use -- we may end up using all three of them as appropriate, with some useful/sensible default and the option to enable/disable things for power users. The trick is to balance power/features with becoming overly complicated to understand or utilize. Other types ----------- Those examples cover integers/numbers, and lists/iterables. Strings are obviously easy/the default. What else is there? * Booleans: these are relatively simple too, either a flag exists (``True``) or is omitted (``False``). * Could also work in a ``--foo`` vs ``--no-foo`` convention to help with the inverse, i.e. values which should default to ``True`` and then need to be turned "off" on the command line. E.g.:: def mytask(option=True): ... could result in having a flag called ``--no-option`` instead of ``--option``. (Or possibly both.) * Dicts: these are tougher, but we could potentially use something like:: $ invoke mytask --dictopt key1=val1,key2=val2 resulting in:: mytask(dictopt={'key1': 'val1', 'key2': 'val2'}) Parameterizing tasks ==================== Old "previous example" (at time the below was split out of live docs, the actual previous example had been changed a lot and no longer applied):: $ invoke test --module=foo test --module=bar Cleaning Testing foo Cleaning Testing bar The previous example had a bit of duplication in how it was invoked; an intermediate use case is to bundle up that sort of parameterization into a "meta" task that itself invokes other tasks in a parameterized fashion. TK: API for this? at CLI level would have to be unorthodox invocation, e.g.:: @task def foo(bar): print(bar) $ invoke --parameterize foo --param bar --values 1 2 3 4 1 2 3 4 Note how there's no "real" invocation of ``foo`` in the normal sense. How to handle partial application (e.g. runtime selection of other non-parameterized arguments)? E.g.:: @task def foo(bar, biz): print("%s %s" % (bar, biz)) $ invoke --parameterize foo --param bar --values 1 2 3 4 --biz "And a" And a 1 And a 2 And a 3 And a 4 That's pretty clunky and foregoes any multi-task invocation. But how could we handle multiple tasks here? If we gave each individual task flags for this, like so:: $ invoke foo --biz "And a" --param foo --values 1 2 3 4 We could do multiple tasks, but then we're stomping on tasks' argument namespaces (we've taken over ``param`` and ``values``). Really hate that. **IDEALLY** we'd still limit parameterization to library use since it's an advanced-ish feature and frequently the parameterization vector is dynamic (aka not the sort of thing you'd give at CLI anyway) Probably best to leave that in the intermediate docs and keep it lib level; it's mostly there for Fabric and advanced users, not something the average Invoke-only user would care about. Not worth the effort to make it work on CLI at this point. :: @task def stuff(var): print(var) # NOTE: may need to be part of base executor since Collection has to know # to pass the parameterization option/values into Executor().execute()? class ParameterizedExecutor(Executor): # NOTE: assumes single dimension of parameterization. # Realistically would want e.g. 
{'name': [values], ...} structure and # then do cross product or something def execute(self, task, args, kwargs, parameter=None, values=None): # Would be nice to generalize this? if parameter: # TODO: handle non-None parameter w/ None values (error) # NOTE: this is where parallelization would occur; probably # need to move into sub-method for value in values: my_kwargs = dict(kwargs) my_kwargs[parameter] = value super(ParameterizedExecutor, self).execute(task, kwargs=my_kwargs) else: super(ParameterizedExecutor, self).execute(task, args, kwargs) Getting hairy: one task, with one pre-task, parameterized ========================================================= :: @task def setup(): print("Yay") @task(pre=[setup]) def build(): print("Woo") class OhGodExecutor(Executor): def execute(self, task, args, kwargs, parameter, values): # assume always parameterized meh # Run pretasks once only, instead of once per parameter value for pre in task.pre: self.execute(self.collection[pre]) for value in values: my_kwargs = dict(kwargs) my_kwargs[parameter] = value super(OhGodExecutor, self).execute(task, kwargs=my_kwargs) Still hairy: one task, with a pre-task that itself has a pre-task ================================================================= All the things: two tasks, each with pre-tasks, both parameterized ================================================================== invoke-2.2.0/codecov.yml000066400000000000000000000001331445356551000151600ustar00rootroot00000000000000# No codecov comments at all, please - just the github 'checks' is sufficient comment: off invoke-2.2.0/dev-requirements.txt000066400000000000000000000006631445356551000170630ustar00rootroot00000000000000# Install self before invocations to save a bit of time -e . # Invocations, for all sorts of things invocations>=3.3 # Coverage! coverage>=6.2,<7 # Docs releases>=2 alabaster==0.7.12 # Testing pytest-relaxed>=2 pytest-cov>=4 # Formatting # Flake8 5.x seems to have an odd importlib-metadata incompatibility? flake8>=4,<5 black>=22.8,<22.9 # Packaging setuptools>56 # Debuggery icecream>=2.1 # typing mypy==0.971 types-PyYAML==6.0.12.4 invoke-2.2.0/integration/000077500000000000000000000000001445356551000153415ustar00rootroot00000000000000invoke-2.2.0/integration/_explicit.py000066400000000000000000000001311445356551000176700ustar00rootroot00000000000000from invoke import task @task def foo(c): """ Frobazz """ print("Yup") invoke-2.2.0/integration/_support/000077500000000000000000000000001445356551000172145ustar00rootroot00000000000000invoke-2.2.0/integration/_support/busywork.py000066400000000000000000000011571445356551000214570ustar00rootroot00000000000000""" Program that just does busywork, yields stdout/stderr and ignores stdin. Useful for measuring CPU usage of the code interfacing with it without expecting the test environment to have much of anything. Accepts a single argv argument, which is the number of cycles to run (e.g. ``python busywork.py 10`` runs ten cycles, the form the integration suite's CPU-usage test invokes).
""" import sys import time num_cycles = int(sys.argv[1]) for i in range(num_cycles): out = "[{}] This is my stdout, there are many like it, but...\n".format(i) print(out, file=sys.stdout, flush=True) err = "[{}] To err is human, to stderr is superhuman\n".format(i) print(err, file=sys.stderr, flush=True) time.sleep(0.1) invoke-2.2.0/integration/_support/err.py000066400000000000000000000002111445356551000203510ustar00rootroot00000000000000#!/usr/bin/env python import sys stream = sys.stderr stream.write(" ".join(sys.argv[1:]) + "\n") stream.flush() # vim:set ft=python : invoke-2.2.0/integration/_support/nested_or_piped.py000066400000000000000000000002041445356551000227250ustar00rootroot00000000000000from invoke import task @task def calls_foo(c): c.run("inv -c nested_or_piped foo") @task def foo(c): c.run("echo meh") invoke-2.2.0/integration/_support/package/000077500000000000000000000000001445356551000206075ustar00rootroot00000000000000invoke-2.2.0/integration/_support/package/tasks/000077500000000000000000000000001445356551000217345ustar00rootroot00000000000000invoke-2.2.0/integration/_support/package/tasks/__init__.py000066400000000000000000000003371445356551000240500ustar00rootroot00000000000000from invoke import Collection # Issue #934 (from #919) only seems to trigger on this style of 'from . import # xxx' - a vanilla self-contained tasks/__init__.py is still fine! from . import module ns = Collection(module) invoke-2.2.0/integration/_support/package/tasks/module.py000066400000000000000000000001001445356551000235620ustar00rootroot00000000000000from invoke import task @task def mytask(c): print("hi!") invoke-2.2.0/integration/_support/parsing.py000066400000000000000000000001301445356551000212230ustar00rootroot00000000000000from invoke import task @task(optional=["meh"]) def foo(c, meh=False): print(meh) invoke-2.2.0/integration/_support/regression.py000066400000000000000000000021371445356551000217510ustar00rootroot00000000000000""" Barebones regression-catching script that looks for ephemeral run() failures. Intended to be run from top level of project via ``inv regression``. In an ideal world this would be truly part of the integration test suite, but: - something about the outer invoke or pytest environment seems to prevent such issues from appearing reliably (see eg issue #660) - it can take quite a while to run, even compared to other integration tests. """ import sys from invoke import task @task def check(c): count = 0 failures = [] for _ in range(0, 1000): count += 1 try: # 'ls' chosen as an arbitrary, fast-enough-for-looping but # does-some-real-work example (where eg 'sleep' is less useful) response = c.run("ls", hide=True) if not response.ok: failures.append(response) except Exception as e: failures.append(e) if failures: print("run() FAILED {}/{} times!".format(len(failures), count)) sys.exit(1) else: print("No failures detected after {} runs, A-OK".format(count)) invoke-2.2.0/integration/_support/respond_base.py000066400000000000000000000001141445356551000222300ustar00rootroot00000000000000import sys if input("What's the password?") != "Rosebud": sys.exit(1) invoke-2.2.0/integration/_support/respond_both.py000066400000000000000000000003451445356551000222560ustar00rootroot00000000000000import sys if input("standard out") != "with it": sys.exit(1) # Since input(text) defaults to stdout...
sys.stderr.write("standard error") sys.stderr.flush() if input() != "between chair and keyboard": sys.exit(1) invoke-2.2.0/integration/_support/respond_fail.py000066400000000000000000000004341445356551000222340ustar00rootroot00000000000000if input("What's the password?") == "Rosebud": print("You're not Citizen Kane!") # This should sit around forever like e.g. a bad sudo prompt would, but the # responder ought to be looking for the above and aborting instead. input("Seriously, what's the password???") invoke-2.2.0/integration/_support/tasks.py000066400000000000000000000003361445356551000207150ustar00rootroot00000000000000""" Tasks module for use within the integration tests. """ from invoke import task @task def print_foo(c): print("foo") @task def print_name(c, name): print(name) @task def print_config(c): print(c.foo) invoke-2.2.0/integration/_support/tree.out000066400000000000000000000016461445356551000207130ustar00rootroot00000000000000docs ├── api │   ├── cli.rst │   ├── collection.rst │   ├── exceptions.rst │   ├── loader.rst │   ├── parser │   │   ├── argument.rst │   │   ├── context.rst │   │   └── parser.rst │   ├── parser.rst │   ├── runner.rst │   ├── tasks.rst │   └── util.rst ├── api.rst ├── concepts │   ├── cli │   │   ├── background.rst │   │   ├── execution.rst │   │   ├── intro.rst │   │   └── type_mapping.rst │   ├── cli.rst │   ├── execution.rst │   ├── loading.rst │   └── namespaces.rst ├── concepts.rst ├── conf.py ├── contributing.rst ├── index.rst └── prior_art.rst 4 directories, 25 files invoke-2.2.0/integration/_util.py000066400000000000000000000027171445356551000170360ustar00rootroot00000000000000from contextlib import contextmanager from functools import wraps from resource import getrusage, RUSAGE_SELF import sys import time from pytest import skip def current_cpu_usage(): rusage = getrusage(RUSAGE_SELF) return rusage.ru_utime + rusage.ru_stime @contextmanager def assert_cpu_usage(lt, verbose=False): """ Execute wrapped block, asserting CPU utilization was less than ``lt``%. :param float lt: CPU use percentage above which failure will occur. :param bool verbose: Whether to print out the calculated percentage. """ start_usage = current_cpu_usage() start_time = time.time() yield end_usage = current_cpu_usage() end_time = time.time() usage_diff = end_usage - start_usage time_diff = end_time - start_time if time_diff == 0: # Apparently possible! time_diff = 0.000001 percentage = (usage_diff / time_diff) * 100.0 if verbose: print("Used {0:.2}% CPU over {1:.2}s".format(percentage, time_diff)) assert percentage < lt def only_utf8(f): """ Decorator causing tests to skip if local shell pipes aren't UTF-8. """ # TODO: use actual test selection labels or whatever nose has @wraps(f) def inner(*args, **kwargs): if getattr(sys.stdout, "encoding", None) == "UTF-8": return f(*args, **kwargs) # TODO: could remove this so they show green, but figure yellow is more # appropriate skip() return inner invoke-2.2.0/integration/context.py000066400000000000000000000015051445356551000174000ustar00rootroot00000000000000from invoke import Context, Config from invocations import ci as ci_mod class Context_: class sudo: def base_case(self): c = Context() # Grab CI-oriented sudo user/pass direct from invocations.ci # TODO: might be nice to give Collection a way to get a Config # object direct, instead of a dict? 
ci_conf = Config(ci_mod.ns.configuration()).ci.sudo user = ci_conf.user c.config.sudo.password = ci_conf.password # Safety 1: ensure configured user even exists assert c.run("id {}".format(user), warn=True) # Safety 2: make sure we ARE them (and not eg root already) assert c.run("whoami", hide=True).stdout.strip() == user assert c.sudo("whoami", hide=True).stdout.strip() == "root" invoke-2.2.0/integration/main.py000066400000000000000000000127201445356551000166410ustar00rootroot00000000000000import os from pathlib import Path import sys import pytest from pytest_relaxed import trap from invoke import run from invoke._version import __version__ from invoke.terminals import WINDOWS from _util import only_utf8 def _output_eq(cmd, expected): assert run(cmd, hide=True).stdout == expected class Main: def setup_method(self): self.cwd = os.getcwd() # Enter integration/_support as all support files are in there now os.chdir(Path(__file__).parent / "_support") def teardown_method(self): os.chdir(self.cwd) class basics: @trap def basic_invocation(self): _output_eq("invoke print-foo", "foo\n") @trap def version_output(self): _output_eq("invoke --version", "Invoke {}\n".format(__version__)) @trap def help_output(self): assert "Usage: inv[oke] " in run("invoke --help").stdout @trap def per_task_help(self): assert "Frobazz" in run("invoke -c _explicit foo --help").stdout @trap def shorthand_binary_name(self): _output_eq("inv print-foo", "foo\n") @trap def explicit_task_module(self): _output_eq("inv --collection _explicit foo", "Yup\n") @trap def invocation_with_args(self): _output_eq("inv print-name --name whatevs", "whatevs\n") @trap def bad_collection_exits_nonzero(self): result = run("inv -c nope -l", warn=True) assert result.exited == 1 assert not result.stdout assert result.stderr @trap def package_style_collections_internally_importable(self): # After merging #919 blew this up and unit tests did not detect! result = run("cd package && inv -l") assert "mytask" in result.stdout def loads_real_user_config(self): path = os.path.expanduser("~/.invoke.yaml") try: with open(path, "w") as fd: fd.write("foo: bar") _output_eq("inv print-config", "bar\n") finally: try: os.unlink(path) except OSError: pass @trap def invocable_via_python_dash_m(self): _output_eq( "python -m invoke print-name --name mainline", "mainline\n" ) class funky_characters_in_stdout: @only_utf8 def basic_nonstandard_characters(self): # Crummy "doesn't explode with decode errors" test cmd = ("type" if WINDOWS else "cat") + " tree.out" run(cmd, hide="stderr") @only_utf8 def nonprinting_bytes(self): # Seriously non-printing characters (i.e. non UTF8) also don't # asplode (they would print as escapes normally, but still) run("echo '\xff'", hide="stderr") @only_utf8 def nonprinting_bytes_pty(self): if WINDOWS: return # PTY use adds another utf-8 decode spot which can also fail. 
run("echo '\xff'", pty=True, hide="stderr") class ptys: def complex_nesting_under_ptys_doesnt_break(self): if WINDOWS: # Not sure how to make this work on Windows return # GH issue 191 substr = " hello\t\t\nworld with spaces" cmd = """ eval 'echo "{}" ' """.format(substr) expected = " hello\t\t\r\nworld with spaces\r\n" assert run(cmd, pty=True, hide="both").stdout == expected def pty_puts_both_streams_in_stdout(self): if WINDOWS: return err_echo = "{} err.py".format(sys.executable) command = "echo foo && {} bar".format(err_echo) r = run(command, hide="both", pty=True) assert r.stdout == "foo\r\nbar\r\n" assert r.stderr == "" def simple_command_with_pty(self): """ Run command under PTY """ # Most Unix systems should have stty, which asplodes when not run # under a pty, and prints useful info otherwise result = run("stty -a", hide=True, pty=True) # PTYs use \r\n, not \n, line separation assert "\r\n" in result.stdout assert result.pty is True @pytest.mark.skip(reason="CircleCI env actually does have 0x0 stty") def pty_size_is_realistic(self): # When we don't explicitly set pty size, 'stty size' sees it as # 0x0. # When we do set it, it should be some non 0x0, non 80x24 (the # default) value. (yes, this means it fails if you really do have # an 80x24 terminal. but who does that?) size = run("stty size", hide=True, pty=True).stdout.strip() assert size != "" assert size != "0 0" assert size != "24 80" class parsing: def false_as_optional_arg_default_value_works_okay(self): # (Dis)proves #416. When bug present, parser gets very confused, # asks "what the hell is 'whee'?". See also a unit test for # Task.get_arguments. for argstr, expected in ( ("", "False"), ("--meh", "True"), ("--meh=whee", "whee"), ): _output_eq( "inv -c parsing foo {}".format(argstr), expected + "\n" ) invoke-2.2.0/integration/runners.py000066400000000000000000000116261445356551000174150ustar00rootroot00000000000000import os import platform import time from unittest.mock import Mock from pytest import skip, raises from invoke import ( run, Local, Context, ThreadException, Responder, FailingResponder, WatcherError, Failure, CommandTimedOut, ) from _util import assert_cpu_usage PYPY = platform.python_implementation() == "PyPy" class Runner_: def setup(self): os.chdir(os.path.join(os.path.dirname(__file__), "_support")) class responding: def base_case(self): # Basic "doesn't explode" test: respond.py will exit nonzero unless # this works, causing a Failure. watcher = Responder(r"What's the password\?", "Rosebud\n") # Gotta give -u or Python will line-buffer its stdout, so we'll # never actually see the prompt. run( "python -u respond_base.py", watchers=[watcher], hide=True, timeout=5, ) def both_streams(self): watchers = [ Responder("standard out", "with it\n"), Responder("standard error", "between chair and keyboard\n"), ] run( "python -u respond_both.py", watchers=watchers, hide=True, timeout=5, ) def watcher_errors_become_Failures(self): watcher = FailingResponder( pattern=r"What's the password\?", response="Rosebud\n", sentinel="You're not Citizen Kane!", ) try: run( "python -u respond_fail.py", watchers=[watcher], hide=True, timeout=5, ) except Failure as e: assert isinstance(e.reason, WatcherError) assert e.result.exited is None else: assert False, "Did not raise Failure!" class stdin_mirroring: def piped_stdin_is_not_conflated_with_mocked_stdin(self): # Re: GH issue #308 # Will die on broken-pipe OSError if bug is present. 
run("echo 'lollerskates' | inv -c nested_or_piped foo", hide=True) def nested_invoke_sessions_not_conflated_with_mocked_stdin(self): # Also re: GH issue #308. This one will just hang forever. Woo! run("inv -c nested_or_piped calls-foo", hide=True) def isnt_cpu_heavy(self): "stdin mirroring isn't CPU-heavy" # CPU measurement under PyPy is...rather different. NBD. if PYPY: skip() # Python 3.5 has been seen using up to ~6.0s CPU time under Travis with assert_cpu_usage(lt=7.0): run("python -u busywork.py 10", pty=True, hide=True) def doesnt_break_when_stdin_exists_but_null(self): # Re: #425 - IOError occurs when bug present run("inv -c nested_or_piped foo < /dev/null", hide=True) class IO_hangs: "IO hangs" def _hang_on_full_pipe(self, pty): class Whoops(Exception): pass runner = Local(Context()) # Force runner IO thread-body method to raise an exception to mimic # real world encoding explosions/etc. When bug is present, this # will make the test hang until forcibly terminated. runner.handle_stdout = Mock(side_effect=Whoops, __name__="sigh") # NOTE: both Darwin (10.10) and Linux (Travis' docker image) have # this file. It's plenty large enough to fill most pipe buffers, # which is the triggering behavior. try: runner.run("cat /usr/share/dict/words", pty=pty) except ThreadException as e: assert len(e.exceptions) == 1 assert e.exceptions[0].type is Whoops else: assert False, "Did not receive expected ThreadException!" def pty_subproc_should_not_hang_if_IO_thread_has_an_exception(self): self._hang_on_full_pipe(pty=True) def nonpty_subproc_should_not_hang_if_IO_thread_has_an_exception(self): self._hang_on_full_pipe(pty=False) class timeouts: def does_not_fire_when_command_quick(self): assert run("sleep 1", timeout=5) def triggers_exception_when_command_slow(self): before = time.time() with raises(CommandTimedOut) as info: run("sleep 5", timeout=0.5) after = time.time() # Fudge real time check a bit, <=0.5 typically fails due to # overhead etc. May need raising further to avoid races? Meh. assert (after - before) <= 0.75 # Sanity checks of the exception obj assert info.value.timeout == 0.5 assert info.value.result.command == "sleep 5" invoke-2.2.0/invoke/000077500000000000000000000000001445356551000143115ustar00rootroot00000000000000invoke-2.2.0/invoke/__init__.py000066400000000000000000000042651445356551000164310ustar00rootroot00000000000000from typing import Any, Optional from ._version import __version_info__, __version__ # noqa from .collection import Collection # noqa from .config import Config # noqa from .context import Context, MockContext # noqa from .exceptions import ( # noqa AmbiguousEnvVar, AuthFailure, CollectionNotFound, Exit, ParseError, PlatformError, ResponseNotAccepted, SubprocessPipeError, ThreadException, UncastableEnvVar, UnexpectedExit, UnknownFileType, UnpicklableConfigMember, WatcherError, CommandTimedOut, ) from .executor import Executor # noqa from .loader import FilesystemLoader # noqa from .parser import Argument, Parser, ParserContext, ParseResult # noqa from .program import Program # noqa from .runners import Runner, Local, Failure, Result, Promise # noqa from .tasks import task, call, Call, Task # noqa from .terminals import pty_size # noqa from .watchers import FailingResponder, Responder, StreamWatcher # noqa def run(command: str, **kwargs: Any) -> Optional[Result]: """ Run ``command`` in a subprocess and return a `.Result` object. See `.Runner.run` for API details. .. note:: This function is a convenience wrapper around Invoke's `.Context` and `.Runner` APIs. 
Specifically, it creates an anonymous `.Context` instance and calls its `~.Context.run` method, which in turn defaults to using a `.Local` runner subclass for command execution. .. versionadded:: 1.0 """ return Context().run(command, **kwargs) def sudo(command: str, **kwargs: Any) -> Optional[Result]: """ Run ``command`` in a ``sudo`` subprocess and return a `.Result` object. See `.Context.sudo` for API details, such as the ``password`` kwarg. .. note:: This function is a convenience wrapper around Invoke's `.Context` and `.Runner` APIs. Specifically, it creates an anonymous `.Context` instance and calls its `~.Context.sudo` method, which in turn defaults to using a `.Local` runner subclass for command execution (plus sudo-related bits & pieces). .. versionadded:: 1.4 """ return Context().sudo(command, **kwargs) invoke-2.2.0/invoke/__main__.py000066400000000000000000000000571445356551000164050ustar00rootroot00000000000000from invoke.main import program program.run() invoke-2.2.0/invoke/_version.py000066400000000000000000000001201445356551000165000ustar00rootroot00000000000000__version_info__ = (2, 2, 0) __version__ = ".".join(map(str, __version_info__)) invoke-2.2.0/invoke/collection.py000066400000000000000000000550241445356551000170240ustar00rootroot00000000000000import copy from types import ModuleType from typing import Any, Callable, Dict, List, Optional, Tuple from .util import Lexicon, helpline from .config import merge_dicts, copy_dict from .parser import Context as ParserContext from .tasks import Task class Collection: """ A collection of executable tasks. See :doc:`/concepts/namespaces`. .. versionadded:: 1.0 """ def __init__(self, *args: Any, **kwargs: Any) -> None: """ Create a new task collection/namespace. `.Collection` offers a set of methods for building a collection of tasks from scratch, plus a convenient constructor wrapping said API. In either case: * The first positional argument may be a string, which (if given) is used as the collection's default name when performing namespace lookups; * A ``loaded_from`` keyword argument may be given, which sets metadata indicating the filesystem path the collection was loaded from. This is used as a guide when loading per-project :ref:`configuration files `. * An ``auto_dash_names`` kwarg may be given, controlling whether task and collection names have underscores turned to dashes in most cases; it defaults to ``True`` but may be set to ``False`` to disable. The CLI machinery will pass in the value of the ``tasks.auto_dash_names`` config value to this kwarg. **The method approach** May initialize with no arguments and use methods (e.g. `.add_task`/`.add_collection`) to insert objects:: c = Collection() c.add_task(some_task) If an initial string argument is given, it is used as the default name for this collection, should it be inserted into another collection as a sub-namespace:: docs = Collection('docs') docs.add_task(doc_task) ns = Collection() ns.add_task(top_level_task) ns.add_collection(docs) # Valid identifiers are now 'top_level_task' and 'docs.doc_task' # (assuming the task objects were actually named the same as the # variables we're using :)) For details, see the API docs for the rest of the class. **The constructor approach** All ``*args`` given to `.Collection` (besides the abovementioned optional positional 'name' argument and ``loaded_from`` kwarg) are expected to be `.Task` or `.Collection` instances which will be passed to `.add_task`/`.add_collection` as appropriate. 
Module objects are also valid (as they are for `.add_collection`). For example, the below snippet results in the same two task identifiers as the one above:: ns = Collection(top_level_task, Collection('docs', doc_task)) If any ``**kwargs`` are given, the keywords are used as the initial name arguments for the respective values:: ns = Collection( top_level_task=some_other_task, docs=Collection(doc_task) ) That's exactly equivalent to:: docs = Collection(doc_task) ns = Collection() ns.add_task(some_other_task, 'top_level_task') ns.add_collection(docs, 'docs') See individual methods' API docs for details. """ # Initialize self.tasks = Lexicon() self.collections = Lexicon() self.default: Optional[str] = None self.name = None self._configuration: Dict[str, Any] = {} # Specific kwargs if applicable self.loaded_from = kwargs.pop("loaded_from", None) self.auto_dash_names = kwargs.pop("auto_dash_names", None) # splat-kwargs version of default value (auto_dash_names=True) if self.auto_dash_names is None: self.auto_dash_names = True # Name if applicable _args = list(args) if _args and isinstance(args[0], str): self.name = self.transform(_args.pop(0)) # Dispatch args/kwargs for arg in _args: self._add_object(arg) # Dispatch kwargs for name, obj in kwargs.items(): self._add_object(obj, name) def _add_object(self, obj: Any, name: Optional[str] = None) -> None: method: Callable if isinstance(obj, Task): method = self.add_task elif isinstance(obj, (Collection, ModuleType)): method = self.add_collection else: raise TypeError("No idea how to insert {!r}!".format(type(obj))) method(obj, name=name) def __repr__(self) -> str: task_names = list(self.tasks.keys()) collections = ["{}...".format(x) for x in self.collections.keys()] return "".format( self.name, ", ".join(sorted(task_names) + sorted(collections)) ) def __eq__(self, other: object) -> bool: if isinstance(other, Collection): return ( self.name == other.name and self.tasks == other.tasks and self.collections == other.collections ) return False def __bool__(self) -> bool: return bool(self.task_names) @classmethod def from_module( cls, module: ModuleType, name: Optional[str] = None, config: Optional[Dict[str, Any]] = None, loaded_from: Optional[str] = None, auto_dash_names: Optional[bool] = None, ) -> "Collection": """ Return a new `.Collection` created from ``module``. Inspects ``module`` for any `.Task` instances and adds them to a new `.Collection`, returning it. If any explicit namespace collections exist (named ``ns`` or ``namespace``) a copy of that collection object is preferentially loaded instead. When the implicit/default collection is generated, it will be named after the module's ``__name__`` attribute, or its last dotted section if it's a submodule. (I.e. it should usually map to the actual ``.py`` filename.) Explicitly given collections will only be given that module-derived name if they don't already have a valid ``.name`` attribute. If the module has a docstring (``__doc__``) it is copied onto the resulting `.Collection` (and used for display in help, list etc output.) :param str name: A string, which if given will override any automatically derived collection name (or name set on the module's root namespace, if it has one.) :param dict config: Used to set config options on the newly created `.Collection` before returning it (saving you a call to `.configure`.) If the imported module had a root namespace object, ``config`` is merged on top of it (i.e. overriding any conflicts.) 
:param str loaded_from: Identical to the same-named kwarg from the regular class constructor - should be the path where the module was found. :param bool auto_dash_names: Identical to the same-named kwarg from the regular class constructor - determines whether emitted names are auto-dashed. .. versionadded:: 1.0 """ module_name = module.__name__.split(".")[-1] def instantiate(obj_name: Optional[str] = None) -> "Collection": # Explicitly given name wins over root ns name (if applicable), # which wins over actual module name. args = [name or obj_name or module_name] kwargs = dict( loaded_from=loaded_from, auto_dash_names=auto_dash_names ) instance = cls(*args, **kwargs) instance.__doc__ = module.__doc__ return instance # See if the module provides a default NS to use in lieu of creating # our own collection. for candidate in ("ns", "namespace"): obj = getattr(module, candidate, None) if obj and isinstance(obj, Collection): # TODO: make this into Collection.clone() or similar? ret = instantiate(obj_name=obj.name) ret.tasks = ret._transform_lexicon(obj.tasks) ret.collections = ret._transform_lexicon(obj.collections) ret.default = ( ret.transform(obj.default) if obj.default else None ) # Explicitly given config wins over root ns config obj_config = copy_dict(obj._configuration) if config: merge_dicts(obj_config, config) ret._configuration = obj_config return ret # Failing that, make our own collection from the module's tasks. tasks = filter(lambda x: isinstance(x, Task), vars(module).values()) # Again, explicit name wins over implicit one from module path collection = instantiate() for task in tasks: collection.add_task(task) if config: collection.configure(config) return collection def add_task( self, task: "Task", name: Optional[str] = None, aliases: Optional[Tuple[str, ...]] = None, default: Optional[bool] = None, ) -> None: """ Add `.Task` ``task`` to this collection. :param task: The `.Task` object to add to this collection. :param name: Optional string name to bind to (overrides the task's own self-defined ``name`` attribute and/or any Python identifier (i.e. ``.func_name``.) :param aliases: Optional iterable of additional names to bind the task as, on top of the primary name. These will be used in addition to any aliases the task itself declares internally. :param default: Whether this task should be the collection default. .. versionadded:: 1.0 """ if name is None: if task.name: name = task.name # XXX https://github.com/python/mypy/issues/1424 elif hasattr(task.body, "func_name"): name = task.body.func_name # type: ignore elif hasattr(task.body, "__name__"): name = task.__name__ else: raise ValueError("Could not obtain a name for this task!") name = self.transform(name) if name in self.collections: err = "Name conflict: this collection has a sub-collection named {!r} already" # noqa raise ValueError(err.format(name)) self.tasks[name] = task for alias in list(task.aliases) + list(aliases or []): self.tasks.alias(self.transform(alias), to=name) if default is True or (default is None and task.is_default): self._check_default_collision(name) self.default = name def add_collection( self, coll: "Collection", name: Optional[str] = None, default: Optional[bool] = None, ) -> None: """ Add `.Collection` ``coll`` as a sub-collection of this one. :param coll: The `.Collection` to add. :param str name: The name to attach the collection as. Defaults to the collection's own internal name. 
:param default: Whether this sub-collection('s default task-or-collection) should be the default invocation of the parent collection. .. versionadded:: 1.0 .. versionchanged:: 1.5 Added the ``default`` parameter. """ # Handle module-as-collection if isinstance(coll, ModuleType): coll = Collection.from_module(coll) # Ensure we have a name, or die trying name = name or coll.name if not name: raise ValueError("Non-root collections must have a name!") name = self.transform(name) # Test for conflict if name in self.tasks: err = "Name conflict: this collection has a task named {!r} already" # noqa raise ValueError(err.format(name)) # Insert self.collections[name] = coll if default: self._check_default_collision(name) self.default = name def _check_default_collision(self, name: str) -> None: if self.default: msg = "'{}' cannot be the default because '{}' already is!" raise ValueError(msg.format(name, self.default)) def _split_path(self, path: str) -> Tuple[str, str]: """ Obtain first collection + remainder, of a task path. E.g. for ``"subcollection.taskname"``, return ``("subcollection", "taskname")``; for ``"subcollection.nested.taskname"`` return ``("subcollection", "nested.taskname")``, etc. An empty path becomes simply ``('', '')``. """ parts = path.split(".") coll = parts.pop(0) rest = ".".join(parts) return coll, rest def subcollection_from_path(self, path: str) -> "Collection": """ Given a ``path`` to a subcollection, return that subcollection. .. versionadded:: 1.0 """ parts = path.split(".") collection = self while parts: collection = collection.collections[parts.pop(0)] return collection def __getitem__(self, name: Optional[str] = None) -> Any: """ Returns task named ``name``. Honors aliases and subcollections. If this collection has a default task, it is returned when ``name`` is empty or ``None``. If empty input is given and no task has been selected as the default, ValueError will be raised. Tasks within subcollections should be given in dotted form, e.g. 'foo.bar'. Subcollection default tasks will be returned on the subcollection's name. .. versionadded:: 1.0 """ return self.task_with_config(name)[0] def _task_with_merged_config( self, coll: str, rest: str, ours: Dict[str, Any] ) -> Tuple[str, Dict[str, Any]]: task, config = self.collections[coll].task_with_config(rest) return task, dict(config, **ours) def task_with_config( self, name: Optional[str] ) -> Tuple[str, Dict[str, Any]]: """ Return task named ``name`` plus its configuration dict. E.g. in a deeply nested tree, this method returns the `.Task`, and a configuration dict created by merging that of this `.Collection` and any nested `Collections <.Collection>`, up through the one actually holding the `.Task`. See `~.Collection.__getitem__` for semantics of the ``name`` argument. :returns: Two-tuple of (`.Task`, `dict`). .. versionadded:: 1.0 """ # Our top level configuration ours = self.configuration() # Default task for this collection itself if not name: if not self.default: raise ValueError("This collection has no default task.") return self[self.default], ours # Normalize name to the format we're expecting name = self.transform(name) # Non-default tasks within subcollections -> recurse (sorta) if "." 
in name: coll, rest = self._split_path(name) return self._task_with_merged_config(coll, rest, ours) # Default task for subcollections (via empty-name lookup) if name in self.collections: return self._task_with_merged_config(name, "", ours) # Regular task lookup return self.tasks[name], ours def __contains__(self, name: str) -> bool: try: self[name] return True except KeyError: return False def to_contexts( self, ignore_unknown_help: Optional[bool] = None ) -> List[ParserContext]: """ Returns all contained tasks and subtasks as a list of parser contexts. :param bool ignore_unknown_help: Passed on to each task's ``get_arguments()`` method. See the config option by the same name for details. .. versionadded:: 1.0 .. versionchanged:: 1.7 Added the ``ignore_unknown_help`` kwarg. """ result = [] for primary, aliases in self.task_names.items(): task = self[primary] result.append( ParserContext( name=primary, aliases=aliases, args=task.get_arguments( ignore_unknown_help=ignore_unknown_help ), ) ) return result def subtask_name(self, collection_name: str, task_name: str) -> str: return ".".join( [self.transform(collection_name), self.transform(task_name)] ) def transform(self, name: str) -> str: """ Transform ``name`` with the configured auto-dashes behavior. If the collection's ``auto_dash_names`` attribute is ``True`` (default), all non leading/trailing underscores are turned into dashes. (Leading/trailing underscores tend to get stripped elsewhere in the stack.) If it is ``False``, the inverse is applied - all dashes are turned into underscores. .. versionadded:: 1.0 """ # Short-circuit on anything non-applicable, e.g. empty strings, bools, # None, etc. if not name: return name from_, to = "_", "-" if not self.auto_dash_names: from_, to = "-", "_" replaced = [] end = len(name) - 1 for i, char in enumerate(name): # Don't replace leading or trailing underscores (+ taking dotted # names into account) # TODO: not 100% convinced of this / it may be exposing a # discrepancy between this level & higher levels which tend to # strip out leading/trailing underscores entirely. if ( i not in (0, end) and char == from_ and name[i - 1] != "." and name[i + 1] != "." ): char = to replaced.append(char) return "".join(replaced) def _transform_lexicon(self, old: Lexicon) -> Lexicon: """ Take a Lexicon and apply `.transform` to its keys and aliases. :returns: A new Lexicon. """ new = Lexicon() # Lexicons exhibit only their real keys in most places, so this will # only grab those, not aliases. for key, value in old.items(): # Deepcopy the value so we're not just copying a reference new[self.transform(key)] = copy.deepcopy(value) # Also copy all aliases, which are string-to-string key mappings for key, value in old.aliases.items(): new.alias(from_=self.transform(key), to=self.transform(value)) return new @property def task_names(self) -> Dict[str, List[str]]: """ Return all task identifiers for this collection as a one-level dict. Specifically, a dict with the primary/"real" task names as the key, and any aliases as a list value. It basically collapses the namespace tree into a single easily-scannable collection of invocation strings, and is thus suitable for things like flat-style task listings or transformation into parser contexts. .. 
versionadded:: 1.0 """ ret = {} # Our own tasks get no prefix, just go in as-is: {name: [aliases]} for name, task in self.tasks.items(): ret[name] = list(map(self.transform, task.aliases)) # Subcollection tasks get both name + aliases prefixed for coll_name, coll in self.collections.items(): for task_name, aliases in coll.task_names.items(): aliases = list( map(lambda x: self.subtask_name(coll_name, x), aliases) ) # Tack on collection name to alias list if this task is the # collection's default. if coll.default == task_name: aliases += (coll_name,) ret[self.subtask_name(coll_name, task_name)] = aliases return ret def configuration(self, taskpath: Optional[str] = None) -> Dict[str, Any]: """ Obtain merged configuration values from collection & children. :param taskpath: (Optional) Task name/path, identical to that used for `~.Collection.__getitem__` (e.g. may be dotted for nested tasks, etc.) Used to decide which path to follow in the collection tree when merging config values. :returns: A `dict` containing configuration values. .. versionadded:: 1.0 """ if taskpath is None: return copy_dict(self._configuration) return self.task_with_config(taskpath)[1] def configure(self, options: Dict[str, Any]) -> None: """ (Recursively) merge ``options`` into the current `.configuration`. Options configured this way will be available to all tasks. It is recommended to use unique keys to avoid potential clashes with other config options For example, if you were configuring a Sphinx docs build target directory, it's better to use a key like ``'sphinx.target'`` than simply ``'target'``. :param options: An object implementing the dictionary protocol. :returns: ``None``. .. versionadded:: 1.0 """ merge_dicts(self._configuration, options) def serialized(self) -> Dict[str, Any]: """ Return an appropriate-for-serialization version of this object. See the documentation for `.Program` and its ``json`` task listing format; this method is the driver for that functionality. .. versionadded:: 1.0 """ return { "name": self.name, "help": helpline(self), "default": self.default, "tasks": [ { "name": self.transform(x.name), "help": helpline(x), "aliases": [self.transform(y) for y in x.aliases], } for x in sorted(self.tasks.values(), key=lambda x: x.name) ], "collections": [ x.serialized() for x in sorted( self.collections.values(), key=lambda x: x.name or "" ) ], } invoke-2.2.0/invoke/completion/000077500000000000000000000000001445356551000164625ustar00rootroot00000000000000invoke-2.2.0/invoke/completion/__init__.py000066400000000000000000000000001445356551000205610ustar00rootroot00000000000000invoke-2.2.0/invoke/completion/bash.completion000066400000000000000000000025141445356551000214740ustar00rootroot00000000000000# Invoke tab-completion script to be sourced with Bash shell. # Known to work on Bash 3.x, untested on 4.x. _complete_{binary}() {{ local candidates # COMP_WORDS contains the entire command string up til now (including # program name). # We hand it to Invoke so it can figure out the current context: spit back # core options, task names, the current task's options, or some combo. candidates=`{binary} --complete -- ${{COMP_WORDS[*]}}` # `compgen -W` takes list of valid options & a partial word & spits back # possible matches. Necessary for any partial word completions (vs # completions performed when no partial words are present). 
# # $2 is the current word or token being tabbed on, either empty string or a # partial word, and thus wants to be compgen'd to arrive at some subset of # our candidate list which actually matches. # # COMPREPLY is the list of valid completions handed back to `complete`. COMPREPLY=( $(compgen -W "${{candidates}}" -- $2) ) }} # Tell shell builtin to use the above for completing our invocations. # * -F: use given function name to generate completions. # * -o default: when function generates no results, use filenames. # * positional args: program names to complete for. complete -F _complete_{binary} -o default {spaced_names} # vim: set ft=sh : invoke-2.2.0/invoke/completion/complete.py000066400000000000000000000121461445356551000206500ustar00rootroot00000000000000""" Command-line completion mechanisms, executed by the core ``--complete`` flag. """ from typing import List import glob import os import re import shlex from typing import TYPE_CHECKING from ..exceptions import Exit, ParseError from ..util import debug, task_name_sort_key if TYPE_CHECKING: from ..collection import Collection from ..parser import Parser, ParseResult, ParserContext def complete( names: List[str], core: "ParseResult", initial_context: "ParserContext", collection: "Collection", parser: "Parser", ) -> Exit: # Strip out program name (scripts give us full command line) # TODO: this may not handle path/to/script though? invocation = re.sub(r"^({}) ".format("|".join(names)), "", core.remainder) debug("Completing for invocation: {!r}".format(invocation)) # Tokenize (shlex will have to do) tokens = shlex.split(invocation) # Handle flags (partial or otherwise) if tokens and tokens[-1].startswith("-"): tail = tokens[-1] debug("Invocation's tail {!r} is flag-like".format(tail)) # Gently parse invocation to obtain 'current' context. # Use last seen context in case of failure (required for # otherwise-invalid partial invocations being completed). contexts: List[ParserContext] try: debug("Seeking context name in tokens: {!r}".format(tokens)) contexts = parser.parse_argv(tokens) except ParseError as e: msg = "Got parser error ({!r}), grabbing its last-seen context {!r}" # noqa debug(msg.format(e, e.context)) contexts = [e.context] if e.context is not None else [] # Fall back to core context if no context seen. debug("Parsed invocation, contexts: {!r}".format(contexts)) if not contexts or not contexts[-1]: context = initial_context else: context = contexts[-1] debug("Selected context: {!r}".format(context)) # Unknown flags (could be e.g. only partially typed out; could be # wholly invalid; doesn't matter) complete with flags. debug("Looking for {!r} in {!r}".format(tail, context.flags)) if tail not in context.flags: debug("Not found, completing with flag names") # Long flags - partial or just the dashes - complete w/ long flags if tail.startswith("--"): for name in filter( lambda x: x.startswith("--"), context.flag_names() ): print(name) # Just a dash, completes with all flags elif tail == "-": for name in context.flag_names(): print(name) # Otherwise, it's something entirely invalid (a shortflag not # recognized, or a java style flag like -foo) so return nothing # (the shell will still try completing with files, but that doesn't # hurt really.) else: pass # Known flags complete w/ nothing or tasks, depending else: # Flags expecting values: do nothing, to let default (usually # file) shell completion occur (which we actively want in this # case.) 
if context.flags[tail].takes_value: debug("Found, and it takes a value, so no completion") pass # Not taking values (eg bools): print task names else: debug("Found, takes no value, printing task names") print_task_names(collection) # If not a flag, is either task name or a flag value, so just complete # task names. else: debug("Last token isn't flag-like, just printing task names") print_task_names(collection) raise Exit def print_task_names(collection: "Collection") -> None: for name in sorted(collection.task_names, key=task_name_sort_key): print(name) # Just stick aliases after the thing they're aliased to. Sorting isn't # so important that it's worth bending over backwards here. for alias in collection.task_names[name]: print(alias) def print_completion_script(shell: str, names: List[str]) -> None: # Grab all .completion files in invoke/completion/. (These used to have no # suffix, but surprise, that's super fragile. completions = { os.path.splitext(os.path.basename(x))[0]: x for x in glob.glob( os.path.join( os.path.dirname(os.path.realpath(__file__)), "*.completion" ) ) } try: path = completions[shell] except KeyError: err = 'Completion for shell "{}" not supported (options are: {}).' raise ParseError(err.format(shell, ", ".join(sorted(completions)))) debug("Printing completion script from {}".format(path)) # Choose one arbitrary program name for script's own internal invocation # (also used to construct completion function names when necessary) binary = names[0] with open(path, "r") as script: print( script.read().format(binary=binary, spaced_names=" ".join(names)) ) invoke-2.2.0/invoke/completion/fish.completion000066400000000000000000000005761445356551000215160ustar00rootroot00000000000000# Invoke tab-completion script for the fish shell # Copy it to the ~/.config/fish/completions directory function __complete_{binary} {binary} --complete -- (commandline --tokenize) end # --no-files: Don't complete files unless invoke gives an empty result # TODO: find a way to honor all binary_names complete --command {binary} --no-files --arguments '(__complete_{binary})' invoke-2.2.0/invoke/completion/zsh.completion000066400000000000000000000026251445356551000213660ustar00rootroot00000000000000# Invoke tab-completion script to be sourced with the Z shell. # Known to work on zsh 5.0.x, probably works on later 4.x releases as well (as # it uses the older compctl completion system). _complete_{binary}() {{ # `words` contains the entire command string up til now (including # program name). # # We hand it to Invoke so it can figure out the current context: spit back # core options, task names, the current task's options, or some combo. # # Before doing so, we attempt to tease out any collection flag+arg so we # can ensure it is applied correctly. collection_arg='' if [[ "${{words}}" =~ "(-c|--collection) [^ ]+" ]]; then collection_arg=$MATCH fi # `reply` is the array of valid completions handed back to `compctl`. # Use ${{=...}} to force whitespace splitting in expansion of # $collection_arg reply=( $({binary} ${{=collection_arg}} --complete -- ${{words}}) ) }} # Tell shell builtin to use the above for completing our given binary name(s). # * -K: use given function name to generate completions. # * +: specifies 'alternative' completion, where options after the '+' are only # used if the completion from the options before the '+' result in no matches. # * -f: when function generates no results, use filenames. # * positional args: program names to complete for. 
compctl -K _complete_{binary} + -f {spaced_names} # vim: set ft=sh : invoke-2.2.0/invoke/config.py000066400000000000000000001407651445356551000161430ustar00rootroot00000000000000import copy import json import os import types from importlib.util import spec_from_loader from os import PathLike from os.path import join, splitext, expanduser from types import ModuleType from typing import Any, Dict, Iterator, Optional, Tuple, Type, Union from .env import Environment from .exceptions import UnknownFileType, UnpicklableConfigMember from .runners import Local from .terminals import WINDOWS from .util import debug, yaml try: from importlib.machinery import SourceFileLoader except ImportError: # PyPy3 from importlib._bootstrap import ( # type: ignore[no-redef] _SourceFileLoader as SourceFileLoader, ) def load_source(name: str, path: str) -> Dict[str, Any]: if not os.path.exists(path): return {} loader = SourceFileLoader("mod", path) mod = ModuleType("mod") mod.__spec__ = spec_from_loader("mod", loader) loader.exec_module(mod) return vars(mod) class DataProxy: """ Helper class implementing nested dict+attr access for `.Config`. Specifically, is used both for `.Config` itself, and to wrap any other dicts assigned as config values (recursively). .. warning:: All methods (of this object or in subclasses) must take care to initialize new attributes via ``self._set(name='value')``, or they'll run into recursion errors! .. versionadded:: 1.0 """ # Attributes which get proxied through to inner merged-dict config obj. _proxies = ( tuple( """ get has_key items iteritems iterkeys itervalues keys values """.split() ) + tuple( "__{}__".format(x) for x in """ cmp contains iter sizeof """.split() ) ) @classmethod def from_data( cls, data: Dict[str, Any], root: Optional["DataProxy"] = None, keypath: Tuple[str, ...] = tuple(), ) -> "DataProxy": """ Alternate constructor for 'baby' DataProxies used as sub-dict values. Allows creating standalone DataProxy objects while also letting subclasses like `.Config` define their own ``__init__`` without muddling the two. :param dict data: This particular DataProxy's personal data. Required, it's the Data being Proxied. :param root: Optional handle on a root DataProxy/Config which needs notification on data updates. :param tuple keypath: Optional tuple describing the path of keys leading to this DataProxy's location inside the ``root`` structure. Required if ``root`` was given (and vice versa.) .. versionadded:: 1.0 """ obj = cls() obj._set(_config=data) obj._set(_root=root) obj._set(_keypath=keypath) return obj def __getattr__(self, key: str) -> Any: # NOTE: due to default Python attribute-lookup semantics, "real" # attributes will always be yielded on attribute access and this method # is skipped. That behavior is good for us (it's more intuitive than # having a config key accidentally shadow a real attribute or method). try: return self._get(key) except KeyError: # Proxy most special vars to config for dict protocol. if key in self._proxies: return getattr(self._config, key) # Otherwise, raise useful AttributeError to follow getattr proto.
err = "No attribute or config key found for {!r}".format(key) attrs = [x for x in dir(self.__class__) if not x.startswith("_")] err += "\n\nValid keys: {!r}".format( sorted(list(self._config.keys())) ) err += "\n\nValid real attributes: {!r}".format(attrs) raise AttributeError(err) def __setattr__(self, key: str, value: Any) -> None: # Turn attribute-sets into config updates anytime we don't have a real # attribute with the given name/key. has_real_attr = key in dir(self) if not has_real_attr: # Make sure to trigger our own __setitem__ instead of going direct # to our internal dict/cache self[key] = value else: super().__setattr__(key, value) def __iter__(self) -> Iterator[Dict[str, Any]]: # For some reason Python is ignoring our __hasattr__ when determining # whether we support __iter__. BOO return iter(self._config) def __eq__(self, other: object) -> bool: # NOTE: Can't proxy __eq__ because the RHS will always be an obj of the # current class, not the proxied-to class, and that causes # NotImplemented. # Try comparing to other objects like ourselves, falling back to a not # very comparable value (None) so comparison fails. other_val = getattr(other, "_config", None) # But we can compare to vanilla dicts just fine, since our _config is # itself just a dict. if isinstance(other, dict): other_val = other return bool(self._config == other_val) def __len__(self) -> int: return len(self._config) def __setitem__(self, key: str, value: str) -> None: self._config[key] = value self._track_modification_of(key, value) def __getitem__(self, key: str) -> Any: return self._get(key) def _get(self, key: str) -> Any: # Short-circuit if pickling/copying mechanisms are asking if we've got # __setstate__ etc; they'll ask this w/o calling our __init__ first, so # we'd be in a RecursionError-causing catch-22 otherwise. if key in ("__setstate__",): raise AttributeError(key) # At this point we should be able to assume a self._config... value = self._config[key] if isinstance(value, dict): # New object's keypath is simply the key, prepended with our own # keypath if we've got one. keypath = (key,) if hasattr(self, "_keypath"): keypath = self._keypath + keypath # If we have no _root, we must be the root, so it's us. Otherwise, # pass along our handle on the root. root = getattr(self, "_root", self) value = DataProxy.from_data(data=value, root=root, keypath=keypath) return value def _set(self, *args: Any, **kwargs: Any) -> None: """ Convenience workaround of default 'attrs are config keys' behavior. Uses `object.__setattr__` to work around the class' normal proxying behavior, but is less verbose than using that directly. Has two modes (which may be combined if you really want): - ``self._set('attrname', value)``, just like ``__setattr__`` - ``self._set(attname=value)`` (i.e. kwargs), even less typing. """ if args: object.__setattr__(self, *args) for key, value in kwargs.items(): object.__setattr__(self, key, value) def __repr__(self) -> str: return "<{}: {}>".format(self.__class__.__name__, self._config) def __contains__(self, key: str) -> bool: return key in self._config @property def _is_leaf(self) -> bool: return hasattr(self, "_root") @property def _is_root(self) -> bool: return hasattr(self, "_modify") def _track_removal_of(self, key: str) -> None: # Grab the root object responsible for tracking removals; either the # referenced root (if we're a leaf) or ourselves (if we're not). 
# (Intermediate nodes never have anything but __getitem__ called on # them, otherwise they're by definition being treated as a leaf.) target = None if self._is_leaf: target = self._root elif self._is_root: target = self if target is not None: target._remove(getattr(self, "_keypath", tuple()), key) def _track_modification_of(self, key: str, value: str) -> None: target = None if self._is_leaf: target = self._root elif self._is_root: target = self if target is not None: target._modify(getattr(self, "_keypath", tuple()), key, value) def __delitem__(self, key: str) -> None: del self._config[key] self._track_removal_of(key) def __delattr__(self, name: str) -> None: # Make sure we don't screw up true attribute deletion for the # situations that actually want it. (Uncommon, but not rare.) if name in self: del self[name] else: object.__delattr__(self, name) def clear(self) -> None: keys = list(self.keys()) for key in keys: del self[key] def pop(self, *args: Any) -> Any: # Must test this up front before (possibly) mutating self._config key_existed = args and args[0] in self._config # We always have a _config (whether it's a real dict or a cache of # merged levels) so we can fall back to it for all the corner case # handling re: args (arity, handling a default, raising KeyError, etc) ret = self._config.pop(*args) # If it looks like no popping occurred (key wasn't there), presumably # user gave default, so we can short-circuit return here - no need to # track a deletion that did not happen. if not key_existed: return ret # Here, we can assume at least the 1st posarg (key) existed. self._track_removal_of(args[0]) # In all cases, return the popped value. return ret def popitem(self) -> Any: ret = self._config.popitem() self._track_removal_of(ret[0]) return ret def setdefault(self, *args: Any) -> Any: # Must test up front whether the key existed beforehand key_existed = args and args[0] in self._config # Run locally ret = self._config.setdefault(*args) # Key already existed -> nothing was mutated, short-circuit if key_existed: return ret # Here, we can assume the key did not exist and thus user must have # supplied a 'default' (if they did not, the real setdefault() above # would have excepted.) key, default = args self._track_modification_of(key, default) return ret def update(self, *args: Any, **kwargs: Any) -> None: if kwargs: for key, value in kwargs.items(): self[key] = value elif args: # TODO: complain if arity>1 arg = args[0] if isinstance(arg, dict): for key in arg: self[key] = arg[key] else: # TODO: be stricter about input in this case for pair in arg: self[pair[0]] = pair[1] class Config(DataProxy): """ Invoke's primary configuration handling class. See :doc:`/concepts/configuration` for details on the configuration system this class implements, including the :ref:`configuration hierarchy `. The rest of this class' documentation assumes familiarity with that document. **Access** Configuration values may be accessed and/or updated using dict syntax:: config['foo'] or attribute syntax:: config.foo Nesting works the same way - dict config values are turned into objects which honor both the dictionary protocol and the attribute-access method:: config['foo']['bar'] config.foo.bar **A note about attribute access and methods** This class implements the entire dictionary protocol: methods such as ``keys``, ``values``, ``items``, ``pop`` and so forth should all function as they do on regular dicts. 
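    For example (an illustrative sketch; assume the config was loaded with
    ``{'foo': 'bar'}``)::

        assert 'foo' in config
        assert config.pop('foo') == 'bar'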
It also implements new config-specific methods such as `load_system`, `load_collection`, `merge`, `clone`, etc. .. warning:: Accordingly, this means that if you have configuration options sharing names with these methods, you **must** use dictionary syntax (e.g. ``myconfig['keys']``) to access the configuration data. **Lifecycle** At initialization time, `.Config`: - creates per-level data structures; - stores any levels supplied to `__init__`, such as defaults or overrides, as well as the various config file paths/filename patterns; - and loads config files, if found (though typically this just means system and user-level files, as project and runtime files need more info before they can be found and loaded.) - This step can be skipped by specifying ``lazy=True``. At this point, `.Config` is fully usable - and because it pre-emptively loads some config files, those config files can affect anything that comes after, like CLI parsing or loading of task collections. In the CLI use case, further processing is done after instantiation, using the ``load_*`` methods such as `load_overrides`, `load_project`, etc: - the result of argument/option parsing is applied to the overrides level; - a project-level config file is loaded, as it's dependent on a loaded tasks collection; - a runtime config file is loaded, if its flag was supplied; - then, for each task being executed: - per-collection data is loaded (only possible now that we have collection & task in hand); - shell environment data is loaded (must be done at end of process due to using the rest of the config as a guide for interpreting env var names.) At this point, the config object is handed to the task being executed, as part of its execution `.Context`. Any modifications made directly to the `.Config` itself after this point end up stored in their own (topmost) config level, making it easier to debug final values. Finally, any *deletions* made to the `.Config` (e.g. applications of dict-style mutators like ``pop``, ``clear`` etc) are also tracked in their own structure, allowing the config object to honor such method calls without mutating the underlying source data. **Special class attributes** The following class-level attributes are used for low-level configuration of the config system itself, such as which file paths to load. They are primarily intended for overriding by subclasses. - ``prefix``: Supplies the default value for ``file_prefix`` (directly) and ``env_prefix`` (uppercased). See their descriptions for details. Its default value is ``"invoke"``. - ``file_prefix``: The config file 'basename' default (though it is not a literal basename; it can contain path parts if desired) which is appended to the configured values of ``system_prefix``, ``user_prefix``, etc, to arrive at the final (pre-extension) file paths. Thus, by default, a system-level config file path concatenates the ``system_prefix`` of ``/etc/`` with the ``file_prefix`` of ``invoke`` to arrive at paths like ``/etc/invoke.json``. Defaults to ``None``, meaning to use the value of ``prefix``. - ``env_prefix``: A prefix used (along with a joining underscore) to determine which environment variables are loaded as the env var configuration level. Since its default is the value of ``prefix`` capitalized, this means env vars like ``INVOKE_RUN_ECHO`` are sought by default. Defaults to ``None``, meaning to use the value of ``prefix``. .. 
versionadded:: 1.0 """ prefix = "invoke" file_prefix = None env_prefix = None @staticmethod def global_defaults() -> Dict[str, Any]: """ Return the core default settings for Invoke. Generally only for use by `.Config` internals. For descriptions of these values, see :ref:`default-values`. Subclasses may choose to override this method, calling ``Config.global_defaults`` and applying `.merge_dicts` to the result, to add to or modify these values. .. versionadded:: 1.0 """ # On Windows, which won't have /bin/bash, check for a set COMSPEC env # var (https://en.wikipedia.org/wiki/COMSPEC) or fallback to an # unqualified cmd.exe otherwise. if WINDOWS: shell = os.environ.get("COMSPEC", "cmd.exe") # Else, assume Unix, most distros of which have /bin/bash available. # TODO: consider an automatic fallback to /bin/sh for systems lacking # /bin/bash; however users may configure run.shell quite easily, so... else: shell = "/bin/bash" return { # TODO: we document 'debug' but it's not truly implemented outside # of env var and CLI flag. If we honor it, we have to go around and # figure out at what points we might want to call # `util.enable_logging`: # - just using it as a fallback default for arg parsing isn't much # use, as at that point the config holds nothing but defaults & CLI # flag values # - doing it at file load time might be somewhat useful, though # where this happens may be subject to change soon # - doing it at env var load time seems a bit silly given the # existing support for at-startup testing for INVOKE_DEBUG # 'debug': False, # TODO: I feel like we want these to be more consistent re: default # values stored here vs 'stored' as logic where they are # referenced, there are probably some bits that are all "if None -> # default" that could go here. Alternately, make _more_ of these # default to None? "run": { "asynchronous": False, "disown": False, "dry": False, "echo": False, "echo_stdin": None, "encoding": None, "env": {}, "err_stream": None, "fallback": True, "hide": None, "in_stream": None, "out_stream": None, "echo_format": "\033[1;37m{command}\033[0m", "pty": False, "replace_env": False, "shell": shell, "warn": False, "watchers": [], }, # This doesn't live inside the 'run' tree; otherwise it'd make it # somewhat harder to extend/override in Fabric 2 which has a split # local/remote runner situation. "runners": {"local": Local}, "sudo": { "password": None, "prompt": "[sudo] password: ", "user": None, }, "tasks": { "auto_dash_names": True, "collection_name": "tasks", "dedupe": True, "executor_class": None, "ignore_unknown_help": False, "search_root": None, }, "timeouts": {"command": None}, } def __init__( self, overrides: Optional[Dict[str, Any]] = None, defaults: Optional[Dict[str, Any]] = None, system_prefix: Optional[str] = None, user_prefix: Optional[str] = None, project_location: Optional[PathLike] = None, runtime_path: Optional[PathLike] = None, lazy: bool = False, ): """ Creates a new config object. :param dict defaults: A dict containing default (lowest level) config data. Default: `global_defaults`. :param dict overrides: A dict containing override-level config data. Default: ``{}``. :param str system_prefix: Base path for the global config file location; combined with the prefix and file suffixes to arrive at final file path candidates. Default: ``/etc/`` (thus e.g. ``/etc/invoke.yaml`` or ``/etc/invoke.json``). :param str user_prefix: Like ``system_prefix`` but for the per-user config file. 
These variables are joined as strings, not via path-style joins, so they may contain partial file paths; for the per-user config file this often means a leading dot, to make the final result a hidden file on most systems. Default: ``~/.`` (e.g. ``~/.invoke.yaml``). :param str project_location: Optional directory path of the currently loaded `.Collection` (as loaded by `.Loader`). When non-empty, will trigger seeking of per-project config files in this directory. :param str runtime_path: Optional file path to a runtime configuration file. Used to fill the penultimate slot in the config hierarchy. Should be a full file path to an existing file, not a directory path or a prefix. :param bool lazy: Whether to automatically load some of the lower config levels. By default (``lazy=False``), ``__init__`` automatically calls `load_system` and `load_user` to load system and user config files, respectively. For more control over what is loaded when, you can say ``lazy=True``, and no automatic loading is done. .. note:: If you give ``defaults`` and/or ``overrides`` as ``__init__`` kwargs instead of waiting to use `load_defaults` or `load_overrides` afterwards, those *will* still end up 'loaded' immediately. """ # Technically an implementation detail - do not expose in public API. # Stores merged configs and is accessed via DataProxy. self._set(_config={}) # Config file suffixes to search, in preference order. self._set(_file_suffixes=("yaml", "yml", "json", "py")) # Default configuration values, typically a copy of `global_defaults`. if defaults is None: defaults = copy_dict(self.global_defaults()) self._set(_defaults=defaults) # Collection-driven config data, gathered from the collection tree # containing the currently executing task. self._set(_collection={}) # Path prefix searched for the system config file. # NOTE: There is no default system prefix on Windows. if system_prefix is None and not WINDOWS: system_prefix = "/etc/" self._set(_system_prefix=system_prefix) # Path to loaded system config file, if any. self._set(_system_path=None) # Whether the system config file has been loaded or not (or ``None`` if # no loading has been attempted yet.) self._set(_system_found=None) # Data loaded from the system config file. self._set(_system={}) # Path prefix searched for per-user config files. if user_prefix is None: user_prefix = "~/." self._set(_user_prefix=user_prefix) # Path to loaded user config file, if any. self._set(_user_path=None) # Whether the user config file has been loaded or not (or ``None`` if # no loading has been attempted yet.) self._set(_user_found=None) # Data loaded from the per-user config file. self._set(_user={}) # As it may want to be set post-init, project conf file related attrs # get initialized or overwritten via a specific method. self.set_project_location(project_location) # Environment variable name prefix env_prefix = self.env_prefix if env_prefix is None: env_prefix = self.prefix env_prefix = "{}_".format(env_prefix.upper()) self._set(_env_prefix=env_prefix) # Config data loaded from the shell environment. self._set(_env={}) # As it may want to be set post-init, runtime conf file related attrs # get initialized or overwritten via a specific method. self.set_runtime_path(runtime_path) # Overrides - highest normal config level. Typically filled in from # command-line flags. if overrides is None: overrides = {} self._set(_overrides=overrides) # Absolute highest level: user modifications. self._set(_modifications={}) # And its sibling: user deletions. 
(stored as a flat dict of keypath # keys and dummy values, for constant-time membership testing/removal # w/ no messy recursion. TODO: maybe redo _everything_ that way? in # _modifications and other levels, the values would of course be # valuable and not just None) self._set(_deletions={}) # Convenience loading of user and system files, since those require no # other levels in order to function. if not lazy: self.load_base_conf_files() # Always merge, otherwise defaults, etc are not usable until creator or # a subroutine does so. self.merge() def load_base_conf_files(self) -> None: # Just a refactor of something done in unlazy init or in clone() self.load_system(merge=False) self.load_user(merge=False) def load_defaults(self, data: Dict[str, Any], merge: bool = True) -> None: """ Set or replace the 'defaults' configuration level, from ``data``. :param dict data: The config data to load as the defaults level. :param bool merge: Whether to merge the loaded data into the central config. Default: ``True``. :returns: ``None``. .. versionadded:: 1.0 """ self._set(_defaults=data) if merge: self.merge() def load_overrides(self, data: Dict[str, Any], merge: bool = True) -> None: """ Set or replace the 'overrides' configuration level, from ``data``. :param dict data: The config data to load as the overrides level. :param bool merge: Whether to merge the loaded data into the central config. Default: ``True``. :returns: ``None``. .. versionadded:: 1.0 """ self._set(_overrides=data) if merge: self.merge() def load_system(self, merge: bool = True) -> None: """ Load a system-level config file, if possible. Checks the configured ``_system_prefix`` path, which defaults to ``/etc``, and will thus load files like ``/etc/invoke.yml``. :param bool merge: Whether to merge the loaded data into the central config. Default: ``True``. :returns: ``None``. .. versionadded:: 1.0 """ self._load_file(prefix="system", merge=merge) def load_user(self, merge: bool = True) -> None: """ Load a user-level config file, if possible. Checks the configured ``_user_prefix`` path, which defaults to ``~/.``, and will thus load files like ``~/.invoke.yml``. :param bool merge: Whether to merge the loaded data into the central config. Default: ``True``. :returns: ``None``. .. versionadded:: 1.0 """ self._load_file(prefix="user", merge=merge) def load_project(self, merge: bool = True) -> None: """ Load a project-level config file, if possible. Checks the configured ``_project_prefix`` value derived from the path given to `set_project_location`, which is typically set to the directory containing the loaded task collection. Thus, if one were to run the CLI tool against a tasks collection ``/home/myuser/code/tasks.py``, `load_project` would seek out files like ``/home/myuser/code/invoke.yml``. :param bool merge: Whether to merge the loaded data into the central config. Default: ``True``. :returns: ``None``. .. versionadded:: 1.0 """ self._load_file(prefix="project", merge=merge) def set_runtime_path(self, path: Optional[PathLike]) -> None: """ Set the runtime config file path. .. versionadded:: 1.0 """ # Path to the user-specified runtime config file. self._set(_runtime_path=path) # Data loaded from the runtime config file. self._set(_runtime={}) # Whether the runtime config file has been loaded or not (or ``None`` # if no loading has been attempted yet.) self._set(_runtime_found=None) def load_runtime(self, merge: bool = True) -> None: """ Load a runtime-level config file, if one was specified. 
When the CLI framework creates a `Config`, it sets ``_runtime_path``, which is a full path to the requested config file. This method attempts to load that file. :param bool merge: Whether to merge the loaded data into the central config. Default: ``True``. :returns: ``None``. .. versionadded:: 1.0 """ self._load_file(prefix="runtime", absolute=True, merge=merge) def load_shell_env(self) -> None: """ Load values from the shell environment. `.load_shell_env` is intended for execution late in a `.Config` object's lifecycle, once all other sources (such as a runtime config file or per-collection configurations) have been loaded. Loading from the shell is not terrifically expensive, but must be done at a specific point in time to ensure the "only known config keys are loaded from the env" behavior works correctly. See :ref:`env-vars` for details on this design decision and other info re: how environment variables are scanned and loaded. .. versionadded:: 1.0 """ # Force merge of existing data to ensure we have an up to date picture debug("Running pre-merge for shell env loading...") self.merge() debug("Done with pre-merge.") loader = Environment(config=self._config, prefix=self._env_prefix) self._set(_env=loader.load()) debug("Loaded shell environment, triggering final merge") self.merge() def load_collection( self, data: Dict[str, Any], merge: bool = True ) -> None: """ Update collection-driven config data. `.load_collection` is intended for use by the core task execution machinery, which is responsible for obtaining collection-driven data. See :ref:`collection-configuration` for details. .. versionadded:: 1.0 """ debug("Loading collection configuration") self._set(_collection=data) if merge: self.merge() def set_project_location(self, path: Union[PathLike, str, None]) -> None: """ Set the directory path where a project-level config file may be found. Does not do any file loading on its own; for that, see `load_project`. .. versionadded:: 1.0 """ # 'Prefix' to match the other sets of attrs project_prefix = None if path is not None: # Ensure the prefix is normalized to a directory-like path string project_prefix = join(path, "") self._set(_project_prefix=project_prefix) # Path to loaded per-project config file, if any. self._set(_project_path=None) # Whether the project config file has been loaded or not (or ``None`` # if no loading has been attempted yet.) self._set(_project_found=None) # Data loaded from the per-project config file. 
self._set(_project={}) def _load_file( self, prefix: str, absolute: bool = False, merge: bool = True ) -> None: # Setup found = "_{}_found".format(prefix) path = "_{}_path".format(prefix) data = "_{}".format(prefix) midfix = self.file_prefix if midfix is None: midfix = self.prefix # Short-circuit if loading appears to have occurred already if getattr(self, found) is not None: return # Moar setup if absolute: absolute_path = getattr(self, path) # None -> expected absolute path but none set, short circuit if absolute_path is None: return paths = [absolute_path] else: path_prefix = getattr(self, "_{}_prefix".format(prefix)) # Short circuit if loading seems unnecessary (eg for project config # files when not running out of a project) if path_prefix is None: return paths = [ ".".join((path_prefix + midfix, x)) for x in self._file_suffixes ] # Poke 'em for filepath in paths: # Normalize filepath = expanduser(filepath) try: try: type_ = splitext(filepath)[1].lstrip(".") loader = getattr(self, "_load_{}".format(type_)) except AttributeError: msg = "Config files of type {!r} (from file {!r}) are not supported! Please use one of: {!r}" # noqa raise UnknownFileType( msg.format(type_, filepath, self._file_suffixes) ) # Store data, the path it was found at, and fact that it was # found self._set(data, loader(filepath)) self._set(path, filepath) self._set(found, True) break # Typically means 'no such file', so just note & skip past. except IOError as e: if e.errno == 2: err = "Didn't see any {}, skipping." debug(err.format(filepath)) else: raise # Still None -> no suffixed paths were found, record this fact if getattr(self, path) is None: self._set(found, False) # Merge loaded data in if any was found elif merge: self.merge() def _load_yaml(self, path: PathLike) -> Any: with open(path) as fd: return yaml.safe_load(fd) _load_yml = _load_yaml def _load_json(self, path: PathLike) -> Any: with open(path) as fd: return json.load(fd) def _load_py(self, path: str) -> Dict[str, Any]: data = {} for key, value in (load_source("mod", path)).items(): # Strip special members, as these are always going to be builtins # and other special things a user will not want in their config. if key.startswith("__"): continue # Raise exceptions on module values; they are unpicklable. # TODO: suck it up and reimplement copy() without pickling? Then # again, a user trying to stuff a module into their config is # probably doing something better done in runtime/library level # code and not in a "config file"...right? if isinstance(value, types.ModuleType): err = "'{}' is a module, which can't be used as a config value. (Are you perhaps giving a tasks file instead of a config file by mistake?)" # noqa raise UnpicklableConfigMember(err.format(key)) data[key] = value return data def merge(self) -> None: """ Merge all config sources, in order. .. 
versionadded:: 1.0 """ debug("Merging config sources in order onto new empty _config...") self._set(_config={}) debug("Defaults: {!r}".format(self._defaults)) merge_dicts(self._config, self._defaults) debug("Collection-driven: {!r}".format(self._collection)) merge_dicts(self._config, self._collection) self._merge_file("system", "System-wide") self._merge_file("user", "Per-user") self._merge_file("project", "Per-project") debug("Environment variable config: {!r}".format(self._env)) merge_dicts(self._config, self._env) self._merge_file("runtime", "Runtime") debug("Overrides: {!r}".format(self._overrides)) merge_dicts(self._config, self._overrides) debug("Modifications: {!r}".format(self._modifications)) merge_dicts(self._config, self._modifications) debug("Deletions: {!r}".format(self._deletions)) obliterate(self._config, self._deletions) def _merge_file(self, name: str, desc: str) -> None: # Setup desc += " config file" # yup found = getattr(self, "_{}_found".format(name)) path = getattr(self, "_{}_path".format(name)) data = getattr(self, "_{}".format(name)) # None -> no loading occurred yet if found is None: debug("{} has not been loaded yet, skipping".format(desc)) # True -> hooray elif found: debug("{} ({}): {!r}".format(desc, path, data)) merge_dicts(self._config, data) # False -> did try, did not succeed else: # TODO: how to preserve what was tried for each case but only for # the negative? Just a branch here based on 'name'? debug("{} not found, skipping".format(desc)) def clone(self, into: Optional[Type["Config"]] = None) -> "Config": """ Return a copy of this configuration object. The new object will be identical in terms of configured sources and any loaded (or user-manipulated) data, but will be a distinct object with as little shared mutable state as possible. Specifically, all `dict` values within the config are recursively recreated, with non-dict leaf values subjected to `copy.copy` (note: *not* `copy.deepcopy`, as this can cause issues with various objects such as compiled regexen or threading locks, often found buried deep within rich aggregates like API or DB clients). The only remaining config values that may end up shared between a config and its clone are thus those 'rich' objects that do not `copy.copy` cleanly, or compound non-dict objects (such as lists or tuples). :param into: A `.Config` subclass that the new clone should be "upgraded" to. Used by client libraries which have their own `.Config` subclasses that e.g. define additional defaults; cloning "into" one of these subclasses ensures that any new keys/subtrees are added gracefully, without overwriting anything that may have been pre-defined. Default: ``None`` (just clone into another regular `.Config`). :returns: A `.Config`, or an instance of the class given to ``into``. .. versionadded:: 1.0 """ # Construct new object klass = self.__class__ if into is None else into # Also allow arbitrary constructor kwargs, for subclasses where passing # (some) data in at init time is desired (vs post-init copying) # TODO: probably want to pivot the whole class this way eventually...? # No longer recall exactly why we went with the 'fresh init + attribute # setting' approach originally...tho there's clearly some impedance # mismatch going on between "I want stuff to happen in my config's # instantiation" and "I want cloning to not trigger certain things like # external data source loading". 
# NOTE: this will include lazy=True, see end of method new = klass(**self._clone_init_kwargs(into=into)) # Copy/merge/etc all 'private' data sources and attributes for name in """ collection system_prefix system_path system_found system user_prefix user_path user_found user project_prefix project_path project_found project env_prefix env runtime_path runtime_found runtime overrides modifications """.split(): name = "_{}".format(name) my_data = getattr(self, name) # Non-dict data gets carried over straight (via a copy()) # NOTE: presumably someone could really screw up and change these # values' types, but at that point it's on them... if not isinstance(my_data, dict): new._set(name, copy.copy(my_data)) # Dict data gets merged (which also involves a copy.copy # eventually) else: merge_dicts(getattr(new, name), my_data) # Do what __init__ would've done if not lazy, i.e. load user/system # conf files. new.load_base_conf_files() # Finally, merge() for reals (_load_base_conf_files doesn't do so # internally, so that data wouldn't otherwise show up.) new.merge() return new def _clone_init_kwargs( self, into: Optional[Type["Config"]] = None ) -> Dict[str, Any]: """ Supply kwargs suitable for initializing a new clone of this object. Note that most of the `.clone` process involves copying data between two instances instead of passing init kwargs; however, sometimes you really do want init kwargs, which is why this method exists. :param into: The value of ``into`` as passed to the calling `.clone`. :returns: A `dict`. """ # NOTE: must pass in defaults fresh or otherwise global_defaults() gets # used instead. Except when 'into' is in play, in which case we truly # want the union of the two. new_defaults = copy_dict(self._defaults) if into is not None: merge_dicts(new_defaults, into.global_defaults()) # The kwargs. return dict( defaults=new_defaults, # TODO: consider making this 'hardcoded' on the calling end (ie # inside clone()) to make sure nobody accidentally nukes it via # subclassing? lazy=True, ) def _modify(self, keypath: Tuple[str, ...], key: str, value: str) -> None: """ Update our user-modifications config level with new data. :param tuple keypath: The key path identifying the sub-dict being updated. May be an empty tuple if the update is occurring at the topmost level. :param str key: The actual key receiving an update. :param value: The value being written. """ # First, ensure we wipe the keypath from _deletions, in case it was # previously deleted. excise(self._deletions, keypath + (key,)) # Now we can add it to the modifications structure. data = self._modifications keypath_list = list(keypath) while keypath_list: subkey = keypath_list.pop(0) # TODO: could use defaultdict here, but...meh? if subkey not in data: # TODO: generify this and the subsequent 3 lines... data[subkey] = {} data = data[subkey] data[key] = value self.merge() def _remove(self, keypath: Tuple[str, ...], key: str) -> None: """ Like `._modify`, but for removal. """ # NOTE: because deletions are processed in merge() last, we do not need # to remove things from _modifications on removal; but we *do* do the # inverse - remove from _deletions on modification. # TODO: may be sane to push this step up to callers? data = self._deletions keypath_list = list(keypath) while keypath_list: subkey = keypath_list.pop(0) if subkey in data: data = data[subkey] # If we encounter None, it means something higher up than our # requested keypath is already marked as deleted; so we don't # have to do anything or go further. 
                if data is None:
                    return
                # Otherwise it's presumably another dict, so keep looping...
            else:
                # Key not found -> nobody's marked anything along this part of
                # the path for deletion, so we'll start building it out.
                data[subkey] = {}
                # Then prep for next iteration
                data = data[subkey]
        # Exited loop -> data must be the leafmost dict, so we can now set our
        # deleted key to None
        data[key] = None
        self.merge()


class AmbiguousMergeError(ValueError):
    pass


def merge_dicts(
    base: Dict[str, Any], updates: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Recursively merge dict ``updates`` into dict ``base`` (mutating ``base``.)

    * Values which are themselves dicts will be recursed into.
    * Values which are a dict in one input and *not* a dict in the other input
      (e.g. if our inputs were ``{'foo': 5}`` and ``{'foo': {'bar': 5}}``) are
      irreconcilable and will generate an exception.
    * Non-dict leaf values are run through `copy.copy` to avoid state bleed.

    .. note::
        This is effectively a lightweight `copy.deepcopy` which offers
        protection from mismatched types (dict vs non-dict) and avoids some
        core deepcopy problems (such as how it explodes on certain object
        types).

    :returns:
        The value of ``base``, which is mostly useful for wrapper functions
        like `copy_dict`.

    .. versionadded:: 1.0
    """
    # TODO: for chrissakes just make it return instead of mutating?
    for key, value in (updates or {}).items():
        # Dict values whose keys also exist in 'base' -> recurse
        # (But only if both types are dicts.)
        if key in base:
            if isinstance(value, dict):
                if isinstance(base[key], dict):
                    merge_dicts(base[key], value)
                else:
                    raise _merge_error(base[key], value)
            else:
                if isinstance(base[key], dict):
                    raise _merge_error(base[key], value)
                # Fileno-bearing objects are probably 'real' files which do
                # not copy well & must be passed by reference. Meh.
                elif hasattr(value, "fileno"):
                    base[key] = value
                else:
                    base[key] = copy.copy(value)
        # New values get set anew
        else:
            # Dict values get reconstructed to avoid being references to the
            # updates dict, which can lead to nasty state-bleed bugs otherwise
            if isinstance(value, dict):
                base[key] = copy_dict(value)
            # Fileno-bearing objects are probably 'real' files which do not
            # copy well & must be passed by reference. Meh.
            elif hasattr(value, "fileno"):
                base[key] = value
            # Non-dict values just get set straight
            else:
                base[key] = copy.copy(value)
    return base


def _merge_error(orig: object, new: object) -> AmbiguousMergeError:
    return AmbiguousMergeError(
        "Can't cleanly merge {} with {}".format(
            _format_mismatch(orig), _format_mismatch(new)
        )
    )


def _format_mismatch(x: object) -> str:
    return "{} ({!r})".format(type(x), x)


def copy_dict(source: Dict[str, Any]) -> Dict[str, Any]:
    """
    Return a fresh copy of ``source`` with as little shared state as possible.

    Uses `merge_dicts` under the hood, with an empty ``base`` dict; see its
    documentation for details on behavior.

    .. versionadded:: 1.0
    """
    return merge_dicts({}, source)


def excise(dict_: Dict[str, Any], keypath: Tuple[str, ...]) -> None:
    """
    Remove key pointed at by ``keypath`` from nested dict ``dict_``, if it
    exists.

    .. versionadded:: 1.0
    """
    data = dict_
    keypath_list = list(keypath)
    leaf_key = keypath_list.pop()
    while keypath_list:
        key = keypath_list.pop(0)
        if key not in data:
            # Not there, nothing to excise
            return
        data = data[key]
    if leaf_key in data:
        del data[leaf_key]


def obliterate(base: Dict[str, Any], deletions: Dict[str, Any]) -> None:
    """
    Remove all (nested) keys mentioned in ``deletions``, from ``base``.

    ..
versionadded:: 1.0 """ for key, value in deletions.items(): if isinstance(value, dict): # NOTE: not testing for whether base[key] exists; if something's # listed in a deletions structure, it must exist in some source # somewhere, and thus also in the cache being obliterated. obliterate(base[key], deletions[key]) else: # implicitly None del base[key] invoke-2.2.0/invoke/context.py000066400000000000000000000616161445356551000163610ustar00rootroot00000000000000import os import re from contextlib import contextmanager from itertools import cycle from os import PathLike from typing import ( TYPE_CHECKING, Any, Generator, Iterator, List, Optional, Union, ) from unittest.mock import Mock from .config import Config, DataProxy from .exceptions import Failure, AuthFailure, ResponseNotAccepted from .runners import Result from .watchers import FailingResponder if TYPE_CHECKING: from invoke.runners import Runner class Context(DataProxy): """ Context-aware API wrapper & state-passing object. `.Context` objects are created during command-line parsing (or, if desired, by hand) and used to share parser and configuration state with executed tasks (see :ref:`why-context`). Specifically, the class offers wrappers for core API calls (such as `.run`) which take into account CLI parser flags, configuration files, and/or changes made at runtime. It also acts as a proxy for its `~.Context.config` attribute - see that attribute's documentation for details. Instances of `.Context` may be shared between tasks when executing sub-tasks - either the same context the caller was given, or an altered copy thereof (or, theoretically, a brand new one). .. versionadded:: 1.0 """ def __init__(self, config: Optional[Config] = None) -> None: """ :param config: `.Config` object to use as the base configuration. Defaults to an anonymous/default `.Config` instance. """ #: The fully merged `.Config` object appropriate for this context. #: #: `.Config` settings (see their documentation for details) may be #: accessed like dictionary keys (``c.config['foo']``) or object #: attributes (``c.config.foo``). #: #: As a convenience shorthand, the `.Context` object proxies to its #: ``config`` attribute in the same way - e.g. ``c['foo']`` or #: ``c.foo`` returns the same value as ``c.config['foo']``. config = config if config is not None else Config() self._set(_config=config) #: A list of commands to run (via "&&") before the main argument to any #: `run` or `sudo` calls. Note that the primary API for manipulating #: this list is `prefix`; see its docs for details. command_prefixes: List[str] = list() self._set(command_prefixes=command_prefixes) #: A list of directories to 'cd' into before running commands with #: `run` or `sudo`; intended for management via `cd`, please see its #: docs for details. command_cwds: List[str] = list() self._set(command_cwds=command_cwds) @property def config(self) -> Config: # Allows Context to expose a .config attribute even though DataProxy # otherwise considers it a config key. return self._config @config.setter def config(self, value: Config) -> None: # NOTE: mostly used by client libraries needing to tweak a Context's # config at execution time; i.e. a Context subclass that bears its own # unique data may want to be stood up when parameterizing/expanding a # call list at start of a session, with the final config filled in at # runtime. self._set(_config=value) def run(self, command: str, **kwargs: Any) -> Optional[Result]: """ Execute a local shell command, honoring config options. 
        Specifically, this method instantiates a `.Runner` subclass (according
        to the ``runner`` config option; default is `.Local`) and calls its
        ``.run`` method with ``command`` and ``kwargs``.

        See `.Runner.run` for details on ``command`` and the available keyword
        arguments.

        .. versionadded:: 1.0
        """
        runner = self.config.runners.local(self)
        return self._run(runner, command, **kwargs)

    # NOTE: broken out of run() to allow for runner class injection in
    # Fabric/etc, which needs to juggle multiple runner class types (local and
    # remote).
    def _run(
        self, runner: "Runner", command: str, **kwargs: Any
    ) -> Optional[Result]:
        command = self._prefix_commands(command)
        return runner.run(command, **kwargs)

    def sudo(self, command: str, **kwargs: Any) -> Optional[Result]:
        """
        Execute a shell command via ``sudo`` with password auto-response.

        **Basics**

        This method is identical to `run` but adds a handful of convenient
        behaviors around invoking the ``sudo`` program. It doesn't do anything
        users could not do themselves by wrapping `run`, but the use case is
        too common to make users reinvent these wheels themselves.

        .. note::
            If you intend to respond to sudo's password prompt by hand, just
            use ``run("sudo command")`` instead! The autoresponding features
            in this method will just get in your way.

        Specifically, `sudo`:

        * Places a `.FailingResponder` into the ``watchers`` kwarg (see
          :doc:`/concepts/watchers`) which:

            * searches for the configured ``sudo`` password prompt;
            * responds with the configured sudo password (``sudo.password``
              from the :doc:`configuration </concepts/configuration>`);
            * can tell when that response causes an authentication failure
              (e.g. if the system requires a password and one was not
              configured), and raises `.AuthFailure` if so.

        * Builds a ``sudo`` command string using the supplied ``command``
          argument, prefixed by various flags (see below);
        * Executes that command via a call to `run`, returning the result.

        **Flags used**

        ``sudo`` flags used under the hood include:

        - ``-S`` to allow auto-responding of password via stdin;
        - ``-p <prompt>`` to explicitly state the prompt to use, so we can be
          sure our auto-responder knows what to look for;
        - ``-u <user>`` if ``user`` is not ``None``, to execute the command as
          a user other than ``root``;
        - When ``-u`` is present, ``-H`` is also added, to ensure the
          subprocess has the requested user's ``$HOME`` set properly.

        **Configuring behavior**

        There are a couple of ways to change how this method behaves:

        - Because it wraps `run`, it honors all `run` config parameters and
          keyword arguments, in the same way that `run` does.

            - Thus, invocations such as ``c.sudo('command', echo=True)`` are
              possible, and if a config layer (such as a config file or env
              var) specifies that e.g. ``run.warn = True``, that too will take
              effect under `sudo`.

        - `sudo` has its own set of keyword arguments (see below) and they are
          also all controllable via the configuration system, under the
          ``sudo.*`` tree.

            - Thus you could, for example, pre-set a sudo user in a config
              file; such as an ``invoke.json`` containing ``{"sudo": {"user":
              "someuser"}}``.

        :param str password: Runtime override for ``sudo.password``.
        :param str user: Runtime override for ``sudo.user``.

        .. versionadded:: 1.0
        """
        runner = self.config.runners.local(self)
        return self._sudo(runner, command, **kwargs)

    # NOTE: this is for runner injection; see NOTE above _run().
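    # A minimal usage sketch for `sudo` (hypothetical values; assumes
    # ``sudo.password`` was supplied via config or kwarg):
    #
    #     c = Context(Config(overrides={"sudo": {"password": "secret"}}))
    #     c.sudo("whoami", user="someuser")
    #     # ...which actually executes:
    #     # sudo -S -p '[sudo] password: ' -H -u someuser whoami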
    def _sudo(
        self, runner: "Runner", command: str, **kwargs: Any
    ) -> Optional[Result]:
        prompt = self.config.sudo.prompt
        password = kwargs.pop("password", self.config.sudo.password)
        user = kwargs.pop("user", self.config.sudo.user)
        env = kwargs.get("env", {})
        # TODO: allow subclassing for 'get the password' so users who REALLY
        # want lazy runtime prompting can have it easily implemented.
        # TODO: want to print a "cleaner" echo with just 'sudo <command>'; but
        # hard to do as-is, obtaining config data from outside a Runner one
        # holds is currently messy (could fix that), if instead we manually
        # inspect the config ourselves that duplicates logic. NOTE: once we
        # figure that out, there is an existing, would-fail-if-not-skipped
        # test for this behavior in test/context.py.
        # TODO: once that is done, though: how to handle "full debug" output
        # exactly (display of actual, real full sudo command w/ -S and -p), in
        # terms of API/config? Impl is easy, just go back to passing echo
        # through to 'run'...
        user_flags = ""
        if user is not None:
            user_flags = "-H -u {} ".format(user)
        env_flags = ""
        if env:
            env_flags = "--preserve-env='{}' ".format(",".join(env.keys()))
        command = self._prefix_commands(command)
        cmd_str = "sudo -S -p '{}' {}{}{}".format(
            prompt, env_flags, user_flags, command
        )
        watcher = FailingResponder(
            pattern=re.escape(prompt),
            response="{}\n".format(password),
            sentinel="Sorry, try again.\n",
        )
        # Ensure we merge any user-specified watchers with our own.
        # NOTE: If there are config-driven watchers, we pull those up to the
        # kwarg level; that lets us merge cleanly without needing complex
        # config-driven "override vs merge" semantics.
        # TODO: if/when those semantics are implemented, use them instead.
        # NOTE: config value for watchers defaults to an empty list; and we
        # want to clone it to avoid actually mutating the config.
        watchers = kwargs.pop("watchers", list(self.config.run.watchers))
        watchers.append(watcher)
        try:
            return runner.run(cmd_str, watchers=watchers, **kwargs)
        except Failure as failure:
            # Transmute failures driven by our FailingResponder, into auth
            # failures - the command never even ran.
            # TODO: wants to be a hook here for users that desire "override a
            # bad config value for sudo.password" manual input
            # NOTE: as noted in #294 comments, we MAY in future want to update
            # this so run() is given ability to raise AuthFailure on its own.
            # For now that has been judged unnecessary complexity.
            if isinstance(failure.reason, ResponseNotAccepted):
                # NOTE: not bothering with 'reason' here, it's pointless.
                error = AuthFailure(result=failure.result, prompt=prompt)
                raise error
            # Reraise for any other error so it bubbles up normally.
            else:
                raise

    # TODO: wonder if it makes sense to move this part of things inside
    # Runner, which would grow a `prefixes` and `cwd` init kwargs or similar.
    # The less that's stuffed into Context, probably the better.
    def _prefix_commands(self, command: str) -> str:
        """
        Prefixes ``command`` with all prefixes found in ``command_prefixes``.

        ``command_prefixes`` is a list of strings which is modified by the
        `prefix` context manager.
        """
        prefixes = list(self.command_prefixes)
        current_directory = self.cwd
        if current_directory:
            prefixes.insert(0, "cd {}".format(current_directory))
        return " && ".join(prefixes + [command])

    @contextmanager
    def prefix(self, command: str) -> Generator[None, None, None]:
        """
        Prefix all nested `run`/`sudo` commands with given command plus
        ``&&``.
        Most of the time, you'll want to be using this alongside a shell
        script which alters shell state, such as ones which export or alter
        shell environment variables.

        For example, one of the most common uses of this tool is with the
        ``workon`` command from `virtualenvwrapper
        <https://virtualenvwrapper.readthedocs.io/en/latest/>`_::

            with c.prefix('workon myvenv'):
                c.run('./manage.py migrate')

        In the above snippet, the actual shell command run would be this::

            $ workon myvenv && ./manage.py migrate

        This context manager is compatible with `cd`, so if your virtualenv
        doesn't ``cd`` in its ``postactivate`` script, you could do the
        following::

            with c.cd('/path/to/app'):
                with c.prefix('workon myvenv'):
                    c.run('./manage.py migrate')
                    c.run('./manage.py loaddata fixture')

        Which would result in executions like so::

            $ cd /path/to/app && workon myvenv && ./manage.py migrate
            $ cd /path/to/app && workon myvenv && ./manage.py loaddata fixture

        Finally, as alluded to above, `prefix` may be nested if desired,
        e.g.::

            with c.prefix('workon myenv'):
                c.run('ls')
                with c.prefix('source /some/script'):
                    c.run('touch a_file')

        The result::

            $ workon myenv && ls
            $ workon myenv && source /some/script && touch a_file

        Contrived, but hopefully illustrative.

        .. versionadded:: 1.0
        """
        self.command_prefixes.append(command)
        try:
            yield
        finally:
            self.command_prefixes.pop()

    @property
    def cwd(self) -> str:
        """
        Return the current working directory, accounting for uses of `cd`.

        .. versionadded:: 1.0
        """
        if not self.command_cwds:
            # TODO: should this be None? Feels cleaner, though there may be
            # benefits to it being an empty string, such as relying on a
            # no-arg `cd` typically being shorthand for "go to user's $HOME".
            return ""
        # get the index for the subset of paths starting with the last / or ~
        for i, path in reversed(list(enumerate(self.command_cwds))):
            if path.startswith("~") or path.startswith("/"):
                break
        # TODO: see if there's a stronger "escape this path" function
        # somewhere we can reuse. e.g., escaping tildes or slashes in
        # filenames.
        paths = [path.replace(" ", r"\ ") for path in self.command_cwds[i:]]
        return str(os.path.join(*paths))

    @contextmanager
    def cd(self, path: Union[PathLike, str]) -> Generator[None, None, None]:
        """
        Context manager that keeps directory state when executing commands.

        Any calls to `run`, `sudo`, within the wrapped block will implicitly
        have a string similar to ``"cd <path> && "`` prefixed in order to give
        the sense that there is actually statefulness involved.

        Because use of `cd` affects all such invocations, any code making use
        of the `cwd` property will also be affected by use of `cd`.

        Like the actual 'cd' shell builtin, `cd` may be called with relative
        paths (keep in mind that your default starting directory is your
        user's ``$HOME``) and may be nested as well.

        Below is a "normal" attempt at using the shell 'cd', which doesn't
        work since all commands are executed in individual subprocesses --
        state is **not** kept between invocations of `run` or `sudo`::

            c.run('cd /var/www')
            c.run('ls')

        The above snippet will list the contents of the user's ``$HOME``
        instead of ``/var/www``. With `cd`, however, it will work as
        expected::

            with c.cd('/var/www'):
                c.run('ls')  # Turns into "cd /var/www && ls"

        Finally, a demonstration (see inline comments) of nesting::

            with c.cd('/var/www'):
                c.run('ls')  # cd /var/www && ls
                with c.cd('website1'):
                    c.run('ls')  # cd /var/www/website1 && ls

        .. note::
            Space characters will be escaped automatically to make dealing
            with such directory names easier.

        .. versionadded:: 1.0
        ..
versionchanged:: 1.5 Explicitly cast the ``path`` argument (the only argument) to a string; this allows any object defining ``__str__`` to be handed in (such as the various ``Path`` objects out there), and not just string literals. """ path = str(path) self.command_cwds.append(path) try: yield finally: self.command_cwds.pop() class MockContext(Context): """ A `.Context` whose methods' return values can be predetermined. Primarily useful for testing Invoke-using codebases. .. note:: This class wraps its ``run``, etc methods in `unittest.mock.Mock` objects. This allows you to easily assert that the methods (still returning the values you prepare them with) were actually called. .. note:: Methods not given `Results <.Result>` to yield will raise ``NotImplementedError`` if called (since the alternative is to call the real underlying method - typically undesirable when mocking.) .. versionadded:: 1.0 .. versionchanged:: 1.5 Added ``Mock`` wrapping of ``run`` and ``sudo``. """ def __init__(self, config: Optional[Config] = None, **kwargs: Any) -> None: """ Create a ``Context``-like object whose methods yield `.Result` objects. :param config: A Configuration object to use. Identical in behavior to `.Context`. :param run: A data structure indicating what `.Result` objects to return from calls to the instantiated object's `~.Context.run` method (instead of actually executing the requested shell command). Specifically, this kwarg accepts: - A single `.Result` object. - A boolean; if True, yields a `.Result` whose ``exited`` is ``0``, and if False, ``1``. - An iterable of the above values, which will be returned on each subsequent call to ``.run`` (the first item on the first call, the second on the second call, etc). - A dict mapping command strings or compiled regexen to the above values (including an iterable), allowing specific call-and-response semantics instead of assuming a call order. :param sudo: Identical to ``run``, but whose values are yielded from calls to `~.Context.sudo`. :param bool repeat: A flag determining whether results yielded by this class' methods repeat or are consumed. For example, when a single result is indicated, it will normally only be returned once, causing ``NotImplementedError`` afterwards. But when ``repeat=True`` is given, that result is returned on every call, forever. Similarly, iterable results are normally exhausted once, but when this setting is enabled, they are wrapped in `itertools.cycle`. Default: ``True``. :raises: ``TypeError``, if the values given to ``run`` or other kwargs aren't of the expected types. .. versionchanged:: 1.5 Added support for boolean and string result values. .. versionchanged:: 1.5 Added support for regex dict keys. .. versionchanged:: 1.5 Added the ``repeat`` keyword argument. .. versionchanged:: 2.0 Changed ``repeat`` default value from ``False`` to ``True``. """ # Set up like any other Context would, with the config super().__init__(config) # Pull out behavioral kwargs self._set("__repeat", kwargs.pop("repeat", True)) # The rest must be things like run/sudo - mock Context method info for method, results in kwargs.items(): # For each possible value type, normalize to iterable of Result # objects (possibly repeating). 
singletons = (Result, bool, str) if isinstance(results, dict): for key, value in results.items(): results[key] = self._normalize(value) elif isinstance(results, singletons) or hasattr( results, "__iter__" ): results = self._normalize(results) # Unknown input value: cry else: err = "Not sure how to yield results from a {!r}" raise TypeError(err.format(type(results))) # Save results for use by the method self._set("__{}".format(method), results) # Wrap the method in a Mock self._set(method, Mock(wraps=getattr(self, method))) def _normalize(self, value: Any) -> Iterator[Any]: # First turn everything into an iterable if not hasattr(value, "__iter__") or isinstance(value, str): value = [value] # Then turn everything within into a Result results = [] for obj in value: if isinstance(obj, bool): obj = Result(exited=0 if obj else 1) elif isinstance(obj, str): obj = Result(obj) results.append(obj) # Finally, turn that iterable into an iteratOR, depending on repeat return cycle(results) if getattr(self, "__repeat") else iter(results) # TODO: _maybe_ make this more metaprogrammy/flexible (using __call__ etc)? # Pretty worried it'd cause more hard-to-debug issues than it's presently # worth. Maybe in situations where Context grows a _lot_ of methods (e.g. # in Fabric 2; though Fabric could do its own sub-subclass in that case...) def _yield_result(self, attname: str, command: str) -> Result: try: obj = getattr(self, attname) # Dicts need to try direct lookup or regex matching if isinstance(obj, dict): try: obj = obj[command] except KeyError: # TODO: could optimize by skipping this if not any regex # objects in keys()? for key, value in obj.items(): if hasattr(key, "match") and key.match(command): obj = value break else: # Nope, nothing did match. raise KeyError # Here, the value was either never a dict or has been extracted # from one, so we can assume it's an iterable of Result objects due # to work done by __init__. result: Result = next(obj) # Populate Result's command string with what matched unless # explicitly given if not result.command: result.command = command return result except (AttributeError, IndexError, KeyError, StopIteration): # raise_from(NotImplementedError(command), None) raise NotImplementedError(command) def run(self, command: str, *args: Any, **kwargs: Any) -> Result: # TODO: perform more convenience stuff associating args/kwargs with the # result? E.g. filling in .command, etc? Possibly useful for debugging # if one hits unexpected-order problems with what they passed in to # __init__. return self._yield_result("__run", command) def sudo(self, command: str, *args: Any, **kwargs: Any) -> Result: # TODO: this completely nukes the top-level behavior of sudo(), which # could be good or bad, depending. Most of the time I think it's good. # No need to supply dummy password config, etc. # TODO: see the TODO from run() re: injecting arg/kwarg values return self._yield_result("__sudo", command) def set_result_for( self, attname: str, command: str, result: Result ) -> None: """ Modify the stored mock results for given ``attname`` (e.g. ``run``). This is similar to how one instantiates `MockContext` with a ``run`` or ``sudo`` dict kwarg. 
For example, this:: mc = MockContext(run={'mycommand': Result("mystdout")}) assert mc.run('mycommand').stdout == "mystdout" is functionally equivalent to this:: mc = MockContext() mc.set_result_for('run', 'mycommand', Result("mystdout")) assert mc.run('mycommand').stdout == "mystdout" `set_result_for` is mostly useful for modifying an already-instantiated `MockContext`, such as one created by test setup or helper methods. .. versionadded:: 1.0 """ attname = "__{}".format(attname) heck = TypeError( "Can't update results for non-dict or nonexistent mock results!" ) # Get value & complain if it's not a dict. # TODO: should we allow this to set non-dict values too? Seems vaguely # pointless, at that point, just make a new MockContext eh? try: value = getattr(self, attname) except AttributeError: raise heck if not isinstance(value, dict): raise heck # OK, we're good to modify, so do so. value[command] = self._normalize(result) invoke-2.2.0/invoke/env.py000066400000000000000000000104521445356551000154550ustar00rootroot00000000000000""" Environment variable configuration loading class. Using a class here doesn't really model anything but makes state passing (in a situation requiring it) more convenient. This module is currently considered private/an implementation detail and should not be included in the Sphinx API documentation. """ import os from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Sequence from .exceptions import UncastableEnvVar, AmbiguousEnvVar from .util import debug if TYPE_CHECKING: from .config import Config class Environment: def __init__(self, config: "Config", prefix: str) -> None: self._config = config self._prefix = prefix self.data: Dict[str, Any] = {} # Accumulator def load(self) -> Dict[str, Any]: """ Return a nested dict containing values from `os.environ`. Specifically, values whose keys map to already-known configuration settings, allowing us to perform basic typecasting. See :ref:`env-vars` for details. """ # Obtain allowed env var -> existing value map env_vars = self._crawl(key_path=[], env_vars={}) m = "Scanning for env vars according to prefix: {!r}, mapping: {!r}" debug(m.format(self._prefix, env_vars)) # Check for actual env var (honoring prefix) and try to set for env_var, key_path in env_vars.items(): real_var = (self._prefix or "") + env_var if real_var in os.environ: self._path_set(key_path, os.environ[real_var]) debug("Obtained env var config: {!r}".format(self.data)) return self.data def _crawl( self, key_path: List[str], env_vars: Mapping[str, Sequence[str]] ) -> Dict[str, Any]: """ Examine config at location ``key_path`` & return potential env vars. Uses ``env_vars`` dict to determine if a conflict exists, and raises an exception if so. This dict is of the following form:: { 'EXPECTED_ENV_VAR_HERE': ['actual', 'nested', 'key_path'], ... } Returns another dictionary of new keypairs as per above. 
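        For example (illustrative values only), crawling a config shaped like
        ``{'run': {'echo': True}}`` would return ``{'RUN_ECHO': ['run',
        'echo']}`` - the configured prefix (e.g. ``INVOKE_``) is only
        prepended later, by `load`.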
""" new_vars: Dict[str, List[str]] = {} obj = self._path_get(key_path) # Sub-dict -> recurse if ( hasattr(obj, "keys") and callable(obj.keys) and hasattr(obj, "__getitem__") ): for key in obj.keys(): merged_vars = dict(env_vars, **new_vars) merged_path = key_path + [key] crawled = self._crawl(merged_path, merged_vars) # Handle conflicts for key in crawled: if key in new_vars: err = "Found >1 source for {}" raise AmbiguousEnvVar(err.format(key)) # Merge and continue new_vars.update(crawled) # Other -> is leaf, no recursion else: new_vars[self._to_env_var(key_path)] = key_path return new_vars def _to_env_var(self, key_path: Iterable[str]) -> str: return "_".join(key_path).upper() def _path_get(self, key_path: Iterable[str]) -> "Config": # Gets are from self._config because that's what determines valid env # vars and/or values for typecasting. obj = self._config for key in key_path: obj = obj[key] return obj def _path_set(self, key_path: Sequence[str], value: str) -> None: # Sets are to self.data since that's what we are presenting to the # outer config object and debugging. obj = self.data for key in key_path[:-1]: if key not in obj: obj[key] = {} obj = obj[key] old = self._path_get(key_path) new = self._cast(old, value) obj[key_path[-1]] = new def _cast(self, old: Any, new: Any) -> Any: if isinstance(old, bool): return new not in ("0", "") elif isinstance(old, str): return new elif old is None: return new elif isinstance(old, (list, tuple)): err = "Can't adapt an environment string into a {}!" err = err.format(type(old)) raise UncastableEnvVar(err) else: return old.__class__(new) invoke-2.2.0/invoke/exceptions.py000066400000000000000000000277031445356551000170550ustar00rootroot00000000000000""" Custom exception classes. These vary in use case from "we needed a specific data structure layout in exceptions used for message-passing" to simply "we needed to express an error condition in a way easily told apart from other, truly unexpected errors". """ from pprint import pformat from traceback import format_exception from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple if TYPE_CHECKING: from .parser import ParserContext from .runners import Result from .util import ExceptionWrapper class CollectionNotFound(Exception): def __init__(self, name: str, start: str) -> None: self.name = name self.start = start class Failure(Exception): """ Exception subclass representing failure of a command execution. "Failure" may mean the command executed and the shell indicated an unusual result (usually, a non-zero exit code), or it may mean something else, like a ``sudo`` command which was aborted when the supplied password failed authentication. Two attributes allow introspection to determine the nature of the problem: * ``result``: a `.Result` instance with info about the command being executed and, if it ran to completion, how it exited. * ``reason``: a wrapped exception instance if applicable (e.g. a `.StreamWatcher` raised `WatcherError`) or ``None`` otherwise, in which case, it's probably a `Failure` subclass indicating its own specific nature, such as `UnexpectedExit` or `CommandTimedOut`. This class is only rarely raised by itself; most of the time `.Runner.run` (or a wrapper of same, such as `.Context.sudo`) will raise a specific subclass like `UnexpectedExit` or `AuthFailure`. .. 
versionadded:: 1.0 """ def __init__( self, result: "Result", reason: Optional["WatcherError"] = None ) -> None: self.result = result self.reason = reason def streams_for_display(self) -> Tuple[str, str]: """ Return stdout/err streams as necessary for error display. Subject to the following rules: - If a given stream was *not* hidden during execution, a placeholder is used instead, to avoid printing it twice. - Only the last 10 lines of stream text is included. - PTY-driven execution will lack stderr, and a specific message to this effect is returned instead of a stderr dump. :returns: Two-tuple of stdout, stderr strings. .. versionadded:: 1.3 """ already_printed = " already printed" if "stdout" not in self.result.hide: stdout = already_printed else: stdout = self.result.tail("stdout") if self.result.pty: stderr = " n/a (PTYs have no stderr)" else: if "stderr" not in self.result.hide: stderr = already_printed else: stderr = self.result.tail("stderr") return stdout, stderr def __repr__(self) -> str: return self._repr() def _repr(self, **kwargs: Any) -> str: """ Return ``__repr__``-like value from inner result + any kwargs. """ # TODO: expand? # TODO: truncate command? template = "<{}: cmd={!r}{}>" rest = "" if kwargs: rest = " " + " ".join( "{}={}".format(key, value) for key, value in kwargs.items() ) return template.format( self.__class__.__name__, self.result.command, rest ) class UnexpectedExit(Failure): """ A shell command ran to completion but exited with an unexpected exit code. Its string representation displays the following: - Command executed; - Exit code; - The last 10 lines of stdout, if it was hidden; - The last 10 lines of stderr, if it was hidden and non-empty (e.g. pty=False; when pty=True, stderr never happens.) .. versionadded:: 1.0 """ def __str__(self) -> str: stdout, stderr = self.streams_for_display() command = self.result.command exited = self.result.exited template = """Encountered a bad command exit code! Command: {!r} Exit code: {} Stdout:{} Stderr:{} """ return template.format(command, exited, stdout, stderr) def _repr(self, **kwargs: Any) -> str: kwargs.setdefault("exited", self.result.exited) return super()._repr(**kwargs) class CommandTimedOut(Failure): """ Raised when a subprocess did not exit within a desired timeframe. """ def __init__(self, result: "Result", timeout: int) -> None: super().__init__(result) self.timeout = timeout def __repr__(self) -> str: return self._repr(timeout=self.timeout) def __str__(self) -> str: stdout, stderr = self.streams_for_display() command = self.result.command template = """Command did not complete within {} seconds! Command: {!r} Stdout:{} Stderr:{} """ return template.format(self.timeout, command, stdout, stderr) class AuthFailure(Failure): """ An authentication failure, e.g. due to an incorrect ``sudo`` password. .. note:: `.Result` objects attached to these exceptions typically lack exit code information, since the command was never fully executed - the exception was raised instead. .. versionadded:: 1.0 """ def __init__(self, result: "Result", prompt: str) -> None: self.result = result self.prompt = prompt def __str__(self) -> str: err = "The password submitted to prompt {!r} was rejected." return err.format(self.prompt) class ParseError(Exception): """ An error arising from the parsing of command-line flags/arguments. Ambiguous input, invalid task names, invalid flags, etc. .. 
versionadded:: 1.0 """ def __init__( self, msg: str, context: Optional["ParserContext"] = None ) -> None: super().__init__(msg) self.context = context class Exit(Exception): """ Simple custom stand-in for SystemExit. Replaces scattered sys.exit calls, improves testability, allows one to catch an exit request without intercepting real SystemExits (typically an unfriendly thing to do, as most users calling `sys.exit` rather expect it to truly exit.) Defaults to a non-printing, exit-0 friendly termination behavior if the exception is uncaught. If ``code`` (an int) given, that code is used to exit. If ``message`` (a string) given, it is printed to standard error, and the program exits with code ``1`` by default (unless overridden by also giving ``code`` explicitly.) .. versionadded:: 1.0 """ def __init__( self, message: Optional[str] = None, code: Optional[int] = None ) -> None: self.message = message self._code = code @property def code(self) -> int: if self._code is not None: return self._code return 1 if self.message else 0 class PlatformError(Exception): """ Raised when an illegal operation occurs for the current platform. E.g. Windows users trying to use functionality requiring the ``pty`` module. Typically used to present a clearer error message to the user. .. versionadded:: 1.0 """ pass class AmbiguousEnvVar(Exception): """ Raised when loading env var config keys has an ambiguous target. .. versionadded:: 1.0 """ pass class UncastableEnvVar(Exception): """ Raised on attempted env var loads whose default values are too rich. E.g. trying to stuff ``MY_VAR="foo"`` into ``{'my_var': ['uh', 'oh']}`` doesn't make any sense until/if we implement some sort of transform option. .. versionadded:: 1.0 """ pass class UnknownFileType(Exception): """ A config file of an unknown type was specified and cannot be loaded. .. versionadded:: 1.0 """ pass class UnpicklableConfigMember(Exception): """ A config file contained module objects, which can't be pickled/copied. We raise this more easily catchable exception instead of letting the (unclearly phrased) TypeError bubble out of the pickle module. (However, to avoid our own fragile catching of that error, we head it off by explicitly testing for module members.) .. versionadded:: 1.0.2 """ pass def _printable_kwargs(kwargs: Any) -> Dict[str, Any]: """ Return print-friendly version of a thread-related ``kwargs`` dict. Extra care is taken with ``args`` members which are very long iterables - those need truncating to be useful. """ printable = {} for key, value in kwargs.items(): item = value if key == "args": item = [] for arg in value: new_arg = arg if hasattr(arg, "__len__") and len(arg) > 10: msg = "<... remainder truncated during error display ...>" new_arg = arg[:10] + [msg] item.append(new_arg) printable[key] = item return printable class ThreadException(Exception): """ One or more exceptions were raised within background threads. The real underlying exceptions are stored in the `exceptions` attribute; see its documentation for data structure details. .. note:: Threads which did not encounter an exception, do not contribute to this exception object and thus are not present inside `exceptions`. .. versionadded:: 1.0 """ #: A tuple of `ExceptionWrappers ` containing #: the initial thread constructor kwargs (because `threading.Thread` #: subclasses should always be called with kwargs) and the caught exception #: for that thread as seen by `sys.exc_info` (so: type, value, traceback). #: #: .. note:: #: The ordering of this attribute is not well-defined. 
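    #:
    #: For example, a single member is shaped roughly like (values
    #: illustrative) ``ExceptionWrapper(kwargs={...}, type=ValueError,
    #: value=ValueError('boom'), traceback=<traceback object>)``.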
#: #: .. note:: #: Thread kwargs which appear to be very long (e.g. IO #: buffers) will be truncated when printed, to avoid huge #: unreadable error display. exceptions: Tuple["ExceptionWrapper", ...] = tuple() def __init__(self, exceptions: List["ExceptionWrapper"]) -> None: self.exceptions = tuple(exceptions) def __str__(self) -> str: details = [] for x in self.exceptions: # Build useful display detail = "Thread args: {}\n\n{}" details.append( detail.format( pformat(_printable_kwargs(x.kwargs)), "\n".join(format_exception(x.type, x.value, x.traceback)), ) ) args = ( len(self.exceptions), ", ".join(x.type.__name__ for x in self.exceptions), "\n\n".join(details), ) return """ Saw {} exceptions within threads ({}): {} """.format( *args ) class WatcherError(Exception): """ Generic parent exception class for `.StreamWatcher`-related errors. Typically, one of these exceptions indicates a `.StreamWatcher` noticed something anomalous in an output stream, such as an authentication response failure. `.Runner` catches these and attaches them to `.Failure` exceptions so they can be referenced by intermediate code and/or act as extra info for end users. .. versionadded:: 1.0 """ pass class ResponseNotAccepted(WatcherError): """ A responder/watcher class noticed a 'bad' response to its submission. Mostly used by `.FailingResponder` and subclasses, e.g. "oh dear I autosubmitted a sudo password and it was incorrect." .. versionadded:: 1.0 """ pass class SubprocessPipeError(Exception): """ Some problem was encountered handling subprocess pipes (stdout/err/in). Typically only for corner cases; most of the time, errors in this area are raised by the interpreter or the operating system, and end up wrapped in a `.ThreadException`. .. versionadded:: 1.3 """ pass invoke-2.2.0/invoke/executor.py000066400000000000000000000212271445356551000165250ustar00rootroot00000000000000from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from .config import Config from .parser import ParserContext from .util import debug from .tasks import Call, Task if TYPE_CHECKING: from .collection import Collection from .runners import Result from .parser import ParseResult class Executor: """ An execution strategy for Task objects. Subclasses may override various extension points to change, add or remove behavior. .. versionadded:: 1.0 """ def __init__( self, collection: "Collection", config: Optional["Config"] = None, core: Optional["ParseResult"] = None, ) -> None: """ Initialize executor with handles to necessary data structures. :param collection: A `.Collection` used to look up requested tasks (and their default config data, if any) by name during execution. :param config: An optional `.Config` holding configuration state. Defaults to an empty `.Config` if not given. :param core: An optional `.ParseResult` holding parsed core program arguments. Defaults to ``None``. """ self.collection = collection self.config = config if config is not None else Config() self.core = core def execute( self, *tasks: Union[str, Tuple[str, Dict[str, Any]], ParserContext] ) -> Dict["Task", "Result"]: """ Execute one or more ``tasks`` in sequence. :param tasks: An all-purpose iterable of "tasks to execute", each member of which may take one of the following forms: **A string** naming a task from the Executor's `.Collection`. This name may contain dotted syntax appropriate for calling namespaced tasks, e.g. ``subcollection.taskname``. Such tasks are executed without arguments. 
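
            (For instance, ``executor.execute('build', 'docs.build')`` would
            run those two tasks by name, argument-free.)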
**A two-tuple** whose first element is a task name string (as above) and whose second element is a dict suitable for use as ``**kwargs`` when calling the named task. E.g.:: [ ('task1', {}), ('task2', {'arg1': 'val1'}), ... ] is equivalent, roughly, to:: task1() task2(arg1='val1') **A `.ParserContext`** instance, whose ``.name`` attribute is used as the task name and whose ``.as_kwargs`` attribute is used as the task kwargs (again following the above specifications). .. note:: When called without any arguments at all (i.e. when ``*tasks`` is empty), the default task from ``self.collection`` is used instead, if defined. :returns: A dict mapping task objects to their return values. This dict may include pre- and post-tasks if any were executed. For example, in a collection with a ``build`` task depending on another task named ``setup``, executing ``build`` will result in a dict with two keys, one for ``build`` and one for ``setup``. .. versionadded:: 1.0 """ # Normalize input debug("Examining top level tasks {!r}".format([x for x in tasks])) calls = self.normalize(tasks) debug("Tasks (now Calls) with kwargs: {!r}".format(calls)) # Obtain copy of directly-given tasks since they should sometimes # behave differently direct = list(calls) # Expand pre/post tasks # TODO: may make sense to bundle expansion & deduping now eh? expanded = self.expand_calls(calls) # Get some good value for dedupe option, even if config doesn't have # the tree we expect. (This is a concession to testing.) try: dedupe = self.config.tasks.dedupe except AttributeError: dedupe = True # Dedupe across entire run now that we know about all calls in order calls = self.dedupe(expanded) if dedupe else expanded # Execute results = {} # TODO: maybe clone initial config here? Probably not necessary, # especially given Executor is not designed to execute() >1 time at the # moment... for call in calls: autoprint = call in direct and call.autoprint debug("Executing {!r}".format(call)) # Hand in reference to our config, which will preserve user # modifications across the lifetime of the session. config = self.config # But make sure we reset its task-sensitive levels each time # (collection & shell env) # TODO: load_collection needs to be skipped if task is anonymous # (Fabric 2 or other subclassing libs only) collection_config = self.collection.configuration(call.called_as) config.load_collection(collection_config) config.load_shell_env() debug("Finished loading collection & shell env configs") # Get final context from the Call (which will know how to generate # an appropriate one; e.g. subclasses might use extra data from # being parameterized), handing in this config for use there. context = call.make_context(config) args = (context, *call.args) result = call.task(*args, **call.kwargs) if autoprint: print(result) # TODO: handle the non-dedupe case / the same-task-different-args # case, wherein one task obj maps to >1 result. results[call.task] = result return results def normalize( self, tasks: Tuple[ Union[str, Tuple[str, Dict[str, Any]], ParserContext], ... ], ) -> List["Call"]: """ Transform arbitrary task list w/ various types, into `.Call` objects. See docstring for `~.Executor.execute` for details. .. 
versionadded:: 1.0 """ calls = [] for task in tasks: name: Optional[str] if isinstance(task, str): name = task kwargs = {} elif isinstance(task, ParserContext): name = task.name kwargs = task.as_kwargs else: name, kwargs = task c = Call(self.collection[name], kwargs=kwargs, called_as=name) calls.append(c) if not tasks and self.collection.default is not None: calls = [Call(self.collection[self.collection.default])] return calls def dedupe(self, calls: List["Call"]) -> List["Call"]: """ Deduplicate a list of `tasks <.Call>`. :param calls: An iterable of `.Call` objects representing tasks. :returns: A list of `.Call` objects. .. versionadded:: 1.0 """ deduped = [] debug("Deduplicating tasks...") for call in calls: if call not in deduped: debug("{!r}: no duplicates found, ok".format(call)) deduped.append(call) else: debug("{!r}: found in list already, skipping".format(call)) return deduped def expand_calls(self, calls: List["Call"]) -> List["Call"]: """ Expand a list of `.Call` objects into a near-final list of same. The default implementation of this method simply adds a task's pre/post-task list before/after the task itself, as necessary. Subclasses may wish to do other things in addition (or instead of) the above, such as multiplying the `calls <.Call>` by argument vectors or similar. .. versionadded:: 1.0 """ ret = [] for call in calls: # Normalize to Call (this method is sometimes called with pre/post # task lists, which may contain 'raw' Task objects) if isinstance(call, Task): call = Call(call) debug("Expanding task-call {!r}".format(call)) # TODO: this is where we _used_ to call Executor.config_for(call, # config)... # TODO: now we may need to preserve more info like where the call # came from, etc, but I feel like that shit should go _on the call # itself_ right??? # TODO: we _probably_ don't even want the config in here anymore, # we want this to _just_ be about the recursion across pre/post # tasks or parameterization...? ret.extend(self.expand_calls(call.pre)) ret.append(call) ret.extend(self.expand_calls(call.post)) return ret invoke-2.2.0/invoke/loader.py000066400000000000000000000135651445356551000161430ustar00rootroot00000000000000import os import sys from importlib.machinery import ModuleSpec from importlib.util import module_from_spec, spec_from_file_location from pathlib import Path from types import ModuleType from typing import Any, Optional, Tuple from . import Config from .exceptions import CollectionNotFound from .util import debug class Loader: """ Abstract class defining how to find/import a session's base `.Collection`. .. versionadded:: 1.0 """ def __init__(self, config: Optional["Config"] = None) -> None: """ Set up a new loader with some `.Config`. :param config: An explicit `.Config` to use; it is referenced for loading-related config options. Defaults to an anonymous ``Config()`` if none is given. """ if config is None: config = Config() self.config = config def find(self, name: str) -> Optional[ModuleSpec]: """ Implementation-specific finder method seeking collection ``name``. Must return a ModuleSpec valid for use by `importlib`, which is typically a name string followed by the contents of the 3-tuple returned by `importlib.module_from_spec` (``name``, ``loader``, ``origin``.) For a sample implementation, see `.FilesystemLoader`. .. versionadded:: 1.0 """ raise NotImplementedError def load(self, name: Optional[str] = None) -> Tuple[ModuleType, str]: """ Load and return collection module identified by ``name``. 
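
        (With the default `.FilesystemLoader`, for example,
        ``loader.load('tasks')`` imports a nearby ``tasks.py`` or
        ``tasks/__init__.py`` and returns it alongside the directory it was
        found in.)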
This method requires a working implementation of `.find` in order to function. In addition to importing the named module, it will add the module's parent directory to the front of `sys.path` to provide normal Python import behavior (i.e. so the loaded module may load local-to-it modules or packages.) :returns: Two-tuple of ``(module, directory)`` where ``module`` is the collection-containing Python module object, and ``directory`` is the string path to the directory the module was found in. .. versionadded:: 1.0 """ if name is None: name = self.config.tasks.collection_name spec = self.find(name) if spec and spec.loader and spec.origin: # Typically either tasks.py or tasks/__init__.py source_file = Path(spec.origin) # Will be 'the dir tasks.py is in', or 'tasks/', in both cases this # is what wants to be in sys.path for "from . import sibling" enclosing_dir = source_file.parent # Will be "the directory above the spot that 'import tasks' found", # namely the parent of "your task tree", i.e. "where project level # config files are looked for". So, same as enclosing_dir for # tasks.py, but one more level up for tasks/__init__.py... module_parent = enclosing_dir if spec.parent: # it's a package, so we have to go up again module_parent = module_parent.parent # Get the enclosing dir on the path enclosing_str = str(enclosing_dir) if enclosing_str not in sys.path: sys.path.insert(0, enclosing_str) # Actual import module = module_from_spec(spec) sys.modules[spec.name] = module # so 'from . import xxx' works spec.loader.exec_module(module) # Return the module and the folder it was found in return module, str(module_parent) msg = "ImportError loading {!r}, raising ImportError" debug(msg.format(name)) raise ImportError class FilesystemLoader(Loader): """ Loads Python files from the filesystem (e.g. ``tasks.py``.) Searches recursively towards filesystem root from a given start point. .. 
versionadded:: 1.0 """ # TODO: could introduce config obj here for transmission to Collection # TODO: otherwise Loader has to know about specific bits to transmit, such # as auto-dashes, and has to grow one of those for every bit Collection # ever needs to know def __init__(self, start: Optional[str] = None, **kwargs: Any) -> None: super().__init__(**kwargs) if start is None: start = self.config.tasks.search_root self._start = start @property def start(self) -> str: # Lazily determine default CWD if configured value is falsey return self._start or os.getcwd() def find(self, name: str) -> Optional[ModuleSpec]: debug("FilesystemLoader find starting at {!r}".format(self.start)) spec = None module = "{}.py".format(name) paths = self.start.split(os.sep) try: # walk the path upwards to check for dynamic import for x in reversed(range(len(paths) + 1)): path = os.sep.join(paths[0:x]) if module in os.listdir(path): spec = spec_from_file_location( name, os.path.join(path, module) ) break elif name in os.listdir(path) and os.path.exists( os.path.join(path, name, "__init__.py") ): basepath = os.path.join(path, name) spec = spec_from_file_location( name, os.path.join(basepath, "__init__.py"), submodule_search_locations=[basepath], ) break if spec: debug("Found module: {!r}".format(spec)) return spec except (FileNotFoundError, ModuleNotFoundError): msg = "ImportError loading {!r}, raising CollectionNotFound" debug(msg.format(name)) raise CollectionNotFound(name=name, start=self.start) return None invoke-2.2.0/invoke/main.py000066400000000000000000000003531445356551000156100ustar00rootroot00000000000000""" Invoke's own 'binary' entrypoint. Dogfoods the `program` module. """ from . import __version__, Program program = Program( name="Invoke", binary="inv[oke]", binary_names=["invoke", "inv"], version=__version__, ) invoke-2.2.0/invoke/parser/000077500000000000000000000000001445356551000156055ustar00rootroot00000000000000invoke-2.2.0/invoke/parser/__init__.py000066400000000000000000000002651445356551000177210ustar00rootroot00000000000000# flake8: noqa from .parser import * from .context import ParserContext from .context import ParserContext as Context, to_flag, translate_underscores from .argument import Argument invoke-2.2.0/invoke/parser/argument.py000066400000000000000000000136351445356551000200110ustar00rootroot00000000000000from typing import Any, Iterable, Optional, Tuple # TODO: dynamic type for kind # T = TypeVar('T') class Argument: """ A command-line argument/flag. :param name: Syntactic sugar for ``names=[]``. Giving both ``name`` and ``names`` is invalid. :param names: List of valid identifiers for this argument. For example, a "help" argument may be defined with a name list of ``['-h', '--help']``. :param kind: Type factory & parser hint. E.g. ``int`` will turn the default text value parsed, into a Python integer; and ``bool`` will tell the parser not to expect an actual value but to treat the argument as a toggle/flag. :param default: Default value made available to the parser if no value is given on the command line. :param help: Help text, intended for use with ``--help``. :param positional: Whether or not this argument's value may be given positionally. When ``False`` (default) arguments must be explicitly named. :param optional: Whether or not this (non-``bool``) argument requires a value. :param incrementable: Whether or not this (``int``) argument is to be incremented instead of overwritten/assigned to. 
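        (A typical use is a verbosity counter: an ``int`` argument with
        ``default=0`` whose value grows by one for each ``-v`` seen.)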
:param attr_name: A Python identifier/attribute friendly name, typically filled in with the underscored version when ``name``/``names`` contain dashes. .. versionadded:: 1.0 """ def __init__( self, name: Optional[str] = None, names: Iterable[str] = (), kind: Any = str, default: Optional[Any] = None, help: Optional[str] = None, positional: bool = False, optional: bool = False, incrementable: bool = False, attr_name: Optional[str] = None, ) -> None: if name and names: raise TypeError( "Cannot give both 'name' and 'names' arguments! Pick one." ) if not (name or names): raise TypeError("An Argument must have at least one name.") if names: self.names = tuple(names) elif name and not names: self.names = (name,) self.kind = kind initial_value: Optional[Any] = None # Special case: list-type args start out as empty list, not None. if kind is list: initial_value = [] # Another: incrementable args start out as their default value. if incrementable: initial_value = default self.raw_value = self._value = initial_value self.default = default self.help = help self.positional = positional self.optional = optional self.incrementable = incrementable self.attr_name = attr_name def __repr__(self) -> str: nicks = "" if self.nicknames: nicks = " ({})".format(", ".join(self.nicknames)) flags = "" if self.positional or self.optional: flags = " " if self.positional: flags += "*" if self.optional: flags += "?" # TODO: store this default value somewhere other than signature of # Argument.__init__? kind = "" if self.kind != str: kind = " [{}]".format(self.kind.__name__) return "<{}: {}{}{}{}>".format( self.__class__.__name__, self.name, nicks, kind, flags ) @property def name(self) -> Optional[str]: """ The canonical attribute-friendly name for this argument. Will be ``attr_name`` (if given to constructor) or the first name in ``names`` otherwise. .. versionadded:: 1.0 """ return self.attr_name or self.names[0] @property def nicknames(self) -> Tuple[str, ...]: return self.names[1:] @property def takes_value(self) -> bool: if self.kind is bool: return False if self.incrementable: return False return True @property def value(self) -> Any: # TODO: should probably be optional instead return self._value if self._value is not None else self.default @value.setter def value(self, arg: str) -> None: self.set_value(arg, cast=True) def set_value(self, value: Any, cast: bool = True) -> None: """ Actual explicit value-setting API call. Sets ``self.raw_value`` to ``value`` directly. Sets ``self.value`` to ``self.kind(value)``, unless: - ``cast=False``, in which case the raw value is also used. - ``self.kind==list``, in which case the value is appended to ``self.value`` instead of cast & overwritten. - ``self.incrementable==True``, in which case the value is ignored and the current (assumed int) value is simply incremented. .. versionadded:: 1.0 """ self.raw_value = value # Default to do-nothing/identity function func = lambda x: x # If cast, set to self.kind, which should be str/int/etc if cast: func = self.kind # If self.kind is a list, append instead of using cast func. if self.kind is list: func = lambda x: self.value + [x] # If incrementable, just increment. if self.incrementable: # TODO: explode nicely if self.value was not an int to start # with func = lambda x: self.value + 1 self._value = func(value) @property def got_value(self) -> bool: """ Returns whether the argument was ever given a (non-default) value. 
For most argument kinds, this simply checks whether the internally stored value is non-``None``; for others, such as ``list`` kinds, different checks may be used. .. versionadded:: 1.3 """ if self.kind is list: return bool(self._value) return self._value is not None invoke-2.2.0/invoke/parser/context.py000066400000000000000000000231271445356551000176500ustar00rootroot00000000000000import itertools from typing import Any, Dict, List, Iterable, Optional, Tuple, Union try: from ..vendor.lexicon import Lexicon except ImportError: from lexicon import Lexicon # type: ignore[no-redef] from .argument import Argument def translate_underscores(name: str) -> str: return name.lstrip("_").rstrip("_").replace("_", "-") def to_flag(name: str) -> str: name = translate_underscores(name) if len(name) == 1: return "-" + name return "--" + name def sort_candidate(arg: Argument) -> str: names = arg.names # TODO: is there no "split into two buckets on predicate" builtin? shorts = {x for x in names if len(x.strip("-")) == 1} longs = {x for x in names if x not in shorts} return str(sorted(shorts if shorts else longs)[0]) def flag_key(arg: Argument) -> List[Union[int, str]]: """ Obtain useful key list-of-ints for sorting CLI flags. .. versionadded:: 1.0 """ # Setup ret: List[Union[int, str]] = [] x = sort_candidate(arg) # Long-style flags win over short-style ones, so the first item of # comparison is simply whether the flag is a single character long (with # non-length-1 flags coming "first" [lower number]) ret.append(1 if len(x) == 1 else 0) # Next item of comparison is simply the strings themselves, # case-insensitive. They will compare alphabetically if compared at this # stage. ret.append(x.lower()) # Finally, if the case-insensitive test also matched, compare # case-sensitive, but inverse (with lowercase letters coming first) inversed = "" for char in x: inversed += char.lower() if char.isupper() else char.upper() ret.append(inversed) return ret # Named slightly more verbose so Sphinx references can be unambiguous. # Got real sick of fully qualified paths. class ParserContext: """ Parsing context with knowledge of flags & their format. Generally associated with the core program or a task. When run through a parser, will also hold runtime values filled in by the parser. .. versionadded:: 1.0 """ def __init__( self, name: Optional[str] = None, aliases: Iterable[str] = (), args: Iterable[Argument] = (), ) -> None: """ Create a new ``ParserContext`` named ``name``, with ``aliases``. ``name`` is optional, and should be a string if given. It's used to tell ParserContext objects apart, and for use in a Parser when determining what chunk of input might belong to a given ParserContext. ``aliases`` is also optional and should be an iterable containing strings. Parsing will honor any aliases when trying to "find" a given context in its input. May give one or more ``args``, which is a quick alternative to calling ``for arg in args: self.add_arg(arg)`` after initialization. 
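
        A minimal sketch (names illustrative only)::

            ctx = ParserContext(name='mytask', aliases=('mt',), args=(
                Argument(names=('force', 'f'), kind=bool, default=False),
            ))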
""" self.args = Lexicon() self.positional_args: List[Argument] = [] self.flags = Lexicon() self.inverse_flags: Dict[str, str] = {} # No need for Lexicon here self.name = name self.aliases = aliases for arg in args: self.add_arg(arg) def __repr__(self) -> str: aliases = "" if self.aliases: aliases = " ({})".format(", ".join(self.aliases)) name = (" {!r}{}".format(self.name, aliases)) if self.name else "" args = (": {!r}".format(self.args)) if self.args else "" return "".format(name, args) def add_arg(self, *args: Any, **kwargs: Any) -> None: """ Adds given ``Argument`` (or constructor args for one) to this context. The Argument in question is added to the following dict attributes: * ``args``: "normal" access, i.e. the given names are directly exposed as keys. * ``flags``: "flaglike" access, i.e. the given names are translated into CLI flags, e.g. ``"foo"`` is accessible via ``flags['--foo']``. * ``inverse_flags``: similar to ``flags`` but containing only the "inverse" versions of boolean flags which default to True. This allows the parser to track e.g. ``--no-myflag`` and turn it into a False value for the ``myflag`` Argument. .. versionadded:: 1.0 """ # Normalize if len(args) == 1 and isinstance(args[0], Argument): arg = args[0] else: arg = Argument(*args, **kwargs) # Uniqueness constraint: no name collisions for name in arg.names: if name in self.args: msg = "Tried to add an argument named {!r} but one already exists!" # noqa raise ValueError(msg.format(name)) # First name used as "main" name for purposes of aliasing main = arg.names[0] # NOT arg.name self.args[main] = arg # Note positionals in distinct, ordered list attribute if arg.positional: self.positional_args.append(arg) # Add names & nicknames to flags, args self.flags[to_flag(main)] = arg for name in arg.nicknames: self.args.alias(name, to=main) self.flags.alias(to_flag(name), to=to_flag(main)) # Add attr_name to args, but not flags if arg.attr_name: self.args.alias(arg.attr_name, to=main) # Add to inverse_flags if required if arg.kind == bool and arg.default is True: # Invert the 'main' flag name here, which will be a dashed version # of the primary argument name if underscore-to-dash transformation # occurred. inverse_name = to_flag("no-{}".format(main)) self.inverse_flags[inverse_name] = to_flag(main) @property def missing_positional_args(self) -> List[Argument]: return [x for x in self.positional_args if x.value is None] @property def as_kwargs(self) -> Dict[str, Any]: """ This context's arguments' values keyed by their ``.name`` attribute. Results in a dict suitable for use in Python contexts, where e.g. an arg named ``foo-bar`` becomes accessible as ``foo_bar``. .. versionadded:: 1.0 """ ret = {} for arg in self.args.values(): ret[arg.name] = arg.value return ret def names_for(self, flag: str) -> List[str]: # TODO: should probably be a method on Lexicon/AliasDict return list(set([flag] + self.flags.aliases_of(flag))) def help_for(self, flag: str) -> Tuple[str, str]: """ Return 2-tuple of ``(flag-spec, help-string)`` for given ``flag``. .. versionadded:: 1.0 """ # Obtain arg obj if flag not in self.flags: err = "{!r} is not a valid flag for this context! 
Valid flags are: {!r}" # noqa raise ValueError(err.format(flag, self.flags.keys())) arg = self.flags[flag] # Determine expected value type, if any value = {str: "STRING", int: "INT"}.get(arg.kind) # Format & go full_names = [] for name in self.names_for(flag): if value: # Short flags are -f VAL, long are --foo=VAL # When optional, also, -f [VAL] and --foo[=VAL] if len(name.strip("-")) == 1: value_ = ("[{}]".format(value)) if arg.optional else value valuestr = " {}".format(value_) else: valuestr = "={}".format(value) if arg.optional: valuestr = "[{}]".format(valuestr) else: # no value => boolean # check for inverse if name in self.inverse_flags.values(): name = "--[no-]{}".format(name[2:]) valuestr = "" # Tack together full_names.append(name + valuestr) namestr = ", ".join(sorted(full_names, key=len)) helpstr = arg.help or "" return namestr, helpstr def help_tuples(self) -> List[Tuple[str, Optional[str]]]: """ Return sorted iterable of help tuples for all member Arguments. Sorts like so: * General sort is alphanumerically * Short flags win over long flags * Arguments with *only* long flags and *no* short flags will come first. * When an Argument has multiple long or short flags, it will sort using the most favorable (lowest alphabetically) candidate. This will result in a help list like so:: --alpha, --zeta # 'alpha' wins --beta -a, --query # short flag wins -b, --argh -c .. versionadded:: 1.0 """ # TODO: argument/flag API must change :( # having to call to_flag on 1st name of an Argument is just dumb. # To pass in an Argument object to help_for may require moderate # changes? return list( map( lambda x: self.help_for(to_flag(x.name)), sorted(self.flags.values(), key=flag_key), ) ) def flag_names(self) -> Tuple[str, ...]: """ Similar to `help_tuples` but returns flag names only, no helpstrs. Specifically, all flag names, flattened, in rough order. .. versionadded:: 1.0 """ # Regular flag names flags = sorted(self.flags.values(), key=flag_key) names = [self.names_for(to_flag(x.name)) for x in flags] # Inverse flag names sold separately names.append(list(self.inverse_flags.keys())) return tuple(itertools.chain.from_iterable(names)) invoke-2.2.0/invoke/parser/parser.py000066400000000000000000000465411445356551000174650ustar00rootroot00000000000000import copy from typing import TYPE_CHECKING, Any, Iterable, List, Optional try: from ..vendor.lexicon import Lexicon from ..vendor.fluidity import StateMachine, state, transition except ImportError: from lexicon import Lexicon # type: ignore[no-redef] from fluidity import ( # type: ignore[no-redef] StateMachine, state, transition, ) from ..exceptions import ParseError from ..util import debug if TYPE_CHECKING: from .context import ParserContext def is_flag(value: str) -> bool: return value.startswith("-") def is_long_flag(value: str) -> bool: return value.startswith("--") class ParseResult(List["ParserContext"]): """ List-like object with some extra parse-related attributes. Specifically, a ``.remainder`` attribute, which is the string found after a ``--`` in any parsed argv list; and an ``.unparsed`` attribute, a list of tokens that were unable to be parsed. .. versionadded:: 1.0 """ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.remainder = "" self.unparsed: List[str] = [] class Parser: """ Create parser conscious of ``contexts`` and optional ``initial`` context. ``contexts`` should be an iterable of ``Context`` instances which will be searched when new context names are encountered during a parse. 
These Contexts determine what flags may follow them, as well as whether given flags take values. ``initial`` is optional and will be used to determine validity of "core" options/flags at the start of the parse run, if any are encountered. ``ignore_unknown`` determines what to do when contexts are found which do not map to any members of ``contexts``. By default it is ``False``, meaning any unknown contexts result in a parse error exception. If ``True``, encountering an unknown context halts parsing and populates the return value's ``.unparsed`` attribute with the remaining parse tokens. .. versionadded:: 1.0 """ def __init__( self, contexts: Iterable["ParserContext"] = (), initial: Optional["ParserContext"] = None, ignore_unknown: bool = False, ) -> None: self.initial = initial self.contexts = Lexicon() self.ignore_unknown = ignore_unknown for context in contexts: debug("Adding {}".format(context)) if not context.name: raise ValueError("Non-initial contexts must have names.") exists = "A context named/aliased {!r} is already in this parser!" if context.name in self.contexts: raise ValueError(exists.format(context.name)) self.contexts[context.name] = context for alias in context.aliases: if alias in self.contexts: raise ValueError(exists.format(alias)) self.contexts.alias(alias, to=context.name) def parse_argv(self, argv: List[str]) -> ParseResult: """ Parse an argv-style token list ``argv``. Returns a list (actually a subclass, `.ParseResult`) of `.ParserContext` objects matching the order they were found in the ``argv`` and containing `.Argument` objects with updated values based on any flags given. Assumes any program name has already been stripped out. Good:: Parser(...).parse_argv(['--core-opt', 'task', '--task-opt']) Bad:: Parser(...).parse_argv(['invoke', '--core-opt', ...]) :param argv: List of argument string tokens. :returns: A `.ParseResult` (a ``list`` subclass containing some number of `.ParserContext` objects). .. versionadded:: 1.0 """ machine = ParseMachine( # FIXME: initial should not be none initial=self.initial, # type: ignore[arg-type] contexts=self.contexts, ignore_unknown=self.ignore_unknown, ) # FIXME: Why isn't there str.partition for lists? There must be a # better way to do this. Split argv around the double-dash remainder # sentinel. debug("Starting argv: {!r}".format(argv)) try: ddash = argv.index("--") except ValueError: ddash = len(argv) # No remainder == body gets all body = argv[:ddash] remainder = argv[ddash:][1:] # [1:] to strip off remainder itself if remainder: debug( "Remainder: argv[{!r}:][1:] => {!r}".format(ddash, remainder) ) for index, token in enumerate(body): # Handle non-space-delimited forms, if not currently expecting a # flag value and still in valid parsing territory (i.e. not in # "unknown" state which implies store-only) # NOTE: we do this in a few steps so we can # split-then-check-validity; necessary for things like when the # previously seen flag optionally takes a value. mutations = [] orig = token if is_flag(token) and not machine.result.unparsed: # Equals-sign-delimited flags, eg --foo=bar or -f=bar if "=" in token: token, _, value = token.partition("=") msg = "Splitting x=y expr {!r} into tokens {!r} and {!r}" debug(msg.format(orig, token, value)) mutations.append((index + 1, value)) # Contiguous boolean short flags, e.g. 
-qv elif not is_long_flag(token) and len(token) > 2: full_token = token[:] rest, token = token[2:], token[:2] err = "Splitting {!r} into token {!r} and rest {!r}" debug(err.format(full_token, token, rest)) # Handle boolean flag block vs short-flag + value. Make # sure not to test the token as a context flag if we've # passed into 'storing unknown stuff' territory (e.g. on a # core-args pass, handling what are going to be task args) have_flag = ( token in machine.context.flags and machine.current_state != "unknown" ) if have_flag and machine.context.flags[token].takes_value: msg = "{!r} is a flag for current context & it takes a value, giving it {!r}" # noqa debug(msg.format(token, rest)) mutations.append((index + 1, rest)) else: _rest = ["-{}".format(x) for x in rest] msg = "Splitting multi-flag glob {!r} into {!r} and {!r}" # noqa debug(msg.format(orig, token, _rest)) for item in reversed(_rest): mutations.append((index + 1, item)) # Here, we've got some possible mutations queued up, and 'token' # may have been overwritten as well. Whether we apply those and # continue as-is, or roll it back, depends: # - If the parser wasn't waiting for a flag value, we're already on # the right track, so apply mutations and move along to the # handle() step. # - If we ARE waiting for a value, and the flag expecting it ALWAYS # wants a value (it's not optional), we go back to using the # original token. (TODO: could reorganize this to avoid the # sub-parsing in this case, but optimizing for human-facing # execution isn't critical.) # - Finally, if we are waiting for a value AND it's optional, we # inspect the first sub-token/mutation to see if it would otherwise # have been a valid flag, and let that determine what we do (if # valid, we apply the mutations; if invalid, we reinstate the # original token.) if machine.waiting_for_flag_value: optional = machine.flag and machine.flag.optional subtoken_is_valid_flag = token in machine.context.flags if not (optional and subtoken_is_valid_flag): token = orig mutations = [] for index, value in mutations: body.insert(index, value) machine.handle(token) machine.finish() result = machine.result result.remainder = " ".join(remainder) return result class ParseMachine(StateMachine): initial_state = "context" state("context", enter=["complete_flag", "complete_context"]) state("unknown", enter=["complete_flag", "complete_context"]) state("end", enter=["complete_flag", "complete_context"]) transition(from_=("context", "unknown"), event="finish", to="end") transition( from_="context", event="see_context", action="switch_to_context", to="context", ) transition( from_=("context", "unknown"), event="see_unknown", action="store_only", to="unknown", ) def changing_state(self, from_: str, to: str) -> None: debug("ParseMachine: {!r} => {!r}".format(from_, to)) def __init__( self, initial: "ParserContext", contexts: Lexicon, ignore_unknown: bool, ) -> None: # Initialize self.ignore_unknown = ignore_unknown self.initial = self.context = copy.deepcopy(initial) debug("Initialized with context: {!r}".format(self.context)) self.flag = None self.flag_got_value = False self.result = ParseResult() self.contexts = copy.deepcopy(contexts) debug("Available contexts: {!r}".format(self.contexts)) # In case StateMachine does anything in __init__ super().__init__() @property def waiting_for_flag_value(self) -> bool: # Do we have a current flag, and does it expect a value (vs being a # bool/toggle)? 
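        # (For example, core '--hide' expects a value, while '--echo' is a
        # plain bool toggle and does not.)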
takes_value = self.flag and self.flag.takes_value if not takes_value: return False # OK, this flag is one that takes values. # Is it a list type (which has only just been switched to)? Then it'll # always accept more values. # TODO: how to handle somebody wanting it to be some other iterable # like tuple or custom class? Or do we just say unsupported? if self.flag.kind is list and not self.flag_got_value: return True # Not a list, okay. Does it already have a value? has_value = self.flag.raw_value is not None # If it doesn't have one, we're waiting for one (which tells the parser # how to proceed and typically to store the next token.) # TODO: in the negative case here, we should do something else instead: # - Except, "hey you screwed up, you already gave that flag!" # - Overwrite, "oh you changed your mind?" - which requires more work # elsewhere too, unfortunately. (Perhaps additional properties on # Argument that can be queried, e.g. "arg.is_iterable"?) return not has_value def handle(self, token: str) -> None: debug("Handling token: {!r}".format(token)) # Handle unknown state at the top: we don't care about even # possibly-valid input if we've encountered unknown input. if self.current_state == "unknown": debug("Top-of-handle() see_unknown({!r})".format(token)) self.see_unknown(token) return # Flag if self.context and token in self.context.flags: debug("Saw flag {!r}".format(token)) self.switch_to_flag(token) elif self.context and token in self.context.inverse_flags: debug("Saw inverse flag {!r}".format(token)) self.switch_to_flag(token, inverse=True) # Value for current flag elif self.waiting_for_flag_value: debug( "We're waiting for a flag value so {!r} must be it?".format( token ) ) # noqa self.see_value(token) # Positional args (must come above context-name check in case we still # need a posarg and the user legitimately wants to give it a value that # just happens to be a valid context name.) elif self.context and self.context.missing_positional_args: msg = "Context {!r} requires positional args, eating {!r}" debug(msg.format(self.context, token)) self.see_positional_arg(token) # New context elif token in self.contexts: self.see_context(token) # Initial-context flag being given as per-task flag (e.g. --help) elif self.initial and token in self.initial.flags: debug("Saw (initial-context) flag {!r}".format(token)) flag = self.initial.flags[token] # Special-case for core --help flag: context name is used as value. if flag.name == "help": flag.value = self.context.name msg = "Saw --help in a per-task context, setting task name ({!r}) as its value" # noqa debug(msg.format(flag.value)) # All others: just enter the 'switch to flag' parser state else: # TODO: handle inverse core flags too? There are none at the # moment (e.g. --no-dedupe is actually 'no_dedupe', not a # default-False 'dedupe') and it's up to us whether we actually # put any in place. self.switch_to_flag(token) # Unknown else: if not self.ignore_unknown: debug("Can't find context named {!r}, erroring".format(token)) self.error("No idea what {!r} is!".format(token)) else: debug("Bottom-of-handle() see_unknown({!r})".format(token)) self.see_unknown(token) def store_only(self, token: str) -> None: # Start off the unparsed list debug("Storing unknown token {!r}".format(token)) self.result.unparsed.append(token) def complete_context(self) -> None: debug( "Wrapping up context {!r}".format( self.context.name if self.context else self.context ) ) # Ensure all of context's positional args have been given. 
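        # (e.g. a task declaring a positional 'filename' arg that never got a
        # value should error out here instead of running with None)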
if self.context and self.context.missing_positional_args: err = "'{}' did not receive required positional arguments: {}" names = ", ".join( "'{}'".format(x.name) for x in self.context.missing_positional_args ) self.error(err.format(self.context.name, names)) if self.context and self.context not in self.result: self.result.append(self.context) def switch_to_context(self, name: str) -> None: self.context = copy.deepcopy(self.contexts[name]) debug("Moving to context {!r}".format(name)) debug("Context args: {!r}".format(self.context.args)) debug("Context flags: {!r}".format(self.context.flags)) debug("Context inverse_flags: {!r}".format(self.context.inverse_flags)) def complete_flag(self) -> None: if self.flag: msg = "Completing current flag {} before moving on" debug(msg.format(self.flag)) # Barf if we needed a value and didn't get one if ( self.flag and self.flag.takes_value and self.flag.raw_value is None and not self.flag.optional ): err = "Flag {!r} needed value and was not given one!" self.error(err.format(self.flag)) # Handle optional-value flags; at this point they were not given an # explicit value, but they were seen, ergo they should get treated like # bools. if self.flag and self.flag.raw_value is None and self.flag.optional: msg = "Saw optional flag {!r} go by w/ no value; setting to True" debug(msg.format(self.flag.name)) # Skip casting so the bool gets preserved self.flag.set_value(True, cast=False) def check_ambiguity(self, value: Any) -> bool: """ Guard against ambiguity when current flag takes an optional value. .. versionadded:: 1.0 """ # No flag is currently being examined, or one is but it doesn't take an # optional value? Ambiguity isn't possible. if not (self.flag and self.flag.optional): return False # We *are* dealing with an optional-value flag, but it's already # received a value? There can't be ambiguity here either. if self.flag.raw_value is not None: return False # Otherwise, there *may* be ambiguity if 1 or more of the below tests # fail. tests = [] # Unfilled posargs still exist? tests.append(self.context and self.context.missing_positional_args) # Value matches another valid task/context name? tests.append(value in self.contexts) if any(tests): msg = "{!r} is ambiguous when given after an optional-value flag" raise ParseError(msg.format(value)) def switch_to_flag(self, flag: str, inverse: bool = False) -> None: # Sanity check for ambiguity w/ prior optional-value flag self.check_ambiguity(flag) # Also tie it off, in case prior had optional value or etc. Seems to be # harmless for other kinds of flags. (TODO: this is a serious indicator # that we need to move some of this flag-by-flag bookkeeping into the # state machine bits, if possible - as-is it was REAL confusing re: why # this was manually required!) self.complete_flag() # Set flag/arg obj flag = self.context.inverse_flags[flag] if inverse else flag # Update state try: self.flag = self.context.flags[flag] except KeyError as e: # Try fallback to initial/core flag try: self.flag = self.initial.flags[flag] except KeyError: # If it wasn't in either, raise the original context's # exception, as that's more useful / correct. 
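                # (e.g. an unknown flag encountered while parsing a task
                # should complain about that task's context, not core)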
raise e debug("Moving to flag {!r}".format(self.flag)) # Bookkeeping for iterable-type flags (where the typical 'value # non-empty/nondefault -> clearly it got its value already' test is # insufficient) self.flag_got_value = False # Handle boolean flags (which can immediately be updated) if self.flag and not self.flag.takes_value: val = not inverse debug("Marking seen flag {!r} as {}".format(self.flag, val)) self.flag.value = val def see_value(self, value: Any) -> None: self.check_ambiguity(value) if self.flag and self.flag.takes_value: debug("Setting flag {!r} to value {!r}".format(self.flag, value)) self.flag.value = value self.flag_got_value = True else: self.error("Flag {!r} doesn't take any value!".format(self.flag)) def see_positional_arg(self, value: Any) -> None: for arg in self.context.positional_args: if arg.value is None: arg.value = value break def error(self, msg: str) -> None: raise ParseError(msg, self.context) invoke-2.2.0/invoke/program.py000066400000000000000000001124411445356551000163350ustar00rootroot00000000000000import getpass import inspect import json import os import sys import textwrap from importlib import import_module # buffalo buffalo from typing import ( TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Type, ) from . import Collection, Config, Executor, FilesystemLoader from .completion.complete import complete, print_completion_script from .parser import Parser, ParserContext, Argument from .exceptions import UnexpectedExit, CollectionNotFound, ParseError, Exit from .terminals import pty_size from .util import debug, enable_logging, helpline if TYPE_CHECKING: from .loader import Loader from .parser import ParseResult from .util import Lexicon class Program: """ Manages top-level CLI invocation, typically via ``setup.py`` entrypoints. Designed for distributing Invoke task collections as standalone programs, but also used internally to implement the ``invoke`` program itself. .. seealso:: :ref:`reusing-as-a-binary` for a tutorial/walkthrough of this functionality. .. versionadded:: 1.0 """ core: "ParseResult" def core_args(self) -> List["Argument"]: """ Return default core `.Argument` objects, as a list. .. versionadded:: 1.0 """ # Arguments present always, even when wrapped as a different binary return [ Argument( names=("command-timeout", "T"), kind=int, help="Specify a global command execution timeout, in seconds.", ), Argument( names=("complete",), kind=bool, default=False, help="Print tab-completion candidates for given parse remainder.", # noqa ), Argument( names=("config", "f"), help="Runtime configuration file to use.", ), Argument( names=("debug", "d"), kind=bool, default=False, help="Enable debug output.", ), Argument( names=("dry", "R"), kind=bool, default=False, help="Echo commands instead of running.", ), Argument( names=("echo", "e"), kind=bool, default=False, help="Echo executed commands before running.", ), Argument( names=("help", "h"), optional=True, help="Show core or per-task help and exit.", ), Argument( names=("hide",), help="Set default value of run()'s 'hide' kwarg.", ), Argument( names=("list", "l"), optional=True, help="List available tasks, optionally limited to a namespace.", # noqa ), Argument( names=("list-depth", "D"), kind=int, default=0, help="When listing tasks, only show the first INT levels.", ), Argument( names=("list-format", "F"), help="Change the display format used when listing tasks. 
Should be one of: flat (default), nested, json.", # noqa default="flat", ), Argument( names=("print-completion-script",), kind=str, default="", help="Print the tab-completion script for your preferred shell (bash|zsh|fish).", # noqa ), Argument( names=("prompt-for-sudo-password",), kind=bool, default=False, help="Prompt user at start of session for the sudo.password config value.", # noqa ), Argument( names=("pty", "p"), kind=bool, default=False, help="Use a pty when executing shell commands.", ), Argument( names=("version", "V"), kind=bool, default=False, help="Show version and exit.", ), Argument( names=("warn-only", "w"), kind=bool, default=False, help="Warn, instead of failing, when shell commands fail.", ), Argument( names=("write-pyc",), kind=bool, default=False, help="Enable creation of .pyc files.", ), ] def task_args(self) -> List["Argument"]: """ Return default task-related `.Argument` objects, as a list. These are only added to the core args in "task runner" mode (the default for ``invoke`` itself) - they are omitted when the constructor is given a non-empty ``namespace`` argument ("bundled namespace" mode). .. versionadded:: 1.0 """ # Arguments pertaining specifically to invocation as 'invoke' itself # (or as other arbitrary-task-executing programs, like 'fab') return [ Argument( names=("collection", "c"), help="Specify collection name to load.", ), Argument( names=("no-dedupe",), kind=bool, default=False, help="Disable task deduplication.", ), Argument( names=("search-root", "r"), help="Change root directory used for finding task modules.", ), ] argv: List[str] # Other class-level global variables a subclass might override sometime # maybe? leading_indent_width = 2 leading_indent = " " * leading_indent_width indent_width = 4 indent = " " * indent_width col_padding = 3 def __init__( self, version: Optional[str] = None, namespace: Optional["Collection"] = None, name: Optional[str] = None, binary: Optional[str] = None, loader_class: Optional[Type["Loader"]] = None, executor_class: Optional[Type["Executor"]] = None, config_class: Optional[Type["Config"]] = None, binary_names: Optional[List[str]] = None, ) -> None: """ Create a new, parameterized `.Program` instance. :param str version: The program's version, e.g. ``"0.1.0"``. Defaults to ``"unknown"``. :param namespace: A `.Collection` to use as this program's subcommands. If ``None`` (the default), the program will behave like ``invoke``, seeking a nearby task namespace with a `.Loader` and exposing arguments such as :option:`--list` and :option:`--collection` for inspecting or selecting specific namespaces. If given a `.Collection` object, will use it as if it had been handed to :option:`--collection`. Will also update the parser to remove references to tasks and task-related options, and display the subcommands in ``--help`` output. The result will be a program that has a static set of subcommands. :param str name: The program's name, as displayed in ``--version`` output. If ``None`` (default), is a capitalized version of the first word in the ``argv`` handed to `.run`. For example, when invoked from a binstub installed as ``foobar``, it will default to ``Foobar``. :param str binary: Descriptive lowercase binary name string used in help text. For example, Invoke's own internal value for this is ``inv[oke]``, denoting that it is installed as both ``inv`` and ``invoke``. 
As this is purely text intended for help display, it may be in any format you wish, though it should match whatever you've put into your ``setup.py``'s ``console_scripts`` entry. If ``None`` (default), uses the first word in ``argv`` verbatim (as with ``name`` above, except not capitalized). :param binary_names: List of binary name strings, for use in completion scripts. This list ensures that the shell completion scripts generated by :option:`--print-completion-script` instruct the shell to use that completion for all of this program's installed names. For example, Invoke's internal default for this is ``["inv", "invoke"]``. If ``None`` (the default), the first word in ``argv`` (in the invocation of :option:`--print-completion-script`) is used in a single-item list. :param loader_class: The `.Loader` subclass to use when loading task collections. Defaults to `.FilesystemLoader`. :param executor_class: The `.Executor` subclass to use when executing tasks. Defaults to `.Executor`; may also be overridden at runtime by the :ref:`configuration system ` and its ``tasks.executor_class`` setting (anytime that setting is not ``None``). :param config_class: The `.Config` subclass to use for the base config object. Defaults to `.Config`. .. versionchanged:: 1.2 Added the ``binary_names`` argument. """ self.version = "unknown" if version is None else version self.namespace = namespace self._name = name # TODO 3.0: rename binary to binary_help_name or similar. (Or write # code to autogenerate it from binary_names.) self._binary = binary self._binary_names = binary_names self.argv = [] self.loader_class = loader_class or FilesystemLoader self.executor_class = executor_class or Executor self.config_class = config_class or Config def create_config(self) -> None: """ Instantiate a `.Config` (or subclass, depending) for use in task exec. This Config is fully usable but will lack runtime-derived data like project & runtime config files, CLI arg overrides, etc. That data is added later in `update_config`. See `.Config` docstring for lifecycle details. :returns: ``None``; sets ``self.config`` instead. .. versionadded:: 1.0 """ self.config = self.config_class() def update_config(self, merge: bool = True) -> None: """ Update the previously instantiated `.Config` with parsed data. For example, this is how ``--echo`` is able to override the default config value for ``run.echo``. :param bool merge: Whether to merge at the end, or defer. Primarily useful for subclassers. Default: ``True``. .. versionadded:: 1.0 """ # Now that we have parse results handy, we can grab the remaining # config bits: # - runtime config, as it is dependent on the runtime flag/env var # - the overrides config level, as it is composed of runtime flag data # NOTE: only fill in values that would alter behavior, otherwise we # want the defaults to come through. 
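        # e.g. (hypothetical invocation, illustration only): running
        # `inv -e -w mytask` would, via the mapping below, build overrides
        # roughly equivalent to:
        #
        #   overrides = {
        #       "run": {"echo": True, "warn": True},
        #       "tasks": {}, "sudo": {}, "timeouts": {},
        #   }
        #   self.config.load_overrides(overrides, merge=False)
        #
        # ('mytask' being an invented task name used only for the example.)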
run = {} if self.args["warn-only"].value: run["warn"] = True if self.args.pty.value: run["pty"] = True if self.args.hide.value: run["hide"] = self.args.hide.value if self.args.echo.value: run["echo"] = True if self.args.dry.value: run["dry"] = True tasks = {} if "no-dedupe" in self.args and self.args["no-dedupe"].value: tasks["dedupe"] = False timeouts = {} command = self.args["command-timeout"].value if command: timeouts["command"] = command # Handle "fill in config values at start of runtime", which for now is # just sudo password sudo = {} if self.args["prompt-for-sudo-password"].value: prompt = "Desired 'sudo.password' config value: " sudo["password"] = getpass.getpass(prompt) overrides = dict(run=run, tasks=tasks, sudo=sudo, timeouts=timeouts) self.config.load_overrides(overrides, merge=False) runtime_path = self.args.config.value if runtime_path is None: runtime_path = os.environ.get("INVOKE_RUNTIME_CONFIG", None) self.config.set_runtime_path(runtime_path) self.config.load_runtime(merge=False) if merge: self.config.merge() def run(self, argv: Optional[List[str]] = None, exit: bool = True) -> None: """ Execute main CLI logic, based on ``argv``. :param argv: The arguments to execute against. May be ``None``, a list of strings, or a string. See `.normalize_argv` for details. :param bool exit: When ``False`` (default: ``True``), will ignore `.ParseError`, `.Exit` and `.Failure` exceptions, which otherwise trigger calls to `sys.exit`. .. note:: This is mostly a concession to testing. If you're setting this to ``False`` in a production setting, you should probably be using `.Executor` and friends directly instead! .. versionadded:: 1.0 """ try: # Create an initial config, which will hold defaults & values from # most config file locations (all but runtime.) Used to inform # loading & parsing behavior. self.create_config() # Parse the given ARGV with our CLI parsing machinery, resulting in # things like self.args (core args/flags), self.collection (the # loaded namespace, which may be affected by the core flags) and # self.tasks (the tasks requested for exec and their own # args/flags) self.parse_core(argv) # Handle collection concerns including project config self.parse_collection() # Parse remainder of argv as task-related input self.parse_tasks() # End of parsing (typically bailout stuff like --list, --help) self.parse_cleanup() # Update the earlier Config with new values from the parse step - # runtime config file contents and flag-derived overrides (e.g. for # run()'s echo, warn, etc options.) self.update_config() # Create an Executor, passing in the data resulting from the prior # steps, then tell it to execute the tasks. self.execute() except (UnexpectedExit, Exit, ParseError) as e: debug("Received a possibly-skippable exception: {!r}".format(e)) # Print error messages from parser, runner, etc if necessary; # prevents messy traceback but still clues interactive user into # problems. if isinstance(e, ParseError): print(e, file=sys.stderr) if isinstance(e, Exit) and e.message: print(e.message, file=sys.stderr) if isinstance(e, UnexpectedExit) and e.result.hide: print(e, file=sys.stderr, end="") # Terminate execution unless we were told not to. 
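            # Sketch of the `exit=False` escape hatch (hypothetical,
            # test-style caller; `myapp` is an invented binary name):
            #
            #   program = Program(version="0.1.0")
            #   program.run("myapp --bogus-flag", exit=False)  # no sys.exit
            #
            # With exit=True (the default), the same parse failure instead
            # falls through to the sys.exit(code) call just below.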
            if exit:
                if isinstance(e, UnexpectedExit):
                    code = e.result.exited
                elif isinstance(e, Exit):
                    code = e.code
                elif isinstance(e, ParseError):
                    code = 1
                sys.exit(code)
            else:
                debug("Invoked as run(..., exit=False), ignoring exception")
        except KeyboardInterrupt:
            sys.exit(1)  # Same behavior as Python itself outside of REPL

    def parse_core(self, argv: Optional[List[str]]) -> None:
        debug("argv given to Program.run: {!r}".format(argv))
        self.normalize_argv(argv)

        # Obtain core args (sets self.core)
        self.parse_core_args()
        debug("Finished parsing core args")

        # Set interpreter bytecode-writing flag
        sys.dont_write_bytecode = not self.args["write-pyc"].value

        # Enable debugging from here on out, if debug flag was given.
        # (Prior to this point, debugging requires setting INVOKE_DEBUG).
        if self.args.debug.value:
            enable_logging()

        # Short-circuit if --version
        if self.args.version.value:
            debug("Saw --version, printing version & exiting")
            self.print_version()
            raise Exit

        # Print (dynamic, no tasks required) completion script if requested
        if self.args["print-completion-script"].value:
            print_completion_script(
                shell=self.args["print-completion-script"].value,
                names=self.binary_names,
            )
            raise Exit

    def parse_collection(self) -> None:
        """
        Load a tasks collection & project-level config.

        .. versionadded:: 1.0
        """
        # Load a collection of tasks unless one was already set.
        if self.namespace is not None:
            debug(
                "Program was given default namespace, not loading collection"
            )
            self.collection = self.namespace
        else:
            debug(
                "No default namespace provided, trying to load one from disk"
            )  # noqa
            # If no bundled namespace & --help was given, just print it and
            # exit. (If we did have a bundled namespace, core --help will be
            # handled *after* the collection is loaded & parsing is done.)
            if self.args.help.value is True:
                debug(
                    "No bundled namespace & bare --help given; printing help."
                )
                self.print_help()
                raise Exit
            self.load_collection()
        # Set these up for potential use later when listing tasks
        # TODO: be nice if these came from the config...! Users would love to
        # say they default to nested for example. Easy 2.x feature-add.
        self.list_root: Optional[str] = None
        self.list_depth: Optional[int] = None
        self.list_format = "flat"
        self.scoped_collection = self.collection

        # TODO: load project conf, if possible, gracefully

    def parse_cleanup(self) -> None:
        """
        Post-parsing, pre-execution steps such as --help, --list, etc.

        .. versionadded:: 1.0
        """
        halp = self.args.help.value

        # Core (no value given) --help output (only when bundled namespace)
        if halp is True:
            debug("Saw bare --help, printing help & exiting")
            self.print_help()
            raise Exit

        # Print per-task help, if necessary
        if halp:
            if halp in self.parser.contexts:
                msg = "Saw --help <taskname>, printing per-task help & exiting"
                debug(msg)
                self.print_task_help(halp)
                raise Exit
            else:
                # TODO: feels real dumb to factor this out of Parser, but...we
                # should?
                raise ParseError("No idea what '{}' is!".format(halp))

        # Print discovered tasks if necessary
        list_root = self.args.list.value  # will be True or string
        self.list_format = self.args["list-format"].value
        self.list_depth = self.args["list-depth"].value
        if list_root:
            # Not just --list, but --list some-root - do moar work
            if isinstance(list_root, str):
                self.list_root = list_root
                try:
                    sub = self.collection.subcollection_from_path(list_root)
                    self.scoped_collection = sub
                except KeyError:
                    msg = "Sub-collection '{}' not found!"
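                    # (Illustrative: `inv --list deploy` scopes the listing
                    # to a `deploy` subcollection, while a nonexistent path
                    # such as `inv --list nope` ends up at this Exit.)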
raise Exit(msg.format(list_root)) self.list_tasks() raise Exit # Print completion helpers if necessary if self.args.complete.value: complete( names=self.binary_names, core=self.core, initial_context=self.initial_context, collection=self.collection, # NOTE: can't reuse self.parser as it has likely been mutated # between when it was set and now. parser=self._make_parser(), ) # Fallback behavior if no tasks were given & no default specified # (mostly a subroutine for overriding purposes) # NOTE: when there is a default task, Executor will select it when no # tasks were found in CLI parsing. if not self.tasks and not self.collection.default: self.no_tasks_given() def no_tasks_given(self) -> None: debug( "No tasks specified for execution and no default task; printing global help as fallback" # noqa ) self.print_help() raise Exit def execute(self) -> None: """ Hand off data & tasks-to-execute specification to an `.Executor`. .. note:: Client code just wanting a different `.Executor` subclass can just set ``executor_class`` in `.__init__`, or override ``tasks.executor_class`` anywhere in the :ref:`config system ` (which may allow you to avoid using a custom Program entirely). .. versionadded:: 1.0 """ klass = self.executor_class config_path = self.config.tasks.executor_class if config_path is not None: # TODO: why the heck is this not builtin to importlib? module_path, _, class_name = config_path.rpartition(".") # TODO: worth trying to wrap both of these and raising ImportError # for cases where module exists but class name does not? More # "normal" but also its own possible source of bugs/confusion... module = import_module(module_path) klass = getattr(module, class_name) executor = klass(self.collection, self.config, self.core) executor.execute(*self.tasks) def normalize_argv(self, argv: Optional[List[str]]) -> None: """ Massages ``argv`` into a useful list of strings. **If None** (the default), uses `sys.argv`. **If a non-string iterable**, uses that in place of `sys.argv`. **If a string**, performs a `str.split` and then executes with the result. (This is mostly a convenience; when in doubt, use a list.) Sets ``self.argv`` to the result. .. versionadded:: 1.0 """ if argv is None: argv = sys.argv debug("argv was None; using sys.argv: {!r}".format(argv)) elif isinstance(argv, str): argv = argv.split() debug("argv was string-like; splitting: {!r}".format(argv)) self.argv = argv @property def name(self) -> str: """ Derive program's human-readable name based on `.binary`. .. versionadded:: 1.0 """ return self._name or self.binary.capitalize() @property def called_as(self) -> str: """ Returns the program name we were actually called as. Specifically, this is the (Python's os module's concept of a) basename of the first argument in the parsed argument vector. .. versionadded:: 1.2 """ # XXX: defaults to empty string if 'argv' is '[]' or 'None' return os.path.basename(self.argv[0]) if self.argv else "" @property def binary(self) -> str: """ Derive program's help-oriented binary name(s) from init args & argv. .. versionadded:: 1.0 """ return self._binary or self.called_as @property def binary_names(self) -> List[str]: """ Derive program's completion-oriented binary name(s) from args & argv. .. versionadded:: 1.2 """ return self._binary_names or [self.called_as] # TODO 3.0: ugh rename this or core_args, they are too confusing @property def args(self) -> "Lexicon": """ Obtain core program args from ``self.core`` parse result. .. 
versionadded:: 1.0
        """
        return self.core[0].args

    @property
    def initial_context(self) -> ParserContext:
        """
        The initial parser context, aka core program flags.

        The specific arguments contained therein will differ depending on
        whether a bundled namespace was specified in `.__init__`.

        .. versionadded:: 1.0
        """
        args = self.core_args()
        if self.namespace is None:
            args += self.task_args()
        return ParserContext(args=args)

    def print_version(self) -> None:
        print("{} {}".format(self.name, self.version or "unknown"))

    def print_help(self) -> None:
        usage_suffix = "task1 [--task1-opts] ... taskN [--taskN-opts]"
        if self.namespace is not None:
            usage_suffix = "<subcommand> [--subcommand-opts] ..."
        print("Usage: {} [--core-opts] {}".format(self.binary, usage_suffix))
        print("")
        print("Core options:")
        print("")
        self.print_columns(self.initial_context.help_tuples())
        if self.namespace is not None:
            self.list_tasks()

    def parse_core_args(self) -> None:
        """
        Filter out core args, leaving any tasks or their args for later.

        Sets ``self.core`` to the `.ParseResult` from this step.

        .. versionadded:: 1.0
        """
        debug("Parsing initial context (core args)")
        parser = Parser(initial=self.initial_context, ignore_unknown=True)
        self.core = parser.parse_argv(self.argv[1:])
        msg = "Core-args parse result: {!r} & unparsed: {!r}"
        debug(msg.format(self.core, self.core.unparsed))

    def load_collection(self) -> None:
        """
        Load a task collection based on parsed core args, or die trying.

        .. versionadded:: 1.0
        """
        # NOTE: start, coll_name both fall back to configuration values within
        # Loader (which may, however, get them from our config.)
        start = self.args["search-root"].value
        loader = self.loader_class(  # type: ignore
            config=self.config, start=start
        )
        coll_name = self.args.collection.value
        try:
            module, parent = loader.load(coll_name)
            # This is the earliest we can load project config, so we should -
            # allows project config to affect the task parsing step!
            # TODO: is it worth merging these set- and load- methods? May
            # require more tweaking of how things behave in/after __init__.
            self.config.set_project_location(parent)
            self.config.load_project()
            self.collection = Collection.from_module(
                module,
                loaded_from=parent,
                auto_dash_names=self.config.tasks.auto_dash_names,
            )
        except CollectionNotFound as e:
            raise Exit("Can't find any collection named {!r}!".format(e.name))

    def _update_core_context(
        self, context: ParserContext, new_args: Dict[str, Any]
    ) -> None:
        # Update core context w/ core_via_task args, if and only if the
        # via-task version of the arg was truly given a value.
        # TODO: push this into an Argument-aware Lexicon subclass and
        # .update()?
        for key, arg in new_args.items():
            if arg.got_value:
                context.args[key]._value = arg._value

    def _make_parser(self) -> Parser:
        return Parser(
            initial=self.initial_context,
            contexts=self.collection.to_contexts(
                ignore_unknown_help=self.config.tasks.ignore_unknown_help
            ),
        )

    def parse_tasks(self) -> None:
        """
        Parse leftover args, which are typically tasks & per-task args.

        Sets ``self.parser`` to the parser used, ``self.tasks`` to the
        parsed per-task contexts, and ``self.core_via_tasks`` to a context
        holding any core flags seen within the task contexts.

        Also modifies ``self.core`` to include the data from
        ``core_via_tasks`` (so that it correctly reflects any supplied core
        flags regardless of where they appeared).

        .. versionadded:: 1.0
        """
        self.parser = self._make_parser()
        debug("Parsing tasks against {!r}".format(self.collection))
        result = self.parser.parse_argv(self.core.unparsed)
        self.core_via_tasks = result.pop(0)
        self._update_core_context(
            context=self.core[0], new_args=self.core_via_tasks.args
        )
        self.tasks = result
        debug("Resulting task contexts: {!r}".format(self.tasks))

    def print_task_help(self, name: str) -> None:
        """
        Print help for a specific task, e.g. ``inv --help <taskname>``.

        .. versionadded:: 1.0
        """
        # Setup
        ctx = self.parser.contexts[name]
        tuples = ctx.help_tuples()
        docstring = inspect.getdoc(self.collection[name])
        header = "Usage: {} [--core-opts] {} {}[other tasks here ...]"
        opts = "[--options] " if tuples else ""
        print(header.format(self.binary, name, opts))
        print("")
        print("Docstring:")
        if docstring:
            # Really wish textwrap worked better for this.
            for line in docstring.splitlines():
                if line.strip():
                    print(self.leading_indent + line)
                else:
                    print("")
            print("")
        else:
            print(self.leading_indent + "none")
            print("")
        print("Options:")
        if tuples:
            self.print_columns(tuples)
        else:
            print(self.leading_indent + "none")
            print("")

    def list_tasks(self) -> None:
        # Short circuit if no tasks to show (Collection now implements bool)
        focus = self.scoped_collection
        if not focus:
            msg = "No tasks found in collection '{}'!"
            raise Exit(msg.format(focus.name))
        # TODO: now that flat/nested are almost 100% unified, maybe rethink
        # this a bit?
        getattr(self, "list_{}".format(self.list_format))()

    def list_flat(self) -> None:
        pairs = self._make_pairs(self.scoped_collection)
        self.display_with_columns(pairs=pairs)

    def list_nested(self) -> None:
        pairs = self._make_pairs(self.scoped_collection)
        extra = "'*' denotes collection defaults"
        self.display_with_columns(pairs=pairs, extra=extra)

    def _make_pairs(
        self,
        coll: "Collection",
        ancestors: Optional[List[str]] = None,
    ) -> List[Tuple[str, Optional[str]]]:
        if ancestors is None:
            ancestors = []
        pairs = []
        indent = len(ancestors) * self.indent
        ancestor_path = ".".join(x for x in ancestors)
        for name, task in sorted(coll.tasks.items()):
            is_default = name == coll.default
            # Start with just the name and just the aliases, no prefixes or
            # dots.
            displayname = name
            aliases = list(map(coll.transform, sorted(task.aliases)))
            # If displaying a sub-collection (or if we are displaying a given
            # namespace/root), tack on some dots to make it clear these names
            # require dotted paths to invoke.
            if ancestors or self.list_root:
                displayname = ".{}".format(displayname)
                aliases = [".{}".format(x) for x in aliases]
            # Nested? Indent, and add asterisks to default-tasks.
            if self.list_format == "nested":
                prefix = indent
                if is_default:
                    displayname += "*"
            # Flat? Prefix names and aliases with ancestor names to get full
            # dotted path; and give default-tasks their collection name as the
            # first alias.
            if self.list_format == "flat":
                prefix = ancestor_path
                # Make sure leading dots are present for subcollections if
                # scoped display
                if prefix and self.list_root:
                    prefix = "." + prefix
                aliases = [prefix + alias for alias in aliases]
                if is_default and ancestors:
                    aliases.insert(0, prefix)
            # Generate full name and help columns and add to pairs.
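            # For example (hypothetical collection): a root holding task
            # `build` (alias `b`) plus a subcollection `docs` with task
            # `serve` would, in flat format, produce pairs along the lines
            # of [("build (b)", "Build it."), ("docs.serve", None)], where
            # the help strings come from each task's docstring, if any.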
alias_str = " ({})".format(", ".join(aliases)) if aliases else "" full = prefix + displayname + alias_str pairs.append((full, helpline(task))) # Determine whether we're at max-depth or not truncate = self.list_depth and (len(ancestors) + 1) >= self.list_depth for name, subcoll in sorted(coll.collections.items()): displayname = name if ancestors or self.list_root: displayname = ".{}".format(displayname) if truncate: tallies = [ "{} {}".format(len(getattr(subcoll, attr)), attr) for attr in ("tasks", "collections") if getattr(subcoll, attr) ] displayname += " [{}]".format(", ".join(tallies)) if self.list_format == "nested": pairs.append((indent + displayname, helpline(subcoll))) elif self.list_format == "flat" and truncate: # NOTE: only adding coll-oriented pair if limiting by depth pairs.append((ancestor_path + displayname, helpline(subcoll))) # Recurse, if not already at max depth if not truncate: recursed_pairs = self._make_pairs( coll=subcoll, ancestors=ancestors + [name] ) pairs.extend(recursed_pairs) return pairs def list_json(self) -> None: # Sanity: we can't cleanly honor the --list-depth argument without # changing the data schema or otherwise acting strangely; and it also # doesn't make a ton of sense to limit depth when the output is for a # script to handle. So we just refuse, for now. TODO: find better way if self.list_depth: raise Exit( "The --list-depth option is not supported with JSON format!" ) # noqa # TODO: consider using something more formal re: the format this emits, # eg json-schema or whatever. Would simplify the # relatively-concise-but-only-human docs that currently describe this. coll = self.scoped_collection data = coll.serialized() print(json.dumps(data)) def task_list_opener(self, extra: str = "") -> str: root = self.list_root depth = self.list_depth specifier = " '{}'".format(root) if root else "" tail = "" if depth or extra: depthstr = "depth={}".format(depth) if depth else "" joiner = "; " if (depth and extra) else "" tail = " ({}{}{})".format(depthstr, joiner, extra) text = "Available{} tasks{}".format(specifier, tail) # TODO: do use cases w/ bundled namespace want to display things like # root and depth too? Leaving off for now... if self.namespace is not None: text = "Subcommands" return text def display_with_columns( self, pairs: Sequence[Tuple[str, Optional[str]]], extra: str = "" ) -> None: root = self.list_root print("{}:\n".format(self.task_list_opener(extra=extra))) self.print_columns(pairs) # TODO: worth stripping this out for nested? since it's signified with # asterisk there? ugggh default = self.scoped_collection.default if default: specific = "" if root: specific = " '{}'".format(root) default = ".{}".format(default) # TODO: trim/prefix dots print("Default{} task: {}\n".format(specific, default)) def print_columns( self, tuples: Sequence[Tuple[str, Optional[str]]] ) -> None: """ Print tabbed columns from (name, help) ``tuples``. Useful for listing tasks + docstrings, flags + help strings, etc. .. versionadded:: 1.0 """ # Calculate column sizes: don't wrap flag specs, give what's left over # to the descriptions. 
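        # Worked example with made-up numbers: on an 80-column terminal with
        # a longest name of 18 chars, descriptions wrap at
        # 80 - 18 - 2 (leading indent) - 3 (col padding) - 1 = 56 columns.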
name_width = max(len(x[0]) for x in tuples) desc_width = ( pty_size()[0] - name_width - self.leading_indent_width - self.col_padding - 1 ) wrapper = textwrap.TextWrapper(width=desc_width) for name, help_str in tuples: if help_str is None: help_str = "" # Wrap descriptions/help text help_chunks = wrapper.wrap(help_str) # Print flag spec + padding name_padding = name_width - len(name) spec = "".join( ( self.leading_indent, name, name_padding * " ", self.col_padding * " ", ) ) # Print help text as needed if help_chunks: print(spec + help_chunks[0]) for chunk in help_chunks[1:]: print((" " * len(spec)) + chunk) else: print(spec.rstrip()) print("") invoke-2.2.0/invoke/py.typed000066400000000000000000000000001445356551000157760ustar00rootroot00000000000000invoke-2.2.0/invoke/runners.py000066400000000000000000001777451445356551000164040ustar00rootroot00000000000000import errno import locale import os import struct import sys import threading import time import signal from subprocess import Popen, PIPE from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Dict, Generator, IO, List, Optional, Tuple, Type, ) # Import some platform-specific things at top level so they can be mocked for # tests. try: import pty except ImportError: pty = None # type: ignore[assignment] try: import fcntl except ImportError: fcntl = None # type: ignore[assignment] try: import termios except ImportError: termios = None # type: ignore[assignment] from .exceptions import ( UnexpectedExit, Failure, ThreadException, WatcherError, SubprocessPipeError, CommandTimedOut, ) from .terminals import ( WINDOWS, pty_size, character_buffered, ready_for_reading, bytes_to_read, ) from .util import has_fileno, isatty, ExceptionHandlingThread if TYPE_CHECKING: from .context import Context from .watchers import StreamWatcher class Runner: """ Partially-abstract core command-running API. This class is not usable by itself and must be subclassed, implementing a number of methods such as `start`, `wait` and `returncode`. For a subclass implementation example, see the source code for `.Local`. .. versionadded:: 1.0 """ opts: Dict[str, Any] using_pty: bool read_chunk_size = 1000 input_sleep = 0.01 def __init__(self, context: "Context") -> None: """ Create a new runner with a handle on some `.Context`. :param context: a `.Context` instance, used to transmit default options and provide access to other contextualized information (e.g. a remote-oriented `.Runner` might want a `.Context` subclass holding info about hostnames and ports.) .. note:: The `.Context` given to `.Runner` instances **must** contain default config values for the `.Runner` class in question. At a minimum, this means values for each of the default `.Runner.run` keyword arguments such as ``echo`` and ``warn``. :raises exceptions.ValueError: if not all expected default values are found in ``context``. """ #: The `.Context` given to the same-named argument of `__init__`. self.context = context #: A `threading.Event` signaling program completion. #: #: Typically set after `wait` returns. Some IO mechanisms rely on this #: to know when to exit an infinite read loop. self.program_finished = threading.Event() # I wish Sphinx would organize all class/instance attrs in the same # place. If I don't do this here, it goes 'class vars -> __init__ # docstring -> instance vars' :( TODO: consider just merging class and # __init__ docstrings, though that's annoying too. #: How many bytes (at maximum) to read per iteration of stream reads. 
self.read_chunk_size = self.__class__.read_chunk_size # Ditto re: declaring this in 2 places for doc reasons. #: How many seconds to sleep on each iteration of the stdin read loop #: and other otherwise-fast loops. self.input_sleep = self.__class__.input_sleep #: Whether pty fallback warning has been emitted. self.warned_about_pty_fallback = False #: A list of `.StreamWatcher` instances for use by `respond`. Is filled #: in at runtime by `run`. self.watchers: List["StreamWatcher"] = [] # Optional timeout timer placeholder self._timer: Optional[threading.Timer] = None # Async flags (initialized for 'finally' referencing in case something # goes REAL bad during options parsing) self._asynchronous = False self._disowned = False def run(self, command: str, **kwargs: Any) -> Optional["Result"]: """ Execute ``command``, returning an instance of `Result` once complete. By default, this method is synchronous (it only returns once the subprocess has completed), and allows interactive keyboard communication with the subprocess. It can instead behave asynchronously (returning early & requiring interaction with the resulting object to manage subprocess lifecycle) if you specify ``asynchronous=True``. Furthermore, you can completely disassociate the subprocess from Invoke's control (allowing it to persist on its own after Python exits) by saying ``disown=True``. See the per-kwarg docs below for details on both of these. .. note:: All kwargs will default to the values found in this instance's `~.Runner.context` attribute, specifically in its configuration's ``run`` subtree (e.g. ``run.echo`` provides the default value for the ``echo`` keyword, etc). The base default values are described in the parameter list below. :param str command: The shell command to execute. :param bool asynchronous: When set to ``True`` (default ``False``), enables asynchronous behavior, as follows: - Connections to the controlling terminal are disabled, meaning you will not see the subprocess output and it will not respond to your keyboard input - similar to ``hide=True`` and ``in_stream=False`` (though explicitly given ``(out|err|in)_stream`` file-like objects will still be honored as normal). - `.run` returns immediately after starting the subprocess, and its return value becomes an instance of `Promise` instead of `Result`. - `Promise` objects are primarily useful for their `~Promise.join` method, which blocks until the subprocess exits (similar to threading APIs) and either returns a final `~Result` or raises an exception, just as a synchronous ``run`` would. - As with threading and similar APIs, users of ``asynchronous=True`` should make sure to ``join`` their `Promise` objects to prevent issues with interpreter shutdown. - One easy way to handle such cleanup is to use the `Promise` as a context manager - it will automatically ``join`` at the exit of the context block. .. versionadded:: 1.4 :param bool disown: When set to ``True`` (default ``False``), returns immediately like ``asynchronous=True``, but does not perform any background work related to that subprocess (it is completely ignored). This allows subprocesses using shell backgrounding or similar techniques (e.g. trailing ``&``, ``nohup``) to persist beyond the lifetime of the Python process running Invoke. .. note:: If you're unsure whether you want this or ``asynchronous``, you probably want ``asynchronous``! Specifically, ``disown=True`` has the following behaviors: - The return value is ``None`` instead of a `Result` or subclass. 
- No I/O worker threads are spun up, so you will have no access to the subprocess' stdout/stderr, your stdin will not be forwarded, ``(out|err|in)_stream`` will be ignored, and features like ``watchers`` will not function. - No exit code is checked for, so you will not receive any errors if the subprocess fails to exit cleanly. - ``pty=True`` may not function correctly (subprocesses may not run at all; this seems to be a potential bug in Python's ``pty.fork``) unless your command line includes tools such as ``nohup`` or (the shell builtin) ``disown``. .. versionadded:: 1.4 :param bool dry: Whether to dry-run instead of truly invoking the given command. See :option:`--dry` (which flips this on globally) for details on this behavior. .. versionadded:: 1.3 :param bool echo: Controls whether `.run` prints the command string to local stdout prior to executing it. Default: ``False``. .. note:: ``hide=True`` will override ``echo=True`` if both are given. :param echo_format: A string, which when passed to Python's inbuilt ``.format`` method, will change the format of the output when ``run.echo`` is set to true. Currently, only ``{command}`` is supported as a parameter. Defaults to printing the full command string in ANSI-escaped bold. :param bool echo_stdin: Whether to write data from ``in_stream`` back to ``out_stream``. In other words, in normal interactive usage, this parameter controls whether Invoke mirrors what you type back to your terminal. By default (when ``None``), this behavior is triggered by the following: * Not using a pty to run the subcommand (i.e. ``pty=False``), as ptys natively echo stdin to stdout on their own; * And when the controlling terminal of Invoke itself (as per ``in_stream``) appears to be a valid terminal device or TTY. (Specifically, when `~invoke.util.isatty` yields a ``True`` result when given ``in_stream``.) .. note:: This property tends to be ``False`` when piping another program's output into an Invoke session, or when running Invoke within another program (e.g. running Invoke from itself). If both of those properties are true, echoing will occur; if either is false, no echoing will be performed. When not ``None``, this parameter will override that auto-detection and force, or disable, echoing. :param str encoding: Override auto-detection of which encoding the subprocess is using for its stdout/stderr streams (which defaults to the return value of `default_encoding`). :param err_stream: Same as ``out_stream``, except for standard error, and defaulting to ``sys.stderr``. :param dict env: By default, subprocesses receive a copy of Invoke's own environment (i.e. ``os.environ``). Supply a dict here to update that child environment. For example, ``run('command', env={'PYTHONPATH': '/some/virtual/env/maybe'})`` would modify the ``PYTHONPATH`` env var, with the rest of the child's env looking identical to the parent. .. seealso:: ``replace_env`` for changing 'update' to 'replace'. :param bool fallback: Controls auto-fallback behavior re: problems offering a pty when ``pty=True``. Whether this has any effect depends on the specific `Runner` subclass being invoked. Default: ``True``. :param hide: Allows the caller to disable ``run``'s default behavior of copying the subprocess' stdout and stderr to the controlling terminal. Specify ``hide='out'`` (or ``'stdout'``) to hide only the stdout stream, ``hide='err'`` (or ``'stderr'``) to hide only stderr, or ``hide='both'`` (or ``True``) to hide both streams. 
The default value is ``None``, meaning to print everything; ``False`` will also disable hiding. .. note:: Stdout and stderr are always captured and stored in the ``Result`` object, regardless of ``hide``'s value. .. note:: ``hide=True`` will also override ``echo=True`` if both are given (either as kwargs or via config/CLI). :param in_stream: A file-like stream object to used as the subprocess' standard input. If ``None`` (the default), ``sys.stdin`` will be used. If ``False``, will disable stdin mirroring entirely (though other functionality which writes to the subprocess' stdin, such as autoresponding, will still function.) Disabling stdin mirroring can help when ``sys.stdin`` is a misbehaving non-stream object, such as under test harnesses or headless command runners. :param out_stream: A file-like stream object to which the subprocess' standard output should be written. If ``None`` (the default), ``sys.stdout`` will be used. :param bool pty: By default, ``run`` connects directly to the invoked process and reads its stdout/stderr streams. Some programs will buffer (or even behave) differently in this situation compared to using an actual terminal or pseudoterminal (pty). To use a pty instead of the default behavior, specify ``pty=True``. .. warning:: Due to their nature, ptys have a single output stream, so the ability to tell stdout apart from stderr is **not possible** when ``pty=True``. As such, all output will appear on ``out_stream`` (see below) and be captured into the ``stdout`` result attribute. ``err_stream`` and ``stderr`` will always be empty when ``pty=True``. :param bool replace_env: When ``True``, causes the subprocess to receive the dictionary given to ``env`` as its entire shell environment, instead of updating a copy of ``os.environ`` (which is the default behavior). Default: ``False``. :param str shell: Which shell binary to use. Default: ``/bin/bash`` (on Unix; ``COMSPEC`` or ``cmd.exe`` on Windows.) :param timeout: Cause the runner to submit an interrupt to the subprocess and raise `.CommandTimedOut`, if the command takes longer than ``timeout`` seconds to execute. Defaults to ``None``, meaning no timeout. .. versionadded:: 1.3 :param bool warn: Whether to warn and continue, instead of raising `.UnexpectedExit`, when the executed command exits with a nonzero status. Default: ``False``. .. note:: This setting has no effect on exceptions, which will still be raised, typically bundled in `.ThreadException` objects if they were raised by the IO worker threads. Similarly, `.WatcherError` exceptions raised by `.StreamWatcher` instances will also ignore this setting, and will usually be bundled inside `.Failure` objects (in order to preserve the execution context). Ditto `.CommandTimedOut` - basically, anything that prevents a command from actually getting to "exited with an exit code" ignores this flag. :param watchers: A list of `.StreamWatcher` instances which will be used to scan the program's ``stdout`` or ``stderr`` and may write into its ``stdin`` (typically ``bytes`` objects) in response to patterns or other heuristics. See :doc:`/concepts/watchers` for details on this functionality. Default: ``[]``. :returns: `Result`, or a subclass thereof. :raises: `.UnexpectedExit`, if the command exited nonzero and ``warn`` was ``False``. :raises: `.Failure`, if the command didn't even exit cleanly, e.g. if a `.StreamWatcher` raised `.WatcherError`. :raises: `.ThreadException` (if the background I/O threads encountered exceptions other than `.WatcherError`). .. 
versionadded:: 1.0 """ try: return self._run_body(command, **kwargs) finally: if not (self._asynchronous or self._disowned): self.stop() def echo(self, command: str) -> None: print(self.opts["echo_format"].format(command=command)) def _setup(self, command: str, kwargs: Any) -> None: """ Prepare data on ``self`` so we're ready to start running. """ # Normalize kwargs w/ config; sets self.opts, self.streams self._unify_kwargs_with_config(kwargs) # Environment setup self.env = self.generate_env( self.opts["env"], self.opts["replace_env"] ) # Arrive at final encoding if neither config nor kwargs had one self.encoding = self.opts["encoding"] or self.default_encoding() # Echo running command (wants to be early to be included in dry-run) if self.opts["echo"]: self.echo(command) # Prepare common result args. # TODO: I hate this. Needs a deeper separate think about tweaking # Runner.generate_result in a way that isn't literally just this same # two-step process, and which also works w/ downstream. self.result_kwargs = dict( command=command, shell=self.opts["shell"], env=self.env, pty=self.using_pty, hide=self.opts["hide"], encoding=self.encoding, ) def _run_body(self, command: str, **kwargs: Any) -> Optional["Result"]: # Prepare all the bits n bobs. self._setup(command, kwargs) # If dry-run, stop here. if self.opts["dry"]: return self.generate_result( **dict(self.result_kwargs, stdout="", stderr="", exited=0) ) # Start executing the actual command (runs in background) self.start(command, self.opts["shell"], self.env) # If disowned, we just stop here - no threads, no timer, no error # checking, nada. if self._disowned: return None # Stand up & kick off IO, timer threads self.start_timer(self.opts["timeout"]) self.threads, self.stdout, self.stderr = self.create_io_threads() for thread in self.threads.values(): thread.start() # Wrap up or promise that we will, depending return self.make_promise() if self._asynchronous else self._finish() def make_promise(self) -> "Promise": """ Return a `Promise` allowing async control of the rest of lifecycle. .. versionadded:: 1.4 """ return Promise(self) def _finish(self) -> "Result": # Wait for subprocess to run, forwarding signals as we get them. try: while True: try: self.wait() break # done waiting! # Don't locally stop on ^C, only forward it: # - if remote end really stops, we'll naturally stop after # - if remote end does not stop (eg REPL, editor) we don't want # to stop prematurely except KeyboardInterrupt as e: self.send_interrupt(e) # TODO: honor other signals sent to our own process and # transmit them to the subprocess before handling 'normally'. # Make sure we tie off our worker threads, even if something exploded. # Any exceptions that raised during self.wait() above will appear after # this block. finally: # Inform stdin-mirroring worker to stop its eternal looping self.program_finished.set() # Join threads, storing inner exceptions, & set a timeout if # necessary. (Segregate WatcherErrors as they are "anticipated # errors" that want to show up at the end during creation of # Failure objects.) watcher_errors = [] thread_exceptions = [] for target, thread in self.threads.items(): thread.join(self._thread_join_timeout(target)) exception = thread.exception() if exception is not None: real = exception.value if isinstance(real, WatcherError): watcher_errors.append(real) else: thread_exceptions.append(exception) # If any exceptions appeared inside the threads, raise them now as an # aggregate exception object. 
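        # Caller-side sketch (hypothetical handling; ThreadException exposes
        # the wrapped worker errors via its `exceptions` attribute):
        #
        #   try:
        #       runner.run("some-command")
        #   except ThreadException as e:
        #       for wrapped in e.exceptions:
        #           print(wrapped.type, wrapped.value)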
# NOTE: this is kept outside the 'finally' so that main-thread # exceptions are raised before worker-thread exceptions; they're more # likely to be Big Serious Problems. if thread_exceptions: raise ThreadException(thread_exceptions) # Collate stdout/err, calculate exited, and get final result obj result = self._collate_result(watcher_errors) # Any presence of WatcherError from the threads indicates a watcher was # upset and aborted execution; make a generic Failure out of it and # raise that. if watcher_errors: # TODO: ambiguity exists if we somehow get WatcherError in *both* # threads...as unlikely as that would normally be. raise Failure(result, reason=watcher_errors[0]) # If a timeout was requested and the subprocess did time out, shout. timeout = self.opts["timeout"] if timeout is not None and self.timed_out: raise CommandTimedOut(result, timeout=timeout) if not (result or self.opts["warn"]): raise UnexpectedExit(result) return result def _unify_kwargs_with_config(self, kwargs: Any) -> None: """ Unify `run` kwargs with config options to arrive at local options. Sets: - ``self.opts`` - opts dict - ``self.streams`` - map of stream names to stream target values """ opts = {} for key, value in self.context.config.run.items(): runtime = kwargs.pop(key, None) opts[key] = value if runtime is None else runtime # Pull in command execution timeout, which stores config elsewhere, # but only use it if it's actually set (backwards compat) config_timeout = self.context.config.timeouts.command opts["timeout"] = kwargs.pop("timeout", config_timeout) # Handle invalid kwarg keys (anything left in kwargs). # Act like a normal function would, i.e. TypeError if kwargs: err = "run() got an unexpected keyword argument '{}'" raise TypeError(err.format(list(kwargs.keys())[0])) # Update disowned, async flags self._asynchronous = opts["asynchronous"] self._disowned = opts["disown"] if self._asynchronous and self._disowned: err = "Cannot give both 'asynchronous' and 'disown' at the same time!" # noqa raise ValueError(err) # If hide was True, turn off echoing if opts["hide"] is True: opts["echo"] = False # Conversely, ensure echoing is always on when dry-running if opts["dry"] is True: opts["echo"] = True # Always hide if async if self._asynchronous: opts["hide"] = True # Then normalize 'hide' from one of the various valid input values, # into a stream-names tuple. Also account for the streams. out_stream, err_stream = opts["out_stream"], opts["err_stream"] opts["hide"] = normalize_hide(opts["hide"], out_stream, err_stream) # Derive stream objects if out_stream is None: out_stream = sys.stdout if err_stream is None: err_stream = sys.stderr in_stream = opts["in_stream"] if in_stream is None: # If in_stream hasn't been overridden, and we're async, we don't # want to read from sys.stdin (otherwise the default) - so set # False instead. in_stream = False if self._asynchronous else sys.stdin # Determine pty or no self.using_pty = self.should_use_pty(opts["pty"], opts["fallback"]) if opts["watchers"]: self.watchers = opts["watchers"] # Set data self.opts = opts self.streams = {"out": out_stream, "err": err_stream, "in": in_stream} def _collate_result(self, watcher_errors: List[WatcherError]) -> "Result": # At this point, we had enough success that we want to be returning or # raising detailed info about our execution; so we generate a Result. stdout = "".join(self.stdout) stderr = "".join(self.stderr) if WINDOWS: # "Universal newlines" - replace all standard forms of # newline with \n. 
This is not technically Windows related # (\r as newline is an old Mac convention) but we only apply # the translation for Windows as that's the only platform # it is likely to matter for these days. stdout = stdout.replace("\r\n", "\n").replace("\r", "\n") stderr = stderr.replace("\r\n", "\n").replace("\r", "\n") # Get return/exit code, unless there were WatcherErrors to handle. # NOTE: In that case, returncode() may block waiting on the process # (which may be waiting for user input). Since most WatcherError # situations lack a useful exit code anyways, skipping this doesn't # really hurt any. exited = None if watcher_errors else self.returncode() # TODO: as noted elsewhere, I kinda hate this. Consider changing # generate_result()'s API in next major rev so we can tidy up. result = self.generate_result( **dict( self.result_kwargs, stdout=stdout, stderr=stderr, exited=exited ) ) return result def _thread_join_timeout(self, target: Callable) -> Optional[int]: # Add a timeout to out/err thread joins when it looks like they're not # dead but their counterpart is dead; this indicates issue #351 (fixed # by #432) where the subproc may hang because its stdout (or stderr) is # no longer being consumed by the dead thread (and a pipe is filling # up.) In that case, the non-dead thread is likely to block forever on # a `recv` unless we add this timeout. if target == self.handle_stdin: return None opposite = self.handle_stderr if target == self.handle_stderr: opposite = self.handle_stdout if opposite in self.threads and self.threads[opposite].is_dead: return 1 return None def create_io_threads( self, ) -> Tuple[Dict[Callable, ExceptionHandlingThread], List[str], List[str]]: """ Create and return a dictionary of IO thread worker objects. Caller is expected to handle persisting and/or starting the wrapped threads. """ stdout: List[str] = [] stderr: List[str] = [] # Set up IO thread parameters (format - body_func: {kwargs}) thread_args: Dict[Callable, Any] = { self.handle_stdout: { "buffer_": stdout, "hide": "stdout" in self.opts["hide"], "output": self.streams["out"], } } # After opt processing above, in_stream will be a real stream obj or # False, so we can truth-test it. We don't even create a stdin-handling # thread if it's False, meaning user indicated stdin is nonexistent or # problematic. if self.streams["in"]: thread_args[self.handle_stdin] = { "input_": self.streams["in"], "output": self.streams["out"], "echo": self.opts["echo_stdin"], } if not self.using_pty: thread_args[self.handle_stderr] = { "buffer_": stderr, "hide": "stderr" in self.opts["hide"], "output": self.streams["err"], } # Kick off IO threads threads = {} for target, kwargs in thread_args.items(): t = ExceptionHandlingThread(target=target, kwargs=kwargs) threads[target] = t return threads, stdout, stderr def generate_result(self, **kwargs: Any) -> "Result": """ Create & return a suitable `Result` instance from the given ``kwargs``. Subclasses may wish to override this in order to manipulate things or generate a `Result` subclass (e.g. ones containing additional metadata besides the default). .. versionadded:: 1.0 """ return Result(**kwargs) def read_proc_output(self, reader: Callable) -> Generator[str, None, None]: """ Iteratively read & decode bytes from a subprocess' out/err stream. :param reader: A literal reader function/partial, wrapping the actual stream object in question, which takes a number of bytes to read, and returns that many bytes (or ``None``). 
``reader`` should be a reference to either `read_proc_stdout` or `read_proc_stderr`, which perform the actual, platform/library specific read calls. :returns: A generator yielding strings. Specifically, each resulting string is the result of decoding `read_chunk_size` bytes read from the subprocess' out/err stream. .. versionadded:: 1.0 """ # NOTE: Typically, reading from any stdout/err (local, remote or # otherwise) can be thought of as "read until you get nothing back". # This is preferable over "wait until an out-of-band signal claims the # process is done running" because sometimes that signal will appear # before we've actually read all the data in the stream (i.e.: a race # condition). while True: data = reader(self.read_chunk_size) if not data: break yield self.decode(data) def write_our_output(self, stream: IO, string: str) -> None: """ Write ``string`` to ``stream``. Also calls ``.flush()`` on ``stream`` to ensure that real terminal streams don't buffer. :param stream: A file-like stream object, mapping to the ``out_stream`` or ``err_stream`` parameters of `run`. :param string: A Unicode string object. :returns: ``None``. .. versionadded:: 1.0 """ stream.write(string) stream.flush() def _handle_output( self, buffer_: List[str], hide: bool, output: IO, reader: Callable, ) -> None: # TODO: store un-decoded/raw bytes somewhere as well... for data in self.read_proc_output(reader): # Echo to local stdout if necessary # TODO: should we rephrase this as "if you want to hide, give me a # dummy output stream, e.g. something like /dev/null"? Otherwise, a # combo of 'hide=stdout' + 'here is an explicit out_stream' means # out_stream is never written to, and that seems...odd. if not hide: self.write_our_output(stream=output, string=data) # Store in shared buffer so main thread can do things with the # result after execution completes. # NOTE: this is threadsafe insofar as no reading occurs until after # the thread is join()'d. buffer_.append(data) # Run our specific buffer through the autoresponder framework self.respond(buffer_) def handle_stdout( self, buffer_: List[str], hide: bool, output: IO ) -> None: """ Read process' stdout, storing into a buffer & printing/parsing. Intended for use as a thread target. Only terminates when all stdout from the subprocess has been read. :param buffer_: The capture buffer shared with the main thread. :param bool hide: Whether or not to replay data into ``output``. :param output: Output stream (file-like object) to write data into when not hiding. :returns: ``None``. .. versionadded:: 1.0 """ self._handle_output( buffer_, hide, output, reader=self.read_proc_stdout ) def handle_stderr( self, buffer_: List[str], hide: bool, output: IO ) -> None: """ Read process' stderr, storing into a buffer & printing/parsing. Identical to `handle_stdout` except for the stream read from; see its docstring for API details. .. versionadded:: 1.0 """ self._handle_output( buffer_, hide, output, reader=self.read_proc_stderr ) def read_our_stdin(self, input_: IO) -> Optional[str]: """ Read & decode bytes from a local stdin stream. :param input_: Actual stream object to read from. Maps to ``in_stream`` in `run`, so will often be ``sys.stdin``, but might be any stream-like object. :returns: A Unicode string, the result of decoding the read bytes (this might be the empty string if the pipe has closed/reached EOF); or ``None`` if stdin wasn't ready for reading yet. .. versionadded:: 1.0 """ # TODO: consider moving the character_buffered contextmanager call in # here? 
Downside is it would be flipping those switches for every byte # read instead of once per session, which could be costly (?). bytes_ = None if ready_for_reading(input_): try: bytes_ = input_.read(bytes_to_read(input_)) except OSError as e: # Assume EBADF in this situation implies running under nohup or # similar, where: # - we cannot reliably detect a bad FD up front # - trying to read it would explode # - user almost surely doesn't care about stdin anyways # and ignore it (but not other OSErrors!) if e.errno != errno.EBADF: raise # Decode if it appears to be binary-type. (From real terminal # streams, usually yes; from file-like objects, often no.) if bytes_ and isinstance(bytes_, bytes): # TODO: will decoding 1 byte at a time break multibyte # character encodings? How to square interactivity with that? bytes_ = self.decode(bytes_) return bytes_ def handle_stdin( self, input_: IO, output: IO, echo: bool = False, ) -> None: """ Read local stdin, copying into process' stdin as necessary. Intended for use as a thread target. .. note:: Because real terminal stdin streams have no well-defined "end", if such a stream is detected (based on existence of a callable ``.fileno()``) this method will wait until `program_finished` is set, before terminating. When the stream doesn't appear to be from a terminal, the same semantics as `handle_stdout` are used - the stream is simply ``read()`` from until it returns an empty value. :param input_: Stream (file-like object) from which to read. :param output: Stream (file-like object) to which echoing may occur. :param bool echo: User override option for stdin-stdout echoing. :returns: ``None``. .. versionadded:: 1.0 """ # TODO: reinstate lock/whatever thread logic from fab v1 which prevents # reading from stdin while other parts of the code are prompting for # runtime passwords? (search for 'input_enabled') # TODO: fabric#1339 is strongly related to this, if it's not literally # exposing some regression in Fabric 1.x itself. closed_stdin = False with character_buffered(input_): while True: data = self.read_our_stdin(input_) if data: # Mirror what we just read to process' stdin. # We encode to ensure bytes, but skip the decode step since # there's presumably no need (nobody's interacting with # this data programmatically). self.write_proc_stdin(data) # Also echo it back to local stdout (or whatever # out_stream is set to) when necessary. if echo is None: echo = self.should_echo_stdin(input_, output) if echo: self.write_our_output(stream=output, string=data) # Empty string/char/byte != None. Can't just use 'else' here. elif data is not None: # When reading from file-like objects that aren't "real" # terminal streams, an empty byte signals EOF. if not self.using_pty and not closed_stdin: self.close_proc_stdin() closed_stdin = True # Dual all-done signals: program being executed is done # running, *and* we don't seem to be reading anything out of # stdin. (NOTE: If we only test the former, we may encounter # race conditions re: unread stdin.) if self.program_finished.is_set() and not data: break # Take a nap so we're not chewing CPU. time.sleep(self.input_sleep) def should_echo_stdin(self, input_: IO, output: IO) -> bool: """ Determine whether data read from ``input_`` should echo to ``output``. Used by `handle_stdin`; tests attributes of ``input_`` and ``output``. :param input_: Input stream (file-like object). :param output: Output stream (file-like object). :returns: A ``bool``. .. 
versionadded:: 1.0 """ return (not self.using_pty) and isatty(input_) def respond(self, buffer_: List[str]) -> None: """ Write to the program's stdin in response to patterns in ``buffer_``. The patterns and responses are driven by the `.StreamWatcher` instances from the ``watchers`` kwarg of `run` - see :doc:`/concepts/watchers` for a conceptual overview. :param buffer: The capture buffer for this thread's particular IO stream. :returns: ``None``. .. versionadded:: 1.0 """ # Join buffer contents into a single string; without this, # StreamWatcher subclasses can't do things like iteratively scan for # pattern matches. # NOTE: using string.join should be "efficient enough" for now, re: # speed and memory use. Should that become false, consider using # StringIO or cStringIO (tho the latter doesn't do Unicode well?) which # is apparently even more efficient. stream = "".join(buffer_) for watcher in self.watchers: for response in watcher.submit(stream): self.write_proc_stdin(response) def generate_env( self, env: Dict[str, Any], replace_env: bool ) -> Dict[str, Any]: """ Return a suitable environment dict based on user input & behavior. :param dict env: Dict supplying overrides or full env, depending. :param bool replace_env: Whether ``env`` updates, or is used in place of, the value of `os.environ`. :returns: A dictionary of shell environment vars. .. versionadded:: 1.0 """ return env if replace_env else dict(os.environ, **env) def should_use_pty(self, pty: bool, fallback: bool) -> bool: """ Should execution attempt to use a pseudo-terminal? :param bool pty: Whether the user explicitly asked for a pty. :param bool fallback: Whether falling back to non-pty execution should be allowed, in situations where ``pty=True`` but a pty could not be allocated. .. versionadded:: 1.0 """ # NOTE: fallback not used: no falling back implemented by default. return pty @property def has_dead_threads(self) -> bool: """ Detect whether any IO threads appear to have terminated unexpectedly. Used during process-completion waiting (in `wait`) to ensure we don't deadlock our child process if our IO processing threads have errored/died. :returns: ``True`` if any threads appear to have terminated with an exception, ``False`` otherwise. .. versionadded:: 1.0 """ return any(x.is_dead for x in self.threads.values()) def wait(self) -> None: """ Block until the running command appears to have exited. :returns: ``None``. .. versionadded:: 1.0 """ while True: proc_finished = self.process_is_finished dead_threads = self.has_dead_threads if proc_finished or dead_threads: break time.sleep(self.input_sleep) def write_proc_stdin(self, data: str) -> None: """ Write encoded ``data`` to the running process' stdin. :param data: A Unicode string. :returns: ``None``. .. versionadded:: 1.0 """ # Encode always, then request implementing subclass to perform the # actual write to subprocess' stdin. self._write_proc_stdin(data.encode(self.encoding)) def decode(self, data: bytes) -> str: """ Decode some ``data`` bytes, returning Unicode. .. versionadded:: 1.0 """ # NOTE: yes, this is a 1-liner. The point is to make it much harder to # forget to use 'replace' when decoding :) return data.decode(self.encoding, "replace") @property def process_is_finished(self) -> bool: """ Determine whether our subprocess has terminated. .. note:: The implementation of this method should be nonblocking, as it is used within a query/poll loop. :returns: ``True`` if the subprocess has finished running, ``False`` otherwise. .. 
versionadded:: 1.0 """ raise NotImplementedError def start(self, command: str, shell: str, env: Dict[str, Any]) -> None: """ Initiate execution of ``command`` (via ``shell``, with ``env``). Typically this means use of a forked subprocess or requesting start of execution on a remote system. In most cases, this method will also set subclass-specific member variables used in other methods such as `wait` and/or `returncode`. :param str command: Command string to execute. :param str shell: Shell to use when executing ``command``. :param dict env: Environment dict used to prep shell environment. .. versionadded:: 1.0 """ raise NotImplementedError def start_timer(self, timeout: int) -> None: """ Start a timer to `kill` our subprocess after ``timeout`` seconds. """ if timeout is not None: self._timer = threading.Timer(timeout, self.kill) self._timer.start() def read_proc_stdout(self, num_bytes: int) -> Optional[bytes]: """ Read ``num_bytes`` from the running process' stdout stream. :param int num_bytes: Number of bytes to read at maximum. :returns: A string/bytes object. .. versionadded:: 1.0 """ raise NotImplementedError def read_proc_stderr(self, num_bytes: int) -> Optional[bytes]: """ Read ``num_bytes`` from the running process' stderr stream. :param int num_bytes: Number of bytes to read at maximum. :returns: A string/bytes object. .. versionadded:: 1.0 """ raise NotImplementedError def _write_proc_stdin(self, data: bytes) -> None: """ Write ``data`` to running process' stdin. This should never be called directly; it's for subclasses to implement. See `write_proc_stdin` for the public API call. :param data: Already-encoded byte data suitable for writing. :returns: ``None``. .. versionadded:: 1.0 """ raise NotImplementedError def close_proc_stdin(self) -> None: """ Close running process' stdin. :returns: ``None``. .. versionadded:: 1.3 """ raise NotImplementedError def default_encoding(self) -> str: """ Return a string naming the expected encoding of subprocess streams. This return value should be suitable for use by encode/decode methods. .. versionadded:: 1.0 """ # TODO: probably wants to be 2 methods, one for local and one for # subprocess. For now, good enough to assume both are the same. return default_encoding() def send_interrupt(self, interrupt: "KeyboardInterrupt") -> None: """ Submit an interrupt signal to the running subprocess. In almost all implementations, the default behavior is what will be desired: submit ``\x03`` to the subprocess' stdin pipe. However, we leave this as a public method in case this default needs to be augmented or replaced. :param interrupt: The locally-sourced ``KeyboardInterrupt`` causing the method call. :returns: ``None``. .. versionadded:: 1.0 """ self.write_proc_stdin("\x03") def returncode(self) -> Optional[int]: """ Return the numeric return/exit code resulting from command execution. :returns: `int`, if any reasonable return code could be determined, or ``None`` in corner cases where that was not possible. .. versionadded:: 1.0 """ raise NotImplementedError def stop(self) -> None: """ Perform final cleanup, if necessary. This method is called within a ``finally`` clause inside the main `run` method. Depending on the subclass, it may be a no-op, or it may do things such as close network connections or open files. :returns: ``None`` .. versionadded:: 1.0 """ if self._timer: self._timer.cancel() def kill(self) -> None: """ Forcibly terminate the subprocess. Typically only used by the timeout functionality. This is often a "best-effort" attempt, e.g. 
remote subprocesses often must settle for simply shutting down the local side of the network connection and hoping the remote end eventually gets the message. """ raise NotImplementedError @property def timed_out(self) -> bool: """ Returns ``True`` if the subprocess stopped because it timed out. .. versionadded:: 1.3 """ # Timer expiry implies we did time out. (The timer itself will have # killed the subprocess, allowing us to even get to this point.) return bool(self._timer and not self._timer.is_alive()) class Local(Runner): """ Execute a command on the local system in a subprocess. .. note:: When Invoke itself is executed without a controlling terminal (e.g. when ``sys.stdin`` lacks a useful ``fileno``), it's not possible to present a handle on our PTY to local subprocesses. In such situations, `Local` will fallback to behaving as if ``pty=False`` (on the theory that degraded execution is better than none at all) as well as printing a warning to stderr. To disable this behavior, say ``fallback=False``. .. versionadded:: 1.0 """ def __init__(self, context: "Context") -> None: super().__init__(context) # Bookkeeping var for pty use case self.status = 0 def should_use_pty(self, pty: bool = False, fallback: bool = True) -> bool: use_pty = False if pty: use_pty = True # TODO: pass in & test in_stream, not sys.stdin if not has_fileno(sys.stdin) and fallback: if not self.warned_about_pty_fallback: err = "WARNING: stdin has no fileno; falling back to non-pty execution!\n" # noqa sys.stderr.write(err) self.warned_about_pty_fallback = True use_pty = False return use_pty def read_proc_stdout(self, num_bytes: int) -> Optional[bytes]: # Obtain useful read-some-bytes function if self.using_pty: # Need to handle spurious OSErrors on some Linux platforms. try: data = os.read(self.parent_fd, num_bytes) except OSError as e: # Only eat I/O specific OSErrors so we don't hide others stringified = str(e) io_errors = ( # The typical default "Input/output error", # Some less common platforms phrase it this way "I/O error", ) if not any(error in stringified for error in io_errors): raise # The bad OSErrors happen after all expected output has # appeared, so we return a falsey value, which triggers the # "end of output" logic in code using reader functions. data = None elif self.process and self.process.stdout: data = os.read(self.process.stdout.fileno(), num_bytes) else: data = None return data def read_proc_stderr(self, num_bytes: int) -> Optional[bytes]: # NOTE: when using a pty, this will never be called. # TODO: do we ever get those OSErrors on stderr? Feels like we could? if self.process and self.process.stderr: return os.read(self.process.stderr.fileno(), num_bytes) return None def _write_proc_stdin(self, data: bytes) -> None: # NOTE: parent_fd from os.fork() is a read/write pipe attached to our # forked process' stdout/stdin, respectively. if self.using_pty: fd = self.parent_fd elif self.process and self.process.stdin: fd = self.process.stdin.fileno() else: raise SubprocessPipeError( "Unable to write to missing subprocess or stdin!" ) # Try to write, ignoring broken pipes if encountered (implies child # process exited before the process piping stdin to us finished; # there's nothing we can do about that!) 
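        # Editor's aside (a sketch assuming Python 3 stdlib semantics, not
        # upstream code): the same guard can be written with the dedicated
        # OSError subclass:
        #
        #     try:
        #         os.write(fd, data)
        #     except BrokenPipeError:
        #         pass  # child already exited; nothing left to write to
        #
        # The string-matching form below is retained since it also catches
        # wrappers that re-raise the condition as a plain OSError.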
        try:
            os.write(fd, data)
        except OSError as e:
            if "Broken pipe" not in str(e):
                raise

    def close_proc_stdin(self) -> None:
        if self.using_pty:
            # there is no working scenario to tell the process that stdin
            # closed when using pty
            raise SubprocessPipeError("Cannot close stdin when pty=True")
        elif self.process and self.process.stdin:
            self.process.stdin.close()
        else:
            raise SubprocessPipeError(
                "Unable to close missing subprocess or stdin!"
            )

    def start(self, command: str, shell: str, env: Dict[str, Any]) -> None:
        if self.using_pty:
            if pty is None:  # Encountered ImportError
                err = "You indicated pty=True, but your platform doesn't support the 'pty' module!"  # noqa
                sys.exit(err)
            cols, rows = pty_size()
            self.pid, self.parent_fd = pty.fork()
            # If we're the child process, load up the actual command in a
            # shell, just as subprocess does; this replaces our process -
            # whose pipes are all hooked up to the PTY - with the "real" one.
            if self.pid == 0:
                # TODO: both pty.spawn() and pexpect.spawn() do a lot of
                # setup/teardown involving tty.setraw, getrlimit, signal.
                # Ostensibly we'll want some of that eventually, but if
                # possible write tests - integration-level if necessary -
                # before adding it!
                #
                # Set pty window size based on what our own controlling
                # terminal's window size appears to be.
                # TODO: make subroutine?
                winsize = struct.pack("HHHH", rows, cols, 0, 0)
                fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
                # Use execve for bare-minimum "exec w/ variable args + env"
                # behavior. No need for the 'p' (use PATH to find executable)
                # for now.
                # NOTE: stdlib subprocess (actually its posix flavor, which is
                # written in C) uses either execve or execv, depending.
                os.execve(shell, [shell, "-c", command], env)
        else:
            self.process = Popen(
                command,
                shell=True,
                executable=shell,
                env=env,
                stdout=PIPE,
                stderr=PIPE,
                stdin=PIPE,
            )

    def kill(self) -> None:
        pid = self.pid if self.using_pty else self.process.pid
        try:
            os.kill(pid, signal.SIGKILL)
        except ProcessLookupError:
            # In odd situations where our subprocess is already dead, don't
            # throw this upwards.
            pass

    @property
    def process_is_finished(self) -> bool:
        if self.using_pty:
            # NOTE:
            # https://github.com/pexpect/ptyprocess/blob/4058faa05e2940662ab6da1330aa0586c6f9cd9c/ptyprocess/ptyprocess.py#L680-L687
            # implies that Linux "requires" use of the blocking, non-WNOHANG
            # version of this call. Our testing doesn't verify this, however,
            # so...
            # NOTE: It does appear to be totally blocking on Windows, so our
            # issue #351 may be totally unsolvable there. Unclear.
            pid_val, self.status = os.waitpid(self.pid, os.WNOHANG)
            return pid_val != 0
        else:
            return self.process.poll() is not None

    def returncode(self) -> Optional[int]:
        if self.using_pty:
            # No subprocess.returncode available; use WIFEXITED/WIFSIGNALED to
            # determine which of WEXITSTATUS / WTERMSIG to use.
            # TODO: is it safe to just say "call all WEXITSTATUS/WTERMSIG and
            # return whichever one of them is nondefault"? Probably not?
            # NOTE: doing this in an arbitrary order should be safe since only
            # one of the WIF* methods ought to ever return True.
            code = None
            if os.WIFEXITED(self.status):
                code = os.WEXITSTATUS(self.status)
            elif os.WIFSIGNALED(self.status):
                code = os.WTERMSIG(self.status)
                # Match subprocess.returncode by turning signals into negative
                # 'exit code' integers.
                code = -1 * code
            return code
            # TODO: do we care about WIFSTOPPED? Maybe someday?
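            # Worked example (editor's illustration, not upstream code): a
            # pty child killed by SIGKILL yields a status for which
            # os.WIFSIGNALED(status) is True and os.WTERMSIG(status) == 9,
            # so the branch above produces code == -9 -- mirroring
            # subprocess.Popen's negative-returncode convention for signal
            # deaths. A plain 'exit 1' instead satisfies os.WIFEXITED and
            # yields code == 1.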
else: return self.process.returncode def stop(self) -> None: super().stop() # If we opened a PTY for child communications, make sure to close() it, # otherwise long-running Invoke-using processes exhaust their file # descriptors eventually. if self.using_pty: try: os.close(self.parent_fd) except Exception: # If something weird happened preventing the close, there's # nothing to be done about it now... pass class Result: """ A container for information about the result of a command execution. All params are exposed as attributes of the same name and type. :param str stdout: The subprocess' standard output. :param str stderr: Same as ``stdout`` but containing standard error (unless the process was invoked via a pty, in which case it will be empty; see `.Runner.run`.) :param str encoding: The string encoding used by the local shell environment. :param str command: The command which was executed. :param str shell: The shell binary used for execution. :param dict env: The shell environment used for execution. (Default is the empty dict, ``{}``, not ``None`` as displayed in the signature.) :param int exited: An integer representing the subprocess' exit/return code. .. note:: This may be ``None`` in situations where the subprocess did not run to completion, such as when auto-responding failed or a timeout was reached. :param bool pty: A boolean describing whether the subprocess was invoked with a pty or not; see `.Runner.run`. :param tuple hide: A tuple of stream names (none, one or both of ``('stdout', 'stderr')``) which were hidden from the user when the generating command executed; this is a normalized value derived from the ``hide`` parameter of `.Runner.run`. For example, ``run('command', hide='stdout')`` will yield a `Result` where ``result.hide == ('stdout',)``; ``hide=True`` or ``hide='both'`` results in ``result.hide == ('stdout', 'stderr')``; and ``hide=False`` (the default) generates ``result.hide == ()`` (the empty tuple.) .. note:: `Result` objects' truth evaluation is equivalent to their `.ok` attribute's value. Therefore, quick-and-dirty expressions like the following are possible:: if run("some shell command"): do_something() else: handle_problem() However, remember `Zen of Python #2 `_. .. versionadded:: 1.0 """ # TODO: inherit from namedtuple instead? heh (or: use attrs from pypi) def __init__( self, stdout: str = "", stderr: str = "", encoding: Optional[str] = None, command: str = "", shell: str = "", env: Optional[Dict[str, Any]] = None, exited: int = 0, pty: bool = False, hide: Tuple[str, ...] = tuple(), ): self.stdout = stdout self.stderr = stderr if encoding is None: encoding = default_encoding() self.encoding = encoding self.command = command self.shell = shell self.env = {} if env is None else env self.exited = exited self.pty = pty self.hide = hide @property def return_code(self) -> int: """ An alias for ``.exited``. .. versionadded:: 1.0 """ return self.exited def __bool__(self) -> bool: return self.ok def __str__(self) -> str: if self.exited is not None: desc = "Command exited with status {}.".format(self.exited) else: desc = "Command was not fully executed due to watcher error." ret = [desc] for x in ("stdout", "stderr"): val = getattr(self, x) ret.append( """=== {} === {} """.format( x, val.rstrip() ) if val else "(no {})".format(x) ) return "\n".join(ret) def __repr__(self) -> str: # TODO: more? e.g. len of stdout/err? (how to represent cleanly in a # 'x=y' format like this? e.g. 
'4b' is ambiguous as to what it
        # represents
        template = "<Result cmd={!r} exited={}>"
        return template.format(self.command, self.exited)

    @property
    def ok(self) -> bool:
        """
        A boolean equivalent to ``exited == 0``.

        .. versionadded:: 1.0
        """
        return bool(self.exited == 0)

    @property
    def failed(self) -> bool:
        """
        The inverse of ``ok``.

        I.e., ``True`` if the program exited with a nonzero return code, and
        ``False`` otherwise.

        .. versionadded:: 1.0
        """
        return not self.ok

    def tail(self, stream: str, count: int = 10) -> str:
        """
        Return the last ``count`` lines of ``stream``, plus leading
        whitespace.

        :param str stream:
            Name of some captured stream attribute, eg ``"stdout"``.
        :param int count:
            Number of lines to preserve.

        .. versionadded:: 1.3
        """
        # TODO: preserve alternate line endings? Mehhhh
        # NOTE: no trailing \n preservation; easier for below display if
        # normalized
        return "\n\n" + "\n".join(getattr(self, stream).splitlines()[-count:])


class Promise(Result):
    """
    A promise of some future `Result`, yielded from asynchronous execution.

    This class' primary API member is `join`; instances may also be used as
    context managers, which will automatically call `join` when the block
    exits. In such cases, the context manager yields ``self``.

    `Promise` also exposes copies of many `Result` attributes, specifically
    those that derive from `~Runner.run` kwargs and not the result of command
    execution. For example, ``command`` is replicated here, but ``stdout`` is
    not.

    .. versionadded:: 1.4
    """

    def __init__(self, runner: "Runner") -> None:
        """
        Create a new promise.

        :param runner:
            An in-flight `Runner` instance making this promise. Must already
            have started the subprocess and spun up IO threads.
        """
        self.runner = runner
        # Basically just want exactly this (recently refactored) kwargs dict.
        # TODO: consider proxying vs copying, but prob wait for refactor
        for key, value in self.runner.result_kwargs.items():
            setattr(self, key, value)

    def join(self) -> Result:
        """
        Block until associated subprocess exits, returning/raising the result.

        This acts identically to the end of a synchronously executed ``run``,
        namely that:

        - various background threads (such as IO workers) are themselves
          joined;
        - if the subprocess exited normally, a `Result` is returned;
        - in any other case (unforeseen exceptions, IO sub-thread
          `.ThreadException`, `.Failure`, `.WatcherError`) the relevant
          exception is raised here.

        See `~Runner.run` docs, or those of the relevant classes, for further
        details.
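        A short usage sketch (editor's illustration; assumes a `.Context`
        named ``c`` and a shell providing ``sleep``)::

            promise = c.run("sleep 5", asynchronous=True)
            # ... do other work while the subprocess runs ...
            result = promise.join()  # blocks, then returns/raises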
""" try: return self.runner._finish() finally: self.runner.stop() def __enter__(self) -> "Promise": return self def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: BaseException, exc_tb: Optional[TracebackType], ) -> None: self.join() def normalize_hide( val: Any, out_stream: Optional[str] = None, err_stream: Optional[str] = None, ) -> Tuple[str, ...]: # Normalize to list-of-stream-names hide_vals = (None, False, "out", "stdout", "err", "stderr", "both", True) if val not in hide_vals: err = "'hide' got {!r} which is not in {!r}" raise ValueError(err.format(val, hide_vals)) if val in (None, False): hide = [] elif val in ("both", True): hide = ["stdout", "stderr"] elif val == "out": hide = ["stdout"] elif val == "err": hide = ["stderr"] else: hide = [val] # Revert any streams that have been overridden from the default value if out_stream is not None and "stdout" in hide: hide.remove("stdout") if err_stream is not None and "stderr" in hide: hide.remove("stderr") return tuple(hide) def default_encoding() -> str: """ Obtain apparent interpreter-local default text encoding. Often used as a baseline in situations where we must use SOME encoding for unknown-but-presumably-text bytes, and the user has not specified an override. """ encoding = locale.getpreferredencoding(False) return encoding invoke-2.2.0/invoke/tasks.py000066400000000000000000000467521445356551000160260ustar00rootroot00000000000000""" This module contains the core `.Task` class & convenience decorators used to generate new tasks. """ import inspect import types from copy import deepcopy from functools import update_wrapper from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Generic, Iterable, Optional, Set, Tuple, Type, TypeVar, Union, ) from .context import Context from .parser import Argument, translate_underscores if TYPE_CHECKING: from inspect import Signature from .config import Config T = TypeVar("T", bound=Callable) class Task(Generic[T]): """ Core object representing an executable task & its argument specification. For the most part, this object is a clearinghouse for all of the data that may be supplied to the `@task ` decorator, such as ``name``, ``aliases``, ``positional`` etc, which appear as attributes. In addition, instantiation copies some introspection/documentation friendly metadata off of the supplied ``body`` object, such as ``__doc__``, ``__name__`` and ``__module__``, allowing it to "appear as" ``body`` for most intents and purposes. .. versionadded:: 1.0 """ # TODO: store these kwarg defaults central, refer to those values both here # and in @task. # TODO: allow central per-session / per-taskmodule control over some of # them, e.g. (auto_)positional, auto_shortflags. # NOTE: we shadow __builtins__.help here on purpose - obfuscating to avoid # it feels bad, given the builtin will never actually be in play anywhere # except a debug shell whose frame is exactly inside this class. 
    def __init__(
        self,
        body: Callable,
        name: Optional[str] = None,
        aliases: Iterable[str] = (),
        positional: Optional[Iterable[str]] = None,
        optional: Iterable[str] = (),
        default: bool = False,
        auto_shortflags: bool = True,
        help: Optional[Dict[str, Any]] = None,
        pre: Optional[Union[List[str], str]] = None,
        post: Optional[Union[List[str], str]] = None,
        autoprint: bool = False,
        iterable: Optional[Iterable[str]] = None,
        incrementable: Optional[Iterable[str]] = None,
    ) -> None:
        # Real callable
        self.body = body
        update_wrapper(self, self.body)
        # Copy a bunch of special properties from the body for the benefit of
        # Sphinx autodoc or other introspectors.
        self.__doc__ = getattr(body, "__doc__", "")
        self.__name__ = getattr(body, "__name__", "")
        self.__module__ = getattr(body, "__module__", "")
        # Default name, alternate names, and whether it should act as the
        # default for its parent collection
        self._name = name
        self.aliases = aliases
        self.is_default = default
        # Arg/flag/parser hints
        self.positional = self.fill_implicit_positionals(positional)
        self.optional = tuple(optional)
        self.iterable = iterable or []
        self.incrementable = incrementable or []
        self.auto_shortflags = auto_shortflags
        self.help = (help or {}).copy()
        # Call chain bidness
        self.pre = pre or []
        self.post = post or []
        self.times_called = 0
        # Whether to print return value post-execution
        self.autoprint = autoprint

    @property
    def name(self) -> str:
        return self._name or self.__name__

    def __repr__(self) -> str:
        aliases = ""
        if self.aliases:
            aliases = " ({})".format(", ".join(self.aliases))
        return "<Task {!r}{}>".format(self.name, aliases)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Task) or self.name != other.name:
            return False
        # Functions do not define __eq__ but func_code objects apparently do.
        # (If we're wrapping some other callable, they will be responsible for
        # defining equality on their end.)
        if self.body == other.body:
            return True
        else:
            try:
                return self.body.__code__ == other.body.__code__
            except AttributeError:
                return False

    def __hash__(self) -> int:
        # Presumes name and body will never be changed. Hrm.
        # Potentially cleaner to just not use Tasks as hash keys, but let's do
        # this for now.
        return hash(self.name) + hash(self.body)

    def __call__(self, *args: Any, **kwargs: Any) -> T:
        # Guard against calling tasks with no context.
        if not isinstance(args[0], Context):
            err = "Task expected a Context as its first arg, got {} instead!"
            # TODO: raise a custom subclass _of_ TypeError instead
            raise TypeError(err.format(type(args[0])))
        result = self.body(*args, **kwargs)
        self.times_called += 1
        return result

    @property
    def called(self) -> bool:
        return self.times_called > 0

    def argspec(self, body: Callable) -> "Signature":
        """
        Returns a modified `inspect.Signature` based on that of ``body``.

        :returns:
            an `inspect.Signature` matching that of ``body``, but with the
            initial context argument removed.
        :raises TypeError:
            if the task lacks an initial positional `.Context` argument.

        .. versionadded:: 1.0
        .. versionchanged:: 2.0
            Changed from returning a two-tuple of ``(arg_names, spec_dict)``
            to returning an `inspect.Signature`.
        """
        # Handle callable-but-not-function objects
        func = (
            body
            if isinstance(body, types.FunctionType)
            else body.__call__  # type: ignore
        )
        # Rebuild signature with first arg dropped, or die usefully(ish)
        # trying
        sig = inspect.signature(func)
        params = list(sig.parameters.values())
        # TODO: this ought to also check if an extant 1st param _was_ a
        # Context arg, and yell similarly if not.
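        # Worked example (editor's illustration): for a task defined as
        # 'def deploy(c, target, dry_run=False)', this method returns a
        # Signature equivalent to '(target, dry_run=False)' -- the leading
        # Context parameter is stripped before the parser ever sees it.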
if not len(params): # TODO: see TODO under __call__, this should be same type raise TypeError("Tasks must have an initial Context argument!") return sig.replace(parameters=params[1:]) def fill_implicit_positionals( self, positional: Optional[Iterable[str]] ) -> Iterable[str]: # If positionals is None, everything lacking a default # value will be automatically considered positional. if positional is None: positional = [ x.name for x in self.argspec(self.body).parameters.values() if x.default is inspect.Signature.empty ] return positional def arg_opts( self, name: str, default: str, taken_names: Set[str] ) -> Dict[str, Any]: opts: Dict[str, Any] = {} # Whether it's positional or not opts["positional"] = name in self.positional # Whether it is a value-optional flag opts["optional"] = name in self.optional # Whether it should be of an iterable (list) kind if name in self.iterable: opts["kind"] = list # If user gave a non-None default, hopefully they know better # than us what they want here (and hopefully it offers the list # protocol...) - otherwise supply useful default opts["default"] = default if default is not None else [] # Whether it should increment its value or not if name in self.incrementable: opts["incrementable"] = True # Argument name(s) (replace w/ dashed version if underscores present, # and move the underscored version to be the attr_name instead.) original_name = name # For reference in eg help= if "_" in name: opts["attr_name"] = name name = translate_underscores(name) names = [name] if self.auto_shortflags: # Must know what short names are available for char in name: if not (char == name or char in taken_names): names.append(char) break opts["names"] = names # Handle default value & kind if possible if default not in (None, inspect.Signature.empty): # TODO: allow setting 'kind' explicitly. # NOTE: skip setting 'kind' if optional is True + type(default) is # bool; that results in a nonsensical Argument which gives the # parser grief in a few ways. kind = type(default) if not (opts["optional"] and kind is bool): opts["kind"] = kind opts["default"] = default # Help for possibility in name, original_name: if possibility in self.help: opts["help"] = self.help.pop(possibility) break return opts def get_arguments( self, ignore_unknown_help: Optional[bool] = None ) -> List[Argument]: """ Return a list of Argument objects representing this task's signature. :param bool ignore_unknown_help: Controls whether unknown help flags cause errors. See the config option by the same name for details. .. versionadded:: 1.0 .. versionchanged:: 1.7 Added the ``ignore_unknown_help`` kwarg. """ # Core argspec sig = self.argspec(self.body) # Prime the list of all already-taken names (mostly for help in # choosing auto shortflags) taken_names = set(sig.parameters.keys()) # Build arg list (arg_opts will take care of setting up shortnames, # etc) args = [] for param in sig.parameters.values(): new_arg = Argument( **self.arg_opts(param.name, param.default, taken_names) ) args.append(new_arg) # Update taken_names list with new argument's full name list # (which may include new shortflags) so subsequent Argument # creation knows what's taken. taken_names.update(set(new_arg.names)) # If any values were leftover after consuming a 'help' dict, it implies # the user messed up & had a typo or similar. Let's explode. 
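        # Hypothetical illustration (editor's note): given
        # '@task(help={"forse": "..."})' on a task whose real argument is
        # 'force', the misspelled key is never consumed above, so it remains
        # in self.help and trips the ValueError below -- unless the
        # similarly-named 'ignore_unknown_help' config option is enabled.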
if self.help and not ignore_unknown_help: raise ValueError( "Help field was set for param(s) that don't exist: {}".format( list(self.help.keys()) ) ) # Now we need to ensure positionals end up in the front of the list, in # order given in self.positionals, so that when Context consumes them, # this order is preserved. for posarg in reversed(list(self.positional)): for i, arg in enumerate(args): if arg.name == posarg: args.insert(0, args.pop(i)) break return args def task(*args: Any, **kwargs: Any) -> Callable: """ Marks wrapped callable object as a valid Invoke task. May be called without any parentheses if no extra options need to be specified. Otherwise, the following keyword arguments are allowed in the parenthese'd form: * ``name``: Default name to use when binding to a `.Collection`. Useful for avoiding Python namespace issues (i.e. when the desired CLI level name can't or shouldn't be used as the Python level name.) * ``aliases``: Specify one or more aliases for this task, allowing it to be invoked as multiple different names. For example, a task named ``mytask`` with a simple ``@task`` wrapper may only be invoked as ``"mytask"``. Changing the decorator to be ``@task(aliases=['myothertask'])`` allows invocation as ``"mytask"`` *or* ``"myothertask"``. * ``positional``: Iterable overriding the parser's automatic "args with no default value are considered positional" behavior. If a list of arg names, no args besides those named in this iterable will be considered positional. (This means that an empty list will force all arguments to be given as explicit flags.) * ``optional``: Iterable of argument names, declaring those args to have :ref:`optional values `. Such arguments may be given as value-taking options (e.g. ``--my-arg=myvalue``, wherein the task is given ``"myvalue"``) or as Boolean flags (``--my-arg``, resulting in ``True``). * ``iterable``: Iterable of argument names, declaring them to :ref:`build iterable values `. * ``incrementable``: Iterable of argument names, declaring them to :ref:`increment their values `. * ``default``: Boolean option specifying whether this task should be its collection's default task (i.e. called if the collection's own name is given.) * ``auto_shortflags``: Whether or not to automatically create short flags from task options; defaults to True. * ``help``: Dict mapping argument names to their help strings. Will be displayed in ``--help`` output. For arguments containing underscores (which are transformed into dashes on the CLI by default), either the dashed or underscored version may be supplied here. * ``pre``, ``post``: Lists of task objects to execute prior to, or after, the wrapped task whenever it is executed. * ``autoprint``: Boolean determining whether to automatically print this task's return value to standard output when invoked directly via the CLI. Defaults to False. * ``klass``: Class to instantiate/return. Defaults to `.Task`. If any non-keyword arguments are given, they are taken as the value of the ``pre`` kwarg for convenience's sake. (It is an error to give both ``*args`` and ``pre`` at the same time.) .. versionadded:: 1.0 .. versionchanged:: 1.1 Added the ``klass`` keyword argument. """ klass: Type[Task] = kwargs.pop("klass", Task) # @task -- no options were (probably) given. if len(args) == 1 and callable(args[0]) and not isinstance(args[0], Task): return klass(args[0], **kwargs) # @task(pre, tasks, here) if args: if "pre" in kwargs: raise TypeError( "May not give *args and 'pre' kwarg simultaneously!" 
) kwargs["pre"] = args def inner(body: Callable) -> Task[T]: _task = klass(body, **kwargs) return _task # update_wrapper(inner, klass) return inner class Call: """ Represents a call/execution of a `.Task` with given (kw)args. Similar to `~functools.partial` with some added functionality (such as the delegation to the inner task, and optional tracking of the name it's being called by.) .. versionadded:: 1.0 """ def __init__( self, task: "Task", called_as: Optional[str] = None, args: Optional[Tuple[str, ...]] = None, kwargs: Optional[Dict[str, Any]] = None, ) -> None: """ Create a new `.Call` object. :param task: The `.Task` object to be executed. :param str called_as: The name the task is being called as, e.g. if it was called by an alias or other rebinding. Defaults to ``None``, aka, the task was referred to by its default name. :param tuple args: Positional arguments to call with, if any. Default: ``None``. :param dict kwargs: Keyword arguments to call with, if any. Default: ``None``. """ self.task = task self.called_as = called_as self.args = args or tuple() self.kwargs = kwargs or dict() # TODO: just how useful is this? feels like maybe overkill magic def __getattr__(self, name: str) -> Any: return getattr(self.task, name) def __deepcopy__(self, memo: object) -> "Call": return self.clone() def __repr__(self) -> str: aka = "" if self.called_as is not None and self.called_as != self.task.name: aka = " (called as: {!r})".format(self.called_as) return "<{} {!r}{}, args: {!r}, kwargs: {!r}>".format( self.__class__.__name__, self.task.name, aka, self.args, self.kwargs, ) def __eq__(self, other: object) -> bool: # NOTE: Not comparing 'called_as'; a named call of a given Task with # same args/kwargs should be considered same as an unnamed call of the # same Task with the same args/kwargs (e.g. pre/post task specified w/o # name). Ditto tasks with multiple aliases. for attr in "task args kwargs".split(): if getattr(self, attr) != getattr(other, attr): return False return True def make_context(self, config: "Config") -> Context: """ Generate a `.Context` appropriate for this call, with given config. .. versionadded:: 1.0 """ return Context(config=config) def clone_data(self) -> Dict[str, Any]: """ Return keyword args suitable for cloning this call into another. .. versionadded:: 1.1 """ return dict( task=self.task, called_as=self.called_as, args=deepcopy(self.args), kwargs=deepcopy(self.kwargs), ) def clone( self, into: Optional[Type["Call"]] = None, with_: Optional[Dict[str, Any]] = None, ) -> "Call": """ Return a standalone copy of this Call. Useful when parameterizing task executions. :param into: A subclass to generate instead of the current class. Optional. :param dict with_: A dict of additional keyword arguments to use when creating the new clone; typically used when cloning ``into`` a subclass that has extra args on top of the base class. Optional. .. note:: This dict is used to ``.update()`` the original object's data (the return value from its `clone_data`), so in the event of a conflict, values in ``with_`` will win out. .. versionadded:: 1.0 .. versionchanged:: 1.1 Added the ``with_`` kwarg. """ klass = into if into is not None else self.__class__ data = self.clone_data() if with_ is not None: data.update(with_) return klass(**data) def call(task: "Task", *args: Any, **kwargs: Any) -> "Call": """ Describes execution of a `.Task`, typically with pre-supplied arguments. Useful for setting up :ref:`pre/post task invocations `. 
It's actually just a convenient wrapper around the `.Call` class, which may be used directly instead if desired. For example, here's two build-like tasks that both refer to a ``setup`` pre-task, one with no baked-in argument values (and thus no need to use `.call`), and one that toggles a boolean flag:: @task def setup(c, clean=False): if clean: c.run("rm -rf target") # ... setup things here ... c.run("tar czvf target.tgz target") @task(pre=[setup]) def build(c): c.run("build, accounting for leftover files...") @task(pre=[call(setup, clean=True)]) def clean_build(c): c.run("build, assuming clean slate...") Please see the constructor docs for `.Call` for details - this function's ``args`` and ``kwargs`` map directly to the same arguments as in that method. .. versionadded:: 1.0 """ return Call(task, args=args, kwargs=kwargs) invoke-2.2.0/invoke/terminals.py000066400000000000000000000176011445356551000166660ustar00rootroot00000000000000""" Utility functions surrounding terminal devices & I/O. Much of this code performs platform-sensitive branching, e.g. Windows support. This is its own module to abstract away what would otherwise be distracting logic-flow interruptions. """ from contextlib import contextmanager from typing import Generator, IO, Optional, Tuple import os import select import sys # TODO: move in here? They're currently platform-agnostic... from .util import has_fileno, isatty WINDOWS = sys.platform == "win32" """ Whether or not the current platform appears to be Windows in nature. Note that Cygwin's Python is actually close enough to "real" UNIXes that it doesn't need (or want!) to use PyWin32 -- so we only test for literal Win32 setups (vanilla Python, ActiveState etc) here. .. versionadded:: 1.0 """ if sys.platform == "win32": import msvcrt from ctypes import ( Structure, c_ushort, windll, POINTER, byref, ) from ctypes.wintypes import HANDLE, _COORD, _SMALL_RECT else: import fcntl import struct import termios import tty if sys.platform == "win32": def _pty_size() -> Tuple[Optional[int], Optional[int]]: class CONSOLE_SCREEN_BUFFER_INFO(Structure): _fields_ = [ ("dwSize", _COORD), ("dwCursorPosition", _COORD), ("wAttributes", c_ushort), ("srWindow", _SMALL_RECT), ("dwMaximumWindowSize", _COORD), ] GetStdHandle = windll.kernel32.GetStdHandle GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo GetStdHandle.restype = HANDLE GetConsoleScreenBufferInfo.argtypes = [ HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO), ] hstd = GetStdHandle(-11) # STD_OUTPUT_HANDLE = -11 csbi = CONSOLE_SCREEN_BUFFER_INFO() ret = GetConsoleScreenBufferInfo(hstd, byref(csbi)) if ret: sizex = csbi.srWindow.Right - csbi.srWindow.Left + 1 sizey = csbi.srWindow.Bottom - csbi.srWindow.Top + 1 return sizex, sizey else: return (None, None) else: def _pty_size() -> Tuple[Optional[int], Optional[int]]: """ Suitable for most POSIX platforms. .. versionadded:: 1.0 """ # Sentinel values to be replaced w/ defaults by caller size = (None, None) # We want two short unsigned integers (rows, cols) fmt = "HH" # Create an empty (zeroed) buffer for ioctl to map onto. Yay for C! buf = struct.pack(fmt, 0, 0) # Call TIOCGWINSZ to get window size of stdout, returns our filled # buffer try: result = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, buf) # Unpack buffer back into Python data types # NOTE: this unpack gives us rows x cols, but we return the # inverse. 
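        # Worked example (editor's illustration): on an 80x24 terminal the
        # TIOCGWINSZ ioctl fills the buffer such that
        # struct.unpack("HH", result) yields (24, 80) -- rows first -- hence
        # the swap to (cols, rows) on the next line.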
rows, cols = struct.unpack(fmt, result) return (cols, rows) # Fallback to emptyish return value in various failure cases: # * sys.stdout being monkeypatched, such as in testing, and lacking # * .fileno # * sys.stdout having a .fileno but not actually being attached to a # * TTY # * termios not having a TIOCGWINSZ attribute (happens sometimes...) # * other situations where ioctl doesn't explode but the result isn't # something unpack can deal with except (struct.error, TypeError, IOError, AttributeError): pass return size def pty_size() -> Tuple[int, int]: """ Determine current local pseudoterminal dimensions. :returns: A ``(num_cols, num_rows)`` two-tuple describing PTY size. Defaults to ``(80, 24)`` if unable to get a sensible result dynamically. .. versionadded:: 1.0 """ cols, rows = _pty_size() # TODO: make defaults configurable? return (cols or 80, rows or 24) def stdin_is_foregrounded_tty(stream: IO) -> bool: """ Detect if given stdin ``stream`` seems to be in the foreground of a TTY. Specifically, compares the current Python process group ID to that of the stream's file descriptor to see if they match; if they do not match, it is likely that the process has been placed in the background. This is used as a test to determine whether we should manipulate an active stdin so it runs in a character-buffered mode; touching the terminal in this way when the process is backgrounded, causes most shells to pause execution. .. note:: Processes that aren't attached to a terminal to begin with, will always fail this test, as it starts with "do you have a real ``fileno``?". .. versionadded:: 1.0 """ if not has_fileno(stream): return False return os.getpgrp() == os.tcgetpgrp(stream.fileno()) def cbreak_already_set(stream: IO) -> bool: # Explicitly not docstringed to remain private, for now. Eh. # Checks whether tty.setcbreak appears to have already been run against # ``stream`` (or if it would otherwise just not do anything). # Used to effect idempotency for character-buffering a stream, which also # lets us avoid multiple capture-then-restore cycles. attrs = termios.tcgetattr(stream) lflags, cc = attrs[3], attrs[6] echo = bool(lflags & termios.ECHO) icanon = bool(lflags & termios.ICANON) # setcbreak sets ECHO and ICANON to 0/off, CC[VMIN] to 1-ish, and CC[VTIME] # to 0-ish. If any of that is not true we can reasonably assume it has not # yet been executed against this stream. sentinels = ( not echo, not icanon, cc[termios.VMIN] in [1, b"\x01"], cc[termios.VTIME] in [0, b"\x00"], ) return all(sentinels) @contextmanager def character_buffered( stream: IO, ) -> Generator[None, None, None]: """ Force local terminal ``stream`` be character, not line, buffered. Only applies to Unix-based systems; on Windows this is a no-op. .. versionadded:: 1.0 """ if ( WINDOWS or not isatty(stream) or not stdin_is_foregrounded_tty(stream) or cbreak_already_set(stream) ): yield else: old_settings = termios.tcgetattr(stream) tty.setcbreak(stream) try: yield finally: termios.tcsetattr(stream, termios.TCSADRAIN, old_settings) def ready_for_reading(input_: IO) -> bool: """ Test ``input_`` to determine whether a read action will succeed. :param input_: Input stream object (file-like). :returns: ``True`` if a read should succeed, ``False`` otherwise. .. versionadded:: 1.0 """ # A "real" terminal stdin needs select/kbhit to tell us when it's ready for # a nonblocking read(). # Otherwise, assume a "safer" file-like object that can be read from in a # nonblocking fashion (e.g. a StringIO or regular file). 
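    # Editor's note (illustrative): on POSIX the check below is a pure poll;
    # e.g. select.select([sys.stdin], [], [], 0.0) returns
    # ([sys.stdin], [], []) immediately when a read would not block, and
    # ([], [], []) otherwise -- the 0.0 timeout means it never waits.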
if not has_fileno(input_): return True if sys.platform == "win32": return msvcrt.kbhit() else: reads, _, _ = select.select([input_], [], [], 0.0) return bool(reads and reads[0] is input_) def bytes_to_read(input_: IO) -> int: """ Query stream ``input_`` to see how many bytes may be readable. .. note:: If we are unable to tell (e.g. if ``input_`` isn't a true file descriptor or isn't a valid TTY) we fall back to suggesting reading 1 byte only. :param input: Input stream object (file-like). :returns: `int` number of bytes to read. .. versionadded:: 1.0 """ # NOTE: we have to check both possibilities here; situations exist where # it's not a tty but has a fileno, or vice versa; neither is typically # going to work re: ioctl(). if not WINDOWS and isatty(input_) and has_fileno(input_): fionread = fcntl.ioctl(input_, termios.FIONREAD, b" ") return int(struct.unpack("h", fionread)[0]) return 1 invoke-2.2.0/invoke/util.py000066400000000000000000000234421445356551000156450ustar00rootroot00000000000000from collections import namedtuple from contextlib import contextmanager from types import TracebackType from typing import Any, Generator, List, IO, Optional, Tuple, Type, Union import io import logging import os import threading import sys # NOTE: This is the canonical location for commonly-used vendored modules, # which is the only spot that performs this try/except to allow repackaged # Invoke to function (e.g. distro packages which unvendor the vendored bits and # thus must import our 'vendored' stuff from the overall environment.) # All other uses of Lexicon, etc should do 'from .util import lexicon' etc. # Saves us from having to update the same logic in a dozen places. # TODO: would this make more sense to put _into_ invoke.vendor? That way, the # import lines which now read 'from .util import ' would be # more obvious. Requires packagers to leave invoke/vendor/__init__.py alone tho try: from .vendor.lexicon import Lexicon # noqa from .vendor import yaml # noqa except ImportError: from lexicon import Lexicon # type: ignore[no-redef] # noqa import yaml # type: ignore[no-redef] # noqa LOG_FORMAT = "%(name)s.%(module)s.%(funcName)s: %(message)s" def enable_logging() -> None: logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) # Allow from-the-start debugging (vs toggled during load of tasks module) via # shell env var. if os.environ.get("INVOKE_DEBUG"): enable_logging() # Add top level logger functions to global namespace. Meh. log = logging.getLogger("invoke") debug = log.debug def task_name_sort_key(name: str) -> Tuple[List[str], str]: """ Return key tuple for use sorting dotted task names, via e.g. `sorted`. .. versionadded:: 1.0 """ parts = name.split(".") return ( # First group/sort by non-leaf path components. This keeps everything # grouped in its hierarchy, and incidentally puts top-level tasks # (whose non-leaf path set is the empty list) first, where we want them parts[:-1], # Then we sort lexicographically by the actual task name parts[-1], ) # TODO: Make part of public API sometime @contextmanager def cd(where: str) -> Generator[None, None, None]: cwd = os.getcwd() os.chdir(where) try: yield finally: os.chdir(cwd) def has_fileno(stream: IO) -> bool: """ Cleanly determine whether ``stream`` has a useful ``.fileno()``. .. note:: This function helps determine if a given file-like object can be used with various terminal-oriented modules and functions such as `select`, `termios`, and `tty`. 
For most of those, a fileno is all that is required; they'll function even if ``stream.isatty()`` is ``False``. :param stream: A file-like object. :returns: ``True`` if ``stream.fileno()`` returns an integer, ``False`` otherwise (this includes when ``stream`` lacks a ``fileno`` method). .. versionadded:: 1.0 """ try: return isinstance(stream.fileno(), int) except (AttributeError, io.UnsupportedOperation): return False def isatty(stream: IO) -> Union[bool, Any]: """ Cleanly determine whether ``stream`` is a TTY. Specifically, first try calling ``stream.isatty()``, and if that fails (e.g. due to lacking the method entirely) fallback to `os.isatty`. .. note:: Most of the time, we don't actually care about true TTY-ness, but merely whether the stream seems to have a fileno (per `has_fileno`). However, in some cases (notably the use of `pty.fork` to present a local pseudoterminal) we need to tell if a given stream has a valid fileno but *isn't* tied to an actual terminal. Thus, this function. :param stream: A file-like object. :returns: A boolean depending on the result of calling ``.isatty()`` and/or `os.isatty`. .. versionadded:: 1.0 """ # If there *is* an .isatty, ask it. if hasattr(stream, "isatty") and callable(stream.isatty): return stream.isatty() # If there wasn't, see if it has a fileno, and if so, ask os.isatty elif has_fileno(stream): return os.isatty(stream.fileno()) # If we got here, none of the above worked, so it's reasonable to assume # the darn thing isn't a real TTY. return False def helpline(obj: object) -> Optional[str]: """ Yield an object's first docstring line, or None if there was no docstring. .. versionadded:: 1.0 """ docstring = obj.__doc__ if ( not docstring or not docstring.strip() or docstring == type(obj).__doc__ ): return None return docstring.lstrip().splitlines()[0] class ExceptionHandlingThread(threading.Thread): """ Thread handler making it easier for parent to handle thread exceptions. Based in part on Fabric 1's ThreadHandler. See also Fabric GH issue #204. When used directly, can be used in place of a regular ``threading.Thread``. If subclassed, the subclass must do one of: - supply ``target`` to ``__init__`` - define ``_run()`` instead of ``run()`` This is because this thread's entire point is to wrap behavior around the thread's execution; subclasses could not redefine ``run()`` without breaking that functionality. .. versionadded:: 1.0 """ def __init__(self, **kwargs: Any) -> None: """ Create a new exception-handling thread instance. Takes all regular `threading.Thread` keyword arguments, via ``**kwargs`` for easier display of thread identity when raising captured exceptions. """ super().__init__(**kwargs) # No record of why, but Fabric used daemon threads ever since the # switch from select.select, so let's keep doing that. self.daemon = True # Track exceptions raised in run() self.kwargs = kwargs # TODO: legacy cruft that needs to be removed self.exc_info: Optional[ Union[ Tuple[Type[BaseException], BaseException, TracebackType], Tuple[None, None, None], ] ] = None def run(self) -> None: try: # Allow subclasses implemented using the "override run()'s body" # approach to work, by using _run() instead of run(). If that # doesn't appear to be the case, then assume we're being used # directly and just use super() ourselves. 
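            # Editor's usage sketch (hypothetical names, not upstream code):
            #
            #     class Reader(ExceptionHandlingThread):
            #         def _run(self):
            #             consume_stream()  # exceptions here get captured
            #
            # versus direct use, which supplies target= instead:
            #
            #     t = ExceptionHandlingThread(target=consume_stream)
            #     t.start(); t.join()
            #     wrapped = t.exception()  # ExceptionWrapper, or None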
# XXX https://github.com/python/mypy/issues/1424 if hasattr(self, "_run") and callable(self._run): # type: ignore # TODO: this could be: # - io worker with no 'result' (always local) # - tunnel worker, also with no 'result' (also always local) # - threaded concurrent run(), sudo(), put(), etc, with a # result (not necessarily local; might want to be a subproc or # whatever eventually) # TODO: so how best to conditionally add a "capture result # value of some kind"? # - update so all use cases use subclassing, add functionality # alongside self.exception() that is for the result of _run() # - split out class that does not care about result of _run() # and let it continue acting like a normal thread (meh) # - assume the run/sudo/etc case will use a queue inside its # worker body, orthogonal to how exception handling works self._run() # type: ignore else: super().run() except BaseException: # Store for actual reraising later self.exc_info = sys.exc_info() # And log now, in case we never get to later (e.g. if executing # program is hung waiting for us to do something) msg = "Encountered exception {!r} in thread for {!r}" # Name is either target function's dunder-name, or just "_run" if # we were run subclass-wise. name = "_run" if "target" in self.kwargs: name = self.kwargs["target"].__name__ debug(msg.format(self.exc_info[1], name)) # noqa def exception(self) -> Optional["ExceptionWrapper"]: """ If an exception occurred, return an `.ExceptionWrapper` around it. :returns: An `.ExceptionWrapper` managing the result of `sys.exc_info`, if an exception was raised during thread execution. If no exception occurred, returns ``None`` instead. .. versionadded:: 1.0 """ if self.exc_info is None: return None return ExceptionWrapper(self.kwargs, *self.exc_info) @property def is_dead(self) -> bool: """ Returns ``True`` if not alive and has a stored exception. Used to detect threads that have excepted & shut down. .. versionadded:: 1.0 """ # NOTE: it seems highly unlikely that a thread could still be # is_alive() but also have encountered an exception. But hey. Why not # be thorough? return (not self.is_alive()) and self.exc_info is not None def __repr__(self) -> str: # TODO: beef this up more return str(self.kwargs["target"].__name__) # NOTE: ExceptionWrapper defined here, not in exceptions.py, to avoid circular # dependency issues (e.g. Failure subclasses need to use some bits from this # module...) #: A namedtuple wrapping a thread-borne exception & that thread's arguments. #: Mostly used as an intermediate between `.ExceptionHandlingThread` (which #: preserves initial exceptions) and `.ThreadException` (which holds 1..N such #: exceptions, as typically multiple threads are involved.) ExceptionWrapper = namedtuple( "ExceptionWrapper", "kwargs type value traceback" ) invoke-2.2.0/invoke/vendor/000077500000000000000000000000001445356551000156065ustar00rootroot00000000000000invoke-2.2.0/invoke/vendor/__init__.py000066400000000000000000000000001445356551000177050ustar00rootroot00000000000000invoke-2.2.0/invoke/vendor/fluidity/000077500000000000000000000000001445356551000174375ustar00rootroot00000000000000invoke-2.2.0/invoke/vendor/fluidity/LICENSE000066400000000000000000000020711445356551000204440ustar00rootroot00000000000000The MIT License Copyright (c) 2011 Rodrigo S. 
Manhães Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. invoke-2.2.0/invoke/vendor/fluidity/__init__.py000066400000000000000000000003041445356551000215450ustar00rootroot00000000000000from .machine import (StateMachine, state, transition, InvalidConfiguration, InvalidTransition, GuardNotSatisfied, ForkedTransition) invoke-2.2.0/invoke/vendor/fluidity/backwardscompat.py000066400000000000000000000002071445356551000231550ustar00rootroot00000000000000import sys if sys.version_info >= (3,): def callable(obj): return hasattr(obj, '__call__') else: callable = callable invoke-2.2.0/invoke/vendor/fluidity/machine.py000066400000000000000000000207561445356551000214270ustar00rootroot00000000000000import re import inspect from .backwardscompat import callable # metaclass implementation idea from # http://blog.ianbicking.org/more-on-python-metaprogramming-comment-14.html _transition_gatherer = [] def transition(event, from_, to, action=None, guard=None): _transition_gatherer.append([event, from_, to, action, guard]) _state_gatherer = [] def state(name, enter=None, exit=None): _state_gatherer.append([name, enter, exit]) class MetaStateMachine(type): def __new__(cls, name, bases, dictionary): global _transition_gatherer, _state_gatherer Machine = super(MetaStateMachine, cls).__new__(cls, name, bases, dictionary) Machine._class_transitions = [] Machine._class_states = {} for s in _state_gatherer: Machine._add_class_state(*s) for i in _transition_gatherer: Machine._add_class_transition(*i) _transition_gatherer = [] _state_gatherer = [] return Machine StateMachineBase = MetaStateMachine('StateMachineBase', (object, ), {}) class StateMachine(StateMachineBase): def __init__(self): self._bring_definitions_to_object_level() self._inject_into_parts() self._validate_machine_definitions() if callable(self.initial_state): self.initial_state = self.initial_state() self._current_state_object = self._state_by_name(self.initial_state) self._current_state_object.run_enter(self) self._create_state_getters() def __new__(cls, *args, **kwargs): obj = super(StateMachine, cls).__new__(cls) obj._states = {} obj._transitions = [] return obj def _bring_definitions_to_object_level(self): self._states.update(self.__class__._class_states) self._transitions.extend(self.__class__._class_transitions) def _inject_into_parts(self): for collection in [self._states.values(), self._transitions]: for component in collection: component.machine = self def _validate_machine_definitions(self): if len(self._states) < 2: raise InvalidConfiguration('There must be at least two states') if not 
getattr(self, 'initial_state', None): raise InvalidConfiguration('There must exist an initial state') @classmethod def _add_class_state(cls, name, enter, exit): cls._class_states[name] = _State(name, enter, exit) def add_state(self, name, enter=None, exit=None): state = _State(name, enter, exit) setattr(self, state.getter_name(), state.getter_method().__get__(self, self.__class__)) self._states[name] = state def _current_state_name(self): return self._current_state_object.name current_state = property(_current_state_name) def changing_state(self, from_, to): """ This method is called whenever a state change is executed """ pass def _new_state(self, state): self.changing_state(self._current_state_object.name, state.name) self._current_state_object = state def _state_objects(self): return list(self._states.values()) def states(self): return [s.name for s in self._state_objects()] @classmethod def _add_class_transition(cls, event, from_, to, action, guard): transition = _Transition(event, [cls._class_states[s] for s in _listize(from_)], cls._class_states[to], action, guard) cls._class_transitions.append(transition) setattr(cls, event, transition.event_method()) def add_transition(self, event, from_, to, action=None, guard=None): transition = _Transition(event, [self._state_by_name(s) for s in _listize(from_)], self._state_by_name(to), action, guard) self._transitions.append(transition) setattr(self, event, transition.event_method().__get__(self, self.__class__)) def _process_transitions(self, event_name, *args, **kwargs): transitions = self._transitions_by_name(event_name) transitions = self._ensure_from_validity(transitions) this_transition = self._check_guards(transitions) this_transition.run(self, *args, **kwargs) def _create_state_getters(self): for state in self._state_objects(): setattr(self, state.getter_name(), state.getter_method().__get__(self, self.__class__)) def _state_by_name(self, name): for state in self._state_objects(): if state.name == name: return state def _transitions_by_name(self, name): return list(filter(lambda transition: transition.event == name, self._transitions)) def _ensure_from_validity(self, transitions): valid_transitions = list(filter( lambda transition: transition.is_valid_from(self._current_state_object), transitions)) if len(valid_transitions) == 0: raise InvalidTransition("Cannot %s from %s" % ( transitions[0].event, self.current_state)) return valid_transitions def _check_guards(self, transitions): allowed_transitions = [] for transition in transitions: if transition.check_guard(self): allowed_transitions.append(transition) if len(allowed_transitions) == 0: raise GuardNotSatisfied("Guard is not satisfied for this transition") elif len(allowed_transitions) > 1: raise ForkedTransition("More than one transition was allowed for this event") return allowed_transitions[0] class _Transition(object): def __init__(self, event, from_, to, action, guard): self.event = event self.from_ = from_ self.to = to self.action = action self.guard = _Guard(guard) def event_method(self): def generated_event(machine, *args, **kwargs): these_transitions = machine._process_transitions(self.event, *args, **kwargs) generated_event.__doc__ = 'event %s' % self.event generated_event.__name__ = self.event return generated_event def is_valid_from(self, from_): return from_ in _listize(self.from_) def check_guard(self, machine): return self.guard.check(machine) def run(self, machine, *args, **kwargs): machine._current_state_object.run_exit(machine) machine._new_state(self.to) 
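        # (Editor's note: ordering is exit hook -> state switch -> enter hook
        # -> transition action; guards were already checked by the caller.)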
self.to.run_enter(machine) _ActionRunner(machine).run(self.action, *args, **kwargs) class _Guard(object): def __init__(self, action): self.action = action def check(self, machine): if self.action is None: return True items = _listize(self.action) result = True for item in items: result = result and self._evaluate(machine, item) return result def _evaluate(self, machine, item): if callable(item): return item(machine) else: guard = getattr(machine, item) if callable(guard): guard = guard() return guard class _State(object): def __init__(self, name, enter, exit): self.name = name self.enter = enter self.exit = exit def getter_name(self): return 'is_%s' % self.name def getter_method(self): def state_getter(self_machine): return self_machine.current_state == self.name return state_getter def run_enter(self, machine): _ActionRunner(machine).run(self.enter) def run_exit(self, machine): _ActionRunner(machine).run(self.exit) class _ActionRunner(object): def __init__(self, machine): self.machine = machine def run(self, action_param, *args, **kwargs): if not action_param: return action_items = _listize(action_param) for action_item in action_items: self._run_action(action_item, *args, **kwargs) def _run_action(self, action, *args, **kwargs): if callable(action): self._try_to_run_with_args(action, self.machine, *args, **kwargs) else: self._try_to_run_with_args(getattr(self.machine, action), *args, **kwargs) def _try_to_run_with_args(self, action, *args, **kwargs): try: action(*args, **kwargs) except TypeError: action() class InvalidConfiguration(Exception): pass class InvalidTransition(Exception): pass class GuardNotSatisfied(Exception): pass class ForkedTransition(Exception): pass def _listize(value): return type(value) in [list, tuple] and value or [value] invoke-2.2.0/invoke/vendor/lexicon/000077500000000000000000000000001445356551000172475ustar00rootroot00000000000000invoke-2.2.0/invoke/vendor/lexicon/LICENSE000066400000000000000000000024421445356551000202560ustar00rootroot00000000000000Copyright (c) 2020 Jeff Forcier. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
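# ---------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the vendored sources.
# The fluidity machine.py above builds state machines declaratively: the
# module-level state()/transition() helpers stash definitions in gatherer
# lists, MetaStateMachine moves them onto the subclass, and every transition
# becomes a generated event method that runs guard checks before switching
# states. A minimal usage sketch (the import path assumes the vendored
# location shown above):
#
#     from invoke.vendor.fluidity import StateMachine, state, transition
#
#     class Door(StateMachine):
#         initial_state = 'closed'
#         state('closed')
#         state('open')
#         transition(event='open_door', from_='closed', to='open')
#         transition(event='close_door', from_='open', to='closed')
#
#     door = Door()
#     assert door.current_state == 'closed'
#     door.open_door()        # generated event method runs the transition
#     assert door.is_open     # generated per-state getter
# ---------------------------------------------------------------------------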
invoke-2.2.0/invoke/vendor/lexicon/__init__.py000066400000000000000000000021551445356551000213630ustar00rootroot00000000000000from ._version import __version_info__, __version__ # noqa from .attribute_dict import AttributeDict from .alias_dict import AliasDict class Lexicon(AttributeDict, AliasDict): def __init__(self, *args, **kwargs): # Need to avoid combining AliasDict's initial attribute write on # self.aliases, with AttributeDict's __setattr__. Doing so results in # an infinite loop. Instead, just skip straight to dict() for both # explicitly (i.e. we override AliasDict.__init__ instead of extending # it.) # NOTE: could tickle AttributeDict.__init__ instead, in case it ever # grows one. dict.__init__(self, *args, **kwargs) dict.__setattr__(self, "aliases", {}) def __getattr__(self, key): # Intercept deepcopy/etc driven access to self.aliases when not # actually set. (Only a problem for us, due to abovementioned combo of # Alias and Attribute Dicts, so not solvable in a parent alone.) if key == "aliases" and key not in self.__dict__: self.__dict__[key] = {} return super(Lexicon, self).__getattr__(key) invoke-2.2.0/invoke/vendor/lexicon/_version.py000066400000000000000000000001201445356551000214360ustar00rootroot00000000000000__version_info__ = (2, 0, 1) __version__ = ".".join(map(str, __version_info__)) invoke-2.2.0/invoke/vendor/lexicon/alias_dict.py000066400000000000000000000062271445356551000217240ustar00rootroot00000000000000class AliasDict(dict): def __init__(self, *args, **kwargs): super(AliasDict, self).__init__(*args, **kwargs) self.aliases = {} def alias(self, from_, to): self.aliases[from_] = to def unalias(self, from_): del self.aliases[from_] def aliases_of(self, name): """ Returns other names for given real key or alias ``name``. If given a real key, returns its aliases. If given an alias, returns the real key it points to, plus any other aliases of that real key. (The given alias itself is not included in the return value.) """ names = [] key = name # self.aliases keys are aliases, not realkeys. Easy test to see if we # should flip around to the POV of a realkey when given an alias. if name in self.aliases: key = self.aliases[name] # Ensure the real key shows up in output. names.append(key) # 'key' is now a realkey, whose aliases are all keys whose value is # itself. Filter out the original name given. names.extend( [k for k, v in self.aliases.items() if v == key and k != name] ) return names def _handle(self, key, value, single, multi, unaliased): # Attribute existence test required to not blow up when deepcopy'd if key in getattr(self, "aliases", {}): target = self.aliases[key] # Single-string targets if isinstance(target, str): return single(self, target, value) # Multi-string targets else: if multi: return multi(self, target, value) else: for subkey in target: single(self, subkey, value) else: return unaliased(self, key, value) def __setitem__(self, key, value): def single(d, target, value): d[target] = value def unaliased(d, key, value): super(AliasDict, d).__setitem__(key, value) return self._handle(key, value, single, None, unaliased) def __getitem__(self, key): def single(d, target, value): return d[target] def unaliased(d, key, value): return super(AliasDict, d).__getitem__(key) def multi(d, target, value): msg = "Multi-target aliases have no well-defined value and can't be read." 
# noqa raise ValueError(msg) return self._handle(key, None, single, multi, unaliased) def __contains__(self, key): def single(d, target, value): return target in d def multi(d, target, value): return all(subkey in self for subkey in self.aliases[key]) def unaliased(d, key, value): return super(AliasDict, d).__contains__(key) return self._handle(key, None, single, multi, unaliased) def __delitem__(self, key): def single(d, target, value): del d[target] def unaliased(d, key, value): return super(AliasDict, d).__delitem__(key) return self._handle(key, None, single, None, unaliased) invoke-2.2.0/invoke/vendor/lexicon/attribute_dict.py000066400000000000000000000006271445356551000226340ustar00rootroot00000000000000class AttributeDict(dict): def __getattr__(self, key): try: return self[key] except KeyError: # to conform with __getattr__ spec raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): del self[key] def __dir__(self): return dir(type(self)) + list(self.keys()) invoke-2.2.0/invoke/vendor/yaml/000077500000000000000000000000001445356551000165505ustar00rootroot00000000000000invoke-2.2.0/invoke/vendor/yaml/__init__.py000066400000000000000000000315621445356551000206700ustar00rootroot00000000000000 from .error import * from .tokens import * from .events import * from .nodes import * from .loader import * from .dumper import * __version__ = '5.4.1' try: from .cyaml import * __with_libyaml__ = True except ImportError: __with_libyaml__ = False import io #------------------------------------------------------------------------------ # Warnings control #------------------------------------------------------------------------------ # 'Global' warnings state: _warnings_enabled = { 'YAMLLoadWarning': True, } # Get or set global warnings' state def warnings(settings=None): if settings is None: return _warnings_enabled if type(settings) is dict: for key in settings: if key in _warnings_enabled: _warnings_enabled[key] = settings[key] # Warn when load() is called without Loader=... class YAMLLoadWarning(RuntimeWarning): pass def load_warning(method): if _warnings_enabled['YAMLLoadWarning'] is False: return import warnings message = ( "calling yaml.%s() without Loader=... is deprecated, as the " "default Loader is unsafe. Please read " "https://msg.pyyaml.org/load for full details." ) % method warnings.warn(message, YAMLLoadWarning, stacklevel=3) #------------------------------------------------------------------------------ def scan(stream, Loader=Loader): """ Scan a YAML stream and produce scanning tokens. """ loader = Loader(stream) try: while loader.check_token(): yield loader.get_token() finally: loader.dispose() def parse(stream, Loader=Loader): """ Parse a YAML stream and produce parsing events. """ loader = Loader(stream) try: while loader.check_event(): yield loader.get_event() finally: loader.dispose() def compose(stream, Loader=Loader): """ Parse the first YAML document in a stream and produce the corresponding representation tree. """ loader = Loader(stream) try: return loader.get_single_node() finally: loader.dispose() def compose_all(stream, Loader=Loader): """ Parse all YAML documents in a stream and produce corresponding representation trees. """ loader = Loader(stream) try: while loader.check_node(): yield loader.get_node() finally: loader.dispose() def load(stream, Loader=None): """ Parse the first YAML document in a stream and produce the corresponding Python object. 
""" if Loader is None: load_warning('load') Loader = FullLoader loader = Loader(stream) try: return loader.get_single_data() finally: loader.dispose() def load_all(stream, Loader=None): """ Parse all YAML documents in a stream and produce corresponding Python objects. """ if Loader is None: load_warning('load_all') Loader = FullLoader loader = Loader(stream) try: while loader.check_data(): yield loader.get_data() finally: loader.dispose() def full_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve all tags except those known to be unsafe on untrusted input. """ return load(stream, FullLoader) def full_load_all(stream): """ Parse all YAML documents in a stream and produce corresponding Python objects. Resolve all tags except those known to be unsafe on untrusted input. """ return load_all(stream, FullLoader) def safe_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve only basic YAML tags. This is known to be safe for untrusted input. """ return load(stream, SafeLoader) def safe_load_all(stream): """ Parse all YAML documents in a stream and produce corresponding Python objects. Resolve only basic YAML tags. This is known to be safe for untrusted input. """ return load_all(stream, SafeLoader) def unsafe_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve all tags, even those known to be unsafe on untrusted input. """ return load(stream, UnsafeLoader) def unsafe_load_all(stream): """ Parse all YAML documents in a stream and produce corresponding Python objects. Resolve all tags, even those known to be unsafe on untrusted input. """ return load_all(stream, UnsafeLoader) def emit(events, stream=None, Dumper=Dumper, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None): """ Emit YAML parsing events into a stream. If stream is None, return the produced string instead. """ getvalue = None if stream is None: stream = io.StringIO() getvalue = stream.getvalue dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) try: for event in events: dumper.emit(event) finally: dumper.dispose() if getvalue: return getvalue() def serialize_all(nodes, stream=None, Dumper=Dumper, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None): """ Serialize a sequence of representation trees into a YAML stream. If stream is None, return the produced string instead. """ getvalue = None if stream is None: if encoding is None: stream = io.StringIO() else: stream = io.BytesIO() getvalue = stream.getvalue dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break, encoding=encoding, version=version, tags=tags, explicit_start=explicit_start, explicit_end=explicit_end) try: dumper.open() for node in nodes: dumper.serialize(node) dumper.close() finally: dumper.dispose() if getvalue: return getvalue() def serialize(node, stream=None, Dumper=Dumper, **kwds): """ Serialize a representation tree into a YAML stream. If stream is None, return the produced string instead. 
""" return serialize_all([node], stream, Dumper=Dumper, **kwds) def dump_all(documents, stream=None, Dumper=Dumper, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): """ Serialize a sequence of Python objects into a YAML stream. If stream is None, return the produced string instead. """ getvalue = None if stream is None: if encoding is None: stream = io.StringIO() else: stream = io.BytesIO() getvalue = stream.getvalue dumper = Dumper(stream, default_style=default_style, default_flow_style=default_flow_style, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break, encoding=encoding, version=version, tags=tags, explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys) try: dumper.open() for data in documents: dumper.represent(data) dumper.close() finally: dumper.dispose() if getvalue: return getvalue() def dump(data, stream=None, Dumper=Dumper, **kwds): """ Serialize a Python object into a YAML stream. If stream is None, return the produced string instead. """ return dump_all([data], stream, Dumper=Dumper, **kwds) def safe_dump_all(documents, stream=None, **kwds): """ Serialize a sequence of Python objects into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead. """ return dump_all(documents, stream, Dumper=SafeDumper, **kwds) def safe_dump(data, stream=None, **kwds): """ Serialize a Python object into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead. """ return dump_all([data], stream, Dumper=SafeDumper, **kwds) def add_implicit_resolver(tag, regexp, first=None, Loader=None, Dumper=Dumper): """ Add an implicit scalar detector. If an implicit scalar value matches the given regexp, the corresponding tag is assigned to the scalar. first is a sequence of possible initial characters or None. """ if Loader is None: loader.Loader.add_implicit_resolver(tag, regexp, first) loader.FullLoader.add_implicit_resolver(tag, regexp, first) loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first) else: Loader.add_implicit_resolver(tag, regexp, first) Dumper.add_implicit_resolver(tag, regexp, first) def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper): """ Add a path based resolver for the given tag. A path is a list of keys that forms a path to a node in the representation tree. Keys can be string values, integers, or None. """ if Loader is None: loader.Loader.add_path_resolver(tag, path, kind) loader.FullLoader.add_path_resolver(tag, path, kind) loader.UnsafeLoader.add_path_resolver(tag, path, kind) else: Loader.add_path_resolver(tag, path, kind) Dumper.add_path_resolver(tag, path, kind) def add_constructor(tag, constructor, Loader=None): """ Add a constructor for the given tag. Constructor is a function that accepts a Loader instance and a node object and produces the corresponding Python object. """ if Loader is None: loader.Loader.add_constructor(tag, constructor) loader.FullLoader.add_constructor(tag, constructor) loader.UnsafeLoader.add_constructor(tag, constructor) else: Loader.add_constructor(tag, constructor) def add_multi_constructor(tag_prefix, multi_constructor, Loader=None): """ Add a multi-constructor for the given tag prefix. Multi-constructor is called for a node if its tag starts with tag_prefix. 
Multi-constructor accepts a Loader instance, a tag suffix, and a node object and produces the corresponding Python object. """ if Loader is None: loader.Loader.add_multi_constructor(tag_prefix, multi_constructor) loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor) loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor) else: Loader.add_multi_constructor(tag_prefix, multi_constructor) def add_representer(data_type, representer, Dumper=Dumper): """ Add a representer for the given type. Representer is a function accepting a Dumper instance and an instance of the given data type and producing the corresponding representation node. """ Dumper.add_representer(data_type, representer) def add_multi_representer(data_type, multi_representer, Dumper=Dumper): """ Add a representer for the given type. Multi-representer is a function accepting a Dumper instance and an instance of the given data type or subtype and producing the corresponding representation node. """ Dumper.add_multi_representer(data_type, multi_representer) class YAMLObjectMetaclass(type): """ The metaclass for YAMLObject. """ def __init__(cls, name, bases, kwds): super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: if isinstance(cls.yaml_loader, list): for loader in cls.yaml_loader: loader.add_constructor(cls.yaml_tag, cls.from_yaml) else: cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) cls.yaml_dumper.add_representer(cls, cls.to_yaml) class YAMLObject(metaclass=YAMLObjectMetaclass): """ An object that can dump itself to a YAML stream and load itself from a YAML stream. """ __slots__ = () # no direct instantiation, so allow immutable subclasses yaml_loader = [Loader, FullLoader, UnsafeLoader] yaml_dumper = Dumper yaml_tag = None yaml_flow_style = None @classmethod def from_yaml(cls, loader, node): """ Convert a representation node to a Python object. """ return loader.construct_yaml_object(node, cls) @classmethod def to_yaml(cls, dumper, data): """ Convert a Python object to a representation node. """ return dumper.represent_yaml_object(cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style) invoke-2.2.0/invoke/vendor/yaml/composer.py000066400000000000000000000114231445356551000207520ustar00rootroot00000000000000 __all__ = ['Composer', 'ComposerError'] from .error import MarkedYAMLError from .events import * from .nodes import * class ComposerError(MarkedYAMLError): pass class Composer: def __init__(self): self.anchors = {} def check_node(self): # Drop the STREAM-START event. if self.check_event(StreamStartEvent): self.get_event() # If there are more documents available? return not self.check_event(StreamEndEvent) def get_node(self): # Get the root node of the next document. if not self.check_event(StreamEndEvent): return self.compose_document() def get_single_node(self): # Drop the STREAM-START event. self.get_event() # Compose a document if the stream is not empty. document = None if not self.check_event(StreamEndEvent): document = self.compose_document() # Ensure that the stream contains no more documents. if not self.check_event(StreamEndEvent): event = self.get_event() raise ComposerError("expected a single document in the stream", document.start_mark, "but found another document", event.start_mark) # Drop the STREAM-END event. self.get_event() return document def compose_document(self): # Drop the DOCUMENT-START event. self.get_event() # Compose the root node. 
node = self.compose_node(None, None) # Drop the DOCUMENT-END event. self.get_event() self.anchors = {} return node def compose_node(self, parent, index): if self.check_event(AliasEvent): event = self.get_event() anchor = event.anchor if anchor not in self.anchors: raise ComposerError(None, None, "found undefined alias %r" % anchor, event.start_mark) return self.anchors[anchor] event = self.peek_event() anchor = event.anchor if anchor is not None: if anchor in self.anchors: raise ComposerError("found duplicate anchor %r; first occurrence" % anchor, self.anchors[anchor].start_mark, "second occurrence", event.start_mark) self.descend_resolver(parent, index) if self.check_event(ScalarEvent): node = self.compose_scalar_node(anchor) elif self.check_event(SequenceStartEvent): node = self.compose_sequence_node(anchor) elif self.check_event(MappingStartEvent): node = self.compose_mapping_node(anchor) self.ascend_resolver() return node def compose_scalar_node(self, anchor): event = self.get_event() tag = event.tag if tag is None or tag == '!': tag = self.resolve(ScalarNode, event.value, event.implicit) node = ScalarNode(tag, event.value, event.start_mark, event.end_mark, style=event.style) if anchor is not None: self.anchors[anchor] = node return node def compose_sequence_node(self, anchor): start_event = self.get_event() tag = start_event.tag if tag is None or tag == '!': tag = self.resolve(SequenceNode, None, start_event.implicit) node = SequenceNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node index = 0 while not self.check_event(SequenceEndEvent): node.value.append(self.compose_node(node, index)) index += 1 end_event = self.get_event() node.end_mark = end_event.end_mark return node def compose_mapping_node(self, anchor): start_event = self.get_event() tag = start_event.tag if tag is None or tag == '!': tag = self.resolve(MappingNode, None, start_event.implicit) node = MappingNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node while not self.check_event(MappingEndEvent): #key_event = self.peek_event() item_key = self.compose_node(node, None) #if item_key in node.value: # raise ComposerError("while composing a mapping", start_event.start_mark, # "found duplicate key", key_event.start_mark) item_value = self.compose_node(node, item_key) #node.value[item_key] = item_value node.value.append((item_key, item_value)) end_event = self.get_event() node.end_mark = end_event.end_mark return node invoke-2.2.0/invoke/vendor/yaml/constructor.py000066400000000000000000000677371445356551000215330ustar00rootroot00000000000000 __all__ = [ 'BaseConstructor', 'SafeConstructor', 'FullConstructor', 'UnsafeConstructor', 'Constructor', 'ConstructorError' ] from .error import * from .nodes import * import collections.abc, datetime, base64, binascii, re, sys, types class ConstructorError(MarkedYAMLError): pass class BaseConstructor: yaml_constructors = {} yaml_multi_constructors = {} def __init__(self): self.constructed_objects = {} self.recursive_objects = {} self.state_generators = [] self.deep_construct = False def check_data(self): # If there are more documents available? 
return self.check_node() def check_state_key(self, key): """Block special attributes/methods from being set in a newly created object, to prevent user-controlled methods from being called during deserialization""" if self.get_state_keys_blacklist_regexp().match(key): raise ConstructorError(None, None, "blacklisted key '%s' in instance state found" % (key,), None) def get_data(self): # Construct and return the next document. if self.check_node(): return self.construct_document(self.get_node()) def get_single_data(self): # Ensure that the stream contains a single document and construct it. node = self.get_single_node() if node is not None: return self.construct_document(node) return None def construct_document(self, node): data = self.construct_object(node) while self.state_generators: state_generators = self.state_generators self.state_generators = [] for generator in state_generators: for dummy in generator: pass self.constructed_objects = {} self.recursive_objects = {} self.deep_construct = False return data def construct_object(self, node, deep=False): if node in self.constructed_objects: return self.constructed_objects[node] if deep: old_deep = self.deep_construct self.deep_construct = True if node in self.recursive_objects: raise ConstructorError(None, None, "found unconstructable recursive node", node.start_mark) self.recursive_objects[node] = None constructor = None tag_suffix = None if node.tag in self.yaml_constructors: constructor = self.yaml_constructors[node.tag] else: for tag_prefix in self.yaml_multi_constructors: if tag_prefix is not None and node.tag.startswith(tag_prefix): tag_suffix = node.tag[len(tag_prefix):] constructor = self.yaml_multi_constructors[tag_prefix] break else: if None in self.yaml_multi_constructors: tag_suffix = node.tag constructor = self.yaml_multi_constructors[None] elif None in self.yaml_constructors: constructor = self.yaml_constructors[None] elif isinstance(node, ScalarNode): constructor = self.__class__.construct_scalar elif isinstance(node, SequenceNode): constructor = self.__class__.construct_sequence elif isinstance(node, MappingNode): constructor = self.__class__.construct_mapping if tag_suffix is None: data = constructor(self, node) else: data = constructor(self, tag_suffix, node) if isinstance(data, types.GeneratorType): generator = data data = next(generator) if self.deep_construct: for dummy in generator: pass else: self.state_generators.append(generator) self.constructed_objects[node] = data del self.recursive_objects[node] if deep: self.deep_construct = old_deep return data def construct_scalar(self, node): if not isinstance(node, ScalarNode): raise ConstructorError(None, None, "expected a scalar node, but found %s" % node.id, node.start_mark) return node.value def construct_sequence(self, node, deep=False): if not isinstance(node, SequenceNode): raise ConstructorError(None, None, "expected a sequence node, but found %s" % node.id, node.start_mark) return [self.construct_object(child, deep=deep) for child in node.value] def construct_mapping(self, node, deep=False): if not isinstance(node, MappingNode): raise ConstructorError(None, None, "expected a mapping node, but found %s" % node.id, node.start_mark) mapping = {} for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) if not isinstance(key, collections.abc.Hashable): raise ConstructorError("while constructing a mapping", node.start_mark, "found unhashable key", key_node.start_mark) value = self.construct_object(value_node, deep=deep) mapping[key] = 
value return mapping def construct_pairs(self, node, deep=False): if not isinstance(node, MappingNode): raise ConstructorError(None, None, "expected a mapping node, but found %s" % node.id, node.start_mark) pairs = [] for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) value = self.construct_object(value_node, deep=deep) pairs.append((key, value)) return pairs @classmethod def add_constructor(cls, tag, constructor): if not 'yaml_constructors' in cls.__dict__: cls.yaml_constructors = cls.yaml_constructors.copy() cls.yaml_constructors[tag] = constructor @classmethod def add_multi_constructor(cls, tag_prefix, multi_constructor): if not 'yaml_multi_constructors' in cls.__dict__: cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() cls.yaml_multi_constructors[tag_prefix] = multi_constructor class SafeConstructor(BaseConstructor): def construct_scalar(self, node): if isinstance(node, MappingNode): for key_node, value_node in node.value: if key_node.tag == 'tag:yaml.org,2002:value': return self.construct_scalar(value_node) return super().construct_scalar(node) def flatten_mapping(self, node): merge = [] index = 0 while index < len(node.value): key_node, value_node = node.value[index] if key_node.tag == 'tag:yaml.org,2002:merge': del node.value[index] if isinstance(value_node, MappingNode): self.flatten_mapping(value_node) merge.extend(value_node.value) elif isinstance(value_node, SequenceNode): submerge = [] for subnode in value_node.value: if not isinstance(subnode, MappingNode): raise ConstructorError("while constructing a mapping", node.start_mark, "expected a mapping for merging, but found %s" % subnode.id, subnode.start_mark) self.flatten_mapping(subnode) submerge.append(subnode.value) submerge.reverse() for value in submerge: merge.extend(value) else: raise ConstructorError("while constructing a mapping", node.start_mark, "expected a mapping or list of mappings for merging, but found %s" % value_node.id, value_node.start_mark) elif key_node.tag == 'tag:yaml.org,2002:value': key_node.tag = 'tag:yaml.org,2002:str' index += 1 else: index += 1 if merge: node.value = merge + node.value def construct_mapping(self, node, deep=False): if isinstance(node, MappingNode): self.flatten_mapping(node) return super().construct_mapping(node, deep=deep) def construct_yaml_null(self, node): self.construct_scalar(node) return None bool_values = { 'yes': True, 'no': False, 'true': True, 'false': False, 'on': True, 'off': False, } def construct_yaml_bool(self, node): value = self.construct_scalar(node) return self.bool_values[value.lower()] def construct_yaml_int(self, node): value = self.construct_scalar(node) value = value.replace('_', '') sign = +1 if value[0] == '-': sign = -1 if value[0] in '+-': value = value[1:] if value == '0': return 0 elif value.startswith('0b'): return sign*int(value[2:], 2) elif value.startswith('0x'): return sign*int(value[2:], 16) elif value[0] == '0': return sign*int(value, 8) elif ':' in value: digits = [int(part) for part in value.split(':')] digits.reverse() base = 1 value = 0 for digit in digits: value += digit*base base *= 60 return sign*value else: return sign*int(value) inf_value = 1e300 while inf_value != inf_value*inf_value: inf_value *= inf_value nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). 
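    # Editor's note -- illustrative, not upstream code. construct_yaml_int
    # above implements the YAML 1.1 integer forms: optional sign, '_'
    # separators, 0b binary, 0x hex, leading-zero octal, and ':'-separated
    # base-60 ("sexagesimal") values. Assuming the vendored package is
    # importable as `yaml`:
    #
    #     yaml.safe_load('0x1A')    # -> 26
    #     yaml.safe_load('0b1010')  # -> 10
    #     yaml.safe_load('010')     # -> 8    (leading zero means octal)
    #     yaml.safe_load('1:30')    # -> 90   (1*60 + 30)
    #
    # The inf_value/nan_value class attributes just above bootstrap an
    # IEEE-754 infinity (squaring 1e300 until it saturates) and a quiet
    # NaN (as -inf/inf) without importing math.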
def construct_yaml_float(self, node): value = self.construct_scalar(node) value = value.replace('_', '').lower() sign = +1 if value[0] == '-': sign = -1 if value[0] in '+-': value = value[1:] if value == '.inf': return sign*self.inf_value elif value == '.nan': return self.nan_value elif ':' in value: digits = [float(part) for part in value.split(':')] digits.reverse() base = 1 value = 0.0 for digit in digits: value += digit*base base *= 60 return sign*value else: return sign*float(value) def construct_yaml_binary(self, node): try: value = self.construct_scalar(node).encode('ascii') except UnicodeEncodeError as exc: raise ConstructorError(None, None, "failed to convert base64 data into ascii: %s" % exc, node.start_mark) try: if hasattr(base64, 'decodebytes'): return base64.decodebytes(value) else: return base64.decodestring(value) except binascii.Error as exc: raise ConstructorError(None, None, "failed to decode base64 data: %s" % exc, node.start_mark) timestamp_regexp = re.compile( r'''^(?P[0-9][0-9][0-9][0-9]) -(?P[0-9][0-9]?) -(?P[0-9][0-9]?) (?:(?:[Tt]|[ \t]+) (?P[0-9][0-9]?) :(?P[0-9][0-9]) :(?P[0-9][0-9]) (?:\.(?P[0-9]*))? (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) (?::(?P[0-9][0-9]))?))?)?$''', re.X) def construct_yaml_timestamp(self, node): value = self.construct_scalar(node) match = self.timestamp_regexp.match(node.value) values = match.groupdict() year = int(values['year']) month = int(values['month']) day = int(values['day']) if not values['hour']: return datetime.date(year, month, day) hour = int(values['hour']) minute = int(values['minute']) second = int(values['second']) fraction = 0 tzinfo = None if values['fraction']: fraction = values['fraction'][:6] while len(fraction) < 6: fraction += '0' fraction = int(fraction) if values['tz_sign']: tz_hour = int(values['tz_hour']) tz_minute = int(values['tz_minute'] or 0) delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) if values['tz_sign'] == '-': delta = -delta tzinfo = datetime.timezone(delta) elif values['tz']: tzinfo = datetime.timezone.utc return datetime.datetime(year, month, day, hour, minute, second, fraction, tzinfo=tzinfo) def construct_yaml_omap(self, node): # Note: we do not check for duplicate keys, because it's too # CPU-expensive. omap = [] yield omap if not isinstance(node, SequenceNode): raise ConstructorError("while constructing an ordered map", node.start_mark, "expected a sequence, but found %s" % node.id, node.start_mark) for subnode in node.value: if not isinstance(subnode, MappingNode): raise ConstructorError("while constructing an ordered map", node.start_mark, "expected a mapping of length 1, but found %s" % subnode.id, subnode.start_mark) if len(subnode.value) != 1: raise ConstructorError("while constructing an ordered map", node.start_mark, "expected a single mapping item, but found %d items" % len(subnode.value), subnode.start_mark) key_node, value_node = subnode.value[0] key = self.construct_object(key_node) value = self.construct_object(value_node) omap.append((key, value)) def construct_yaml_pairs(self, node): # Note: the same code as `construct_yaml_omap`. 
pairs = [] yield pairs if not isinstance(node, SequenceNode): raise ConstructorError("while constructing pairs", node.start_mark, "expected a sequence, but found %s" % node.id, node.start_mark) for subnode in node.value: if not isinstance(subnode, MappingNode): raise ConstructorError("while constructing pairs", node.start_mark, "expected a mapping of length 1, but found %s" % subnode.id, subnode.start_mark) if len(subnode.value) != 1: raise ConstructorError("while constructing pairs", node.start_mark, "expected a single mapping item, but found %d items" % len(subnode.value), subnode.start_mark) key_node, value_node = subnode.value[0] key = self.construct_object(key_node) value = self.construct_object(value_node) pairs.append((key, value)) def construct_yaml_set(self, node): data = set() yield data value = self.construct_mapping(node) data.update(value) def construct_yaml_str(self, node): return self.construct_scalar(node) def construct_yaml_seq(self, node): data = [] yield data data.extend(self.construct_sequence(node)) def construct_yaml_map(self, node): data = {} yield data value = self.construct_mapping(node) data.update(value) def construct_yaml_object(self, node, cls): data = cls.__new__(cls) yield data if hasattr(data, '__setstate__'): state = self.construct_mapping(node, deep=True) data.__setstate__(state) else: state = self.construct_mapping(node) data.__dict__.update(state) def construct_undefined(self, node): raise ConstructorError(None, None, "could not determine a constructor for the tag %r" % node.tag, node.start_mark) SafeConstructor.add_constructor( 'tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null) SafeConstructor.add_constructor( 'tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool) SafeConstructor.add_constructor( 'tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int) SafeConstructor.add_constructor( 'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float) SafeConstructor.add_constructor( 'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary) SafeConstructor.add_constructor( 'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp) SafeConstructor.add_constructor( 'tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap) SafeConstructor.add_constructor( 'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs) SafeConstructor.add_constructor( 'tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set) SafeConstructor.add_constructor( 'tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str) SafeConstructor.add_constructor( 'tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq) SafeConstructor.add_constructor( 'tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map) SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined) class FullConstructor(SafeConstructor): # 'extend' is blacklisted because it is used by # construct_python_object_apply to add `listitems` to a newly generate # python instance def get_state_keys_blacklist(self): return ['^extend$', '^__.*__$'] def get_state_keys_blacklist_regexp(self): if not hasattr(self, 'state_keys_blacklist_regexp'): self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')') return self.state_keys_blacklist_regexp def construct_python_str(self, node): return self.construct_scalar(node) def construct_python_unicode(self, node): return self.construct_scalar(node) def construct_python_bytes(self, node): try: value = self.construct_scalar(node).encode('ascii') except UnicodeEncodeError as 
exc: raise ConstructorError(None, None, "failed to convert base64 data into ascii: %s" % exc, node.start_mark) try: if hasattr(base64, 'decodebytes'): return base64.decodebytes(value) else: return base64.decodestring(value) except binascii.Error as exc: raise ConstructorError(None, None, "failed to decode base64 data: %s" % exc, node.start_mark) def construct_python_long(self, node): return self.construct_yaml_int(node) def construct_python_complex(self, node): return complex(self.construct_scalar(node)) def construct_python_tuple(self, node): return tuple(self.construct_sequence(node)) def find_python_module(self, name, mark, unsafe=False): if not name: raise ConstructorError("while constructing a Python module", mark, "expected non-empty name appended to the tag", mark) if unsafe: try: __import__(name) except ImportError as exc: raise ConstructorError("while constructing a Python module", mark, "cannot find module %r (%s)" % (name, exc), mark) if name not in sys.modules: raise ConstructorError("while constructing a Python module", mark, "module %r is not imported" % name, mark) return sys.modules[name] def find_python_name(self, name, mark, unsafe=False): if not name: raise ConstructorError("while constructing a Python object", mark, "expected non-empty name appended to the tag", mark) if '.' in name: module_name, object_name = name.rsplit('.', 1) else: module_name = 'builtins' object_name = name if unsafe: try: __import__(module_name) except ImportError as exc: raise ConstructorError("while constructing a Python object", mark, "cannot find module %r (%s)" % (module_name, exc), mark) if module_name not in sys.modules: raise ConstructorError("while constructing a Python object", mark, "module %r is not imported" % module_name, mark) module = sys.modules[module_name] if not hasattr(module, object_name): raise ConstructorError("while constructing a Python object", mark, "cannot find %r in the module %r" % (object_name, module.__name__), mark) return getattr(module, object_name) def construct_python_name(self, suffix, node): value = self.construct_scalar(node) if value: raise ConstructorError("while constructing a Python name", node.start_mark, "expected the empty value, but found %r" % value, node.start_mark) return self.find_python_name(suffix, node.start_mark) def construct_python_module(self, suffix, node): value = self.construct_scalar(node) if value: raise ConstructorError("while constructing a Python module", node.start_mark, "expected the empty value, but found %r" % value, node.start_mark) return self.find_python_module(suffix, node.start_mark) def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False, unsafe=False): if not args: args = [] if not kwds: kwds = {} cls = self.find_python_name(suffix, node.start_mark) if not (unsafe or isinstance(cls, type)): raise ConstructorError("while constructing a Python instance", node.start_mark, "expected a class, but found %r" % type(cls), node.start_mark) if newobj and isinstance(cls, type): return cls.__new__(cls, *args, **kwds) else: return cls(*args, **kwds) def set_python_instance_state(self, instance, state, unsafe=False): if hasattr(instance, '__setstate__'): instance.__setstate__(state) else: slotstate = {} if isinstance(state, tuple) and len(state) == 2: state, slotstate = state if hasattr(instance, '__dict__'): if not unsafe and state: for key in state.keys(): self.check_state_key(key) instance.__dict__.update(state) elif state: slotstate.update(state) for key, value in slotstate.items(): if not unsafe: 
self.check_state_key(key) setattr(instance, key, value) def construct_python_object(self, suffix, node): # Format: # !!python/object:module.name { ... state ... } instance = self.make_python_instance(suffix, node, newobj=True) yield instance deep = hasattr(instance, '__setstate__') state = self.construct_mapping(node, deep=deep) self.set_python_instance_state(instance, state) def construct_python_object_apply(self, suffix, node, newobj=False): # Format: # !!python/object/apply # (or !!python/object/new) # args: [ ... arguments ... ] # kwds: { ... keywords ... } # state: ... state ... # listitems: [ ... listitems ... ] # dictitems: { ... dictitems ... } # or short format: # !!python/object/apply [ ... arguments ... ] # The difference between !!python/object/apply and !!python/object/new # is how an object is created, check make_python_instance for details. if isinstance(node, SequenceNode): args = self.construct_sequence(node, deep=True) kwds = {} state = {} listitems = [] dictitems = {} else: value = self.construct_mapping(node, deep=True) args = value.get('args', []) kwds = value.get('kwds', {}) state = value.get('state', {}) listitems = value.get('listitems', []) dictitems = value.get('dictitems', {}) instance = self.make_python_instance(suffix, node, args, kwds, newobj) if state: self.set_python_instance_state(instance, state) if listitems: instance.extend(listitems) if dictitems: for key in dictitems: instance[key] = dictitems[key] return instance def construct_python_object_new(self, suffix, node): return self.construct_python_object_apply(suffix, node, newobj=True) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/none', FullConstructor.construct_yaml_null) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/bool', FullConstructor.construct_yaml_bool) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/str', FullConstructor.construct_python_str) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/unicode', FullConstructor.construct_python_unicode) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/bytes', FullConstructor.construct_python_bytes) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/int', FullConstructor.construct_yaml_int) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/long', FullConstructor.construct_python_long) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/float', FullConstructor.construct_yaml_float) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/complex', FullConstructor.construct_python_complex) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/list', FullConstructor.construct_yaml_seq) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/tuple', FullConstructor.construct_python_tuple) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/dict', FullConstructor.construct_yaml_map) FullConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/name:', FullConstructor.construct_python_name) class UnsafeConstructor(FullConstructor): def find_python_module(self, name, mark): return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True) def find_python_name(self, name, mark): return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True) def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): return super(UnsafeConstructor, self).make_python_instance( suffix, node, args, kwds, newobj, unsafe=True) def set_python_instance_state(self, instance, state): return super(UnsafeConstructor, 
self).set_python_instance_state( instance, state, unsafe=True) UnsafeConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/module:', UnsafeConstructor.construct_python_module) UnsafeConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/object:', UnsafeConstructor.construct_python_object) UnsafeConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/object/new:', UnsafeConstructor.construct_python_object_new) UnsafeConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/object/apply:', UnsafeConstructor.construct_python_object_apply) # Constructor is same as UnsafeConstructor. Need to leave this in place in case # people have extended it directly. class Constructor(UnsafeConstructor): pass invoke-2.2.0/invoke/vendor/yaml/cyaml.py000066400000000000000000000074131445356551000202340ustar00rootroot00000000000000 __all__ = [ 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper' ] from yaml._yaml import CParser, CEmitter from .constructor import * from .serializer import * from .representer import * from .resolver import * class CBaseLoader(CParser, BaseConstructor, BaseResolver): def __init__(self, stream): CParser.__init__(self, stream) BaseConstructor.__init__(self) BaseResolver.__init__(self) class CSafeLoader(CParser, SafeConstructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) SafeConstructor.__init__(self) Resolver.__init__(self) class CFullLoader(CParser, FullConstructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) FullConstructor.__init__(self) Resolver.__init__(self) class CUnsafeLoader(CParser, UnsafeConstructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) UnsafeConstructor.__init__(self) Resolver.__init__(self) class CLoader(CParser, Constructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) Constructor.__init__(self) Resolver.__init__(self) class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) class CSafeDumper(CEmitter, SafeRepresenter, Resolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) SafeRepresenter.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) class CDumper(CEmitter, Serializer, Representer, Resolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, 
encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) invoke-2.2.0/invoke/vendor/yaml/dumper.py000066400000000000000000000054251445356551000204240ustar00rootroot00000000000000 __all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] from .emitter import * from .serializer import * from .representer import * from .resolver import * class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) SafeRepresenter.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) class Dumper(Emitter, Serializer, Representer, Resolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) invoke-2.2.0/invoke/vendor/yaml/emitter.py000066400000000000000000001237761445356551000206130ustar00rootroot00000000000000 # Emitter expects events obeying the following grammar: # stream ::= STREAM-START document* STREAM-END # document ::= DOCUMENT-START node DOCUMENT-END # node ::= SCALAR | sequence | mapping # sequence ::= SEQUENCE-START node* SEQUENCE-END # mapping ::= MAPPING-START (node node)* MAPPING-END __all__ = ['Emitter', 'EmitterError'] from .error import YAMLError from .events import * class EmitterError(YAMLError): pass class ScalarAnalysis: def __init__(self, scalar, empty, multiline, allow_flow_plain, allow_block_plain, 
allow_single_quoted, allow_double_quoted, allow_block): self.scalar = scalar self.empty = empty self.multiline = multiline self.allow_flow_plain = allow_flow_plain self.allow_block_plain = allow_block_plain self.allow_single_quoted = allow_single_quoted self.allow_double_quoted = allow_double_quoted self.allow_block = allow_block class Emitter: DEFAULT_TAG_PREFIXES = { '!' : '!', 'tag:yaml.org,2002:' : '!!', } def __init__(self, stream, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None): # The stream should have the methods `write` and possibly `flush`. self.stream = stream # Encoding can be overridden by STREAM-START. self.encoding = None # Emitter is a state machine with a stack of states to handle nested # structures. self.states = [] self.state = self.expect_stream_start # Current event and the event queue. self.events = [] self.event = None # The current indentation level and the stack of previous indents. self.indents = [] self.indent = None # Flow level. self.flow_level = 0 # Contexts. self.root_context = False self.sequence_context = False self.mapping_context = False self.simple_key_context = False # Characteristics of the last emitted character: # - current position. # - is it a whitespace? # - is it an indention character # (indentation space, '-', '?', or ':')? self.line = 0 self.column = 0 self.whitespace = True self.indention = True # Whether the document requires an explicit document indicator self.open_ended = False # Formatting details. self.canonical = canonical self.allow_unicode = allow_unicode self.best_indent = 2 if indent and 1 < indent < 10: self.best_indent = indent self.best_width = 80 if width and width > self.best_indent*2: self.best_width = width self.best_line_break = '\n' if line_break in ['\r', '\n', '\r\n']: self.best_line_break = line_break # Tag prefixes. self.tag_prefixes = None # Prepared anchor and tag. self.prepared_anchor = None self.prepared_tag = None # Scalar analysis and style. self.analysis = None self.style = None def dispose(self): # Reset the state attributes (to clear self-references) self.states = [] self.state = None def emit(self, event): self.events.append(event) while not self.need_more_events(): self.event = self.events.pop(0) self.state() self.event = None # In some cases, we wait for a few next events before emitting. def need_more_events(self): if not self.events: return True event = self.events[0] if isinstance(event, DocumentStartEvent): return self.need_events(1) elif isinstance(event, SequenceStartEvent): return self.need_events(2) elif isinstance(event, MappingStartEvent): return self.need_events(3) else: return False def need_events(self, count): level = 0 for event in self.events[1:]: if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): level += 1 elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): level -= 1 elif isinstance(event, StreamEndEvent): level = -1 if level < 0: return False return (len(self.events) < count+1) def increase_indent(self, flow=False, indentless=False): self.indents.append(self.indent) if self.indent is None: if flow: self.indent = self.best_indent else: self.indent = 0 elif not indentless: self.indent += self.best_indent # States. # Stream handlers. 
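    # Editor's note -- illustrative, not upstream code. The emitter is a
    # pushdown automaton: `self.state` always holds a bound expect_*()
    # method, emit() queues incoming events, and each state consumes
    # `self.event`, writes output, then either assigns the next state
    # directly or pushes a continuation onto `self.states` for nested
    # collections. Driving it by hand through the module-level wrapper (a
    # minimal sketch, assuming the vendored package is importable as
    # `yaml`):
    #
    #     events = [
    #         yaml.StreamStartEvent(),
    #         yaml.DocumentStartEvent(),
    #         yaml.ScalarEvent(anchor=None, tag=None,
    #                          implicit=(True, True), value='hi'),
    #         yaml.DocumentEndEvent(),
    #         yaml.StreamEndEvent(),
    #     ]
    #     yaml.emit(events)   # -> 'hi\n...\n' (roughly)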
def expect_stream_start(self): if isinstance(self.event, StreamStartEvent): if self.event.encoding and not hasattr(self.stream, 'encoding'): self.encoding = self.event.encoding self.write_stream_start() self.state = self.expect_first_document_start else: raise EmitterError("expected StreamStartEvent, but got %s" % self.event) def expect_nothing(self): raise EmitterError("expected nothing, but got %s" % self.event) # Document handlers. def expect_first_document_start(self): return self.expect_document_start(first=True) def expect_document_start(self, first=False): if isinstance(self.event, DocumentStartEvent): if (self.event.version or self.event.tags) and self.open_ended: self.write_indicator('...', True) self.write_indent() if self.event.version: version_text = self.prepare_version(self.event.version) self.write_version_directive(version_text) self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() if self.event.tags: handles = sorted(self.event.tags.keys()) for handle in handles: prefix = self.event.tags[handle] self.tag_prefixes[prefix] = handle handle_text = self.prepare_tag_handle(handle) prefix_text = self.prepare_tag_prefix(prefix) self.write_tag_directive(handle_text, prefix_text) implicit = (first and not self.event.explicit and not self.canonical and not self.event.version and not self.event.tags and not self.check_empty_document()) if not implicit: self.write_indent() self.write_indicator('---', True) if self.canonical: self.write_indent() self.state = self.expect_document_root elif isinstance(self.event, StreamEndEvent): if self.open_ended: self.write_indicator('...', True) self.write_indent() self.write_stream_end() self.state = self.expect_nothing else: raise EmitterError("expected DocumentStartEvent, but got %s" % self.event) def expect_document_end(self): if isinstance(self.event, DocumentEndEvent): self.write_indent() if self.event.explicit: self.write_indicator('...', True) self.write_indent() self.flush_stream() self.state = self.expect_document_start else: raise EmitterError("expected DocumentEndEvent, but got %s" % self.event) def expect_document_root(self): self.states.append(self.expect_document_end) self.expect_node(root=True) # Node handlers. def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False): self.root_context = root self.sequence_context = sequence self.mapping_context = mapping self.simple_key_context = simple_key if isinstance(self.event, AliasEvent): self.expect_alias() elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): self.process_anchor('&') self.process_tag() if isinstance(self.event, ScalarEvent): self.expect_scalar() elif isinstance(self.event, SequenceStartEvent): if self.flow_level or self.canonical or self.event.flow_style \ or self.check_empty_sequence(): self.expect_flow_sequence() else: self.expect_block_sequence() elif isinstance(self.event, MappingStartEvent): if self.flow_level or self.canonical or self.event.flow_style \ or self.check_empty_mapping(): self.expect_flow_mapping() else: self.expect_block_mapping() else: raise EmitterError("expected NodeEvent, but got %s" % self.event) def expect_alias(self): if self.event.anchor is None: raise EmitterError("anchor is not specified for alias") self.process_anchor('*') self.state = self.states.pop() def expect_scalar(self): self.increase_indent(flow=True) self.process_scalar() self.indent = self.indents.pop() self.state = self.states.pop() # Flow sequence handlers. 
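    # Editor's note -- illustrative, not upstream code. expect_node() above
    # routes collections into the flow handlers here ('[..]' / '{..}')
    # whenever the emitter is already at flow_level > 0, canonical output
    # was requested, the event carries flow_style, or the collection is
    # empty; otherwise the block handlers further below are used. This is
    # observable through the dumper (assuming the vendored package is
    # importable as `yaml`):
    #
    #     yaml.dump([1, 2], default_flow_style=True)   # -> '[1, 2]\n'
    #     yaml.dump([1, 2], default_flow_style=False)  # -> '- 1\n- 2\n'
    #     yaml.dump([])                                # -> '[]\n'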
def expect_flow_sequence(self): self.write_indicator('[', True, whitespace=True) self.flow_level += 1 self.increase_indent(flow=True) self.state = self.expect_first_flow_sequence_item def expect_first_flow_sequence_item(self): if isinstance(self.event, SequenceEndEvent): self.indent = self.indents.pop() self.flow_level -= 1 self.write_indicator(']', False) self.state = self.states.pop() else: if self.canonical or self.column > self.best_width: self.write_indent() self.states.append(self.expect_flow_sequence_item) self.expect_node(sequence=True) def expect_flow_sequence_item(self): if isinstance(self.event, SequenceEndEvent): self.indent = self.indents.pop() self.flow_level -= 1 if self.canonical: self.write_indicator(',', False) self.write_indent() self.write_indicator(']', False) self.state = self.states.pop() else: self.write_indicator(',', False) if self.canonical or self.column > self.best_width: self.write_indent() self.states.append(self.expect_flow_sequence_item) self.expect_node(sequence=True) # Flow mapping handlers. def expect_flow_mapping(self): self.write_indicator('{', True, whitespace=True) self.flow_level += 1 self.increase_indent(flow=True) self.state = self.expect_first_flow_mapping_key def expect_first_flow_mapping_key(self): if isinstance(self.event, MappingEndEvent): self.indent = self.indents.pop() self.flow_level -= 1 self.write_indicator('}', False) self.state = self.states.pop() else: if self.canonical or self.column > self.best_width: self.write_indent() if not self.canonical and self.check_simple_key(): self.states.append(self.expect_flow_mapping_simple_value) self.expect_node(mapping=True, simple_key=True) else: self.write_indicator('?', True) self.states.append(self.expect_flow_mapping_value) self.expect_node(mapping=True) def expect_flow_mapping_key(self): if isinstance(self.event, MappingEndEvent): self.indent = self.indents.pop() self.flow_level -= 1 if self.canonical: self.write_indicator(',', False) self.write_indent() self.write_indicator('}', False) self.state = self.states.pop() else: self.write_indicator(',', False) if self.canonical or self.column > self.best_width: self.write_indent() if not self.canonical and self.check_simple_key(): self.states.append(self.expect_flow_mapping_simple_value) self.expect_node(mapping=True, simple_key=True) else: self.write_indicator('?', True) self.states.append(self.expect_flow_mapping_value) self.expect_node(mapping=True) def expect_flow_mapping_simple_value(self): self.write_indicator(':', False) self.states.append(self.expect_flow_mapping_key) self.expect_node(mapping=True) def expect_flow_mapping_value(self): if self.canonical or self.column > self.best_width: self.write_indent() self.write_indicator(':', True) self.states.append(self.expect_flow_mapping_key) self.expect_node(mapping=True) # Block sequence handlers. def expect_block_sequence(self): indentless = (self.mapping_context and not self.indention) self.increase_indent(flow=False, indentless=indentless) self.state = self.expect_first_block_sequence_item def expect_first_block_sequence_item(self): return self.expect_block_sequence_item(first=True) def expect_block_sequence_item(self, first=False): if not first and isinstance(self.event, SequenceEndEvent): self.indent = self.indents.pop() self.state = self.states.pop() else: self.write_indent() self.write_indicator('-', True, indention=True) self.states.append(self.expect_block_sequence_item) self.expect_node(sequence=True) # Block mapping handlers. 
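# Editor's illustrative note (not part of the vendored PyYAML source): the
# same flow/block choice applies to mappings:
#
#   yaml.dump({'a': 1})                           # -> 'a: 1\n'   (block)
#   yaml.dump({'a': 1}, default_flow_style=True)  # -> '{a: 1}\n' (flow)
#   yaml.dump({})                                 # -> '{}\n'     (empty => flow)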
def expect_block_mapping(self): self.increase_indent(flow=False) self.state = self.expect_first_block_mapping_key def expect_first_block_mapping_key(self): return self.expect_block_mapping_key(first=True) def expect_block_mapping_key(self, first=False): if not first and isinstance(self.event, MappingEndEvent): self.indent = self.indents.pop() self.state = self.states.pop() else: self.write_indent() if self.check_simple_key(): self.states.append(self.expect_block_mapping_simple_value) self.expect_node(mapping=True, simple_key=True) else: self.write_indicator('?', True, indention=True) self.states.append(self.expect_block_mapping_value) self.expect_node(mapping=True) def expect_block_mapping_simple_value(self): self.write_indicator(':', False) self.states.append(self.expect_block_mapping_key) self.expect_node(mapping=True) def expect_block_mapping_value(self): self.write_indent() self.write_indicator(':', True, indention=True) self.states.append(self.expect_block_mapping_key) self.expect_node(mapping=True) # Checkers. def check_empty_sequence(self): return (isinstance(self.event, SequenceStartEvent) and self.events and isinstance(self.events[0], SequenceEndEvent)) def check_empty_mapping(self): return (isinstance(self.event, MappingStartEvent) and self.events and isinstance(self.events[0], MappingEndEvent)) def check_empty_document(self): if not isinstance(self.event, DocumentStartEvent) or not self.events: return False event = self.events[0] return (isinstance(event, ScalarEvent) and event.anchor is None and event.tag is None and event.implicit and event.value == '') def check_simple_key(self): length = 0 if isinstance(self.event, NodeEvent) and self.event.anchor is not None: if self.prepared_anchor is None: self.prepared_anchor = self.prepare_anchor(self.event.anchor) length += len(self.prepared_anchor) if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ and self.event.tag is not None: if self.prepared_tag is None: self.prepared_tag = self.prepare_tag(self.event.tag) length += len(self.prepared_tag) if isinstance(self.event, ScalarEvent): if self.analysis is None: self.analysis = self.analyze_scalar(self.event.value) length += len(self.analysis.scalar) return (length < 128 and (isinstance(self.event, AliasEvent) or (isinstance(self.event, ScalarEvent) and not self.analysis.empty and not self.analysis.multiline) or self.check_empty_sequence() or self.check_empty_mapping())) # Anchor, Tag, and Scalar processors. def process_anchor(self, indicator): if self.event.anchor is None: self.prepared_anchor = None return if self.prepared_anchor is None: self.prepared_anchor = self.prepare_anchor(self.event.anchor) if self.prepared_anchor: self.write_indicator(indicator+self.prepared_anchor, True) self.prepared_anchor = None def process_tag(self): tag = self.event.tag if isinstance(self.event, ScalarEvent): if self.style is None: self.style = self.choose_scalar_style() if ((not self.canonical or tag is None) and ((self.style == '' and self.event.implicit[0]) or (self.style != '' and self.event.implicit[1]))): self.prepared_tag = None return if self.event.implicit[0] and tag is None: tag = '!' 
self.prepared_tag = None else: if (not self.canonical or tag is None) and self.event.implicit: self.prepared_tag = None return if tag is None: raise EmitterError("tag is not specified") if self.prepared_tag is None: self.prepared_tag = self.prepare_tag(tag) if self.prepared_tag: self.write_indicator(self.prepared_tag, True) self.prepared_tag = None def choose_scalar_style(self): if self.analysis is None: self.analysis = self.analyze_scalar(self.event.value) if self.event.style == '"' or self.canonical: return '"' if not self.event.style and self.event.implicit[0]: if (not (self.simple_key_context and (self.analysis.empty or self.analysis.multiline)) and (self.flow_level and self.analysis.allow_flow_plain or (not self.flow_level and self.analysis.allow_block_plain))): return '' if self.event.style and self.event.style in '|>': if (not self.flow_level and not self.simple_key_context and self.analysis.allow_block): return self.event.style if not self.event.style or self.event.style == '\'': if (self.analysis.allow_single_quoted and not (self.simple_key_context and self.analysis.multiline)): return '\'' return '"' def process_scalar(self): if self.analysis is None: self.analysis = self.analyze_scalar(self.event.value) if self.style is None: self.style = self.choose_scalar_style() split = (not self.simple_key_context) #if self.analysis.multiline and split \ # and (not self.style or self.style in '\'\"'): # self.write_indent() if self.style == '"': self.write_double_quoted(self.analysis.scalar, split) elif self.style == '\'': self.write_single_quoted(self.analysis.scalar, split) elif self.style == '>': self.write_folded(self.analysis.scalar) elif self.style == '|': self.write_literal(self.analysis.scalar) else: self.write_plain(self.analysis.scalar, split) self.analysis = None self.style = None # Analyzers. def prepare_version(self, version): major, minor = version if major != 1: raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) return '%d.%d' % (major, minor) def prepare_tag_handle(self, handle): if not handle: raise EmitterError("tag handle must not be empty") if handle[0] != '!' or handle[-1] != '!': raise EmitterError("tag handle must start and end with '!': %r" % handle) for ch in handle[1:-1]: if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_'): raise EmitterError("invalid character %r in the tag handle: %r" % (ch, handle)) return handle def prepare_tag_prefix(self, prefix): if not prefix: raise EmitterError("tag prefix must not be empty") chunks = [] start = end = 0 if prefix[0] == '!': end = 1 while end < len(prefix): ch = prefix[end] if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-;/?!:@&=+$,_.~*\'()[]': end += 1 else: if start < end: chunks.append(prefix[start:end]) start = end = end+1 data = ch.encode('utf-8') for ch in data: chunks.append('%%%02X' % ord(ch)) if start < end: chunks.append(prefix[start:end]) return ''.join(chunks) def prepare_tag(self, tag): if not tag: raise EmitterError("tag must not be empty") if tag == '!': return tag handle = None suffix = tag prefixes = sorted(self.tag_prefixes.keys()) for prefix in prefixes: if tag.startswith(prefix) \ and (prefix == '!' or len(prefix) < len(tag)): handle = self.tag_prefixes[prefix] suffix = tag[len(prefix):] chunks = [] start = end = 0 while end < len(suffix): ch = suffix[end] if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-;/?:@&=+$,_.~*\'()[]' \ or (ch == '!' 
and handle != '!'): end += 1 else: if start < end: chunks.append(suffix[start:end]) start = end = end+1 data = ch.encode('utf-8') for ch in data: chunks.append('%%%02X' % ch) if start < end: chunks.append(suffix[start:end]) suffix_text = ''.join(chunks) if handle: return '%s%s' % (handle, suffix_text) else: return '!<%s>' % suffix_text def prepare_anchor(self, anchor): if not anchor: raise EmitterError("anchor must not be empty") for ch in anchor: if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_'): raise EmitterError("invalid character %r in the anchor: %r" % (ch, anchor)) return anchor def analyze_scalar(self, scalar): # Empty scalar is a special case. if not scalar: return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, allow_flow_plain=False, allow_block_plain=True, allow_single_quoted=True, allow_double_quoted=True, allow_block=False) # Indicators and special characters. block_indicators = False flow_indicators = False line_breaks = False special_characters = False # Important whitespace combinations. leading_space = False leading_break = False trailing_space = False trailing_break = False break_space = False space_break = False # Check document indicators. if scalar.startswith('---') or scalar.startswith('...'): block_indicators = True flow_indicators = True # First character or preceded by a whitespace. preceded_by_whitespace = True # Last character or followed by a whitespace. followed_by_whitespace = (len(scalar) == 1 or scalar[1] in '\0 \t\r\n\x85\u2028\u2029') # The previous character is a space. previous_space = False # The previous character is a break. previous_break = False index = 0 while index < len(scalar): ch = scalar[index] # Check for indicators. if index == 0: # Leading indicators are special characters. if ch in '#,[]{}&*!|>\'\"%@`': flow_indicators = True block_indicators = True if ch in '?:': flow_indicators = True if followed_by_whitespace: block_indicators = True if ch == '-' and followed_by_whitespace: flow_indicators = True block_indicators = True else: # Some indicators cannot appear within a scalar as well. if ch in ',?[]{}': flow_indicators = True if ch == ':': flow_indicators = True if followed_by_whitespace: block_indicators = True if ch == '#' and preceded_by_whitespace: flow_indicators = True block_indicators = True # Check for line breaks, special, and unicode characters. if ch in '\n\x85\u2028\u2029': line_breaks = True if not (ch == '\n' or '\x20' <= ch <= '\x7E'): if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF' or '\uE000' <= ch <= '\uFFFD' or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF': unicode_characters = True if not self.allow_unicode: special_characters = True else: special_characters = True # Detect important whitespace combinations. if ch == ' ': if index == 0: leading_space = True if index == len(scalar)-1: trailing_space = True if previous_break: break_space = True previous_space = True previous_break = False elif ch in '\n\x85\u2028\u2029': if index == 0: leading_break = True if index == len(scalar)-1: trailing_break = True if previous_space: space_break = True previous_space = False previous_break = True else: previous_space = False previous_break = False # Prepare for the next character. index += 1 preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029') followed_by_whitespace = (index+1 >= len(scalar) or scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029') # Let's decide what styles are allowed. 
allow_flow_plain = True allow_block_plain = True allow_single_quoted = True allow_double_quoted = True allow_block = True # Leading and trailing whitespaces are bad for plain scalars. if (leading_space or leading_break or trailing_space or trailing_break): allow_flow_plain = allow_block_plain = False # We do not permit trailing spaces for block scalars. if trailing_space: allow_block = False # Spaces at the beginning of a new line are only acceptable for block # scalars. if break_space: allow_flow_plain = allow_block_plain = allow_single_quoted = False # Spaces followed by breaks, as well as special character are only # allowed for double quoted scalars. if space_break or special_characters: allow_flow_plain = allow_block_plain = \ allow_single_quoted = allow_block = False # Although the plain scalar writer supports breaks, we never emit # multiline plain scalars. if line_breaks: allow_flow_plain = allow_block_plain = False # Flow indicators are forbidden for flow plain scalars. if flow_indicators: allow_flow_plain = False # Block indicators are forbidden for block plain scalars. if block_indicators: allow_block_plain = False return ScalarAnalysis(scalar=scalar, empty=False, multiline=line_breaks, allow_flow_plain=allow_flow_plain, allow_block_plain=allow_block_plain, allow_single_quoted=allow_single_quoted, allow_double_quoted=allow_double_quoted, allow_block=allow_block) # Writers. def flush_stream(self): if hasattr(self.stream, 'flush'): self.stream.flush() def write_stream_start(self): # Write BOM if needed. if self.encoding and self.encoding.startswith('utf-16'): self.stream.write('\uFEFF'.encode(self.encoding)) def write_stream_end(self): self.flush_stream() def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False): if self.whitespace or not need_whitespace: data = indicator else: data = ' '+indicator self.whitespace = whitespace self.indention = self.indention and indention self.column += len(data) self.open_ended = False if self.encoding: data = data.encode(self.encoding) self.stream.write(data) def write_indent(self): indent = self.indent or 0 if not self.indention or self.column > indent \ or (self.column == indent and not self.whitespace): self.write_line_break() if self.column < indent: self.whitespace = True data = ' '*(indent-self.column) self.column = indent if self.encoding: data = data.encode(self.encoding) self.stream.write(data) def write_line_break(self, data=None): if data is None: data = self.best_line_break self.whitespace = True self.indention = True self.line += 1 self.column = 0 if self.encoding: data = data.encode(self.encoding) self.stream.write(data) def write_version_directive(self, version_text): data = '%%YAML %s' % version_text if self.encoding: data = data.encode(self.encoding) self.stream.write(data) self.write_line_break() def write_tag_directive(self, handle_text, prefix_text): data = '%%TAG %s %s' % (handle_text, prefix_text) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) self.write_line_break() # Scalar streams. 
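# Editor's illustrative note (not part of the vendored PyYAML source): the
# writers below realize the styles picked by choose_scalar_style() from the
# analysis above -- e.g. 'hello' stays plain, '{oops' trips the indicator
# flags and comes out single-quoted, and a scalar with leading or trailing
# whitespace cannot use either plain style at all.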
def write_single_quoted(self, text, split=True): self.write_indicator('\'', True) spaces = False breaks = False start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if spaces: if ch is None or ch != ' ': if start+1 == end and self.column > self.best_width and split \ and start != 0 and end != len(text): self.write_indent() else: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end elif breaks: if ch is None or ch not in '\n\x85\u2028\u2029': if text[start] == '\n': self.write_line_break() for br in text[start:end]: if br == '\n': self.write_line_break() else: self.write_line_break(br) self.write_indent() start = end else: if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'': if start < end: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end if ch == '\'': data = '\'\'' self.column += 2 if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end + 1 if ch is not None: spaces = (ch == ' ') breaks = (ch in '\n\x85\u2028\u2029') end += 1 self.write_indicator('\'', False) ESCAPE_REPLACEMENTS = { '\0': '0', '\x07': 'a', '\x08': 'b', '\x09': 't', '\x0A': 'n', '\x0B': 'v', '\x0C': 'f', '\x0D': 'r', '\x1B': 'e', '\"': '\"', '\\': '\\', '\x85': 'N', '\xA0': '_', '\u2028': 'L', '\u2029': 'P', } def write_double_quoted(self, text, split=True): self.write_indicator('"', True) start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \ or not ('\x20' <= ch <= '\x7E' or (self.allow_unicode and ('\xA0' <= ch <= '\uD7FF' or '\uE000' <= ch <= '\uFFFD'))): if start < end: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end if ch is not None: if ch in self.ESCAPE_REPLACEMENTS: data = '\\'+self.ESCAPE_REPLACEMENTS[ch] elif ch <= '\xFF': data = '\\x%02X' % ord(ch) elif ch <= '\uFFFF': data = '\\u%04X' % ord(ch) else: data = '\\U%08X' % ord(ch) self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end+1 if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \ and self.column+(end-start) > self.best_width and split: data = text[start:end]+'\\' if start < end: start = end self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) self.write_indent() self.whitespace = False self.indention = False if text[start] == ' ': data = '\\' self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) end += 1 self.write_indicator('"', False) def determine_block_hints(self, text): hints = '' if text: if text[0] in ' \n\x85\u2028\u2029': hints += str(self.best_indent) if text[-1] not in '\n\x85\u2028\u2029': hints += '-' elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029': hints += '+' return hints def write_folded(self, text): hints = self.determine_block_hints(text) self.write_indicator('>'+hints, True) if hints[-1:] == '+': self.open_ended = True self.write_line_break() leading_space = True spaces = False breaks = True start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if breaks: if ch is None or ch not in '\n\x85\u2028\u2029': if not leading_space and ch is not None and ch != ' ' \ and text[start] == '\n': self.write_line_break() leading_space = (ch == ' ') for 
br in text[start:end]: if br == '\n': self.write_line_break() else: self.write_line_break(br) if ch is not None: self.write_indent() start = end elif spaces: if ch != ' ': if start+1 == end and self.column > self.best_width: self.write_indent() else: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end else: if ch is None or ch in ' \n\x85\u2028\u2029': data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) if ch is None: self.write_line_break() start = end if ch is not None: breaks = (ch in '\n\x85\u2028\u2029') spaces = (ch == ' ') end += 1 def write_literal(self, text): hints = self.determine_block_hints(text) self.write_indicator('|'+hints, True) if hints[-1:] == '+': self.open_ended = True self.write_line_break() breaks = True start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if breaks: if ch is None or ch not in '\n\x85\u2028\u2029': for br in text[start:end]: if br == '\n': self.write_line_break() else: self.write_line_break(br) if ch is not None: self.write_indent() start = end else: if ch is None or ch in '\n\x85\u2028\u2029': data = text[start:end] if self.encoding: data = data.encode(self.encoding) self.stream.write(data) if ch is None: self.write_line_break() start = end if ch is not None: breaks = (ch in '\n\x85\u2028\u2029') end += 1 def write_plain(self, text, split=True): if self.root_context: self.open_ended = True if not text: return if not self.whitespace: data = ' ' self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) self.whitespace = False self.indention = False spaces = False breaks = False start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if spaces: if ch != ' ': if start+1 == end and self.column > self.best_width and split: self.write_indent() self.whitespace = False self.indention = False else: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end elif breaks: if ch not in '\n\x85\u2028\u2029': if text[start] == '\n': self.write_line_break() for br in text[start:end]: if br == '\n': self.write_line_break() else: self.write_line_break(br) self.write_indent() self.whitespace = False self.indention = False start = end else: if ch is None or ch in ' \n\x85\u2028\u2029': data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end if ch is not None: spaces = (ch == ' ') breaks = (ch in '\n\x85\u2028\u2029') end += 1 invoke-2.2.0/invoke/vendor/yaml/error.py000066400000000000000000000047451445356551000202650ustar00rootroot00000000000000 __all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] class Mark: def __init__(self, name, index, line, column, buffer, pointer): self.name = name self.index = index self.line = line self.column = column self.buffer = buffer self.pointer = pointer def get_snippet(self, indent=4, max_length=75): if self.buffer is None: return None head = '' start = self.pointer while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029': start -= 1 if self.pointer-start > max_length/2-1: head = ' ... ' start += 5 break tail = '' end = self.pointer while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': end += 1 if end-self.pointer > max_length/2-1: tail = ' ... 
' end -= 5 break snippet = self.buffer[start:end] return ' '*indent + head + snippet + tail + '\n' \ + ' '*(indent+self.pointer-start+len(head)) + '^' def __str__(self): snippet = self.get_snippet() where = " in \"%s\", line %d, column %d" \ % (self.name, self.line+1, self.column+1) if snippet is not None: where += ":\n"+snippet return where class YAMLError(Exception): pass class MarkedYAMLError(YAMLError): def __init__(self, context=None, context_mark=None, problem=None, problem_mark=None, note=None): self.context = context self.context_mark = context_mark self.problem = problem self.problem_mark = problem_mark self.note = note def __str__(self): lines = [] if self.context is not None: lines.append(self.context) if self.context_mark is not None \ and (self.problem is None or self.problem_mark is None or self.context_mark.name != self.problem_mark.name or self.context_mark.line != self.problem_mark.line or self.context_mark.column != self.problem_mark.column): lines.append(str(self.context_mark)) if self.problem is not None: lines.append(self.problem) if self.problem_mark is not None: lines.append(str(self.problem_mark)) if self.note is not None: lines.append(self.note) return '\n'.join(lines) invoke-2.2.0/invoke/vendor/yaml/events.py000066400000000000000000000046151445356551000204340ustar00rootroot00000000000000 # Abstract classes. class Event(object): def __init__(self, start_mark=None, end_mark=None): self.start_mark = start_mark self.end_mark = end_mark def __repr__(self): attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] if hasattr(self, key)] arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes]) return '%s(%s)' % (self.__class__.__name__, arguments) class NodeEvent(Event): def __init__(self, anchor, start_mark=None, end_mark=None): self.anchor = anchor self.start_mark = start_mark self.end_mark = end_mark class CollectionStartEvent(NodeEvent): def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, flow_style=None): self.anchor = anchor self.tag = tag self.implicit = implicit self.start_mark = start_mark self.end_mark = end_mark self.flow_style = flow_style class CollectionEndEvent(Event): pass # Implementations. 
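# Editor's illustrative note (not part of the vendored PyYAML source):
# parsing the document '- 1' with PyYAML's event API yields, in order:
# StreamStartEvent, DocumentStartEvent, SequenceStartEvent,
# ScalarEvent(value='1'), SequenceEndEvent, DocumentEndEvent, StreamEndEvent.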
class StreamStartEvent(Event): def __init__(self, start_mark=None, end_mark=None, encoding=None): self.start_mark = start_mark self.end_mark = end_mark self.encoding = encoding class StreamEndEvent(Event): pass class DocumentStartEvent(Event): def __init__(self, start_mark=None, end_mark=None, explicit=None, version=None, tags=None): self.start_mark = start_mark self.end_mark = end_mark self.explicit = explicit self.version = version self.tags = tags class DocumentEndEvent(Event): def __init__(self, start_mark=None, end_mark=None, explicit=None): self.start_mark = start_mark self.end_mark = end_mark self.explicit = explicit class AliasEvent(NodeEvent): pass class ScalarEvent(NodeEvent): def __init__(self, anchor, tag, implicit, value, start_mark=None, end_mark=None, style=None): self.anchor = anchor self.tag = tag self.implicit = implicit self.value = value self.start_mark = start_mark self.end_mark = end_mark self.style = style class SequenceStartEvent(CollectionStartEvent): pass class SequenceEndEvent(CollectionEndEvent): pass class MappingStartEvent(CollectionStartEvent): pass class MappingEndEvent(CollectionEndEvent): pass invoke-2.2.0/invoke/vendor/yaml/loader.py000066400000000000000000000040151445356551000203700ustar00rootroot00000000000000 __all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader'] from .reader import * from .scanner import * from .parser import * from .composer import * from .constructor import * from .resolver import * class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) BaseConstructor.__init__(self) BaseResolver.__init__(self) class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) FullConstructor.__init__(self) Resolver.__init__(self) class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) SafeConstructor.__init__(self) Resolver.__init__(self) class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) Constructor.__init__(self) Resolver.__init__(self) # UnsafeLoader is the same as Loader (which is and was always unsafe on # untrusted input). Use of either Loader or UnsafeLoader should be rare, since # FullLoad should be able to load almost all YAML safely. Loader is left intact # to ensure backwards compatibility. 
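# Editor's illustrative note (not part of the vendored PyYAML source): with
# PyYAML's public API the loader class is picked per call, e.g.
#
#   yaml.load(data, Loader=SafeLoader)  # untrusted input: plain YAML types only
#   yaml.load(data, Loader=FullLoader)  # most other cases
#   yaml.load(data, Loader=Loader)      # arbitrary objects; unsafe on untrusted input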
class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) Constructor.__init__(self) Resolver.__init__(self) invoke-2.2.0/invoke/vendor/yaml/nodes.py000066400000000000000000000026401445356551000202340ustar00rootroot00000000000000 class Node(object): def __init__(self, tag, value, start_mark, end_mark): self.tag = tag self.value = value self.start_mark = start_mark self.end_mark = end_mark def __repr__(self): value = self.value #if isinstance(value, list): # if len(value) == 0: # value = '' # elif len(value) == 1: # value = '<1 item>' # else: # value = '<%d items>' % len(value) #else: # if len(value) > 75: # value = repr(value[:70]+u' ... ') # else: # value = repr(value) value = repr(value) return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) class ScalarNode(Node): id = 'scalar' def __init__(self, tag, value, start_mark=None, end_mark=None, style=None): self.tag = tag self.value = value self.start_mark = start_mark self.end_mark = end_mark self.style = style class CollectionNode(Node): def __init__(self, tag, value, start_mark=None, end_mark=None, flow_style=None): self.tag = tag self.value = value self.start_mark = start_mark self.end_mark = end_mark self.flow_style = flow_style class SequenceNode(CollectionNode): id = 'sequence' class MappingNode(CollectionNode): id = 'mapping' invoke-2.2.0/invoke/vendor/yaml/parser.py000066400000000000000000000616271445356551000204320ustar00rootroot00000000000000 # The following YAML grammar is LL(1) and is parsed by a recursive descent # parser. # # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END # implicit_document ::= block_node DOCUMENT-END* # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* # block_node_or_indentless_sequence ::= # ALIAS # | properties (block_content | indentless_block_sequence)? # | block_content # | indentless_block_sequence # block_node ::= ALIAS # | properties block_content? # | block_content # flow_node ::= ALIAS # | properties flow_content? # | flow_content # properties ::= TAG ANCHOR? | ANCHOR TAG? # block_content ::= block_collection | flow_collection | SCALAR # flow_content ::= flow_collection | SCALAR # block_collection ::= block_sequence | block_mapping # flow_collection ::= flow_sequence | flow_mapping # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ # block_mapping ::= BLOCK-MAPPING_START # ((KEY block_node_or_indentless_sequence?)? # (VALUE block_node_or_indentless_sequence?)?)* # BLOCK-END # flow_sequence ::= FLOW-SEQUENCE-START # (flow_sequence_entry FLOW-ENTRY)* # flow_sequence_entry? # FLOW-SEQUENCE-END # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? # flow_mapping ::= FLOW-MAPPING-START # (flow_mapping_entry FLOW-ENTRY)* # flow_mapping_entry? # FLOW-MAPPING-END # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
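# Editor's illustrative derivation (not part of the vendored PyYAML source):
# the document 'a: 1' is matched as stream -> implicit_document ->
# block_node -> block_content -> block_collection -> block_mapping, i.e. the
# token stream BLOCK-MAPPING-START KEY SCALAR('a') VALUE SCALAR('1') BLOCK-END.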
# # FIRST sets: # # stream: { STREAM-START } # explicit_document: { DIRECTIVE DOCUMENT-START } # implicit_document: FIRST(block_node) # block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } # flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } # block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } # flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } # block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } # flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } # block_sequence: { BLOCK-SEQUENCE-START } # block_mapping: { BLOCK-MAPPING-START } # block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } # indentless_sequence: { ENTRY } # flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } # flow_sequence: { FLOW-SEQUENCE-START } # flow_mapping: { FLOW-MAPPING-START } # flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } # flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } __all__ = ['Parser', 'ParserError'] from .error import MarkedYAMLError from .tokens import * from .events import * from .scanner import * class ParserError(MarkedYAMLError): pass class Parser: # Since writing a recursive-descendant parser is a straightforward task, we # do not give many comments here. DEFAULT_TAGS = { '!': '!', '!!': 'tag:yaml.org,2002:', } def __init__(self): self.current_event = None self.yaml_version = None self.tag_handles = {} self.states = [] self.marks = [] self.state = self.parse_stream_start def dispose(self): # Reset the state attributes (to clear self-references) self.states = [] self.state = None def check_event(self, *choices): # Check the type of the next event. if self.current_event is None: if self.state: self.current_event = self.state() if self.current_event is not None: if not choices: return True for choice in choices: if isinstance(self.current_event, choice): return True return False def peek_event(self): # Get the next event. if self.current_event is None: if self.state: self.current_event = self.state() return self.current_event def get_event(self): # Get the next event and proceed further. if self.current_event is None: if self.state: self.current_event = self.state() value = self.current_event self.current_event = None return value # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END # implicit_document ::= block_node DOCUMENT-END* # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* def parse_stream_start(self): # Parse the stream start. token = self.get_token() event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding) # Prepare the next state. self.state = self.parse_implicit_document_start return event def parse_implicit_document_start(self): # Parse an implicit document. if not self.check_token(DirectiveToken, DocumentStartToken, StreamEndToken): self.tag_handles = self.DEFAULT_TAGS token = self.peek_token() start_mark = end_mark = token.start_mark event = DocumentStartEvent(start_mark, end_mark, explicit=False) # Prepare the next state. self.states.append(self.parse_document_end) self.state = self.parse_block_node return event else: return self.parse_document_start() def parse_document_start(self): # Parse any extra document end indicators. 
while self.check_token(DocumentEndToken): self.get_token() # Parse an explicit document. if not self.check_token(StreamEndToken): token = self.peek_token() start_mark = token.start_mark version, tags = self.process_directives() if not self.check_token(DocumentStartToken): raise ParserError(None, None, "expected '<document start>', but found %r" % self.peek_token().id, self.peek_token().start_mark) token = self.get_token() end_mark = token.end_mark event = DocumentStartEvent(start_mark, end_mark, explicit=True, version=version, tags=tags) self.states.append(self.parse_document_end) self.state = self.parse_document_content else: # Parse the end of the stream. token = self.get_token() event = StreamEndEvent(token.start_mark, token.end_mark) assert not self.states assert not self.marks self.state = None return event def parse_document_end(self): # Parse the document end. token = self.peek_token() start_mark = end_mark = token.start_mark explicit = False if self.check_token(DocumentEndToken): token = self.get_token() end_mark = token.end_mark explicit = True event = DocumentEndEvent(start_mark, end_mark, explicit=explicit) # Prepare the next state. self.state = self.parse_document_start return event def parse_document_content(self): if self.check_token(DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken): event = self.process_empty_scalar(self.peek_token().start_mark) self.state = self.states.pop() return event else: return self.parse_block_node() def process_directives(self): self.yaml_version = None self.tag_handles = {} while self.check_token(DirectiveToken): token = self.get_token() if token.name == 'YAML': if self.yaml_version is not None: raise ParserError(None, None, "found duplicate YAML directive", token.start_mark) major, minor = token.value if major != 1: raise ParserError(None, None, "found incompatible YAML document (version 1.* is required)", token.start_mark) self.yaml_version = token.value elif token.name == 'TAG': handle, prefix = token.value if handle in self.tag_handles: raise ParserError(None, None, "duplicate tag handle %r" % handle, token.start_mark) self.tag_handles[handle] = prefix if self.tag_handles: value = self.yaml_version, self.tag_handles.copy() else: value = self.yaml_version, None for key in self.DEFAULT_TAGS: if key not in self.tag_handles: self.tag_handles[key] = self.DEFAULT_TAGS[key] return value # block_node_or_indentless_sequence ::= ALIAS # | properties (block_content | indentless_block_sequence)? # | block_content # | indentless_block_sequence # block_node ::= ALIAS # | properties block_content? # | block_content # flow_node ::= ALIAS # | properties flow_content? # | flow_content # properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR # flow_content ::= flow_collection | SCALAR # block_collection ::= block_sequence | block_mapping # flow_collection ::= flow_sequence | flow_mapping def parse_block_node(self): return self.parse_node(block=True) def parse_flow_node(self): return self.parse_node() def parse_block_node_or_indentless_sequence(self): return self.parse_node(block=True, indentless_sequence=True) def parse_node(self, block=False, indentless_sequence=False): if self.check_token(AliasToken): token = self.get_token() event = AliasEvent(token.value, token.start_mark, token.end_mark) self.state = self.states.pop() else: anchor = None tag = None start_mark = end_mark = tag_mark = None if self.check_token(AnchorToken): token = self.get_token() start_mark = token.start_mark end_mark = token.end_mark anchor = token.value if self.check_token(TagToken): token = self.get_token() tag_mark = token.start_mark end_mark = token.end_mark tag = token.value elif self.check_token(TagToken): token = self.get_token() start_mark = tag_mark = token.start_mark end_mark = token.end_mark tag = token.value if self.check_token(AnchorToken): token = self.get_token() end_mark = token.end_mark anchor = token.value if tag is not None: handle, suffix = tag if handle is not None: if handle not in self.tag_handles: raise ParserError("while parsing a node", start_mark, "found undefined tag handle %r" % handle, tag_mark) tag = self.tag_handles[handle]+suffix else: tag = suffix #if tag == '!': # raise ParserError("while parsing a node", start_mark, # "found non-specific tag '!'", tag_mark, # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") if start_mark is None: start_mark = end_mark = self.peek_token().start_mark event = None implicit = (tag is None or tag == '!') if indentless_sequence and self.check_token(BlockEntryToken): end_mark = self.peek_token().end_mark event = SequenceStartEvent(anchor, tag, implicit, start_mark, end_mark) self.state = self.parse_indentless_sequence_entry else: if self.check_token(ScalarToken): token = self.get_token() end_mark = token.end_mark if (token.plain and tag is None) or tag == '!': implicit = (True, False) elif tag is None: implicit = (False, True) else: implicit = (False, False) event = ScalarEvent(anchor, tag, implicit, token.value, start_mark, end_mark, style=token.style) self.state = self.states.pop() elif self.check_token(FlowSequenceStartToken): end_mark = self.peek_token().end_mark event = SequenceStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=True) self.state = self.parse_flow_sequence_first_entry elif self.check_token(FlowMappingStartToken): end_mark = self.peek_token().end_mark event = MappingStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=True) self.state = self.parse_flow_mapping_first_key elif block and self.check_token(BlockSequenceStartToken): end_mark = self.peek_token().start_mark event = SequenceStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=False) self.state = self.parse_block_sequence_first_entry elif block and self.check_token(BlockMappingStartToken): end_mark = self.peek_token().start_mark event = MappingStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=False) self.state = self.parse_block_mapping_first_key elif anchor is not None or tag is not None: # Empty scalars are allowed even if a tag or an anchor is # specified. 
event = ScalarEvent(anchor, tag, (implicit, False), '', start_mark, end_mark) self.state = self.states.pop() else: if block: node = 'block' else: node = 'flow' token = self.peek_token() raise ParserError("while parsing a %s node" % node, start_mark, "expected the node content, but found %r" % token.id, token.start_mark) return event # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END def parse_block_sequence_first_entry(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_block_sequence_entry() def parse_block_sequence_entry(self): if self.check_token(BlockEntryToken): token = self.get_token() if not self.check_token(BlockEntryToken, BlockEndToken): self.states.append(self.parse_block_sequence_entry) return self.parse_block_node() else: self.state = self.parse_block_sequence_entry return self.process_empty_scalar(token.end_mark) if not self.check_token(BlockEndToken): token = self.peek_token() raise ParserError("while parsing a block collection", self.marks[-1], "expected <block end>, but found %r" % token.id, token.start_mark) token = self.get_token() event = SequenceEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ def parse_indentless_sequence_entry(self): if self.check_token(BlockEntryToken): token = self.get_token() if not self.check_token(BlockEntryToken, KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_indentless_sequence_entry) return self.parse_block_node() else: self.state = self.parse_indentless_sequence_entry return self.process_empty_scalar(token.end_mark) token = self.peek_token() event = SequenceEndEvent(token.start_mark, token.start_mark) self.state = self.states.pop() return event # block_mapping ::= BLOCK-MAPPING-START # ((KEY block_node_or_indentless_sequence?)? # (VALUE block_node_or_indentless_sequence?)?)* # BLOCK-END def parse_block_mapping_first_key(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_block_mapping_key() def parse_block_mapping_key(self): if self.check_token(KeyToken): token = self.get_token() if not self.check_token(KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_block_mapping_value) return self.parse_block_node_or_indentless_sequence() else: self.state = self.parse_block_mapping_value return self.process_empty_scalar(token.end_mark) if not self.check_token(BlockEndToken): token = self.peek_token() raise ParserError("while parsing a block mapping", self.marks[-1], "expected <block end>, but found %r" % token.id, token.start_mark) token = self.get_token() event = MappingEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_block_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_block_mapping_key) return self.parse_block_node_or_indentless_sequence() else: self.state = self.parse_block_mapping_key return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_block_mapping_key token = self.peek_token() return self.process_empty_scalar(token.start_mark) # flow_sequence ::= FLOW-SEQUENCE-START # (flow_sequence_entry FLOW-ENTRY)* # flow_sequence_entry? # FLOW-SEQUENCE-END # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# # Note that while production rules for both flow_sequence_entry and # flow_mapping_entry are equal, their interpretations are different. # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` # generate an inline mapping (set syntax). def parse_flow_sequence_first_entry(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_flow_sequence_entry(first=True) def parse_flow_sequence_entry(self, first=False): if not self.check_token(FlowSequenceEndToken): if not first: if self.check_token(FlowEntryToken): self.get_token() else: token = self.peek_token() raise ParserError("while parsing a flow sequence", self.marks[-1], "expected ',' or ']', but got %r" % token.id, token.start_mark) if self.check_token(KeyToken): token = self.peek_token() event = MappingStartEvent(None, None, True, token.start_mark, token.end_mark, flow_style=True) self.state = self.parse_flow_sequence_entry_mapping_key return event elif not self.check_token(FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry) return self.parse_flow_node() token = self.get_token() event = SequenceEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_flow_sequence_entry_mapping_key(self): token = self.get_token() if not self.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry_mapping_value) return self.parse_flow_node() else: self.state = self.parse_flow_sequence_entry_mapping_value return self.process_empty_scalar(token.end_mark) def parse_flow_sequence_entry_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(FlowEntryToken, FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry_mapping_end) return self.parse_flow_node() else: self.state = self.parse_flow_sequence_entry_mapping_end return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_flow_sequence_entry_mapping_end token = self.peek_token() return self.process_empty_scalar(token.start_mark) def parse_flow_sequence_entry_mapping_end(self): self.state = self.parse_flow_sequence_entry token = self.peek_token() return MappingEndEvent(token.start_mark, token.start_mark) # flow_mapping ::= FLOW-MAPPING-START # (flow_mapping_entry FLOW-ENTRY)* # flow_mapping_entry? # FLOW-MAPPING-END # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
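# Editor's illustrative note (not part of the vendored PyYAML source): the
# KEY branch of flow_sequence_entry is what lets '[a: 1]' parse as a
# one-element sequence whose single item is the implicit mapping {a: 1}.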
def parse_flow_mapping_first_key(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_flow_mapping_key(first=True) def parse_flow_mapping_key(self, first=False): if not self.check_token(FlowMappingEndToken): if not first: if self.check_token(FlowEntryToken): self.get_token() else: token = self.peek_token() raise ParserError("while parsing a flow mapping", self.marks[-1], "expected ',' or '}', but got %r" % token.id, token.start_mark) if self.check_token(KeyToken): token = self.get_token() if not self.check_token(ValueToken, FlowEntryToken, FlowMappingEndToken): self.states.append(self.parse_flow_mapping_value) return self.parse_flow_node() else: self.state = self.parse_flow_mapping_value return self.process_empty_scalar(token.end_mark) elif not self.check_token(FlowMappingEndToken): self.states.append(self.parse_flow_mapping_empty_value) return self.parse_flow_node() token = self.get_token() event = MappingEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_flow_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(FlowEntryToken, FlowMappingEndToken): self.states.append(self.parse_flow_mapping_key) return self.parse_flow_node() else: self.state = self.parse_flow_mapping_key return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_flow_mapping_key token = self.peek_token() return self.process_empty_scalar(token.start_mark) def parse_flow_mapping_empty_value(self): self.state = self.parse_flow_mapping_key return self.process_empty_scalar(self.peek_token().start_mark) def process_empty_scalar(self, mark): return ScalarEvent(None, None, (True, False), '', mark, mark) invoke-2.2.0/invoke/vendor/yaml/reader.py000066400000000000000000000152121445356551000203650ustar00rootroot00000000000000# This module contains abstractions for the input stream. You don't have to # look further; there is no pretty code here. # # We define two classes here. # # Mark(source, line, column) # It's just a record and its only use is producing nice error messages. # Parser does not use it for any other purposes. # # Reader(source, data) # Reader determines the encoding of `data` and converts it to unicode. # Reader provides the following methods and attributes: # reader.peek(length=1) - return the next `length` characters # reader.forward(length=1) - move the current position `length` characters ahead. # reader.index - the number of the current character. # reader.line, reader.column - the line and the column of the current character. __all__ = ['Reader', 'ReaderError'] from .error import YAMLError, Mark import codecs, re class ReaderError(YAMLError): def __init__(self, name, position, character, encoding, reason): self.name = name self.character = character self.position = position self.encoding = encoding self.reason = reason def __str__(self): if isinstance(self.character, bytes): return "'%s' codec can't decode byte #x%02x: %s\n" \ " in \"%s\", position %d" \ % (self.encoding, ord(self.character), self.reason, self.name, self.position) else: return "unacceptable character #x%04x: %s\n" \ " in \"%s\", position %d" \ % (self.character, self.reason, self.name, self.position) class Reader(object): # Reader: # - determines the data encoding and converts it to a unicode string, # - checks if characters are in allowed range, # - adds '\0' to the end.
# Reader accepts # - a `bytes` object, # - a `str` object, # - a file-like object with its `read` method returning `str`, # - a file-like object with its `read` method returning `unicode`. # Yeah, it's ugly and slow. def __init__(self, stream): self.name = None self.stream = None self.stream_pointer = 0 self.eof = True self.buffer = '' self.pointer = 0 self.raw_buffer = None self.raw_decode = None self.encoding = None self.index = 0 self.line = 0 self.column = 0 if isinstance(stream, str): self.name = "<unicode string>" self.check_printable(stream) self.buffer = stream+'\0' elif isinstance(stream, bytes): self.name = "<byte string>" self.raw_buffer = stream self.determine_encoding() else: self.stream = stream self.name = getattr(stream, 'name', "<file>") self.eof = False self.raw_buffer = None self.determine_encoding() def peek(self, index=0): try: return self.buffer[self.pointer+index] except IndexError: self.update(index+1) return self.buffer[self.pointer+index] def prefix(self, length=1): if self.pointer+length >= len(self.buffer): self.update(length) return self.buffer[self.pointer:self.pointer+length] def forward(self, length=1): if self.pointer+length+1 >= len(self.buffer): self.update(length+1) while length: ch = self.buffer[self.pointer] self.pointer += 1 self.index += 1 if ch in '\n\x85\u2028\u2029' \ or (ch == '\r' and self.buffer[self.pointer] != '\n'): self.line += 1 self.column = 0 elif ch != '\uFEFF': self.column += 1 length -= 1 def get_mark(self): if self.stream is None: return Mark(self.name, self.index, self.line, self.column, self.buffer, self.pointer) else: return Mark(self.name, self.index, self.line, self.column, None, None) def determine_encoding(self): while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): self.update_raw() if isinstance(self.raw_buffer, bytes): if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): self.raw_decode = codecs.utf_16_le_decode self.encoding = 'utf-16-le' elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): self.raw_decode = codecs.utf_16_be_decode self.encoding = 'utf-16-be' else: self.raw_decode = codecs.utf_8_decode self.encoding = 'utf-8' self.update(1) NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]') def check_printable(self, data): match = self.NON_PRINTABLE.search(data) if match: character = match.group() position = self.index+(len(self.buffer)-self.pointer)+match.start() raise ReaderError(self.name, position, ord(character), 'unicode', "special characters are not allowed") def update(self, length): if self.raw_buffer is None: return self.buffer = self.buffer[self.pointer:] self.pointer = 0 while len(self.buffer) < length: if not self.eof: self.update_raw() if self.raw_decode is not None: try: data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof) except UnicodeDecodeError as exc: character = self.raw_buffer[exc.start] if self.stream is not None: position = self.stream_pointer-len(self.raw_buffer)+exc.start else: position = exc.start raise ReaderError(self.name, position, character, exc.encoding, exc.reason) else: data = self.raw_buffer converted = len(data) self.check_printable(data) self.buffer += data self.raw_buffer = self.raw_buffer[converted:] if self.eof: self.buffer += '\0' self.raw_buffer = None break def update_raw(self, size=4096): data = self.stream.read(size) if self.raw_buffer is None: self.raw_buffer = data else: self.raw_buffer += data self.stream_pointer += len(data) if not data: self.eof = True
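# Editor's illustrative sketch (not part of the vendored PyYAML source):
# exercising the Reader API described above on an in-memory string:
#
#   reader = Reader('a: 1\n')
#   reader.peek()      # -> 'a'
#   reader.prefix(4)   # -> 'a: 1'
#   reader.forward(3)  # now positioned on '1'; reader.column == 3
#   reader.get_mark()  # Mark with name='<unicode string>', line=0, column=3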
invoke-2.2.0/invoke/vendor/yaml/representer.py000066400000000000000000000335501445356551000214660ustar00rootroot00000000000000 __all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', 'RepresenterError'] from .error import * from .nodes import * import datetime, copyreg, types, base64, collections class RepresenterError(YAMLError): pass class BaseRepresenter: yaml_representers = {} yaml_multi_representers = {} def __init__(self, default_style=None, default_flow_style=False, sort_keys=True): self.default_style = default_style self.sort_keys = sort_keys self.default_flow_style = default_flow_style self.represented_objects = {} self.object_keeper = [] self.alias_key = None def represent(self, data): node = self.represent_data(data) self.serialize(node) self.represented_objects = {} self.object_keeper = [] self.alias_key = None def represent_data(self, data): if self.ignore_aliases(data): self.alias_key = None else: self.alias_key = id(data) if self.alias_key is not None: if self.alias_key in self.represented_objects: node = self.represented_objects[self.alias_key] #if node is None: # raise RepresenterError("recursive objects are not allowed: %r" % data) return node #self.represented_objects[alias_key] = None self.object_keeper.append(data) data_types = type(data).__mro__ if data_types[0] in self.yaml_representers: node = self.yaml_representers[data_types[0]](self, data) else: for data_type in data_types: if data_type in self.yaml_multi_representers: node = self.yaml_multi_representers[data_type](self, data) break else: if None in self.yaml_multi_representers: node = self.yaml_multi_representers[None](self, data) elif None in self.yaml_representers: node = self.yaml_representers[None](self, data) else: node = ScalarNode(None, str(data)) #if alias_key is not None: # self.represented_objects[alias_key] = node return node @classmethod def add_representer(cls, data_type, representer): if not 'yaml_representers' in cls.__dict__: cls.yaml_representers = cls.yaml_representers.copy() cls.yaml_representers[data_type] = representer @classmethod def add_multi_representer(cls, data_type, representer): if not 'yaml_multi_representers' in cls.__dict__: cls.yaml_multi_representers = cls.yaml_multi_representers.copy() cls.yaml_multi_representers[data_type] = representer def represent_scalar(self, tag, value, style=None): if style is None: style = self.default_style node = ScalarNode(tag, value, style=style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node return node def represent_sequence(self, tag, sequence, flow_style=None): value = [] node = SequenceNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True for item in sequence: node_item = self.represent_data(item) if not (isinstance(node_item, ScalarNode) and not node_item.style): best_style = False value.append(node_item) if flow_style is None: if self.default_flow_style is not None: node.flow_style = self.default_flow_style else: node.flow_style = best_style return node def represent_mapping(self, tag, mapping, flow_style=None): value = [] node = MappingNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True if hasattr(mapping, 'items'): mapping = list(mapping.items()) if self.sort_keys: try: mapping = sorted(mapping) except TypeError: pass for item_key, item_value in mapping: node_key = self.represent_data(item_key) node_value = 
self.represent_data(item_value) if not (isinstance(node_key, ScalarNode) and not node_key.style): best_style = False if not (isinstance(node_value, ScalarNode) and not node_value.style): best_style = False value.append((node_key, node_value)) if flow_style is None: if self.default_flow_style is not None: node.flow_style = self.default_flow_style else: node.flow_style = best_style return node def ignore_aliases(self, data): return False class SafeRepresenter(BaseRepresenter): def ignore_aliases(self, data): if data is None: return True if isinstance(data, tuple) and data == (): return True if isinstance(data, (str, bytes, bool, int, float)): return True def represent_none(self, data): return self.represent_scalar('tag:yaml.org,2002:null', 'null') def represent_str(self, data): return self.represent_scalar('tag:yaml.org,2002:str', data) def represent_binary(self, data): if hasattr(base64, 'encodebytes'): data = base64.encodebytes(data).decode('ascii') else: data = base64.encodestring(data).decode('ascii') return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|') def represent_bool(self, data): if data: value = 'true' else: value = 'false' return self.represent_scalar('tag:yaml.org,2002:bool', value) def represent_int(self, data): return self.represent_scalar('tag:yaml.org,2002:int', str(data)) inf_value = 1e300 while repr(inf_value) != repr(inf_value*inf_value): inf_value *= inf_value def represent_float(self, data): if data != data or (data == 0.0 and data == 1.0): value = '.nan' elif data == self.inf_value: value = '.inf' elif data == -self.inf_value: value = '-.inf' else: value = repr(data).lower() # Note that in some cases `repr(data)` represents a float number # without the decimal parts. For instance: # >>> repr(1e17) # '1e17' # Unfortunately, this is not a valid float representation according # to the definition of the `!!float` tag. We fix this by adding # '.0' before the 'e' symbol. if '.' 
not in value and 'e' in value: value = value.replace('e', '.0e', 1) return self.represent_scalar('tag:yaml.org,2002:float', value) def represent_list(self, data): #pairs = (len(data) > 0 and isinstance(data, list)) #if pairs: # for item in data: # if not isinstance(item, tuple) or len(item) != 2: # pairs = False # break #if not pairs: return self.represent_sequence('tag:yaml.org,2002:seq', data) #value = [] #for item_key, item_value in data: # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', # [(item_key, item_value)])) #return SequenceNode(u'tag:yaml.org,2002:pairs', value) def represent_dict(self, data): return self.represent_mapping('tag:yaml.org,2002:map', data) def represent_set(self, data): value = {} for key in data: value[key] = None return self.represent_mapping('tag:yaml.org,2002:set', value) def represent_date(self, data): value = data.isoformat() return self.represent_scalar('tag:yaml.org,2002:timestamp', value) def represent_datetime(self, data): value = data.isoformat(' ') return self.represent_scalar('tag:yaml.org,2002:timestamp', value) def represent_yaml_object(self, tag, data, cls, flow_style=None): if hasattr(data, '__getstate__'): state = data.__getstate__() else: state = data.__dict__.copy() return self.represent_mapping(tag, state, flow_style=flow_style) def represent_undefined(self, data): raise RepresenterError("cannot represent an object", data) SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none) SafeRepresenter.add_representer(str, SafeRepresenter.represent_str) SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary) SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool) SafeRepresenter.add_representer(int, SafeRepresenter.represent_int) SafeRepresenter.add_representer(float, SafeRepresenter.represent_float) SafeRepresenter.add_representer(list, SafeRepresenter.represent_list) SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list) SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict) SafeRepresenter.add_representer(set, SafeRepresenter.represent_set) SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date) SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime) SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined) class Representer(SafeRepresenter): def represent_complex(self, data): if data.imag == 0.0: data = '%r' % data.real elif data.real == 0.0: data = '%rj' % data.imag elif data.imag > 0: data = '%r+%rj' % (data.real, data.imag) else: data = '%r%rj' % (data.real, data.imag) return self.represent_scalar('tag:yaml.org,2002:python/complex', data) def represent_tuple(self, data): return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) def represent_name(self, data): name = '%s.%s' % (data.__module__, data.__name__) return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '') def represent_module(self, data): return self.represent_scalar( 'tag:yaml.org,2002:python/module:'+data.__name__, '') def represent_object(self, data): # We use __reduce__ API to save the data. data.__reduce__ returns # a tuple of length 2-5: # (function, args, state, listitems, dictitems) # For reconstructing, we calls function(*args), then set its state, # listitems, and dictitems if they are not None. # A special case is when function.__name__ == '__newobj__'. In this # case we create the object with args[0].__new__(*args). 
# Another special case is when __reduce__ returns a string - we don't # support it. # We produce a !!python/object, !!python/object/new or # !!python/object/apply node. cls = type(data) if cls in copyreg.dispatch_table: reduce = copyreg.dispatch_table[cls](data) elif hasattr(data, '__reduce_ex__'): reduce = data.__reduce_ex__(2) elif hasattr(data, '__reduce__'): reduce = data.__reduce__() else: raise RepresenterError("cannot represent an object", data) reduce = (list(reduce)+[None]*5)[:5] function, args, state, listitems, dictitems = reduce args = list(args) if state is None: state = {} if listitems is not None: listitems = list(listitems) if dictitems is not None: dictitems = dict(dictitems) if function.__name__ == '__newobj__': function = args[0] args = args[1:] tag = 'tag:yaml.org,2002:python/object/new:' newobj = True else: tag = 'tag:yaml.org,2002:python/object/apply:' newobj = False function_name = '%s.%s' % (function.__module__, function.__name__) if not args and not listitems and not dictitems \ and isinstance(state, dict) and newobj: return self.represent_mapping( 'tag:yaml.org,2002:python/object:'+function_name, state) if not listitems and not dictitems \ and isinstance(state, dict) and not state: return self.represent_sequence(tag+function_name, args) value = {} if args: value['args'] = args if state or not isinstance(state, dict): value['state'] = state if listitems: value['listitems'] = listitems if dictitems: value['dictitems'] = dictitems return self.represent_mapping(tag+function_name, value) def represent_ordered_dict(self, data): # Provide uniform representation across different Python versions. data_type = type(data) tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \ % (data_type.__module__, data_type.__name__) items = [[key, value] for key, value in data.items()] return self.represent_sequence(tag, [items]) Representer.add_representer(complex, Representer.represent_complex) Representer.add_representer(tuple, Representer.represent_tuple) Representer.add_representer(type, Representer.represent_name) Representer.add_representer(collections.OrderedDict, Representer.represent_ordered_dict) Representer.add_representer(types.FunctionType, Representer.represent_name) Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name) Representer.add_representer(types.ModuleType, Representer.represent_module) Representer.add_multi_representer(object, Representer.represent_object) invoke-2.2.0/invoke/vendor/yaml/resolver.py000066400000000000000000000214471445356551000207730ustar00rootroot00000000000000 __all__ = ['BaseResolver', 'Resolver'] from .error import * from .nodes import * import re class ResolverError(YAMLError): pass class BaseResolver: DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' yaml_implicit_resolvers = {} yaml_path_resolvers = {} def __init__(self): self.resolver_exact_paths = [] self.resolver_prefix_paths = [] @classmethod def add_implicit_resolver(cls, tag, regexp, first): if not 'yaml_implicit_resolvers' in cls.__dict__: implicit_resolvers = {} for key in cls.yaml_implicit_resolvers: implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] cls.yaml_implicit_resolvers = implicit_resolvers if first is None: first = [None] for ch in first: cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) @classmethod def add_path_resolver(cls, tag, path, kind=None): # Note: `add_path_resolver` is experimental. The API could be changed. 
# `new_path` is a pattern that is matched against the path from the # root to the node that is being considered. `node_path` elements are # tuples `(node_check, index_check)`. `node_check` is a node class: # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` # matches any kind of a node. `index_check` could be `None`, a boolean # value, a string value, or a number. `None` and `False` match against # any _value_ of sequence and mapping nodes. `True` matches against # any _key_ of a mapping node. A string `index_check` matches against # a mapping value that corresponds to a scalar key which content is # equal to the `index_check` value. An integer `index_check` matches # against a sequence value with the index equal to `index_check`. if not 'yaml_path_resolvers' in cls.__dict__: cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() new_path = [] for element in path: if isinstance(element, (list, tuple)): if len(element) == 2: node_check, index_check = element elif len(element) == 1: node_check = element[0] index_check = True else: raise ResolverError("Invalid path element: %s" % element) else: node_check = None index_check = element if node_check is str: node_check = ScalarNode elif node_check is list: node_check = SequenceNode elif node_check is dict: node_check = MappingNode elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ and not isinstance(node_check, str) \ and node_check is not None: raise ResolverError("Invalid node checker: %s" % node_check) if not isinstance(index_check, (str, int)) \ and index_check is not None: raise ResolverError("Invalid index checker: %s" % index_check) new_path.append((node_check, index_check)) if kind is str: kind = ScalarNode elif kind is list: kind = SequenceNode elif kind is dict: kind = MappingNode elif kind not in [ScalarNode, SequenceNode, MappingNode] \ and kind is not None: raise ResolverError("Invalid node kind: %s" % kind) cls.yaml_path_resolvers[tuple(new_path), kind] = tag def descend_resolver(self, current_node, current_index): if not self.yaml_path_resolvers: return exact_paths = {} prefix_paths = [] if current_node: depth = len(self.resolver_prefix_paths) for path, kind in self.resolver_prefix_paths[-1]: if self.check_resolver_prefix(depth, path, kind, current_node, current_index): if len(path) > depth: prefix_paths.append((path, kind)) else: exact_paths[kind] = self.yaml_path_resolvers[path, kind] else: for path, kind in self.yaml_path_resolvers: if not path: exact_paths[kind] = self.yaml_path_resolvers[path, kind] else: prefix_paths.append((path, kind)) self.resolver_exact_paths.append(exact_paths) self.resolver_prefix_paths.append(prefix_paths) def ascend_resolver(self): if not self.yaml_path_resolvers: return self.resolver_exact_paths.pop() self.resolver_prefix_paths.pop() def check_resolver_prefix(self, depth, path, kind, current_node, current_index): node_check, index_check = path[depth-1] if isinstance(node_check, str): if current_node.tag != node_check: return elif node_check is not None: if not isinstance(current_node, node_check): return if index_check is True and current_index is not None: return if (index_check is False or index_check is None) \ and current_index is None: return if isinstance(index_check, str): if not (isinstance(current_index, ScalarNode) and index_check == current_index.value): return elif isinstance(index_check, int) and not isinstance(index_check, bool): if index_check != current_index: return return True def resolve(self, kind, value, implicit): if kind is ScalarNode and 
implicit[0]: if value == '': resolvers = self.yaml_implicit_resolvers.get('', []) else: resolvers = self.yaml_implicit_resolvers.get(value[0], []) wildcard_resolvers = self.yaml_implicit_resolvers.get(None, []) for tag, regexp in resolvers + wildcard_resolvers: if regexp.match(value): return tag implicit = implicit[1] if self.yaml_path_resolvers: exact_paths = self.resolver_exact_paths[-1] if kind in exact_paths: return exact_paths[kind] if None in exact_paths: return exact_paths[None] if kind is ScalarNode: return self.DEFAULT_SCALAR_TAG elif kind is SequenceNode: return self.DEFAULT_SEQUENCE_TAG elif kind is MappingNode: return self.DEFAULT_MAPPING_TAG class Resolver(BaseResolver): pass Resolver.add_implicit_resolver( 'tag:yaml.org,2002:bool', re.compile(r'''^(?:yes|Yes|YES|no|No|NO |true|True|TRUE|false|False|FALSE |on|On|ON|off|Off|OFF)$''', re.X), list('yYnNtTfFoO')) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:float', re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? |\.[0-9_]+(?:[eE][-+][0-9]+)? |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* |[-+]?\.(?:inf|Inf|INF) |\.(?:nan|NaN|NAN))$''', re.X), list('-+0123456789.')) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:int', re.compile(r'''^(?:[-+]?0b[0-1_]+ |[-+]?0[0-7_]+ |[-+]?(?:0|[1-9][0-9_]*) |[-+]?0x[0-9a-fA-F_]+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), list('-+0123456789')) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:merge', re.compile(r'^(?:<<)$'), ['<']) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:null', re.compile(r'''^(?: ~ |null|Null|NULL | )$''', re.X), ['~', 'n', 'N', '']) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:timestamp', re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? (?:[Tt]|[ \t]+)[0-9][0-9]? :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), list('0123456789')) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:value', re.compile(r'^(?:=)$'), ['=']) # The following resolver is only for documentation purposes. It cannot work # because plain scalars cannot start with '!', '&', or '*'. Resolver.add_implicit_resolver( 'tag:yaml.org,2002:yaml', re.compile(r'^(?:!|&|\*)$'), list('!&*')) invoke-2.2.0/invoke/vendor/yaml/scanner.py000066400000000000000000001441151445356551000205610ustar00rootroot00000000000000 # Scanner produces tokens of the following types: # STREAM-START # STREAM-END # DIRECTIVE(name, value) # DOCUMENT-START # DOCUMENT-END # BLOCK-SEQUENCE-START # BLOCK-MAPPING-START # BLOCK-END # FLOW-SEQUENCE-START # FLOW-MAPPING-START # FLOW-SEQUENCE-END # FLOW-MAPPING-END # BLOCK-ENTRY # FLOW-ENTRY # KEY # VALUE # ALIAS(value) # ANCHOR(value) # TAG(value) # SCALAR(value, plain, style) # # Read comments in the Scanner code for more details. # __all__ = ['Scanner', 'ScannerError'] from .error import MarkedYAMLError from .tokens import * class ScannerError(MarkedYAMLError): pass class SimpleKey: # See below simple keys treatment. def __init__(self, token_number, required, index, line, column, mark): self.token_number = token_number self.required = required self.index = index self.line = line self.column = column self.mark = mark class Scanner: def __init__(self): """Initialize the scanner.""" # It is assumed that Scanner and Reader will have a common descendant. # Reader do the dirty work of checking for BOM and converting the # input data to Unicode. It also adds NUL to the end. 
# # Reader supports the following methods # self.peek(i=0) # peek the next i-th character # self.prefix(l=1) # peek the next l characters # self.forward(l=1) # read the next l characters and move the pointer. # Had we reached the end of the stream? self.done = False # The number of unclosed '{' and '['. `flow_level == 0` means block # context. self.flow_level = 0 # List of processed tokens that are not yet emitted. self.tokens = [] # Add the STREAM-START token. self.fetch_stream_start() # Number of tokens that were emitted through the `get_token` method. self.tokens_taken = 0 # The current indentation level. self.indent = -1 # Past indentation levels. self.indents = [] # Variables related to simple keys treatment. # A simple key is a key that is not denoted by the '?' indicator. # Example of simple keys: # --- # block simple key: value # ? not a simple key: # : { flow simple key: value } # We emit the KEY token before all keys, so when we find a potential # simple key, we try to locate the corresponding ':' indicator. # Simple keys should be limited to a single line and 1024 characters. # Can a simple key start at the current position? A simple key may # start: # - at the beginning of the line, not counting indentation spaces # (in block context), # - after '{', '[', ',' (in the flow context), # - after '?', ':', '-' (in the block context). # In the block context, this flag also signifies if a block collection # may start at the current position. self.allow_simple_key = True # Keep track of possible simple keys. This is a dictionary. The key # is `flow_level`; there can be no more that one possible simple key # for each level. The value is a SimpleKey record: # (token_number, required, index, line, column, mark) # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), # '[', or '{' tokens. self.possible_simple_keys = {} # Public methods. def check_token(self, *choices): # Check if the next token is one of the given types. while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: if not choices: return True for choice in choices: if isinstance(self.tokens[0], choice): return True return False def peek_token(self): # Return the next token, but do not delete if from the queue. # Return None if no more tokens. while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: return self.tokens[0] else: return None def get_token(self): # Return the next token. while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: self.tokens_taken += 1 return self.tokens.pop(0) # Private methods. def need_more_tokens(self): if self.done: return False if not self.tokens: return True # The current token may be a potential simple key, so we # need to look further. self.stale_possible_simple_keys() if self.next_possible_simple_key() == self.tokens_taken: return True def fetch_more_tokens(self): # Eat whitespaces and comments until we reach the next token. self.scan_to_next_token() # Remove obsolete possible simple keys. self.stale_possible_simple_keys() # Compare the current indentation and column. It may add some tokens # and decrease the current indentation level. self.unwind_indent(self.column) # Peek the next character. ch = self.peek() # Is it the end of stream? if ch == '\0': return self.fetch_stream_end() # Is it a directive? if ch == '%' and self.check_directive(): return self.fetch_directive() # Is it the document start? if ch == '-' and self.check_document_start(): return self.fetch_document_start() # Is it the document end? if ch == '.' 
and self.check_document_end(): return self.fetch_document_end() # TODO: support for BOM within a stream. #if ch == '\uFEFF': # return self.fetch_bom() <-- issue BOMToken # Note: the order of the following checks is NOT significant. # Is it the flow sequence start indicator? if ch == '[': return self.fetch_flow_sequence_start() # Is it the flow mapping start indicator? if ch == '{': return self.fetch_flow_mapping_start() # Is it the flow sequence end indicator? if ch == ']': return self.fetch_flow_sequence_end() # Is it the flow mapping end indicator? if ch == '}': return self.fetch_flow_mapping_end() # Is it the flow entry indicator? if ch == ',': return self.fetch_flow_entry() # Is it the block entry indicator? if ch == '-' and self.check_block_entry(): return self.fetch_block_entry() # Is it the key indicator? if ch == '?' and self.check_key(): return self.fetch_key() # Is it the value indicator? if ch == ':' and self.check_value(): return self.fetch_value() # Is it an alias? if ch == '*': return self.fetch_alias() # Is it an anchor? if ch == '&': return self.fetch_anchor() # Is it a tag? if ch == '!': return self.fetch_tag() # Is it a literal scalar? if ch == '|' and not self.flow_level: return self.fetch_literal() # Is it a folded scalar? if ch == '>' and not self.flow_level: return self.fetch_folded() # Is it a single quoted scalar? if ch == '\'': return self.fetch_single() # Is it a double quoted scalar? if ch == '\"': return self.fetch_double() # It must be a plain scalar then. if self.check_plain(): return self.fetch_plain() # No? It's an error. Let's produce a nice error message. raise ScannerError("while scanning for the next token", None, "found character %r that cannot start any token" % ch, self.get_mark()) # Simple keys treatment. def next_possible_simple_key(self): # Return the number of the nearest possible simple key. Actually we # don't need to loop through the whole dictionary. We may replace it # with the following code: # if not self.possible_simple_keys: # return None # return self.possible_simple_keys[ # min(self.possible_simple_keys.keys())].token_number min_token_number = None for level in self.possible_simple_keys: key = self.possible_simple_keys[level] if min_token_number is None or key.token_number < min_token_number: min_token_number = key.token_number return min_token_number def stale_possible_simple_keys(self): # Remove entries that are no longer possible simple keys. According to # the YAML specification, simple keys # - should be limited to a single line, # - should be no longer than 1024 characters. # Disabling this procedure will allow simple keys of any length and # height (may cause problems if indentation is broken though). for level in list(self.possible_simple_keys): key = self.possible_simple_keys[level] if key.line != self.line \ or self.index-key.index > 1024: if key.required: raise ScannerError("while scanning a simple key", key.mark, "could not find expected ':'", self.get_mark()) del self.possible_simple_keys[level] def save_possible_simple_key(self): # The next token may start a simple key. We check if it's possible # and save its position. This function is called for # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. # Check if a simple key is required at the current position. required = not self.flow_level and self.indent == self.column # The next token might be a simple key. Let's save it's number and # position. 
if self.allow_simple_key: self.remove_possible_simple_key() token_number = self.tokens_taken+len(self.tokens) key = SimpleKey(token_number, required, self.index, self.line, self.column, self.get_mark()) self.possible_simple_keys[self.flow_level] = key def remove_possible_simple_key(self): # Remove the saved possible key position at the current flow level. if self.flow_level in self.possible_simple_keys: key = self.possible_simple_keys[self.flow_level] if key.required: raise ScannerError("while scanning a simple key", key.mark, "could not find expected ':'", self.get_mark()) del self.possible_simple_keys[self.flow_level] # Indentation functions. def unwind_indent(self, column): ## In flow context, tokens should respect indentation. ## Actually the condition should be `self.indent >= column` according to ## the spec. But this condition will prohibit intuitively correct ## constructions such as ## key : { ## } #if self.flow_level and self.indent > column: # raise ScannerError(None, None, # "invalid indentation or unclosed '[' or '{'", # self.get_mark()) # In the flow context, indentation is ignored. We make the scanner less # restrictive then specification requires. if self.flow_level: return # In block context, we may need to issue the BLOCK-END tokens. while self.indent > column: mark = self.get_mark() self.indent = self.indents.pop() self.tokens.append(BlockEndToken(mark, mark)) def add_indent(self, column): # Check if we need to increase indentation. if self.indent < column: self.indents.append(self.indent) self.indent = column return True return False # Fetchers. def fetch_stream_start(self): # We always add STREAM-START as the first token and STREAM-END as the # last token. # Read the token. mark = self.get_mark() # Add STREAM-START. self.tokens.append(StreamStartToken(mark, mark, encoding=self.encoding)) def fetch_stream_end(self): # Set the current indentation to -1. self.unwind_indent(-1) # Reset simple keys. self.remove_possible_simple_key() self.allow_simple_key = False self.possible_simple_keys = {} # Read the token. mark = self.get_mark() # Add STREAM-END. self.tokens.append(StreamEndToken(mark, mark)) # The steam is finished. self.done = True def fetch_directive(self): # Set the current indentation to -1. self.unwind_indent(-1) # Reset simple keys. self.remove_possible_simple_key() self.allow_simple_key = False # Scan and add DIRECTIVE. self.tokens.append(self.scan_directive()) def fetch_document_start(self): self.fetch_document_indicator(DocumentStartToken) def fetch_document_end(self): self.fetch_document_indicator(DocumentEndToken) def fetch_document_indicator(self, TokenClass): # Set the current indentation to -1. self.unwind_indent(-1) # Reset simple keys. Note that there could not be a block collection # after '---'. self.remove_possible_simple_key() self.allow_simple_key = False # Add DOCUMENT-START or DOCUMENT-END. start_mark = self.get_mark() self.forward(3) end_mark = self.get_mark() self.tokens.append(TokenClass(start_mark, end_mark)) def fetch_flow_sequence_start(self): self.fetch_flow_collection_start(FlowSequenceStartToken) def fetch_flow_mapping_start(self): self.fetch_flow_collection_start(FlowMappingStartToken) def fetch_flow_collection_start(self, TokenClass): # '[' and '{' may start a simple key. self.save_possible_simple_key() # Increase the flow level. self.flow_level += 1 # Simple keys are allowed after '[' and '{'. self.allow_simple_key = True # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. 
start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(TokenClass(start_mark, end_mark)) def fetch_flow_sequence_end(self): self.fetch_flow_collection_end(FlowSequenceEndToken) def fetch_flow_mapping_end(self): self.fetch_flow_collection_end(FlowMappingEndToken) def fetch_flow_collection_end(self, TokenClass): # Reset possible simple key on the current level. self.remove_possible_simple_key() # Decrease the flow level. self.flow_level -= 1 # No simple keys after ']' or '}'. self.allow_simple_key = False # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(TokenClass(start_mark, end_mark)) def fetch_flow_entry(self): # Simple keys are allowed after ','. self.allow_simple_key = True # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add FLOW-ENTRY. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(FlowEntryToken(start_mark, end_mark)) def fetch_block_entry(self): # Block context needs additional checks. if not self.flow_level: # Are we allowed to start a new entry? if not self.allow_simple_key: raise ScannerError(None, None, "sequence entries are not allowed here", self.get_mark()) # We may need to add BLOCK-SEQUENCE-START. if self.add_indent(self.column): mark = self.get_mark() self.tokens.append(BlockSequenceStartToken(mark, mark)) # It's an error for the block entry to occur in the flow context, # but we let the parser detect this. else: pass # Simple keys are allowed after '-'. self.allow_simple_key = True # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add BLOCK-ENTRY. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(BlockEntryToken(start_mark, end_mark)) def fetch_key(self): # Block context needs additional checks. if not self.flow_level: # Are we allowed to start a key (not necessary a simple)? if not self.allow_simple_key: raise ScannerError(None, None, "mapping keys are not allowed here", self.get_mark()) # We may need to add BLOCK-MAPPING-START. if self.add_indent(self.column): mark = self.get_mark() self.tokens.append(BlockMappingStartToken(mark, mark)) # Simple keys are allowed after '?' in the block context. self.allow_simple_key = not self.flow_level # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add KEY. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(KeyToken(start_mark, end_mark)) def fetch_value(self): # Do we determine a simple key? if self.flow_level in self.possible_simple_keys: # Add KEY. key = self.possible_simple_keys[self.flow_level] del self.possible_simple_keys[self.flow_level] self.tokens.insert(key.token_number-self.tokens_taken, KeyToken(key.mark, key.mark)) # If this key starts a new block mapping, we need to add # BLOCK-MAPPING-START. if not self.flow_level: if self.add_indent(key.column): self.tokens.insert(key.token_number-self.tokens_taken, BlockMappingStartToken(key.mark, key.mark)) # There cannot be two simple keys one after another. self.allow_simple_key = False # It must be a part of a complex key. else: # Block context needs additional checks. # (Do we really need them? They will be caught by the parser # anyway.) if not self.flow_level: # We are allowed to start a complex value if and only if # we can start a simple key. 
if not self.allow_simple_key: raise ScannerError(None, None, "mapping values are not allowed here", self.get_mark()) # If this value starts a new block mapping, we need to add # BLOCK-MAPPING-START. It will be detected as an error later by # the parser. if not self.flow_level: if self.add_indent(self.column): mark = self.get_mark() self.tokens.append(BlockMappingStartToken(mark, mark)) # Simple keys are allowed after ':' in the block context. self.allow_simple_key = not self.flow_level # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add VALUE. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(ValueToken(start_mark, end_mark)) def fetch_alias(self): # ALIAS could be a simple key. self.save_possible_simple_key() # No simple keys after ALIAS. self.allow_simple_key = False # Scan and add ALIAS. self.tokens.append(self.scan_anchor(AliasToken)) def fetch_anchor(self): # ANCHOR could start a simple key. self.save_possible_simple_key() # No simple keys after ANCHOR. self.allow_simple_key = False # Scan and add ANCHOR. self.tokens.append(self.scan_anchor(AnchorToken)) def fetch_tag(self): # TAG could start a simple key. self.save_possible_simple_key() # No simple keys after TAG. self.allow_simple_key = False # Scan and add TAG. self.tokens.append(self.scan_tag()) def fetch_literal(self): self.fetch_block_scalar(style='|') def fetch_folded(self): self.fetch_block_scalar(style='>') def fetch_block_scalar(self, style): # A simple key may follow a block scalar. self.allow_simple_key = True # Reset possible simple key on the current level. self.remove_possible_simple_key() # Scan and add SCALAR. self.tokens.append(self.scan_block_scalar(style)) def fetch_single(self): self.fetch_flow_scalar(style='\'') def fetch_double(self): self.fetch_flow_scalar(style='"') def fetch_flow_scalar(self, style): # A flow scalar could be a simple key. self.save_possible_simple_key() # No simple keys after flow scalars. self.allow_simple_key = False # Scan and add SCALAR. self.tokens.append(self.scan_flow_scalar(style)) def fetch_plain(self): # A plain scalar could be a simple key. self.save_possible_simple_key() # No simple keys after plain scalars. But note that `scan_plain` will # change this flag if the scan is finished at the beginning of the # line. self.allow_simple_key = False # Scan and add SCALAR. May change `allow_simple_key`. self.tokens.append(self.scan_plain()) # Checkers. def check_directive(self): # DIRECTIVE: ^ '%' ... # The '%' indicator is already checked. if self.column == 0: return True def check_document_start(self): # DOCUMENT-START: ^ '---' (' '|'\n') if self.column == 0: if self.prefix(3) == '---' \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': return True def check_document_end(self): # DOCUMENT-END: ^ '...' (' '|'\n') if self.column == 0: if self.prefix(3) == '...' \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': return True def check_block_entry(self): # BLOCK-ENTRY: '-' (' '|'\n') return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' def check_key(self): # KEY(flow context): '?' if self.flow_level: return True # KEY(block context): '?' 
(' '|'\n') else: return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' def check_value(self): # VALUE(flow context): ':' if self.flow_level: return True # VALUE(block context): ':' (' '|'\n') else: return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' def check_plain(self): # A plain scalar may start with any non-space character except: # '-', '?', ':', ',', '[', ']', '{', '}', # '#', '&', '*', '!', '|', '>', '\'', '\"', # '%', '@', '`'. # # It may also start with # '-', '?', ':' # if it is followed by a non-space character. # # Note that we limit the last rule to the block context (except the # '-' character) because we want the flow context to be space # independent. ch = self.peek() return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029' and (ch == '-' or (not self.flow_level and ch in '?:'))) # Scanners. def scan_to_next_token(self): # We ignore spaces, line breaks and comments. # If we find a line break in the block context, we set the flag # `allow_simple_key` on. # The byte order mark is stripped if it's the first character in the # stream. We do not yet support BOM inside the stream as the # specification requires. Any such mark will be considered as a part # of the document. # # TODO: We need to make tab handling rules more sane. A good rule is # Tabs cannot precede tokens # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, # KEY(block), VALUE(block), BLOCK-ENTRY # So the checking code is # if <TAB>: # self.allow_simple_keys = False # We also need to add the check for `allow_simple_keys == True` to # `unwind_indent` before issuing BLOCK-END. # Scanners for block, flow, and plain scalars need to be modified. if self.index == 0 and self.peek() == '\uFEFF': self.forward() found = False while not found: while self.peek() == ' ': self.forward() if self.peek() == '#': while self.peek() not in '\0\r\n\x85\u2028\u2029': self.forward() if self.scan_line_break(): if not self.flow_level: self.allow_simple_key = True else: found = True def scan_directive(self): # See the specification for details. start_mark = self.get_mark() self.forward() name = self.scan_directive_name(start_mark) value = None if name == 'YAML': value = self.scan_yaml_directive_value(start_mark) end_mark = self.get_mark() elif name == 'TAG': value = self.scan_tag_directive_value(start_mark) end_mark = self.get_mark() else: end_mark = self.get_mark() while self.peek() not in '\0\r\n\x85\u2028\u2029': self.forward() self.scan_directive_ignored_line(start_mark) return DirectiveToken(name, value, start_mark, end_mark) def scan_directive_name(self, start_mark): # See the specification for details. length = 0 ch = self.peek(length) while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_': length += 1 ch = self.peek(length) if not length: raise ScannerError("while scanning a directive", start_mark, "expected alphabetic or numeric character, but found %r" % ch, self.get_mark()) value = self.prefix(length) self.forward(length) ch = self.peek() if ch not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a directive", start_mark, "expected alphabetic or numeric character, but found %r" % ch, self.get_mark()) return value def scan_yaml_directive_value(self, start_mark): # See the specification for details.
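        # (Editor's note, not upstream text.) This parses the value half of a
        # "%YAML <major>.<minor>" directive, e.g. "%YAML 1.1" -> (1, 1):
        # skip spaces, scan an integer, require a literal '.', scan another.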
while self.peek() == ' ': self.forward() major = self.scan_yaml_directive_number(start_mark) if self.peek() != '.': raise ScannerError("while scanning a directive", start_mark, "expected a digit or '.', but found %r" % self.peek(), self.get_mark()) self.forward() minor = self.scan_yaml_directive_number(start_mark) if self.peek() not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a directive", start_mark, "expected a digit or ' ', but found %r" % self.peek(), self.get_mark()) return (major, minor) def scan_yaml_directive_number(self, start_mark): # See the specification for details. ch = self.peek() if not ('0' <= ch <= '9'): raise ScannerError("while scanning a directive", start_mark, "expected a digit, but found %r" % ch, self.get_mark()) length = 0 while '0' <= self.peek(length) <= '9': length += 1 value = int(self.prefix(length)) self.forward(length) return value def scan_tag_directive_value(self, start_mark): # See the specification for details. while self.peek() == ' ': self.forward() handle = self.scan_tag_directive_handle(start_mark) while self.peek() == ' ': self.forward() prefix = self.scan_tag_directive_prefix(start_mark) return (handle, prefix) def scan_tag_directive_handle(self, start_mark): # See the specification for details. value = self.scan_tag_handle('directive', start_mark) ch = self.peek() if ch != ' ': raise ScannerError("while scanning a directive", start_mark, "expected ' ', but found %r" % ch, self.get_mark()) return value def scan_tag_directive_prefix(self, start_mark): # See the specification for details. value = self.scan_tag_uri('directive', start_mark) ch = self.peek() if ch not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a directive", start_mark, "expected ' ', but found %r" % ch, self.get_mark()) return value def scan_directive_ignored_line(self, start_mark): # See the specification for details. while self.peek() == ' ': self.forward() if self.peek() == '#': while self.peek() not in '\0\r\n\x85\u2028\u2029': self.forward() ch = self.peek() if ch not in '\0\r\n\x85\u2028\u2029': raise ScannerError("while scanning a directive", start_mark, "expected a comment or a line break, but found %r" % ch, self.get_mark()) self.scan_line_break() def scan_anchor(self, TokenClass): # The specification does not restrict characters for anchors and # aliases. This may lead to problems, for instance, the document: # [ *alias, value ] # can be interpreted in two ways, as # [ "value" ] # and # [ *alias , "value" ] # Therefore we restrict aliases to numbers and ASCII letters. start_mark = self.get_mark() indicator = self.peek() if indicator == '*': name = 'alias' else: name = 'anchor' self.forward() length = 0 ch = self.peek(length) while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_': length += 1 ch = self.peek(length) if not length: raise ScannerError("while scanning an %s" % name, start_mark, "expected alphabetic or numeric character, but found %r" % ch, self.get_mark()) value = self.prefix(length) self.forward(length) ch = self.peek() if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`': raise ScannerError("while scanning an %s" % name, start_mark, "expected alphabetic or numeric character, but found %r" % ch, self.get_mark()) end_mark = self.get_mark() return TokenClass(value, start_mark, end_mark) def scan_tag(self): # See the specification for details. 
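        # (Editor's note, not upstream text.) Tag tokens take three shapes:
        #   !<verbatim-uri>  -> handle is None, suffix is the raw URI;
        #   a lone '!'       -> handle is None, suffix is '!';
        #   !suffix, !!suffix or !handle!suffix -> handle plus scanned suffix.
        # Either way the token value ends up as a (handle, suffix) tuple.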
start_mark = self.get_mark() ch = self.peek(1) if ch == '<': handle = None self.forward(2) suffix = self.scan_tag_uri('tag', start_mark) if self.peek() != '>': raise ScannerError("while parsing a tag", start_mark, "expected '>', but found %r" % self.peek(), self.get_mark()) self.forward() elif ch in '\0 \t\r\n\x85\u2028\u2029': handle = None suffix = '!' self.forward() else: length = 1 use_handle = False while ch not in '\0 \r\n\x85\u2028\u2029': if ch == '!': use_handle = True break length += 1 ch = self.peek(length) handle = '!' if use_handle: handle = self.scan_tag_handle('tag', start_mark) else: handle = '!' self.forward() suffix = self.scan_tag_uri('tag', start_mark) ch = self.peek() if ch not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a tag", start_mark, "expected ' ', but found %r" % ch, self.get_mark()) value = (handle, suffix) end_mark = self.get_mark() return TagToken(value, start_mark, end_mark) def scan_block_scalar(self, style): # See the specification for details. if style == '>': folded = True else: folded = False chunks = [] start_mark = self.get_mark() # Scan the header. self.forward() chomping, increment = self.scan_block_scalar_indicators(start_mark) self.scan_block_scalar_ignored_line(start_mark) # Determine the indentation level and go to the first non-empty line. min_indent = self.indent+1 if min_indent < 1: min_indent = 1 if increment is None: breaks, max_indent, end_mark = self.scan_block_scalar_indentation() indent = max(min_indent, max_indent) else: indent = min_indent+increment-1 breaks, end_mark = self.scan_block_scalar_breaks(indent) line_break = '' # Scan the inner part of the block scalar. while self.column == indent and self.peek() != '\0': chunks.extend(breaks) leading_non_space = self.peek() not in ' \t' length = 0 while self.peek(length) not in '\0\r\n\x85\u2028\u2029': length += 1 chunks.append(self.prefix(length)) self.forward(length) line_break = self.scan_line_break() breaks, end_mark = self.scan_block_scalar_breaks(indent) if self.column == indent and self.peek() != '\0': # Unfortunately, folding rules are ambiguous. # # This is the folding according to the specification: if folded and line_break == '\n' \ and leading_non_space and self.peek() not in ' \t': if not breaks: chunks.append(' ') else: chunks.append(line_break) # This is Clark Evans's interpretation (also in the spec # examples): # #if folded and line_break == '\n': # if not breaks: # if self.peek() not in ' \t': # chunks.append(' ') # else: # chunks.append(line_break) #else: # chunks.append(line_break) else: break # Chomp the tail. if chomping is not False: chunks.append(line_break) if chomping is True: chunks.extend(breaks) # We are done. return ScalarToken(''.join(chunks), False, start_mark, end_mark, style) def scan_block_scalar_indicators(self, start_mark): # See the specification for details. 
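        # (Editor's note, not upstream text.) Parses the optional header after
        # '|' or '>': a chomping indicator ('+' keep trailing newlines, '-'
        # strip them) and/or an explicit indentation increment in 1-9, in
        # either order -- e.g. '|2-', '>+' or a bare '|'.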
chomping = None increment = None ch = self.peek() if ch in '+-': if ch == '+': chomping = True else: chomping = False self.forward() ch = self.peek() if ch in '0123456789': increment = int(ch) if increment == 0: raise ScannerError("while scanning a block scalar", start_mark, "expected indentation indicator in the range 1-9, but found 0", self.get_mark()) self.forward() elif ch in '0123456789': increment = int(ch) if increment == 0: raise ScannerError("while scanning a block scalar", start_mark, "expected indentation indicator in the range 1-9, but found 0", self.get_mark()) self.forward() ch = self.peek() if ch in '+-': if ch == '+': chomping = True else: chomping = False self.forward() ch = self.peek() if ch not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a block scalar", start_mark, "expected chomping or indentation indicators, but found %r" % ch, self.get_mark()) return chomping, increment def scan_block_scalar_ignored_line(self, start_mark): # See the specification for details. while self.peek() == ' ': self.forward() if self.peek() == '#': while self.peek() not in '\0\r\n\x85\u2028\u2029': self.forward() ch = self.peek() if ch not in '\0\r\n\x85\u2028\u2029': raise ScannerError("while scanning a block scalar", start_mark, "expected a comment or a line break, but found %r" % ch, self.get_mark()) self.scan_line_break() def scan_block_scalar_indentation(self): # See the specification for details. chunks = [] max_indent = 0 end_mark = self.get_mark() while self.peek() in ' \r\n\x85\u2028\u2029': if self.peek() != ' ': chunks.append(self.scan_line_break()) end_mark = self.get_mark() else: self.forward() if self.column > max_indent: max_indent = self.column return chunks, max_indent, end_mark def scan_block_scalar_breaks(self, indent): # See the specification for details. chunks = [] end_mark = self.get_mark() while self.column < indent and self.peek() == ' ': self.forward() while self.peek() in '\r\n\x85\u2028\u2029': chunks.append(self.scan_line_break()) end_mark = self.get_mark() while self.column < indent and self.peek() == ' ': self.forward() return chunks, end_mark def scan_flow_scalar(self, style): # See the specification for details. # Note that we loose indentation rules for quoted scalars. Quoted # scalars don't need to adhere indentation because " and ' clearly # mark the beginning and the end of them. Therefore we are less # restrictive then the specification requires. We only need to check # that document separators are not included in scalars. if style == '"': double = True else: double = False chunks = [] start_mark = self.get_mark() quote = self.peek() self.forward() chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) while self.peek() != quote: chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) self.forward() end_mark = self.get_mark() return ScalarToken(''.join(chunks), False, start_mark, end_mark, style) ESCAPE_REPLACEMENTS = { '0': '\0', 'a': '\x07', 'b': '\x08', 't': '\x09', '\t': '\x09', 'n': '\x0A', 'v': '\x0B', 'f': '\x0C', 'r': '\x0D', 'e': '\x1B', ' ': '\x20', '\"': '\"', '\\': '\\', '/': '/', 'N': '\x85', '_': '\xA0', 'L': '\u2028', 'P': '\u2029', } ESCAPE_CODES = { 'x': 2, 'u': 4, 'U': 8, } def scan_flow_scalar_non_spaces(self, double, start_mark): # See the specification for details. 
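        # (Editor's note, not upstream text.) Consumes the non-blank portion
        # of a quoted scalar: plain runs, the '' -> ' doubling rule in single
        # quotes, and -- for double quotes only -- backslash escapes, both the
        # named ones tabled above and \xXX/\uXXXX/\UXXXXXXXX hex forms.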
chunks = [] while True: length = 0 while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029': length += 1 if length: chunks.append(self.prefix(length)) self.forward(length) ch = self.peek() if not double and ch == '\'' and self.peek(1) == '\'': chunks.append('\'') self.forward(2) elif (double and ch == '\'') or (not double and ch in '\"\\'): chunks.append(ch) self.forward() elif double and ch == '\\': self.forward() ch = self.peek() if ch in self.ESCAPE_REPLACEMENTS: chunks.append(self.ESCAPE_REPLACEMENTS[ch]) self.forward() elif ch in self.ESCAPE_CODES: length = self.ESCAPE_CODES[ch] self.forward() for k in range(length): if self.peek(k) not in '0123456789ABCDEFabcdef': raise ScannerError("while scanning a double-quoted scalar", start_mark, "expected escape sequence of %d hexdecimal numbers, but found %r" % (length, self.peek(k)), self.get_mark()) code = int(self.prefix(length), 16) chunks.append(chr(code)) self.forward(length) elif ch in '\r\n\x85\u2028\u2029': self.scan_line_break() chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) else: raise ScannerError("while scanning a double-quoted scalar", start_mark, "found unknown escape character %r" % ch, self.get_mark()) else: return chunks def scan_flow_scalar_spaces(self, double, start_mark): # See the specification for details. chunks = [] length = 0 while self.peek(length) in ' \t': length += 1 whitespaces = self.prefix(length) self.forward(length) ch = self.peek() if ch == '\0': raise ScannerError("while scanning a quoted scalar", start_mark, "found unexpected end of stream", self.get_mark()) elif ch in '\r\n\x85\u2028\u2029': line_break = self.scan_line_break() breaks = self.scan_flow_scalar_breaks(double, start_mark) if line_break != '\n': chunks.append(line_break) elif not breaks: chunks.append(' ') chunks.extend(breaks) else: chunks.append(whitespaces) return chunks def scan_flow_scalar_breaks(self, double, start_mark): # See the specification for details. chunks = [] while True: # Instead of checking indentation, we check for document # separators. prefix = self.prefix(3) if (prefix == '---' or prefix == '...') \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': raise ScannerError("while scanning a quoted scalar", start_mark, "found unexpected document separator", self.get_mark()) while self.peek() in ' \t': self.forward() if self.peek() in '\r\n\x85\u2028\u2029': chunks.append(self.scan_line_break()) else: return chunks def scan_plain(self): # See the specification for details. # We add an additional restriction for the flow context: # plain scalars in the flow context cannot contain ',' or '?'. # We also keep track of the `allow_simple_key` flag here. # Indentation rules are loosed for the flow context. chunks = [] start_mark = self.get_mark() end_mark = start_mark indent = self.indent+1 # We allow zero indentation for scalars, but then we need to check for # document separators at the beginning of the line. 
#if indent == 0: # indent = 1 spaces = [] while True: length = 0 if self.peek() == '#': break while True: ch = self.peek(length) if ch in '\0 \t\r\n\x85\u2028\u2029' \ or (ch == ':' and self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029' + (u',[]{}' if self.flow_level else u''))\ or (self.flow_level and ch in ',?[]{}'): break length += 1 if length == 0: break self.allow_simple_key = False chunks.extend(spaces) chunks.append(self.prefix(length)) self.forward(length) end_mark = self.get_mark() spaces = self.scan_plain_spaces(indent, start_mark) if not spaces or self.peek() == '#' \ or (not self.flow_level and self.column < indent): break return ScalarToken(''.join(chunks), True, start_mark, end_mark) def scan_plain_spaces(self, indent, start_mark): # See the specification for details. # The specification is really confusing about tabs in plain scalars. # We just forbid them completely. Do not use tabs in YAML! chunks = [] length = 0 while self.peek(length) in ' ': length += 1 whitespaces = self.prefix(length) self.forward(length) ch = self.peek() if ch in '\r\n\x85\u2028\u2029': line_break = self.scan_line_break() self.allow_simple_key = True prefix = self.prefix(3) if (prefix == '---' or prefix == '...') \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': return breaks = [] while self.peek() in ' \r\n\x85\u2028\u2029': if self.peek() == ' ': self.forward() else: breaks.append(self.scan_line_break()) prefix = self.prefix(3) if (prefix == '---' or prefix == '...') \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': return if line_break != '\n': chunks.append(line_break) elif not breaks: chunks.append(' ') chunks.extend(breaks) elif whitespaces: chunks.append(whitespaces) return chunks def scan_tag_handle(self, name, start_mark): # See the specification for details. # For some strange reasons, the specification does not allow '_' in # tag handles. I have allowed it anyway. ch = self.peek() if ch != '!': raise ScannerError("while scanning a %s" % name, start_mark, "expected '!', but found %r" % ch, self.get_mark()) length = 1 ch = self.peek(length) if ch != ' ': while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_': length += 1 ch = self.peek(length) if ch != '!': self.forward(length) raise ScannerError("while scanning a %s" % name, start_mark, "expected '!', but found %r" % ch, self.get_mark()) length += 1 value = self.prefix(length) self.forward(length) return value def scan_tag_uri(self, name, start_mark): # See the specification for details. # Note: we do not check if URI is well-formed. chunks = [] length = 0 ch = self.peek(length) while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-;/?:@&=+$,_.!~*\'()[]%': if ch == '%': chunks.append(self.prefix(length)) self.forward(length) length = 0 chunks.append(self.scan_uri_escapes(name, start_mark)) else: length += 1 ch = self.peek(length) if length: chunks.append(self.prefix(length)) self.forward(length) length = 0 if not chunks: raise ScannerError("while parsing a %s" % name, start_mark, "expected URI, but found %r" % ch, self.get_mark()) return ''.join(chunks) def scan_uri_escapes(self, name, start_mark): # See the specification for details. 
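        # (Editor's note, not upstream text.) Decodes a run of '%XX' URI
        # escapes into bytes and then interprets those bytes as UTF-8; a bad
        # hex digit or an invalid UTF-8 sequence raises ScannerError.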
codes = [] mark = self.get_mark() while self.peek() == '%': self.forward() for k in range(2): if self.peek(k) not in '0123456789ABCDEFabcdef': raise ScannerError("while scanning a %s" % name, start_mark, "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % self.peek(k), self.get_mark()) codes.append(int(self.prefix(2), 16)) self.forward(2) try: value = bytes(codes).decode('utf-8') except UnicodeDecodeError as exc: raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) return value def scan_line_break(self): # Transforms: # '\r\n' : '\n' # '\r' : '\n' # '\n' : '\n' # '\x85' : '\n' # '\u2028' : '\u2028' # '\u2029 : '\u2029' # default : '' ch = self.peek() if ch in '\r\n\x85': if self.prefix(2) == '\r\n': self.forward(2) else: self.forward() return '\n' elif ch in '\u2028\u2029': self.forward() return ch return '' invoke-2.2.0/invoke/vendor/yaml/serializer.py000066400000000000000000000101051445356551000212700ustar00rootroot00000000000000 __all__ = ['Serializer', 'SerializerError'] from .error import YAMLError from .events import * from .nodes import * class SerializerError(YAMLError): pass class Serializer: ANCHOR_TEMPLATE = 'id%03d' def __init__(self, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None): self.use_encoding = encoding self.use_explicit_start = explicit_start self.use_explicit_end = explicit_end self.use_version = version self.use_tags = tags self.serialized_nodes = {} self.anchors = {} self.last_anchor_id = 0 self.closed = None def open(self): if self.closed is None: self.emit(StreamStartEvent(encoding=self.use_encoding)) self.closed = False elif self.closed: raise SerializerError("serializer is closed") else: raise SerializerError("serializer is already opened") def close(self): if self.closed is None: raise SerializerError("serializer is not opened") elif not self.closed: self.emit(StreamEndEvent()) self.closed = True #def __del__(self): # self.close() def serialize(self, node): if self.closed is None: raise SerializerError("serializer is not opened") elif self.closed: raise SerializerError("serializer is closed") self.emit(DocumentStartEvent(explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags)) self.anchor_node(node) self.serialize_node(node, None, None) self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) self.serialized_nodes = {} self.anchors = {} self.last_anchor_id = 0 def anchor_node(self, node): if node in self.anchors: if self.anchors[node] is None: self.anchors[node] = self.generate_anchor(node) else: self.anchors[node] = None if isinstance(node, SequenceNode): for item in node.value: self.anchor_node(item) elif isinstance(node, MappingNode): for key, value in node.value: self.anchor_node(key) self.anchor_node(value) def generate_anchor(self, node): self.last_anchor_id += 1 return self.ANCHOR_TEMPLATE % self.last_anchor_id def serialize_node(self, node, parent, index): alias = self.anchors[node] if node in self.serialized_nodes: self.emit(AliasEvent(alias)) else: self.serialized_nodes[node] = True self.descend_resolver(parent, index) if isinstance(node, ScalarNode): detected_tag = self.resolve(ScalarNode, node.value, (True, False)) default_tag = self.resolve(ScalarNode, node.value, (False, True)) implicit = (node.tag == detected_tag), (node.tag == default_tag) self.emit(ScalarEvent(alias, node.tag, implicit, node.value, style=node.style)) elif isinstance(node, SequenceNode): implicit = (node.tag == self.resolve(SequenceNode, node.value, True)) 
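            # (Editor's note, not upstream text.) `implicit` is True when the
            # resolver would infer this tag on its own, which lets the
            # emitter omit the explicit tag from the serialized output.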
self.emit(SequenceStartEvent(alias, node.tag, implicit, flow_style=node.flow_style)) index = 0 for item in node.value: self.serialize_node(item, node, index) index += 1 self.emit(SequenceEndEvent()) elif isinstance(node, MappingNode): implicit = (node.tag == self.resolve(MappingNode, node.value, True)) self.emit(MappingStartEvent(alias, node.tag, implicit, flow_style=node.flow_style)) for key, value in node.value: self.serialize_node(key, node, None) self.serialize_node(value, node, key) self.emit(MappingEndEvent()) self.ascend_resolver() invoke-2.2.0/invoke/vendor/yaml/tokens.py000066400000000000000000000050151445356551000204260ustar00rootroot00000000000000 class Token(object): def __init__(self, start_mark, end_mark): self.start_mark = start_mark self.end_mark = end_mark def __repr__(self): attributes = [key for key in self.__dict__ if not key.endswith('_mark')] attributes.sort() arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes]) return '%s(%s)' % (self.__class__.__name__, arguments) #class BOMToken(Token): # id = '<byte order mark>' class DirectiveToken(Token): id = '<directive>' def __init__(self, name, value, start_mark, end_mark): self.name = name self.value = value self.start_mark = start_mark self.end_mark = end_mark class DocumentStartToken(Token): id = '<document start>' class DocumentEndToken(Token): id = '<document end>' class StreamStartToken(Token): id = '<stream start>' def __init__(self, start_mark=None, end_mark=None, encoding=None): self.start_mark = start_mark self.end_mark = end_mark self.encoding = encoding class StreamEndToken(Token): id = '<stream end>' class BlockSequenceStartToken(Token): id = '<block sequence start>' class BlockMappingStartToken(Token): id = '<block mapping start>' class BlockEndToken(Token): id = '<block end>' class FlowSequenceStartToken(Token): id = '[' class FlowMappingStartToken(Token): id = '{' class FlowSequenceEndToken(Token): id = ']' class FlowMappingEndToken(Token): id = '}' class KeyToken(Token): id = '?' class ValueToken(Token): id = ':' class BlockEntryToken(Token): id = '-' class FlowEntryToken(Token): id = ',' class AliasToken(Token): id = '<alias>' def __init__(self, value, start_mark, end_mark): self.value = value self.start_mark = start_mark self.end_mark = end_mark class AnchorToken(Token): id = '<anchor>' def __init__(self, value, start_mark, end_mark): self.value = value self.start_mark = start_mark self.end_mark = end_mark class TagToken(Token): id = '<tag>' def __init__(self, value, start_mark, end_mark): self.value = value self.start_mark = start_mark self.end_mark = end_mark class ScalarToken(Token): id = '<scalar>' def __init__(self, value, plain, start_mark, end_mark, style=None): self.value = value self.plain = plain self.start_mark = start_mark self.end_mark = end_mark self.style = style invoke-2.2.0/invoke/watchers.py000066400000000000000000000117511445356551000165100ustar00rootroot00000000000000import re import threading from typing import Generator, Iterable from .exceptions import ResponseNotAccepted class StreamWatcher(threading.local): """ A class whose subclasses may act on seen stream data from subprocesses. Subclasses must exhibit the following API; see `Responder` for a concrete example. * ``__init__`` is completely up to each subclass, though as usual, subclasses *of* subclasses should be careful to make use of `super` where appropriate. * `submit` must accept the entire current contents of the stream being watched, as a string, and may optionally return an iterable of strings (or act as a generator iterator, i.e. multiple calls to ``yield <string>``), which will each be written to the subprocess' standard input. ..
note:: `StreamWatcher` subclasses exist in part to enable state tracking, such as detecting when a submitted password didn't work & erroring (or prompting a user, etc.). Such bookkeeping isn't easily achievable with simple callback functions. .. note:: `StreamWatcher` subclasses `threading.local` so that its instances can be used to 'watch' both subprocess stdout and stderr in separate threads. .. versionadded:: 1.0 """ def submit(self, stream: str) -> Iterable[str]: """ Act on ``stream`` data, potentially returning responses. :param str stream: All data read on this stream since the beginning of the session. :returns: An iterable of ``str`` (which may be empty). .. versionadded:: 1.0 """ raise NotImplementedError class Responder(StreamWatcher): """ A parameterizable object that submits responses to specific patterns. Commonly used to implement password auto-responses for things like ``sudo``. .. versionadded:: 1.0 """ def __init__(self, pattern: str, response: str) -> None: r""" Imprint this `Responder` with necessary parameters. :param pattern: A raw string (e.g. ``r"\[sudo\] password for .*:"``) which will be turned into a regular expression. :param response: The string to submit to the subprocess' stdin when ``pattern`` is detected. """ # TODO: precompile the keys into regex objects self.pattern = pattern self.response = response self.index = 0 def pattern_matches( self, stream: str, pattern: str, index_attr: str ) -> Iterable[str]: """ Generic "search for pattern in stream, using index" behavior. Used here and in some subclasses that want to track multiple patterns concurrently. :param str stream: The same data passed to ``submit``. :param str pattern: The pattern to search for. :param str index_attr: The name of the index attribute to use. :returns: An iterable of string matches. .. versionadded:: 1.0 """ # NOTE: generifies scanning so it can be used to scan for >1 pattern at # once, e.g. in FailingResponder. # Only look at stream contents we haven't seen yet, to avoid dupes. index = getattr(self, index_attr) new = stream[index:] # Search, across lines if necessary matches = re.findall(pattern, new, re.S) # Update seek index if we've matched if matches: setattr(self, index_attr, index + len(new)) return matches def submit(self, stream: str) -> Generator[str, None, None]: # Iterate over findall() response in case >1 match occurred. for _ in self.pattern_matches(stream, self.pattern, "index"): yield self.response class FailingResponder(Responder): """ Variant of `Responder` which is capable of detecting incorrect responses. This class adds a ``sentinel`` parameter to ``__init__``, and its ``submit`` will raise `.ResponseNotAccepted` if it detects that sentinel value in the stream. .. versionadded:: 1.0 """ def __init__(self, pattern: str, response: str, sentinel: str) -> None: super().__init__(pattern, response) self.sentinel = sentinel self.failure_index = 0 self.tried = False def submit(self, stream: str) -> Generator[str, None, None]: # Behave like regular Responder initially response = super().submit(stream) # Also check stream for our failure sentinel failed = self.pattern_matches(stream, self.sentinel, "failure_index") # Error out if we seem to have failed after a previous response. if self.tried and failed: err = 'Auto-response to r"{}" failed with {!r}!'.format( self.pattern, self.sentinel ) raise ResponseNotAccepted(err) # Once we see that we had a response, take note if response: self.tried = True # Again, behave regularly by default. return response
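# ----------------------------------------------------------------------
# Editor's hedged usage sketch (illustrative only; the command, prompt
# pattern and password below are made up, and the run() call assumes
# Context.run()'s documented 'watchers' keyword argument):
#
#     from invoke import Context, Responder
#
#     sudopass = Responder(
#         pattern=r"\[sudo\] password:",  # regex scanned against output
#         response="mypassword\n",        # written to stdin on each match
#     )
#     Context().run("sudo whoami", watchers=[sudopass])
#
# FailingResponder is used the same way, but additionally raises
# ResponseNotAccepted once its sentinel (e.g. sudo's "Sorry, try again.")
# shows up in the stream after a response has been submitted.
# ----------------------------------------------------------------------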
invoke-2.2.0/pyproject.toml000066400000000000000000000023421445356551000157330ustar00rootroot00000000000000[tool.mypy] # check_untyped_defs = true # follow_imports_for_stubs = true # disallow_any_decorated = true # disallow_any_generics = true # disallow_any_unimported = true # disallow_incomplete_defs = true # disallow_subclassing_any = true # disallow_untyped_calls = true # disallow_untyped_decorators = true disallow_untyped_defs = true # enable_error_code = [ # "redundant-expr", # "truthy-bool", # "ignore-without-code", # "unused-awaitable", # ] exclude = [ "integration/", "tests/", "setup.py", "sites/www/conf.py", "build/", ] ignore_missing_imports = true # implicit_reexport = False # no_implicit_optional = true # pretty = true # show_column_numbers = true # show_error_codes = true # strict_equality = true warn_incomplete_stub = true warn_redundant_casts = true # warn_return_any = true # warn_unreachable = true warn_unused_ignores = true [[tool.mypy.overrides]] module = "invoke.vendor.*" ignore_errors = true [[tool.mypy.overrides]] module = "alabaster" ignore_missing_imports = true [[tool.mypy.overrides]] module = "icecream" ignore_missing_imports = true [[tool.mypy.overrides]] module = "invocations" ignore_missing_imports = true [[tool.mypy.overrides]] module = "pytest_relaxed" ignore_missing_imports = true invoke-2.2.0/pytest.ini000066400000000000000000000000541445356551000150460ustar00rootroot00000000000000[pytest] testpaths = tests python_files = * invoke-2.2.0/setup.py000066400000000000000000000052271445356551000145360ustar00rootroot00000000000000#!/usr/bin/env python # Support setuptools only, distutils has a divergent and more annoying API and # few folks will lack setuptools. from setuptools import setup, find_packages # Version info -- read without importing _locals = {} with open("invoke/_version.py") as fp: exec(fp.read(), None, _locals) version = _locals["__version__"] exclude = [] # Frankenstein long_description long_description = """ {} For a high level introduction, including example code, please see `our main project website `_; or for detailed API docs, see `the versioned API website `_.
""".format( open("README.rst").read() ) setup( name="invoke", version=version, description="Pythonic task execution", license="BSD", long_description=long_description, author="Jeff Forcier", author_email="jeff@bitprophet.org", url="https://pyinvoke.org", project_urls={ "Docs": "https://docs.pyinvoke.org", "Source": "https://github.com/pyinvoke/invoke", "Issues": "https://github.com/pyinvoke/invoke/issues", "Changelog": "https://www.pyinvoke.org/changelog.html", "CI": "https://app.circleci.com/pipelines/github/pyinvoke/invoke", }, python_requires=">=3.6", packages=find_packages(exclude=exclude), include_package_data=True, entry_points={ "console_scripts": [ "invoke = invoke.main:program.run", "inv = invoke.main:program.run", ] }, classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: BSD License", "Operating System :: POSIX", "Operating System :: Unix", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Topic :: Software Development", "Topic :: Software Development :: Build Tools", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Software Distribution", "Topic :: System :: Systems Administration", ], ) invoke-2.2.0/sites/000077500000000000000000000000001445356551000141455ustar00rootroot00000000000000invoke-2.2.0/sites/docs/000077500000000000000000000000001445356551000150755ustar00rootroot00000000000000invoke-2.2.0/sites/docs/_static/000077500000000000000000000000001445356551000165235ustar00rootroot00000000000000invoke-2.2.0/sites/docs/_static/rtd.css000066400000000000000000000356341445356551000200410ustar00rootroot00000000000000/* * rtd.css * ~~~~~~~~~~~~~~~ * * Sphinx stylesheet -- sphinxdoc theme. Originally created by * Armin Ronacher for Werkzeug. * * Customized for ReadTheDocs by Eric Pierce & Eric Holscher * * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ /* RTD colors * light blue: #e8ecef * medium blue: #8ca1af * dark blue: #465158 * dark grey: #444444 * * white hover: #d1d9df; * medium blue hover: #697983; * green highlight: #8ecc4c * light blue (project bar): #e8ecef */ @import url("basic.css"); /* PAGE LAYOUT -------------------------------------------------------------- */ body { font: 100%/1.5 "ff-meta-web-pro-1","ff-meta-web-pro-2",Arial,"Helvetica Neue",sans-serif; text-align: center; color: black; background-color: #465158; padding: 0; margin: 0; } div.document { text-align: left; background-color: #e8ecef; } div.bodywrapper { background-color: #ffffff; border-left: 1px solid #ccc; border-bottom: 1px solid #ccc; margin: 0 0 0 16em; } div.body { margin: 0; padding: 0.5em 1.3em; max-width: 55em; min-width: 20em; } div.related { font-size: 1em; background-color: #465158; } div.documentwrapper { float: left; width: 100%; background-color: #e8ecef; } /* HEADINGS --------------------------------------------------------------- */ h1 { margin: 0; padding: 0.7em 0 0.3em 0; font-size: 1.5em; line-height: 1.15; color: #111; clear: both; } h2 { margin: 2em 0 0.2em 0; font-size: 1.35em; padding: 0; color: #465158; } h3 { margin: 1em 0 -0.3em 0; font-size: 1.2em; color: #6c818f; } div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { color: black; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa !important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } /* LINKS ------------------------------------------------------------------ */ /* Normal links get a pseudo-underline */ a { color: #444; text-decoration: none; border-bottom: 1px solid #ccc; } /* Links in sidebar, TOC, index trees and tables have no underline */ .sphinxsidebar a, .toctree-wrapper a, .indextable a, #indices-and-tables a { color: #444; text-decoration: none; border-bottom: none; } /* Most links get an underline-effect when hovered */ a:hover, div.toctree-wrapper a:hover, .indextable a:hover, #indices-and-tables a:hover { color: #111; text-decoration: none; border-bottom: 1px solid #111; } /* Footer links */ div.footer a { color: #86989B; text-decoration: none; border: none; } div.footer a:hover { color: #a6b8bb; text-decoration: underline; border: none; } /* Permalink anchor (subtle grey with a red hover) */ div.body a.headerlink { color: #ccc; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none; border: none; } div.body a.headerlink:hover { color: #c60f0f; border: none; } /* NAVIGATION BAR --------------------------------------------------------- */ div.related ul { height: 2.5em; } div.related ul li { margin: 0; padding: 0.65em 0; float: left; display: block; color: white; /* For the >> separators */ font-size: 0.8em; } div.related ul li.right { float: right; margin-right: 5px; color: transparent; /* Hide the | separators */ } /* "Breadcrumb" links in nav bar */ div.related ul li a { order: none; background-color: inherit; font-weight: bold; margin: 6px 0 6px 4px; line-height: 1.75em; color: #ffffff; padding: 0.4em 0.8em; border: none; border-radius: 3px; } /* previous / next / modules / index links look more like buttons */ div.related ul li.right a { margin: 0.375em 
0; background-color: #697983; text-shadow: 0 1px rgba(0, 0, 0, 0.5); border-radius: 3px; -webkit-border-radius: 3px; -moz-border-radius: 3px; } /* All navbar links light up as buttons when hovered */ div.related ul li a:hover { background-color: #8ca1af; color: #ffffff; text-decoration: none; border-radius: 3px; -webkit-border-radius: 3px; -moz-border-radius: 3px; } /* Take extra precautions for tt within links */ a tt, div.related ul li a tt { background: inherit !important; color: inherit !important; } /* SIDEBAR ---------------------------------------------------------------- */ div.sphinxsidebarwrapper { padding: 0; } div.sphinxsidebar { margin: 0; margin-left: -100%; float: left; top: 3em; left: 0; padding: 0 1em; width: 14em; font-size: 1em; text-align: left; background-color: #e8ecef; } div.sphinxsidebar img { max-width: 12em; } div.sphinxsidebar h3, div.sphinxsidebar h4 { margin: 1.2em 0 0.3em 0; font-size: 1em; padding: 0; color: #222222; font-family: "ff-meta-web-pro-1", "ff-meta-web-pro-2", "Arial", "Helvetica Neue", sans-serif; } div.sphinxsidebar h3 a { color: #444444; } div.sphinxsidebar ul, div.sphinxsidebar p { margin-top: 0; padding-left: 0; line-height: 130%; background-color: #e8ecef; } /* No bullets for nested lists, but a little extra indentation */ div.sphinxsidebar ul ul { list-style-type: none; margin-left: 1.5em; padding: 0; } /* A little top/bottom padding to prevent adjacent links' borders * from overlapping each other */ div.sphinxsidebar ul li { padding: 1px 0; } /* A little left-padding to make these align with the ULs */ div.sphinxsidebar p.topless { padding-left: 0 0 0 1em; } /* Make these into hidden one-liners */ div.sphinxsidebar ul li, div.sphinxsidebar p.topless { white-space: nowrap; overflow: hidden; } /* ...which become visible when hovered */ div.sphinxsidebar ul li:hover, div.sphinxsidebar p.topless:hover { overflow: visible; } /* Search text box and "Go" button */ #searchbox { margin-top: 2em; margin-bottom: 1em; background: #ddd; padding: 0.5em; border-radius: 6px; -moz-border-radius: 6px; -webkit-border-radius: 6px; } #searchbox h3 { margin-top: 0; } /* Make search box and button abut and have a border */ input, div.sphinxsidebar input { border: 1px solid #999; float: left; } /* Search textbox */ input[type="text"] { margin: 0; padding: 0 3px; height: 20px; width: 144px; border-top-left-radius: 3px; border-bottom-left-radius: 3px; -moz-border-radius-topleft: 3px; -moz-border-radius-bottomleft: 3px; -webkit-border-top-left-radius: 3px; -webkit-border-bottom-left-radius: 3px; } /* Search button */ input[type="submit"] { margin: 0 0 0 -1px; /* -1px prevents a double-border with textbox */ height: 22px; color: #444; background-color: #e8ecef; padding: 1px 4px; font-weight: bold; border-top-right-radius: 3px; border-bottom-right-radius: 3px; -moz-border-radius-topright: 3px; -moz-border-radius-bottomright: 3px; -webkit-border-top-right-radius: 3px; -webkit-border-bottom-right-radius: 3px; } input[type="submit"]:hover { color: #ffffff; background-color: #8ecc4c; } div.sphinxsidebar p.searchtip { clear: both; padding: 0.5em 0 0 0; background: #ddd; color: #666; font-size: 0.9em; } /* Sidebar links are unusual */ div.sphinxsidebar li a, div.sphinxsidebar p a { background: #e8ecef; /* In case links overlap main content */ border-radius: 3px; -moz-border-radius: 3px; -webkit-border-radius: 3px; border: 1px solid transparent; /* To prevent things jumping around on hover */ padding: 0 5px 0 5px; } div.sphinxsidebar li a:hover, div.sphinxsidebar p a:hover { 
color: #111; text-decoration: none; border: 1px solid #888; } /* Tweak any link appearing in a heading */ div.sphinxsidebar h3 a { } /* OTHER STUFF ------------------------------------------------------------ */ cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } tt { background-color: #f2f2f2; color: #444; } tt.descname, tt.descclassname, tt.xref { border: 0; } hr { border: 1px solid #abc; margin: 2em; } pre, #_fontwidthtest { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; margin: 1em 2em; font-size: 0.95em; letter-spacing: 0.015em; line-height: 120%; padding: 0.5em; border: 1px solid #ccc; background-color: #eee; border-radius: 6px; -moz-border-radius: 6px; -webkit-border-radius: 6px; } pre a { color: inherit; text-decoration: underline; } td.linenos pre { padding: 0.5em 0; } div.quotebar { background-color: #f8f8f8; max-width: 250px; float: right; padding: 2px 7px; border: 1px solid #ccc; } div.topic { background-color: #f8f8f8; } table { border-collapse: collapse; margin: 0 -0.5em 0 -0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } /* ADMONITIONS AND WARNINGS ------------------------------------------------- */ /* Shared by admonitions and warnings */ div.admonition, div.warning { font-size: 0.9em; margin: 2em; padding: 0; /* border-radius: 6px; -moz-border-radius: 6px; -webkit-border-radius: 6px; */ } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; font-weight: bold; font-size: 1.1em; text-shadow: 0 1px rgba(0, 0, 0, 0.5); } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } /* Admonitions only */ div.admonition { border: 1px solid #609060; background-color: #e9ffe9; } div.admonition p.admonition-title { background-color: #70A070; border-bottom: 1px solid #609060; } /* Warnings only */ div.warning { border: 1px solid #900000; background-color: #ffe9e9; } div.warning p.admonition-title { background-color: #b04040; border-bottom: 1px solid #900000; } div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } .viewcode-back { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; } div.viewcode-block:target { background-color: #f4debf; border-top: 1px solid #ac9; border-bottom: 1px solid #ac9; } dl { margin: 1em 0 2.5em 0; } /* Highlight target when you click an internal link */ dt:target { background: #ffe080; } /* Don't highlight whole divs */ div.highlight { background: transparent; } /* But do highlight spans (so search results can be highlighted) */ span.highlight { background: #ffe080; } div.footer { background-color: #465158; color: #eeeeee; padding: 0 2em 2em 2em; clear: both; font-size: 0.8em; text-align: center; } p { margin: 0.8em 0 0.5em 0; } .section p img { margin: 1em 2em; } /* MOBILE LAYOUT -------------------------------------------------------------- */ @media screen and (max-width: 600px) { h1, h2, h3, h4, h5 { position: relative; } ul { padding-left: 1.75em; } div.bodywrapper a.headerlink, #indices-and-tables h1 a { color: #e6e6e6; font-size: 80%; float: right; line-height: 1.8; position: absolute; right: -0.7em; visibility: 
inherit; } div.bodywrapper h1 a.headerlink, #indices-and-tables h1 a { line-height: 1.5; } pre { font-size: 0.7em; overflow: auto; word-wrap: break-word; white-space: pre-wrap; } div.related ul { height: 2.5em; padding: 0; text-align: left; } div.related ul li { clear: both; color: #465158; padding: 0.2em 0; } div.related ul li:last-child { border-bottom: 1px dotted #8ca1af; padding-bottom: 0.4em; margin-bottom: 1em; width: 100%; } div.related ul li a { color: #465158; padding-right: 0; } div.related ul li a:hover { background: inherit; color: inherit; } div.related ul li.right { clear: none; padding: 0.65em 0; margin-bottom: 0.5em; } div.related ul li.right a { color: #fff; padding-right: 0.8em; } div.related ul li.right a:hover { background-color: #8ca1af; } div.body { clear: both; min-width: 0; word-wrap: break-word; } div.bodywrapper { margin: 0 0 0 0; } div.sphinxsidebar { float: none; margin: 0; width: auto; } div.sphinxsidebar input[type="text"] { height: 2em; line-height: 2em; width: 70%; } div.sphinxsidebar input[type="submit"] { height: 2em; margin-left: 0.5em; width: 20%; } div.sphinxsidebar p.searchtip { background: inherit; margin-bottom: 1em; } div.sphinxsidebar ul li, div.sphinxsidebar p.topless { white-space: normal; } .bodywrapper img { display: block; margin-left: auto; margin-right: auto; max-width: 100%; } div.documentwrapper { float: none; } div.admonition, div.warning, pre, blockquote { margin-left: 0em; margin-right: 0em; } .body p img { margin: 0; } #searchbox { background: transparent; } .related:not(:first-child) li { display: none; } .related:not(:first-child) li.right { display: block; } div.footer { padding: 1em; } .rtd_doc_footer .badge { float: none; margin: 1em auto; position: static; } .rtd_doc_footer .badge.revsys-inline { margin-right: auto; margin-bottom: 2em; } table.indextable { display: block; width: auto; } .indextable tr { display: block; } .indextable td { display: block; padding: 0; width: auto !important; } .indextable td dt { margin: 1em 0; } ul.search { margin-left: 0.25em; } ul.search li div.context { font-size: 90%; line-height: 1.1; margin-bottom: 1; margin-left: 0; } } invoke-2.2.0/sites/docs/api/000077500000000000000000000000001445356551000156465ustar00rootroot00000000000000invoke-2.2.0/sites/docs/api/__init__.rst000066400000000000000000000000771445356551000201430ustar00rootroot00000000000000============ ``__init__`` ============ .. automodule:: invoke invoke-2.2.0/sites/docs/api/collection.rst000066400000000000000000000002341445356551000205320ustar00rootroot00000000000000============== ``collection`` ============== .. autoclass:: invoke.collection.Collection :special-members: :exclude-members: __weakref__, __init__ invoke-2.2.0/sites/docs/api/config.rst000066400000000000000000000001001445356551000176340ustar00rootroot00000000000000========== ``config`` ========== .. automodule:: invoke.config invoke-2.2.0/sites/docs/api/context.rst000066400000000000000000000001041445356551000200570ustar00rootroot00000000000000=========== ``context`` =========== .. automodule:: invoke.context invoke-2.2.0/sites/docs/api/exceptions.rst000066400000000000000000000001201445356551000205520ustar00rootroot00000000000000============== ``exceptions`` ============== .. automodule:: invoke.exceptions invoke-2.2.0/sites/docs/api/executor.rst000066400000000000000000000001201445356551000202270ustar00rootroot00000000000000============ ``executor`` ============ .. 
autoclass:: invoke.executor.Executor invoke-2.2.0/sites/docs/api/loader.rst000066400000000000000000000003451445356551000176500ustar00rootroot00000000000000========== ``loader`` ========== .. autoclass:: invoke.loader.Loader :special-members: :exclude-members: __weakref__ .. autoclass:: invoke.loader.FilesystemLoader :special-members: :exclude-members: __weakref__ invoke-2.2.0/sites/docs/api/parser.rst000066400000000000000000000006211445356551000176730ustar00rootroot00000000000000========== ``parser`` ========== The command-line parsing framework is split up into a handful of sub-modules: - ``parser.argument`` - ``parser.context`` (not to be confused with the top level ``context``!) - ``parser.parser`` API docs for all are below. .. automodule:: invoke.parser.parser :member-order: bysource .. automodule:: invoke.parser.context .. automodule:: invoke.parser.argument invoke-2.2.0/sites/docs/api/program.rst000066400000000000000000000001041445356551000200420ustar00rootroot00000000000000=========== ``program`` =========== .. automodule:: invoke.program invoke-2.2.0/sites/docs/api/runners.rst000066400000000000000000000001401445356551000200670ustar00rootroot00000000000000=========== ``runners`` =========== .. automodule:: invoke.runners :member-order: bysource invoke-2.2.0/sites/docs/api/tasks.rst000066400000000000000000000000741445356551000175260ustar00rootroot00000000000000========= ``tasks`` ========= .. automodule:: invoke.tasks invoke-2.2.0/sites/docs/api/terminals.rst000066400000000000000000000001141445356551000203720ustar00rootroot00000000000000============= ``terminals`` ============= .. automodule:: invoke.terminals invoke-2.2.0/sites/docs/api/util.rst000066400000000000000000000002351445356551000173550ustar00rootroot00000000000000======== ``util`` ======== .. automodule:: invoke.util :exclude-members: ExceptionWrapper .. autoclass:: invoke.util.ExceptionWrapper :no-members: invoke-2.2.0/sites/docs/api/watchers.rst000066400000000000000000000001101445356551000202100ustar00rootroot00000000000000============ ``watchers`` ============ .. automodule:: invoke.watchers invoke-2.2.0/sites/docs/concepts/000077500000000000000000000000001445356551000167135ustar00rootroot00000000000000invoke-2.2.0/sites/docs/concepts/configuration.rst000066400000000000000000000424451445356551000223250ustar00rootroot00000000000000.. _configuration: ============= Configuration ============= Introduction ============ Invoke offers a multifaceted configuration mechanism allowing you to configure both core behavior and that of your tasks, via a hierarchy of configuration files, environment variables, :doc:`task namespaces ` and CLI flags. The end result of configuration seeking, loading, parsing & merging is a `.Config` object, which behaves like a (nested) Python dictionary. Invoke references this object when it runs (determining the default behavior of methods like `.Context.run`) and exposes it to users' tasks as `.Context.config` or as shorthand attribute access on the `.Context` itself. .. _config-hierarchy: The configuration hierarchy =========================== In brief, the order in which configuration values override one another is as follows: #. **Internal default values** for behaviors which are controllable via configuration. See :ref:`default-values` for details. #. **Collection-driven configurations** defined in tasks modules via `.Collection.configure`. (See :ref:`collection-configuration` below for details.) 
- Sub-collections' configurations get merged into the top level collection and the final result forms the basis of the overall configuration setup. #. **System-level configuration file** stored in ``/etc/``, such as ``/etc/invoke.yaml``. (See :ref:`config-files` for details on this and the other config-file entries.) #. **User-level configuration file** found in the running user's home directory, e.g. ``~/.invoke.yaml``. #. **Project-level configuration file** living next to your top level ``tasks.py``. For example, if your run of Invoke loads ``/home/user/myproject/tasks.py`` (see our docs on :doc:`the load process `), this might be ``/home/user/myproject/invoke.yaml``. #. **Environment variables** found in the invoking shell environment. - These aren't as strongly hierarchical as the rest, nor is the shell environment namespace owned wholly by Invoke, so we must rely on slightly verbose prefixing instead - see :ref:`env-vars` for details. #. **Runtime configuration file** whose path is given to :option:`-f`, e.g. ``inv -f /random/path/to/config_file.yaml``. This path may also be set via the ``INVOKE_RUNTIME_CONFIG`` env var. #. **Command-line flags** for certain core settings, such as :option:`-e`. #. **Modifications made by user code** at runtime. .. _default-values: Default configuration values ============================ Below is a list of all the configuration values and/or section Invoke itself uses to control behaviors such as `.Context.run`'s ``echo`` and ``pty`` flags, task deduplication, and so forth. .. note:: The storage location for these values is inside the `.Config` class, specifically as the return value of `.Config.global_defaults`; see its API docs for more details. For convenience, we refer to nested setting names with a dotted syntax, so e.g. ``foo.bar`` refers to what would be (in a Python config context) ``{'foo': {'bar': }}``. Typically, these can be read or set on `.Config` and `.Context` objects using attribute syntax, which looks nearly identical: ``c.foo.bar``. - The ``tasks`` config tree holds settings relating to task execution. - ``tasks.dedupe`` controls :ref:`deduping` and defaults to ``True``. It can also be overridden at runtime via :option:`--no-dedupe`. - ``tasks.auto_dash_names`` controls whether task and collection names have underscores turned to dashes on the CLI. Default: ``True``. See also :ref:`dashes-vs-underscores`. - ``tasks.collection_name`` controls the Python import name sought out by :ref:`collection discovery `, and defaults to ``"tasks"``. - ``tasks.executor_class`` allows users to override the class instantiated and used for task execution. Must be a fully-qualified dotted path of the form ``module(.submodule...).class``, where all but ``.class`` will be handed to `importlib.import_module`, and ``class`` is expected to be an attribute on that resulting module object. Defaults to ``None``, meaning to use the running `.Program` object's ``executor_class`` attribute. .. warning:: Take care if using this setting in tandem with :ref:`custom program binaries `, since custom programs may specify their own default executor class (which your use of this setting will override!) and assume certain behaviors stemming from that. - ``tasks.ignore_unknown_help`` (default: ``False``) lets users disable "help keys were supplied for nonexistent arguments" errors. Normally, Invoke assumes such a situation implies a typo in the ``help`` argument to ``@task``, but sometimes users have good reasons for this. 
- ``tasks.search_root`` allows overriding the default :ref:`collection discovery ` root search location. It defaults to ``None``, which indicates to use the executing process' current working directory. - The ``run`` tree controls the behavior of `.Runner.run`. Each member of this tree (such as ``run.echo`` or ``run.pty``) maps directly to a `.Runner.run` keyword argument of the same name; see that method's docstring for details on what these settings do & what their default values are. - The ``runners`` tree controls _which_ runner classes map to which execution contexts; if you're using Invoke by itself, this will only tend to have a single member, ``runners.local``. Client libraries may extend it with additional key/value pairs, such as ``runners.remote``. - The ``sudo`` tree controls the behavior of `.Context.sudo`: - ``sudo.password`` controls the autoresponse password submitted to sudo's password prompt. Default: ``None``. .. warning:: While it's possible to store this setting, like any other, in :doc:`configuration files ` -- doing so is inherently insecure. We highly recommend filling this config value in at runtime from a secrets management system of some kind. - ``sudo.prompt`` holds the sudo password prompt text, which is both supplied to ``sudo -p``, and searched for when performing :doc:`auto-response `. Default: ``[sudo] password:``. - A top level config setting, ``debug``, controls whether debug-level output is logged; it defaults to ``False``. ``debug`` can be toggled via the :option:`-d` CLI flag, which enables debugging after CLI parsing runs. It can also be toggled via the ``INVOKE_DEBUG`` environment variable which - unlike regular env vars - is honored from the start of execution and is thus useful for troubleshooting parsing and/or config loading. - A small config tree, ``timeouts``, holds various kinds of timeout controls. At present, for Invoke, this only holds a ``command`` subkey, which controls subprocess execution timeouts. - Client code often adds more to this tree, and Invoke itself may add more in the future as well. .. _config-files: Configuration files =================== Loading ------- For each configuration file location mentioned in the previous section, we search for files ending in ``.yaml``, ``.yml``, ``.json`` or ``.py`` (**in that order!**), load the first one we find, and ignore any others that might exist. For example, if Invoke is run on a system containing both ``/etc/invoke.yml`` *and* ``/etc/invoke.json``, **only the YAML file will be loaded**. This helps keep things simple, both conceptually and in the implementation. Format ------ Invoke's configuration allows arbitrary nesting, and thus so do our config file formats. All three of the below examples result in a configuration equivalent to ``{'debug': True, 'run': {'echo': True}}``: - **YAML** .. code-block:: yaml debug: true run: echo: true - **JSON** .. code-block:: javascript { "debug": true, "run": { "echo": true } } - **Python**:: debug = True run = { "echo": True } For further details, see these languages' own documentation. .. _env-vars: Environment variables ===================== Environment variables are a bit different from other configuration-setting methods, since they don't provide a clean way to nest configuration keys, and are also implicitly shared amongst the entire system's installed application base. 
In addition, due to implementation concerns, env vars must be pre-determined by the levels below them in the config hierarchy (in other words - env vars may only be used to override existing config values). If you need Invoke to understand a ``FOOBAR`` environment variable, you must first declare a ``foobar`` setting in a configuration file or in your task collections. Basic rules ----------- To mitigate the shell namespace problem, we simply prefix all our env vars with ``INVOKE_``. Nesting is performed via underscore separation, so a setting that looks like e.g. ``{'run': {'echo': True}}`` at the Python level becomes ``INVOKE_RUN_ECHO=1`` in a typical shell. See :ref:`env-var-nesting` below for more on this. Type casting ------------ Since env vars can only be used to override existing settings, the previous value of a given setting is used as a guide in casting the strings we get back from the shell: - If the current value is a Unicode string, it is replaced with the value from the environment, with no casting whatsoever; - If the current value is ``None``, it too is replaced with the string from the environment; - Booleans are set as follows: ``0`` and the empty value/string (e.g. ``SETTING=``, or ``unset SETTING``, or etc) evaluate to ``False``, and any other value evaluates to ``True``. - Lists and tuples are currently unsupported and will raise an exception; - In the future we may implement convenience transformations, such as splitting on commas to form a list; however since users can always perform such operations themselves, it may not be a high priority. - All other types - integers, longs, floats, etc - are simply used as constructors for the incoming value. - For example, a ``foobar`` setting whose default value is the integer ``1`` will run all env var inputs through `int`, and thus ``FOOBAR=5`` will result in the Python value ``5``, not ``"5"``. .. _env-var-nesting: Nesting vs underscored names ---------------------------- Since environment variable keys are single strings, we must use some form of string parsing to allow access to nested configuration settings. As mentioned above, in basic use cases this just means using an underscore character: ``{'run': {'echo': True}}`` becomes ``INVOKE_RUN_ECHO=1``. However, ambiguity is introduced when the settings names themselves contain underscores: is ``INVOKE_FOO_BAR=baz`` equivalent to ``{'foo': {'bar': 'baz'}}``, or to ``{'foo_bar': 'baz'}``? Thankfully, because env vars can only be used to modify settings declared at the Python level or in config files, we look at the current state of the config to determine the answer. There is still a corner case where *both* possible interpretations exist as valid config paths (e.g. ``{'foo': {'bar': 'default'}, 'foo_bar': 'otherdefault'}``). In this situation, we honor the `Zen of Python `_ and refuse to guess; an error is raised instead, counseling users to modify their configuration layout or avoid using env vars for the setting in question. .. _collection-configuration: `.Collection`-based configuration ================================= `.Collection` objects may contain a config mapping, set via `.Collection.configure`, and (as per :ref:`the hierarchy `) this typically forms the lowest level of configuration in the system. When collections are :doc:`nested `, configuration is merged 'downwards' by default: when conflicts arise, outer namespaces closer to the root will win, versus inner ones closer to the task being invoked. .. 
note:: 'Inner' tasks here are specifically those on the path from the root to the one housing the invoked task. 'Sibling' subcollections are ignored. A quick example of what this means:: from invoke import Collection, task # This task & collection could just as easily come from # another module somewhere. @task def mytask(c): print(c['conflicted']) inner = Collection('inner', mytask) inner.configure({'conflicted': 'default value'}) # Our project's root namespace. ns = Collection(inner) ns.configure({'conflicted': 'override value'}) The result of calling ``inner.mytask``:: $ inv inner.mytask override value Example of real-world config use ================================ The previous sections had small examples within them; this section provides a more realistic-looking set of examples showing how the config system works. Setup ----- We'll start out with semi-realistic tasks that hardcode their values, and build up to using the various configuration mechanisms. A small module for building `Sphinx `_ docs might begin like this:: from invoke import task @task def clean(c): c.run("rm -rf docs/_build") @task def build(c): c.run("sphinx-build docs docs/_build") Then maybe you refactor the build target:: target = "docs/_build" @task def clean(c): c.run("rm -rf {}".format(target)) @task def build(c): c.run("sphinx-build docs {}".format(target)) We can also allow runtime parameterization:: default_target = "docs/_build" @task def clean(c, target=default_target): c.run("rm -rf {}".format(target)) @task def build(c, target=default_target): c.run("sphinx-build docs {}".format(target)) This task module works for a single set of users, but what if we want to allow reuse? Somebody may want to use this module with a different default target. Using the configuration data (made available via the context arg) to configure these settings is usually the better solution [1]_. Configuring via task collection ------------------------------- The configuration `setting <.Collection.configure>` and `getting <.Context.config>` APIs enable moving otherwise 'hardcoded' default values into a config structure which downstream users are free to redefine. Let's apply this to our example. First we add an explicit namespace object:: from invoke import Collection, task default_target = "docs/_build" @task def clean(c, target=default_target): c.run("rm -rf {}".format(target)) @task def build(c, target=default_target): c.run("sphinx-build docs {}".format(target)) ns = Collection(clean, build) Then we can move the default build target value into the collection's default configuration, and refer to it via the context. At this point we also change our kwarg default value to be ``None`` so we can determine whether or not a runtime value was given. The result:: @task def clean(c, target=None): if target is None: target = c.sphinx.target c.run("rm -rf {}".format(target)) @task def build(c, target=None): if target is None: target = c.sphinx.target c.run("sphinx-build docs {}".format(target)) ns = Collection(clean, build) ns.configure({'sphinx': {'target': "docs/_build"}}) The result isn't significantly more complex than what we began with, and as we'll see next, it's now trivial for users to override your defaults in various ways. Configuration overriding ------------------------ The lowest-level override is, of course, just modifying the local `.Collection` tree into which a distributed module has been imported. E.g. 
if the above module is distributed as ``myproject.docs``, someone can define a ``tasks.py`` that does this:: from invoke import Collection, task from myproject import docs @task def mylocaltask(c): # Some local stuff goes here pass # Add 'docs' to our local root namespace, plus our own task ns = Collection(mylocaltask, docs) And then they can add this to the bottom:: # Our docs live in 'built_docs', not 'docs/_build' ns.configure({'sphinx': {'target': "built_docs"}}) Now we have a ``docs`` sub-namespace whose build target defaults to ``built_docs`` instead of ``docs/_build``. Runtime users can still override this via flags (e.g. ``inv docs.build --target='some/other/dir'``) just as before. If you prefer configuration files over in-Python tweaking of your namespace tree, that works just as well; instead of adding the line above to the previous snippet, drop this into a file next to ``tasks.py`` named ``invoke.yaml``:: sphinx: target: built_docs For this example, that sort of local-to-project conf file makes the most sense, but don't forget that the :ref:`config hierarchy ` offers additional configuration methods which may be suitable depending on your needs. .. rubric:: Footnotes .. [1] Copying and modifying the file breaks code reuse; overriding the module-level ``default_target`` variable won't play well with concurrency; wrapping the tasks with different default arguments works but is fragile and adds boilerplate. invoke-2.2.0/sites/docs/concepts/invoking-tasks.rst000066400000000000000000000405011445356551000224140ustar00rootroot00000000000000.. _invoking-tasks: ============== Invoking tasks ============== This page explains how to invoke your tasks on the CLI, both in terms of parser mechanics (how your tasks' arguments are exposed as command-line options) and execution strategies (which tasks actually get run, and in what order). (For details on Invoke's core flags and options, see :doc:`/invoke`.) .. contents:: :local: .. _basic-cli-layout: Basic command line layout ========================= Invoke may be executed as ``invoke`` (or ``inv`` for short) and its command line layout looks like this:: $ inv [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts] Put plainly, Invoke's `CLI parser <.Parser>` splits your command line up into multiple "`parser contexts <.ParserContext>`" which allows it to reason about the args and options it will accept: - Before any task names are given, the parser is in the "core" parse context, and looks for core options and flags such as :option:`--echo`, :option:`--list` or :option:`--help`. - Any non-argument-like token (such as ``mytask``) causes a switch into a per-task context (or an error, if no task matching that name seems to exist in the :doc:`loaded collection `). - At this point, argument-like tokens are expected to correspond to the arguments for the previously named task (see :ref:`task-arguments`). - Then this cycle repeats infinitely, allowing chained execution of arbitrary numbers of tasks. (In practice, most users only execute one or two at a time.) For the core arguments and flags, see :doc:`/invoke`; for details on how your tasks affect the CLI, read onwards. .. note:: There is a minor convenience-minded exception to how parse contexts behave: core options *may* also be given inside per-task contexts, *if and only if* there is no conflict with similarly-named/prefixed arguments of the being-parsed task.
For example, ``invoke mytask --echo`` will behave identically to ``invoke --echo mytask``, *unless* ``mytask`` has its own ``echo`` flag (in which case that flag is handed to the task context, as normal). Similarly, ``invoke mytask -e`` will turn on command echoing too, unless ``mytask`` has its own argument whose shortflag ends up set to ``-e`` (e.g. ``def mytask(env)``). .. _task-arguments: Task command-line arguments =========================== The simplest task invocation, for a task requiring no parameterization:: $ inv mytask Tasks may take parameters in the form of flag arguments:: $ inv build --format=html $ inv build --format html $ inv build -f pdf $ inv build -f=pdf Note that both long and short style flags are supported, and that equals signs are optional in both cases. Boolean options are simple flags with no arguments:: $ inv build --progress-bar Naturally, more than one flag may be given at a time:: $ inv build --progress-bar -f pdf Type casting ------------ Natively, a command-line string is just that -- a string -- requiring some leaps of logic to arrive at any non-string values on the Python end. Invoke has a number of these tricks already at hand, and more will be implemented in the future. Currently: - Arguments with default values use those default values as a type hint, so ``def mytask(c, count=1)`` will see ``inv mytask --count=5`` and result in the Python integer value ``5`` instead of the string ``"5"``. - Default values of ``None`` are effectively the same as having no default value at all - no type casting occurs and you're left with a string. - The primary exception to the previous rule is booleans: default values of ``True`` or ``False`` cause those arguments to show up as actual non-value-taking flags (``--argname`` to set the value to ``True`` if the default was ``False``, or ``--no-argment`` in the opposite case). See :ref:`boolean-flags` for more examples. - List values (which you wouldn't want to set as an argument's default value anyways -- it's a common Python misstep) are served by a special ``@task`` flag - see :ref:`iterable-flag-values` below. - There's currently no way to set other compound values (such as dicts) on the command-line; solving this more complex problem is left as an exercise to the reader (though we may add helpers for such things in the future). Per-task help / printing available flags ---------------------------------------- To get help for a specific task, you can give the task name as an argument to the core ``--help``/``-h`` option, or give ``--help``/``-h`` after the task (which will trigger custom-to-``help`` behavior where the task name itself is given to ``--help`` as its argument value). When help is requested, you'll see the task's docstring (if any) and per-argument/flag help output:: $ inv --help build # or invoke build --help Docstring: none Options for 'build': -f STRING, --format=STRING Which build format type to use -p, --progress-bar Display progress bar Globbed short flags ------------------- Boolean short flags may be combined into one flag expression, so that e.g.:: $ inv build -qv is equivalent to (and expanded into, during parsing):: $ inv build -q -v If the first flag in a globbed short flag token is not a boolean but takes a value, the rest of the glob is taken to be the value instead. E.g.:: $ inv build -fpdf is expanded into:: $ inv build -f pdf and **not**:: $ inv build -f -p -d -f .. 
_optional-values: Optional flag values -------------------- You saw a hint of this with ``--help`` specifically, but non-core options may also take optional values, if declared as ``optional``. For example, say your task has a ``--log`` flag that activates logging:: $ inv compile --log but you also want it to be configurable regarding *where* to log:: $ inv compile --log=foo.log You could implement this with an additional argument (e.g. ``--log`` and ``--log-location``) but sometimes the concise API is the more useful one. To enable this, specify which arguments are of this 'hybrid' optional-value type inside ``@task``:: @task(optional=['log']) def compile(c, log=None): if log: log_file = '/var/log/my.log' # Value was given, vs just-True if isinstance(log, str): log_file = log # Replace w/ your actual log setup... set_log_destination(log_file) # Do things that might log here... When optional flag values are used, the values seen post-parse follow these rules: * If the flag is not given at all (``invoke compile``) the default value is filled in as normal. * If it is given with a value (``invoke compile --log=foo.log``) then the value is stored normally. * If the flag is given with no value (``invoke compile --log``), it is treated as if it were a ``bool`` and set to ``True``. Resolving ambiguity ~~~~~~~~~~~~~~~~~~~ There are a number of situations where ambiguity could arise for a flag that takes an optional value: * When a task takes positional arguments and they haven't all been filled in by the time the parser arrives at the optional-value flag; * When the token following one of these flags looks like it is itself a flag; or * When that token has the same name as another task. In most of these situations, Invoke's parser will `refuse the temptation to guess `_ and raise an error. However, in the case where the ambiguous token is flag-like, the current parse context is checked to resolve the ambiguity: - If the token is an otherwise legitimate argument, it is assumed that the user meant to give that argument immediately after the current one, and no optional value is set. - E.g. in ``invoke compile --log --verbose`` (assuming ``--verbose`` is another legit argument for ``compile``) the parser decides the user meant to give ``--log`` without a value, and followed it up with the ``--verbose`` flag. - Otherwise, the token is interpreted literally and stored as the value for the current flag. - E.g. if ``--verbose`` is *not* a legitimate argument for ``compile``, then ``invoke compile --log --verbose`` causes the parser to assign ``"--verbose"`` as the value given to ``--log``. (This will probably cause other problems in our contrived use case, but it illustrates our point.) .. _iterable-flag-values: Iterable flag values -------------------- A not-uncommon use case for CLI programs is the desire to build a list of values for a given option, instead of a single value. While this *can* be done via sub-string parsing -- e.g. having users invoke a command with ``--mylist item1,item2,item3`` and splitting on the comma -- it's often preferable to specify the option multiple times and store the values in a list (instead of overwriting or erroring.) 
In Invoke, this is enabled by hinting to the parser that one or more task arguments are ``iterable`` in nature (similar to how one specifies ``optional`` or ``positional``):: @task(iterable=['my_list']) def mytask(c, my_list): print(my_list) When not given at all, the default value for ``my_list`` will be an empty list; otherwise, the result is a list, appending each value seen, in order, without any other manipulation (so no deduplication, etc):: $ inv mytask [] $ inv mytask --my-list foo ['foo'] $ inv mytask --my-list foo --my-list bar ['foo', 'bar'] $ inv mytask --my-list foo --my-list bar --my-list foo ['foo', 'bar', 'foo'] .. _incrementable-flag-values: Incrementable flag values ------------------------- This is arguably a sub-case of :ref:`iterable flag values ` (seen above) - it has the same core interface of "give a CLI argument multiple times, and have that do something other than error or overwrite a single value." However, 'incrementables' (as you may have guessed) increment an integer instead of building a list of strings. This is commonly found in verbosity flags and similar functionality. An example of exactly that:: @task(incrementable=['verbose']) def mytask(c, verbose=0): print(verbose) And its use:: $ inv mytask 0 $ inv mytask --verbose 1 $ inv mytask -v 1 $ inv mytask -vvv 3 Happily, because in Python ``0`` is 'falsey' and ``1`` (or any other number) is 'truthy', this functions a lot like a boolean flag as well, at least if one defaults it to ``0``. .. note:: You may supply any integer default value for such arguments (it simply serves as the starting value), but take care that consumers of the argument are written understanding that it is always going to appear 'truthy' unless it's ``0``! Dashes vs underscores in flag names ----------------------------------- In Python, it's common to use ``underscored_names`` for keyword arguments, e.g.:: @task def mytask(c, my_option=False): pass However, the typical convention for command-line flags is dashes, which aren't valid in Python identifiers:: $ inv mytask --my-option Invoke works around this by automatically generating dashed versions of underscored names, when it turns your task function signatures into command-line parser flags. Therefore, the two examples above actually work fine together -- ``my_option`` ends up mapping to ``--my-option``. In addition, leading (``_myopt``) and trailing (``myopt_``) underscores are ignored, since ``invoke ---myopt`` and ``invoke --myopt-`` don't make much sense. .. _boolean-flags: Automatic Boolean inverse flags ------------------------------- Boolean flags tend to work best when setting something that is normally ``False``, to ``True``:: $ inv mytask --yes-please-do-x However, in some cases, you want the opposite - a default of ``True``, which can be easily disabled. For example, colored output:: @task def run_tests(c, color=True): # ... Here, what we really want on the command line is a ``--no-color`` flag that sets ``color=False``. Invoke handles this for you: when setting up CLI flags, booleans which default to ``True`` generate a ``--no-`` flag instead. .. _how-tasks-run: How tasks run ============= Base case --------- In the simplest case, a task with no pre- or post-tasks runs one time. Example:: @task def hello(c): print("Hello, world!") Execution:: $ inv hello Hello, world! ..
_pre-post-tasks: Pre- and post-tasks ------------------- Tasks that should always have another task executed before or after them may use the ``@task`` decorator's ``pre`` and/or ``post`` kwargs, like so:: @task def clean(c): print("Cleaning") @task def publish(c): print("Publishing") @task(pre=[clean], post=[publish]) def build(c): print("Building") Execution:: $ inv build Cleaning Building Publishing These keyword arguments always take iterables. As a convenience, pre-tasks (and pre-tasks only) may be given as positional arguments, in a manner similar to build systems like ``make``. E.g. we could present part of the above example as:: @task def clean(c): print("Cleaning") @task(clean) def build(c): print("Building") As before, ``invoke build`` would cause ``clean`` to run, then ``build``. Recursive/chained pre/post-tasks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pre-tasks of pre-tasks will also be invoked (as will post-tasks of pre-tasks, pre-tasks of post-tasks, etc) in a depth-first manner, recursively. Here's a more complex (if slightly contrived) tasks file:: @task def clean_html(c): print("Cleaning HTML") @task def clean_tgz(c): print("Cleaning .tar.gz files") @task(clean_html, clean_tgz) def clean(c): print("Cleaned everything") @task def makedirs(c): print("Making directories") @task(clean, makedirs) def build(c): print("Building") @task(build) def deploy(c): print("Deploying") With a depth-first behavior, the below is hopefully intuitive to most users:: $ inv deploy Cleaning HTML Cleaning .tar.gz files Cleaned everything Making directories Building Deploying .. _parameterizing-pre-post-tasks: Parameterizing pre/post-tasks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, pre- and post-tasks are executed with no arguments, even if the task triggering their execution was given some. When this is not suitable, you can wrap the task objects with `~.tasks.call` objects which allow you to specify a call signature:: @task def clean(c, which=None): which = which or 'pyc' print("Cleaning {}".format(which)) @task(pre=[call(clean, which='all')]) # or call(clean, 'all') def build(c): print("Building") Example output:: $ inv build Cleaning all Building .. _deduping: Task deduplication ------------------ By default, any task that would run more than once during a session (due e.g. to inclusion in pre/post tasks), will only be run once. Example task file:: @task def clean(c): print("Cleaning") @task(clean) def build(c): print("Building") @task(build) def package(c): print("Packaging") With deduplication turned off (see below), the above would execute ``clean`` -> ``build`` -> ``build`` again -> ``package``. With deduplication, the double ``build`` does not occur:: $ inv build package Cleaning Building Packaging .. note:: Parameterized pre-tasks (using `~.tasks.call`) are deduped based on their argument lists. For example, if ``clean`` was parameterized and hooked up as a pre-task in two different ways - e.g. ``call(clean, 'html')`` and ``call(clean, 'all')`` - they would not get deduped should both end up running in the same session. However, two separate references to ``call(clean, 'html')`` *would* become deduplicated. Disabling deduplication ~~~~~~~~~~~~~~~~~~~~~~~ If you prefer your tasks to run every time no matter what, you can give the ``--no-dedupe`` core CLI option at runtime, or set the ``tasks.dedupe`` :doc:`config setting ` to ``False``.
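For example, to turn deduplication off persistently for a whole project, a config file works well. A minimal sketch, assuming the YAML config format and a project-level file named ``invoke.yaml`` sitting next to your ``tasks.py``::

    tasks:
      dedupe: false
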
While it doesn't make a ton of real-world sense, let's imagine we wanted to apply ``--no-dedupe`` to the above example; we'd see the following output:: $ inv --no-dedupe build package Cleaning Building Building Packaging The build step is now running twice. invoke-2.2.0/sites/docs/concepts/library.rst000066400000000000000000000220221445356551000211070ustar00rootroot00000000000000========================= Using Invoke as a library ========================= While most of our documentation involves the user/CLI facing use cases of task management and command execution, Invoke was designed for its constituent parts to be usable independently by advanced users - either out of the box or with a minimum of extra work. CLI parsing, subprocess command execution, task organization, etc, are all written as broadly separated concerns. This document outlines use cases already known to work (because downstream tools like `Fabric `_ are already utilizing them). .. _reusing-as-a-binary: Reusing Invoke's CLI module as a distinct binary ================================================ A major use case is distribution of your own program using Invoke under the hood, bound to a different binary name, and usually setting a specific task :doc:`namespace ` as the default. (This maps somewhat closely to things like ``argparse`` from the standard library.) In some cases, removing, replacing and/or adding core CLI flags is also desired. Getting set up -------------- Say you want to distribute a test runner called ``tester`` offering two subcommands, ``unit`` and ``integration``, such that users could ``pip install tester`` and have access to commands like ``tester unit``, ``tester integration``, or ``tester integration --fail-fast``. First, as with any distinct Python package providing CLI 'binaries', you'd inform your ``setup.py`` of your entrypoint:: setup( name='tester', version='0.1.0', packages=['tester'], install_requires=['invoke'], entry_points={ 'console_scripts': ['tester = tester.main:program.run'] } ) .. note:: This is just an example snippet and is not a fully valid ``setup.py``; if you don't know how Python packaging works, a good starting place is `the Python Packaging User's Guide `_. Nothing here is specific to Invoke - it's a standard way of telling Python to install a ``tester`` script that executes the ``run`` method of a ``program`` object defined inside the module ``tester.main``. Creating a ``Program`` ---------------------- In our ``tester/main.py``, we start out importing Invoke's public CLI functionality:: from invoke import Program Then we define the ``program`` object we referenced in ``setup.py``, which is a simple `.Program` to do the heavy lifting, giving it our version number for starters:: program = Program(version='0.1.0') At this point, installing ``tester`` would give you the same functionality as Invoke's :doc:`built-in CLI tool `, except named ``tester`` and exposing its own version number:: $ tester --version Tester 0.1.0 $ tester --help Usage: tester [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts] Core options: ... core Invoke options here ... $ tester --list Can't find any collection named 'tasks'! This doesn't do us much good yet - there aren't any subcommands (and our users don't care about arbitrary 'tasks', so Invoke's own default ``--help`` and ``--list`` output isn't a good fit). 
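(A quick aside: you can exercise the program object without installing the package at all. The below is a sketch rather than gospel, in that it assumes `.Program.run`'s optional ``argv`` and ``exit`` parameters behave as in current Invoke releases, where ``exit=False`` skips the usual ``SystemExit``.)

::

    >>> from tester.main import program
    >>> program.run("tester --version", exit=False)
    Tester 0.1.0
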
Specifying subcommands
----------------------

For ``tester`` to expose ``unit`` and ``integration`` subcommands, we need
to define them, in a regular Invoke tasks module or :doc:`namespace
</concepts/namespaces>`. For our example, we'll just create
``tester/tasks.py`` (but as you'll see in a moment, this too is arbitrary
and can be whatever you like)::

    from invoke import task

    @task
    def unit(c):
        print("Running unit tests!")

    @task
    def integration(c):
        print("Running integration tests!")

As described in :doc:`/concepts/namespaces`, you can arrange this module
however you want - the above snippet uses an implicit namespace for
brevity's sake.

.. note::
    It's important to realize that there's nothing special about these
    "subcommands" - you could run them just as easily with vanilla Invoke,
    e.g. via ``invoke --collection=tester.tasks --list``.

Now the useful part: telling our custom `.Program` that this namespace of
tasks should be used as the subcommands for ``tester``, via the
``namespace`` kwarg::

    from invoke import Collection, Program
    from tester import tasks

    program = Program(namespace=Collection.from_module(tasks), version='0.1.0')

The result? ::

    $ tester --version
    Tester 0.1.0

    $ tester --help
    Usage: tester [--core-opts] <subcommand> [--subcommand-opts] ...

    Core options:

        ... core options here, minus task-related ones ...

    Subcommands:

        unit
        integration

    $ tester --list
    No idea what '--list' is!

    $ tester unit
    Running unit tests!

Notice how the 'usage' line changed (to specify 'subcommands' instead of
'tasks'); the list of specific subcommands is now printed as part of
``--help``; and ``--list`` has been removed from the options.

You can enable :ref:`tab-completion` for your distinct binary and
subcommands.

Modifying core parser arguments
-------------------------------

A common need for this use case is tweaking the core parser arguments.
`.Program` makes it easy: default core `Arguments <.Argument>` are returned
by `.Program.core_args`. Extend this method's return value with ``super``
and you're done::

    # Presumably, this is your setup.py-designated CLI module...

    from invoke import Program, Argument

    class MyProgram(Program):
        def core_args(self):
            core_args = super().core_args()
            extra_args = [
                Argument(names=('foo', 'f'), help="Foo the bars"),
                # ...
            ]
            return core_args + extra_args

    program = MyProgram()

.. warning::
    We don't recommend *omitting* any of the existing core arguments; a lot
    of basic functionality relies on their existence, even when left to
    default values.

.. _customizing-config-defaults:

Customizing the configuration system's defaults
===============================================

Besides the CLI-oriented content of the previous section, another area of
functionality that frequently needs updating when redistributing an Invoke
codebase (CLI or no CLI) is configuration.

There are typically two concerns here:

- Configuration filenames and the env var prefix - crucial if you ever
  expect your users to use the configuration system;
- Default configuration values - less critical (most defaults aren't
  labeled with anything Invoke-specific) but still sometimes desirable.

.. note::
    Both of these involve subclassing `.Config` (and, if using the CLI
    machinery, informing your `.Program` to use that subclass instead of the
    default one.)

Changing filenames and/or env var prefix
----------------------------------------

By default, Invoke's config system looks for files like
``/etc/invoke.yaml``, ``~/.invoke.json``, etc.
If you're distributing client code named something else, like the ``Tester``
example earlier, you might instead want the config system to load
``/etc/tester.json`` or ``$CWD/tester.py``. Similarly, the environment
variable config level looks for env vars like ``INVOKE_RUN_ECHO``; you might
prefer ``TESTER_RUN_ECHO``.

There are a few `.Config` attributes controlling these values:

- ``prefix``: A generic, catchall prefix used directly as the file prefix,
  and used via all-caps as the env var prefix;
- ``file_prefix``: For overriding just the filename prefix - otherwise, it
  defaults to the value of ``prefix``;
- ``env_prefix``: For overriding just the env var prefix - as you might have
  guessed, it too defaults to the value of ``prefix``.

Continuing our 'Tester' example, you'd do something like this::

    from invoke import Config

    class TesterConfig(Config):
        prefix = 'tester'

Or, to seek ``tester.yaml`` as before, but ``TEST_RUN_ECHO`` instead of
``TESTER_RUN_ECHO``::

    class TesterConfig(Config):
        prefix = 'tester'
        env_prefix = 'TEST'

Modifying default config values
-------------------------------

Default config values are simple - they're just the return value of the
staticmethod `.Config.global_defaults`, so override that and return whatever
you like - ideally something based on the superclass' values, as many
defaults are assumed to exist by the rest of the system. (The helper
function `invoke.config.merge_dicts` can be useful here.)

For example, say you want Tester to always echo shell commands by default
when your codebase calls `.Context.run`::

    from invoke import Program
    from invoke.config import Config, merge_dicts

    class TesterConfig(Config):
        @staticmethod
        def global_defaults():
            their_defaults = Config.global_defaults()
            my_defaults = {
                'run': {
                    'echo': True,
                },
            }
            return merge_dicts(their_defaults, my_defaults)

    program = Program(config_class=TesterConfig, version='0.1.0')

For reference, Invoke's own base defaults (the...default defaults, you could
say) are documented at :ref:`default-values`.

invoke-2.2.0/sites/docs/concepts/loading.rst

.. _loading-collections:

===================
Loading collections
===================

The core of Invoke's execution model involves one or more Collection
objects. While these may be created programmatically, in typical use Invoke
will create them for you from Python modules it finds or is told to use.

.. _collection-discovery:

Task module discovery
=====================

With no other configuration, simply calling ``invoke`` will look for a
single Python module or package named ``tasks``, and will treat it as the
root namespace.

``tasks`` (or any other name given via :ref:`loading configuration options
<configuring-loading>`) is searched for in the following ways:

* First, if a valid tasks module by that name already exists on Python's
  ``sys.path``, no more searching is done -- that module is selected.
* Failing that, search towards the root of the local filesystem, starting
  with the user's current working directory (``os.getcwd``) and try
  importing again with each directory temporarily added to ``sys.path``.

  * Due to how Python's import machinery works, this approach will always
    favor a package directory (``tasks/`` containing an ``__init__.py``)
    over a module file (``tasks.py``) in the same location.
  * If a candidate is found and successfully imported, its parent directory
    will **stay** on ``sys.path`` during the rest of the Python session --
    this allows task code to make convenient assumptions concerning sibling
    modules' importability.

Candidate modules/packages are introspected to make sure they can actually
be used as valid task collections. Any that fail are discarded, the
``sys.path`` munging done to import them is reverted, and the search
continues.

.. _configuring-loading:

Configuring the loading process
===============================

You can configure the above behavior, requesting that Invoke alter the
collection name searched for and/or the path where filesystem-level loading
starts looking.

For example, you may already have a project-level ``tasks.py`` that you
can't easily rename; or you may want to host a number of tasks collections
stored outside the project root and make it easy to switch between them; or
any number of reasons.

Both the sought collection name and the search root can be specified via
configuration file options or as runtime CLI flags:

- **Change the collection name**: Set the ``tasks.collection_name``
  configuration option, or use :option:`--collection`. It should be a Python
  module name and not a file name (so ``mytasks``, not ``mytasks.py`` or
  ``mytasks/``.)
- **Change the root search path**: Configure ``tasks.search_root`` or use
  :option:`--search-root`. This value may be any valid directory path.
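For example, both values could be pinned via a project-level config file (a
minimal sketch -- the module name and directory are made up for
illustration)::

    # invoke.yaml, at the project root
    tasks:
        collection_name: mytasks
        search_root: /opt/mytasks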
invoke-2.2.0/sites/docs/concepts/namespaces.rst

.. _task-namespaces:

=======================
Constructing namespaces
=======================

The :doc:`base case </concepts/loading>` of loading a single module of tasks
works fine initially, but advanced users typically need more organization,
such as separating tasks into a tree of nested namespaces.

The `.Collection` class provides an API for organizing tasks (and their
configuration) into a tree-like structure. When referenced by strings (e.g.
on the CLI or in pre/post hooks) tasks in nested namespaces use a
dot-separated syntax, e.g. ``docs.build``.

In this section, we show how building namespaces with this API is flexible
but also allows following Python package layouts with minimal boilerplate.

Starting out
============

One unnamed ``Collection`` is always the namespace root; in the implicit
base case, Invoke creates one for you from the tasks in your tasks module.
Create your own, named ``namespace`` or ``ns``, to set up an explicit
namespace (i.e. to skip the default "pull in all Task objects" behavior)::

    from invoke import Collection

    ns = Collection()
    # or: namespace = Collection()

Add tasks with `.Collection.add_task`. `~.Collection.add_task` can take a
`.Task` object, such as those generated by the `.task` decorator::

    from invoke import Collection, task

    @task
    def release(c):
        c.run("python setup.py sdist register upload")

    ns = Collection()
    ns.add_task(release)

Our available tasks list now looks like this::

    $ invoke --list
    Available tasks:

        release

Naming your tasks
=================

By default, a task's function name is used as its namespace identifier, but
you may override this by giving a ``name`` argument to either `@task
<.task>` (i.e. at definition time) or `.Collection.add_task` (i.e. at
binding/attachment time).

For example, say you have a variable name collision in your tasks module --
perhaps you want to expose a ``dir`` task, which shadows a Python builtin.
Naming your function itself ``dir`` is a bad idea, but you can name the
function something like ``dir_`` and then tell ``@task`` the "real" name::

    @task(name='dir')
    def dir_(c):
        # ...

On the other side, you might have obtained a task object that doesn't fit
with the names you want in your namespace, and can rename it at attachment
time. Maybe we want to rename our ``release`` task to be called ``deploy``
instead::

    ns = Collection()
    ns.add_task(release, name='deploy')

The result::

    $ invoke --list
    Available tasks:

        deploy

.. note::
    The ``name`` kwarg is the 2nd argument to `~.Collection.add_task`, so
    those in a hurry can phrase it as::

        ns.add_task(release, 'deploy')

Aliases
-------

Tasks may have additional names or aliases, given as the ``aliases`` keyword
argument; these are appended to, instead of replacing, any implicit or
explicit ``name`` value::

    ns.add_task(release, aliases=('deploy', 'pypi'))

Result, with three names for the same task::

    $ invoke --list
    Available tasks:

        release
        deploy
        pypi

.. note::
    The convenience decorator `@task <.task>` is another method of setting
    aliases (e.g. ``@task(aliases=('foo', 'bar'))``, and is useful for
    ensuring a given task always has some aliases set no matter how it's
    added to a namespace.

.. _dashes-vs-underscores:

Dashes vs underscores
---------------------

In the common case of functions-as-tasks, you'll often find yourself writing
task names that contain underscores::

    @task
    def my_awesome_task(c):
        print("Awesome!")

Similar to how task arguments are processed to turn their underscores into
dashes (since that's a common command-line convention) all underscores in
task or collection names are interpreted to be dashes instead, by default::

    $ inv --list
    Available tasks:

        my-awesome-task

    $ inv my-awesome-task
    Awesome!

If you'd prefer the underscores to remain instead, you can update your
configuration to set ``tasks.auto_dash_names`` to ``False`` in one of the
non-runtime config files (system, user, or project.) For example, in
``~/.invoke.yml``::

    tasks:
        auto_dash_names: false

.. note::
    In the interests of avoiding confusion, this setting is "exclusive" in
    nature - underscored versions of task names *are not valid* on the CLI
    unless ``auto_dash_names`` is disabled. (However, at the pure function
    level within Python, they must continue to be referenced with
    underscores, as dashed names are not valid Python syntax!)

Nesting collections
===================

The point of namespacing is to have sub-namespaces; to do this in Invoke,
create additional `.Collection` instances and add them to their parent
collection via `.Collection.add_collection`.
For example, let's say we have a couple of documentation tasks::

    @task
    def build_docs(c):
        c.run("sphinx-build docs docs/_build")

    @task
    def clean_docs(c):
        c.run("rm -rf docs/_build")

We can bundle them up into a new, named collection like so::

    docs = Collection('docs')
    docs.add_task(build_docs, 'build')
    docs.add_task(clean_docs, 'clean')

And then add this new collection under the root namespace with
``add_collection``::

    ns.add_collection(docs)

The result (assuming for now that ``ns`` currently just contains the
original ``release`` task)::

    $ invoke --list
    Available tasks:

        release
        docs.build
        docs.clean

As with tasks, collections may be explicitly bound to their parents with a
different name than they were originally given (if any) via a ``name`` kwarg
(also, as with ``add_task``, the 2nd regular arg)::

    ns.add_collection(docs, 'sphinx')

Result::

    $ invoke --list
    Available tasks:

        release
        sphinx.build
        sphinx.clean

Importing modules as collections
================================

A simple tactic which Invoke itself uses in the trivial, single-module case
is to use `.Collection.from_module` -- a classmethod serving as an alternate
``Collection`` constructor which takes a Python module object as its first
argument. Modules given to this method are scanned for ``Task`` instances,
which are added to a new ``Collection``. By default, this collection's name
is taken from the module name (the ``__name__`` attribute), though it can
also be supplied explicitly.

.. note::
    As with the default task module, you can override this default loading
    behavior by declaring a ``ns`` or ``namespace`` `.Collection` object at
    top level in the loaded module.

For example, let's reorganize our earlier single-file example into a Python
package with several submodules. First, ``tasks/release.py``::

    from invoke import task

    @task
    def release(c):
        c.run("python setup.py sdist register upload")

And ``tasks/docs.py``::

    from invoke import task

    @task
    def build(c):
        c.run("sphinx-build docs docs/_build")

    @task
    def clean(c):
        c.run("rm -rf docs/_build")

Tying them together is ``tasks/__init__.py``::

    from invoke import Collection

    import release, docs

    ns = Collection()
    ns.add_collection(Collection.from_module(release))
    ns.add_collection(Collection.from_module(docs))

This form of the API is a little unwieldy in practice. Thankfully there's a
shortcut: ``add_collection`` will notice when handed a module object as its
first argument and call ``Collection.from_module`` for you internally::

    ns = Collection()
    ns.add_collection(release)
    ns.add_collection(docs)

Either way, the result::

    $ invoke --list
    Available tasks:

        release.release
        docs.build
        docs.clean

Default tasks
=============

Tasks may be declared as the default task to invoke for the collection they
belong to, e.g. by giving ``default=True`` to `@task <.task>` (or to
`.Collection.add_task`.) This is useful when you have a bunch of related
tasks in a namespace but one of them is the most commonly used, and maps
well to the namespace as a whole.

For example, in the documentation submodule we've been experimenting with so
far, the ``build`` task makes sense as a default, so we can say things like
``invoke docs`` as a shortcut to ``invoke docs.build``. This is easy to
do::

    @task(default=True)
    def build(c):
        # ...
When imported into the root namespace (as shown above) this alters the
output of ``--list``, highlighting the fact that ``docs.build`` can be
invoked as ``docs`` if desired::

    $ invoke --list
    Available tasks:

        release.release
        docs.build (docs)
        docs.clean

Default subcollections
----------------------

As of version 1.5, this functionality is also extended to subcollections: a
subcollection can be specified as the default when being added to its parent
collection, and that subcollection's own default task (or
sub-subcollection!) will be invoked as the default for the parent.

An example probably makes that clearer. Here's a tiny inline task tree with
two subcollections, each with their own default task::

    from invoke import Collection, task

    @task(default=True)
    def build_all(c):
        print("build ALL THE THINGS!")

    @task
    def build_wheel(c):
        print("Just the wheel")

    build = Collection(all=build_all, wheel=build_wheel)

    @task(default=True)
    def build_docs(c):
        print("Code without docs is no code at all")

    docs = Collection(build_docs)

Then we tie those into one top level collection, setting the ``build``
subcollection as the overall default::

    ns = Collection()
    ns.add_collection(build, default=True)
    ns.add_collection(docs)

The result is that ``build.all`` becomes the absolute default task::

    $ invoke
    build ALL THE THINGS!

Mix and match
=============

You're not limited to the specific tactics shown above -- now that you know
the basic tools of ``add_task`` and ``add_collection``, use whatever
approach best fits your needs.

For example, let's say you wanted to keep things organized into submodules,
but wanted to "promote" ``release.release`` back to the top level for
convenience's sake. Just because it's stored in a module doesn't mean we
must use ``add_collection`` -- we could instead import the task itself and
use ``add_task`` directly::

    from invoke import Collection

    import docs
    from release import release

    ns = Collection()
    ns.add_collection(docs)
    ns.add_task(release)

Result::

    $ invoke --list
    Available tasks:

        release
        docs.build
        docs.clean

More shortcuts
==============

Finally, you can even skip ``add_collection`` and ``add_task`` if your needs
are simple enough -- `.Collection`'s constructor will take unknown arguments
and build the namespace from their values as appropriate::

    from invoke import Collection

    import docs, release

    ns = Collection(release.release, docs)

Notice how we gave both a task object (``release.release``) and a module
containing tasks (``docs``). The result is identical to the above::

    $ invoke --list
    Available tasks:

        release
        docs.build
        docs.clean

If given as keyword arguments, the keywords act like the ``name`` arguments
do in the ``add_*`` methods. Naturally, both can be mixed together as
well::

    ns = Collection(docs, deploy=release.release)

Result::

    $ invoke --list
    Available tasks:

        deploy
        docs.build
        docs.clean

.. note::
    You can still name these ``Collection`` objects with a leading string
    argument if desired, which can be handy when building sub-collections.

invoke-2.2.0/sites/docs/concepts/testing.rst

.. _testing-user-code:

==============================
Testing Invoke-using codebases
==============================

Strategies for testing codebases that use Invoke; some applicable to code
focused on CLI tasks, and others applicable to more generic/refactored
setups.

Subclass & modify Invoke 'internals'
====================================

A quick foreword: most users will find the subsequent approaches suitable,
but advanced users should note that Invoke has been designed so it is itself
easily testable. This means that in many cases, even Invoke's "internals"
are exposed as low/no-shared-responsibility, publicly documented classes
which can be subclassed and modified to inject test-friendly values or
mocks. Be sure to look over the :ref:`API documentation <api>`!
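As one small illustration (a sketch -- this particular subclass is invented
for the example), a test suite might swap in a `.Config` subclass whose
defaults are tuned for testing::

    from invoke import Config
    from invoke.config import merge_dicts

    class TestingConfig(Config):
        # Inject test-friendly defaults instead of Invoke's regular ones:
        # echo all commands, and run them in dry-run mode.
        @staticmethod
        def global_defaults():
            defaults = Config.global_defaults()
            overrides = {'run': {'echo': True, 'dry': True}}
            return merge_dicts(defaults, overrides)

Contexts constructed around a ``TestingConfig`` instance then behave safely
in tests without any further per-test setup.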
Subclass & modify Invoke 'internals' ==================================== A quick foreword: most users will find the subsequent approaches suitable, but advanced users should note that Invoke has been designed so it is itself easily testable. This means that in many cases, even Invoke's "internals" are exposed as low/no-shared-responsibility, publicly documented classes which can be subclassed and modified to inject test-friendly values or mocks. Be sure to look over the :ref:`API documentation `! Use `.MockContext` ================== An instance of subclassing Invoke's public API for test purposes is our own `.MockContext`. Codebases which revolve heavily around `.Context` objects and their methods (most task-oriented code) will find it easy to test by injecting `.MockContext` objects which have been instantiated to yield partial `.Result` objects. For example, take this task:: from invoke import task @task def get_platform(c): uname = c.run("uname -s").stdout.strip() if uname == 'Darwin': return "You paid the Apple tax!" elif uname == 'Linux': return "Year of Linux on the desktop!" An example of testing it with `.MockContext` could be the following:: from invoke import MockContext, Result from mytasks import get_platform def test_get_platform_on_mac(): c = MockContext(run=Result("Darwin\n")) assert "Apple" in get_platform(c) def test_get_platform_on_linux(): c = MockContext(run=Result("Linux\n")) assert "desktop" in get_platform(c) Putting the ``Mock`` in `.MockContext` -------------------------------------- Starting in Invoke 1.5, `.MockContext` will attempt to import the ``mock`` library at instantiation time and wrap its methods within ``Mock`` objects. This lets you not only present realistic return values to your code, but make test assertions about what commands your code is running. Here's another "platform sensitive" task:: from invoke import task @task def replace(c, path, search, replacement): # Assume systems have sed, and that some (eg macOS w/ Homebrew) may # have gsed, implying regular sed is BSD style. has_gsed = c.run("which gsed", warn=True, hide=True) # Set command to run accordingly binary = "gsed" if has_gsed else "sed" c.run(f"{binary} -e 's/{search}/{replacement}/g' {path}") The test code (again, which presumes that eg ``MockContext.run`` is now a ``Mock`` wrapper) relies primarily on 'last call' assertions (``Mock.assert_called_with``) but you can of course use any ``Mock`` methods you need. It also shows how you can set the mock context to respond to multiple possible commands, using a dict value:: from invoke import MockContext, Result from mytasks import replace def test_regular_sed(): expected_sed = "sed -e s/foo/bar/g file.txt" c = MockContext(run={ "which gsed": Result(exited=1), expected_sed: Result(), }) replace(c, 'file.txt', 'foo', 'bar') c.run.assert_called_with(expected_sed) def test_homebrew_gsed(): expected_sed = "gsed -e s/foo/bar/g file.txt" c = MockContext(run={ "which gsed": Result(exited=0), expected_sed: Result(), }) replace(c, 'file.txt', 'foo', 'bar') c.run.assert_called_with(expected_sed) Boolean mock results -------------------- You may have noticed the above example uses a handful of 'empty' `.Result` objects; these stand in for "succeeded, but otherwise had no useful attributes" command executions (as `.Result` defaults to an exit code of ``0`` and empty strings for stdout/stderr). 
This is relatively common - think "interrogative" commands where the caller
only cares for a boolean result, or times when a command is called purely
for its side effects. To support this, there's a shorthand in
`.MockContext`: passing ``True`` or ``False`` to stand in for otherwise
blank Results with exit codes of ``0`` or ``1`` respectively.

The example tests then look like this::

    from invoke import MockContext, Result

    from mytasks import replace

    def test_regular_sed():
        expected_sed = "sed -e 's/foo/bar/g' file.txt"
        c = MockContext(run={
            "which gsed": False,
            expected_sed: True,
        })
        replace(c, 'file.txt', 'foo', 'bar')
        c.run.assert_called_with(expected_sed)

    def test_homebrew_gsed():
        expected_sed = "gsed -e 's/foo/bar/g' file.txt"
        c = MockContext(run={
            "which gsed": True,
            expected_sed: True,
        })
        replace(c, 'file.txt', 'foo', 'bar')
        c.run.assert_called_with(expected_sed)

String mock results
-------------------

Another convenient shorthand is using string values, which are interpreted
to be the stdout of the resulting `.Result`. This only really saves you from
writing out the class itself (since ``stdout`` is the first positional arg
of `.Result`!) but "command X results in stdout Y" is a common enough use
case that we implemented it anyway.

By example, let's modify an earlier example where we cared about stdout::

    from invoke import MockContext

    from mytasks import get_platform

    def test_get_platform_on_mac():
        c = MockContext(run="Darwin\n")
        assert "Apple" in get_platform(c)

    def test_get_platform_on_linux():
        c = MockContext(run="Linux\n")
        assert "desktop" in get_platform(c)

As with everything else in this document, this tactic can be applied to
iterators or mappings as well as individual values.

Regular expression command matching
-----------------------------------

The dict form of `.MockContext` kwarg can accept regular expression objects
as keys, in addition to strings; ideal for situations where you either don't
know the exact command being invoked, or simply don't need or want to write
out the entire thing.

Imagine you're writing a function to run package management commands on a
few different Linux distros and you're trying to test its error handling.
You might want to set up a context that pretends any arbitrary ``apt`` or
``yum`` command fails, and ensure the function returns stderr when it
encounters a problem::

    import re

    from invoke import MockContext, Result

    from mypackage.tasks import install

    package_manager = re.compile(r"^(apt(-get)?|yum) .*")

    def test_package_success_returns_True():
        c = MockContext(run={package_manager: True})
        assert install(c, package="somepackage") is True

    def test_package_explosions_return_stderr():
        c = MockContext(run={
            package_manager: Result(stderr="oh no!", exited=1),
        })
        assert install(c, package="otherpackage") == "oh no!"

A bit contrived - there are a bunch of other ways to organize this exact
test code so you don't truly need the regex - but hopefully it's clear that
when you *do* need this flexibility, this is how you could go about it.

Repeated results
----------------

By default, the values in these mock structures are consumed, causing
`.MockContext` to raise ``NotImplementedError`` afterwards (as it does for
any unexpected command executions). This was designed with the assumption
that most code under test will run a given command once. If your situation
doesn't match this, give ``repeat=True`` to the constructor, and you'll see
values repeat indefinitely instead (or in cycles, for iterables).
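For instance, a test exercising polling code -- calling the same command
several times -- might look like this (a minimal sketch; the
``poll_status`` helper is hypothetical)::

    from invoke import MockContext, Result

    def poll_status(c, times=3):
        # Hypothetical code under test: runs the same command repeatedly.
        return [c.run("service status").stdout.strip() for _ in range(times)]

    def test_repeated_polling():
        # With repeat=True, the stored Result satisfies every call instead
        # of being consumed by the first one.
        c = MockContext(run={"service status": Result("running\n")}, repeat=True)
        assert poll_status(c) == ["running"] * 3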
Expect `Results <.Result>`
==========================

The core Invoke subprocess methods like `~.Context.run` all return `.Result`
objects - which (as seen above) can be readily instantiated by themselves
with only partial data (e.g. standard output, but no exit code or standard
error). This means that well-organized code can be even easier to test and
doesn't require as much use of `.MockContext`.

An iteration on the initial `.MockContext`-using example above::

    from invoke import task

    @task
    def get_platform(c):
        print(platform_response(c.run("uname -s")))

    def platform_response(result):
        uname = result.stdout.strip()
        if uname == 'Darwin':
            return "You paid the Apple tax!"
        elif uname == 'Linux':
            return "Year of Linux on the desktop!"

With the logic encapsulated in a subroutine, you can just unit test that
function by itself, deferring testing of the task or its context::

    from invoke import Result

    from mytasks import platform_response

    def test_platform_response_on_mac():
        assert "Apple" in platform_response(Result("Darwin\n"))

    def test_platform_response_on_linux():
        assert "desktop" in platform_response(Result("Linux\n"))

Avoid mocking dependency code paths altogether
==============================================

This is more of a general software engineering tactic, but the natural
endpoint of the above code examples would be where your primary logic
doesn't care about Invoke at all -- only about basic Python (or locally
defined) data types. This allows you to test logic in isolation and either
ignore testing the Invoke side of things, or write targeted tests solely for
where your code interfaces with Invoke.

Another minor tweak to the task code::

    from invoke import task

    @task
    def show_platform(c):
        uname = c.run("uname -s").stdout.strip()
        print(platform_response(uname))

    def platform_response(uname):
        if uname == 'Darwin':
            return "You paid the Apple tax!"
        elif uname == 'Linux':
            return "Year of Linux on the desktop!"

And the tests (note that since ``show_platform`` strips its ``uname`` before
passing it along, the tests hand in already-stripped values)::

    from mytasks import platform_response

    def test_platform_response_on_mac():
        assert "Apple" in platform_response("Darwin")

    def test_platform_response_on_linux():
        assert "desktop" in platform_response("Linux")

invoke-2.2.0/sites/docs/concepts/watchers.rst

.. _autoresponding:

==========================================
Automatically responding to program output
==========================================

Background
==========

Command-line programs tend to be designed for interactive shells, which
frequently manifests as waiting around for user input, or "prompts".
Well-designed programs offer options for pre-empting such prompts, resulting
in an easily automated workflow -- but with the rest, interactivity is
unavoidable.

Thankfully, Invoke's `.Runner` class not only forwards your standard input
to the running program (allowing you to manually respond to prompts) but it
can also be configured to respond automatically on your behalf.

Basic use
=========

The mechanism for this automation is the ``watchers`` kwarg to the
`.Runner.run` method (and its wrappers elsewhere, such as `.Context.run` and
`invoke.run`), which is a list of `.StreamWatcher`-subclass instances
configured to watch for patterns & respond accordingly. The simplest of
these is `.Responder`, which just replies with its configured response every
time its pattern is seen; others can be found in the ``invoke.watchers``
module.
.. note::
    As with all other arguments to ``run``, you can also set the default set
    of watchers globally via configuration files.

Take for example this program which expects a manual response to a yes/no
prompt::

    $ excitable-program
    When you give the OK, I'm going to do the things. All of them!!
    Are you ready? [Y/n] y
    OK! I just did all sorts of neat stuff. You're welcome! Bye!

You *could* call ``run("excitable-program")``, manually watch for the
prompt, and mash Y by hand. But if you instead supply a `.Responder` like
so::

    @task
    def always_ready(c):
        responder = Responder(
            pattern=r"Are you ready? \[Y/n\] ",
            response="y\n",
        )
        c.run("excitable-program", watchers=[responder])

Then `.Runner` passes the program's ``stdout`` and ``stderr`` through
``responder``, which watches for ``"Are you ready? [Y/n] "`` and
automatically writes ``y`` (plus ``\n`` to simulate hitting Enter/Return) to
the program's ``stdin``.

.. note::
    The pattern argument to `.Responder` is treated as a regular expression,
    requiring more care (note how we had to escape our square-brackets in
    the above example) but providing more power as well.

invoke-2.2.0/sites/docs/conf.py

# Obtain shared config values
import os, sys
sys.path.append(os.path.abspath(".."))
sys.path.append(os.path.abspath("../.."))
from shared_conf import *

# Enable autodoc, intersphinx
extensions.extend(["sphinx.ext.autodoc"])

# Autodoc settings
autodoc_default_options = {
    "members": True,
    "special-members": True,
}

# Sister-site links to WWW
html_theme_options["extra_nav_links"] = {
    "Main website": "https://www.pyinvoke.org"
}

invoke-2.2.0/sites/docs/getting-started.rst

===============
Getting started
===============

This document presents a whirlwind tour of Invoke's feature set. Please see
the links throughout for detailed conceptual & API docs. For installation
help, see the project's installation page on the main website.

.. _defining-and-running-task-functions:

Defining and running task functions
===================================

The core use case for Invoke is setting up a collection of task functions
and executing them. This is pretty easy -- all you need is to make a file
called ``tasks.py`` importing the `.task` decorator and decorating one or
more functions. You will also need to add an arbitrarily-named context
argument (convention is to use ``c``, ``ctx`` or ``context``) as the first
positional arg. Don't worry about using this context parameter yet.

Let's start with a dummy Sphinx docs building task::

    from invoke import task

    @task
    def build(c):
        print("Building!")

You can then execute that new task by telling Invoke's command line runner,
``invoke``, that you want it to run::

    $ invoke build
    Building!

The function body can be any Python you want -- anything at all.

Task parameters
===============

Functions can have arguments, and thus so can tasks. By default, your task
functions' args/kwargs are mapped automatically to both long and short CLI
flags, as per :doc:`the CLI docs </concepts/invoking-tasks>`.

For example, if we add a ``clean`` argument and give it a boolean default,
it will show up as a set of toggle flags, ``--clean`` and ``-c``::

    @task
    def build(c, clean=False):
        if clean:
            print("Cleaning!")
        print("Building!")

Invocations::

    $ invoke build -c
    $ invoke build --clean

Naturally, other default argument values will allow giving string or integer
values.
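For instance (a minimal sketch -- the ``serve`` task and its arguments are
invented for illustration), an integer default causes Invoke to coerce that
flag's value to ``int``::

    from invoke import task

    @task
    def serve(c, host="127.0.0.1", port=8000):
        # host stays a string, while --port values arrive as real integers
        # thanks to the integer default.
        print("Serving on {}:{}".format(host, port))

An invocation like ``invoke serve --port 8080`` would then hand the function
``8080`` as an ``int``, not the string ``"8080"``.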
Arguments with no default values are assumed to take strings, and can also
be given as positional arguments. Take this incredibly contrived snippet for
example::

    @task
    def hi(c, name):
        print("Hi {}!".format(name))

It can be invoked in the following ways, all resulting in "Hi Name!"::

    $ invoke hi Name
    $ invoke hi --name Name
    $ invoke hi --name=Name
    $ invoke hi -n Name
    $ invoke hi -nName

Adding metadata via `@task <.task>`
-----------------------------------

`@task <.task>` can be used without any arguments, as above, but it's also a
convenient vector for additional metadata about the task function it
decorates. One common example is describing the task's arguments, via the
``help`` parameter (in addition to optionally giving task-level help via the
docstring)::

    @task(help={'name': "Name of the person to say hi to."})
    def hi(c, name):
        """
        Say hi to someone.
        """
        print("Hi {}!".format(name))

This description will show up when invoking ``--help``::

    $ invoke --help hi
    Usage: inv[oke] [--core-opts] hi [--options] [other tasks here ...]

    Docstring:
      Say hi to someone.

    Options:
      -n STRING, --name=STRING   Name of the person to say hi to.

More details on task parameterization and metadata can be found in
:doc:`/concepts/invoking-tasks` (for the command-line & parsing side of
things) and in the `.task` API documentation (for the declaration side).

Listing tasks
=============

You'll sometimes want to see what tasks are available in a given
``tasks.py`` -- ``invoke`` can be told to list them instead of executing
something::

    $ invoke --list
    Available tasks:

        build

This will also print the first line of each task’s docstring, if it has one.
To see what else is available besides ``--list``, say ``invoke --help``.

Running shell commands
======================

Many use cases for Invoke involve running local shell commands, similar to
programs like Make or Rake. This is done via the `~.Context.run` function::

    from invoke import task

    @task
    def build(c):
        c.run("sphinx-build docs docs/_build")

You'll see the command's output in your terminal as it runs::

    $ invoke build
    Running Sphinx v1.1.3
    loading pickled environment... done
    ...
    build succeeded, 2 warnings.

`~.Context.run` has a number of arguments controlling its behavior, such as
activation of pseudo-terminals for complex programs requiring them,
suppression of exit-on-error behavior, hiding of subprocess' output (while
still capturing it for later review), and more. See `its API docs
<.Context.run>` for details.

`~.Context.run` always returns a useful `.Result` object providing access to
the captured output, exit code, and other information.

.. _why-context:

Aside: what exactly is this 'context' arg anyway?
-------------------------------------------------

A common problem task runners face is transmission of "global" data - values
loaded from configuration files or other configuration vectors, given via
CLI flags, generated in 'setup' tasks, etc.

Some libraries (such as `Fabric <https://www.fabfile.org>`_ 1.x) implement
this via module-level attributes, which makes testing difficult and error
prone, limits concurrency, and increases implementation complexity.

Invoke encapsulates state in explicit `~.Context` objects, handed to tasks
when they execute. The context is the primary API endpoint, offering methods
which honor the current state (such as `.Context.run`) as well as access to
that state itself.
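As a quick illustration of that last point (a minimal sketch -- the task is
made up, though ``run.echo`` is a real built-in setting)::

    from invoke import task

    @task
    def info(c):
        # Attribute access on c.config mirrors the nested structure of the
        # configuration system's files and defaults.
        echoing = c.config.run.echo
        print("Commands will {}be echoed".format("" if echoing else "not "))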
Declaring pre-tasks
===================

Tasks may be configured in a number of ways via the `.task` decorator. One
of these is to select one or more other tasks you wish to always run prior
to execution of your task, indicated by name.

Let's expand our docs builder with a new cleanup task that runs before every
build (but which, of course, can still be executed on its own)::

    from invoke import task

    @task
    def clean(c):
        c.run("rm -rf docs/_build")

    @task(clean)
    def build(c):
        c.run("sphinx-build docs docs/_build")

Now when you ``invoke build``, it will automatically run ``clean`` first.

.. note::
    If you're not a fan of the implicit "positional arguments are pre-run
    task names" API, you can instead explicitly give the ``pre`` kwarg:
    ``@task(pre=[clean])``.

Details can be found in :ref:`how-tasks-run`.

Creating namespaces
===================

Right now, our ``tasks.py`` is implicitly for documentation only, but maybe
our project needs other non-doc things, like packaging/deploying, testing,
etc. At that point, a single flat namespace isn't enough, so Invoke lets you
easily build a :doc:`nested namespace </concepts/namespaces>`.

Here's a quick example. Let's first rename our ``tasks.py`` to be
``docs.py``; no other changes are needed there. Then we create a new
``tasks.py``, and for the sake of brevity populate it with a new, truly top
level task called ``deploy``.

Finally, we can use a new API member, the `.Collection` class, to bind this
task and the ``docs`` module into a single explicit namespace. When Invoke
loads your task module, if a `.Collection` object bound as ``ns`` or
``namespace`` exists it will get used for the root namespace::

    from invoke import Collection, task
    import docs

    @task
    def deploy(c):
        c.run("python setup.py sdist")
        c.run("twine upload dist/*")

    namespace = Collection(docs, deploy)

The result::

    $ invoke --list
    Available tasks:

        deploy
        docs.build
        docs.clean

For a more detailed breakdown of how namespacing works, please see
:doc:`the docs </concepts/namespaces>`.

invoke-2.2.0/sites/docs/index.rst

==================================
Welcome to Invoke's documentation!
==================================

This site covers Invoke's conceptual & API documentation. For basic info on
what Invoke is, including its public changelog & how the project is
maintained, please see `the main project website
<https://www.pyinvoke.org>`_.

Getting started
---------------

Many core ideas & API calls are explained in the tutorial/getting-started
document:

.. toctree::
    :maxdepth: 2

    getting-started

The ``invoke`` CLI tool
-----------------------

Details on the CLI interface to Invoke, available core flags, and tab
completion options.

.. toctree::
    invoke

Concepts
--------

Dig deeper into specific topics:

.. toctree::
    :maxdepth: 2
    :glob:

    concepts/*

.. _api:

API
---

Know what you're looking for & just need API details? View our
auto-generated API documentation:

.. toctree::
    :maxdepth: 1
    :glob:

    api/*

invoke-2.2.0/sites/docs/invoke.rst

.. _inv:

========================
``inv[oke]`` core usage
========================

.. seealso::
    This page documents ``invoke``'s core arguments, options and behavior
    (which includes options present in :ref:`custom Invoke-based binaries
    <reusing-as-a-binary>`). For details on invoking user-specified tasks
    and other parser-related details, see :doc:`/concepts/invoking-tasks`.

Core options and flags
======================

``invoke``'s usage looks like::

    $ inv[oke] [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts]
All core options & flags are below; almost all of them must be given
*before* any task names, with a few (such as :option:`--help`) being
specially looked for anywhere in the command line. (For parsing details, see
:ref:`basic-cli-layout`.)

.. option:: --complete

    Print (line-separated) valid tab-completion options for an Invoke
    command line given as the 'remainder' (i.e. after a ``--``). Used for
    building :ref:`shell completion scripts <tab-completion>`.

    For example, when the local tasks tree contains tasks named ``foo`` and
    ``bar``, and when ``foo`` takes flags ``--foo-arg`` and ``--foo-arg-2``,
    you might use it like this::

        # Empty input: just task names
        $ inv --complete --
        foo
        bar

        # Input not ending with a dash: task names still
        $ inv --complete -- foo --foo-arg
        foo
        bar

        # Input ending with a dash: current context's flag names
        $ inv --complete -- foo -
        --foo-arg
        --foo-arg-2

    For more details on how to make best use of this option, see
    :option:`--print-completion-script`.

.. option:: --hide=STRING

    Set default value of run()'s 'hide' kwarg.

.. option:: --no-dedupe

    Disable task deduplication.

.. _print-completion-script:

.. option:: --print-completion-script=SHELL

    Print a completion script for desired ``SHELL`` (e.g. ``bash``, ``zsh``,
    etc). This can be sourced into the current session in order to enjoy
    :ref:`tab-completion for tasks and options <tab-completion>`.

    These scripts are bundled with Invoke's distributed codebase, and
    internally make use of :option:`--complete`.

.. _prompt-for-sudo-password:

.. option:: --prompt-for-sudo-password

    Prompt at the start of the session (before executing any tasks) for the
    ``sudo.password`` configuration value. This allows users who don't want
    to keep sensitive material in the config system or their shell
    environment to rely on user input, without otherwise interrupting the
    flow of the program.

.. option:: --write-pyc

    By default, Invoke disables bytecode caching as it can cause
    hard-to-debug problems with task files and (for the kinds of things
    Invoke is typically used for) offers no noticeable speed benefit. If you
    really want your ``.pyc`` files back, give this option.

.. option:: -c STRING, --collection=STRING

    Specify collection name to load.

.. option:: -d, --debug

    Enable debug output.

.. option:: --dry

    Echo commands instead of actually running them; specifically, causes
    any ``run`` calls to:

    - Act as if the ``echo`` option has been turned on, printing the
      command-to-be-run to stdout;
    - Skip actual subprocess invocation (returning before any of that
      machinery starts running);
    - Return a dummy `~invoke.runners.Result` object with 'blank' values
      (empty stdout/err strings, ``0`` exit code, etc).

.. option:: -D, --list-depth=INT

    Limit :option:`--list` display to the specified number of levels, e.g.
    ``--list-depth 1`` to show only top-level tasks and namespaces.

    If an argument is given to ``--list``, then this depth is relative; so
    ``--list build --list-depth 1`` shows everything at the top level of the
    ``build`` subtree.

    Default behavior if this is not given is to show all levels of the
    entire task tree.

.. option:: -e, --echo

    Echo executed commands before running.

.. option:: -f, --config

    Specify a runtime configuration file to load.

    Note that you may instead use the ``INVOKE_RUNTIME_CONFIG`` environment
    variable in place of this option. If both are given, the CLI option will
    win out.
.. option:: -F, --list-format=STRING

    Change the format used to display the output of :option:`--list`; may be
    one of:

    - ``flat`` (the default): single, flat vertical list with dotted task
      names.
    - ``nested``: a nested (4-space indented) vertical list, where each
      level implicitly includes its parent (with leading dots as a strong
      visual clue that these are still subcollection tasks.)
    - ``json``: intended for consumption by scripts or other programs, this
      format emits JSON representing the task tree, with each 'node' in the
      tree (the outermost document being the root node, and thus a JSON
      object) consisting of the following keys:

      - ``name``: String name of collection; for the root collection this
        is typically the module name, so unless you're supplying alternate
        collection name to the load process, it's usually ``"tasks"`` (from
        ``tasks.py``.)
      - ``help``: First line of collection's docstring, if it came from a
        module; null otherwise (or if module lacked a docstring.)
      - ``tasks``: Immediate children of this collection; an array of
        objects of the following form:

        - ``name``: Task's local name within its collection (i.e. not the
          full dotted path you might see with the ``flat`` format;
          reconstructing that path is left up to the consumer.)
        - ``help``: First line of task's docstring, or null if it had none.
        - ``aliases``: An array of string aliases for this task.

      - ``default``: String naming which task within this collection, if
        any, is the default task. Is null if no task is the default.
      - ``collections``: An array of any sub-collections within this
        collection, members of which will have the same structure as this
        outermost document, recursively.

      The JSON emitted is not pretty-printed, but does end with a trailing
      newline.
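    For example, a hypothetical root collection holding a single ``build``
    task might serialize like this (pretty-printed here for readability --
    as noted above, the real output is a single line)::

        {
            "name": "tasks",
            "help": "Hypothetical top-level tasks module.",
            "tasks": [
                {"name": "build", "help": "Build the docs.", "aliases": ["b"]}
            ],
            "default": "build",
            "collections": []
        }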
.. option:: -h STRING, --help=STRING

    When given without any task names, displays core help; when given with a
    task name (may come before *or* after the task name) displays help for
    that particular task.

.. option:: -l, --list=STRING

    List available tasks. Shows all tasks by default; may give an explicit
    namespace to 'root' the displayed task tree to only that namespace.
    (This argument may contain periods, as with task names, so it's possible
    to show only a small, deep portion of the overall tree if desired.)

.. option:: -p, --pty

    Use a pty when executing shell commands.

.. option:: -r STRING, --search-root=STRING

    Change root directory used for finding task modules.

.. option:: -T INT, --command-timeout=INT

    Set a default command execution timeout of INT seconds. Maps to the
    ``timeouts.command`` config setting.

.. option:: -V, --version

    Show version and exit.

.. option:: -w, --warn-only

    Warn, instead of failing, when shell commands fail.

.. _tab-completion:

Shell tab completion
====================

Generating a completion script
------------------------------

Invoke's philosophy is to implement generic APIs and then "bake in" a few
common use cases built on top of those APIs; tab completion is no different.

Generic tab completion functionality (outputting a shell-compatible list of
completion tokens for a given command line context) is provided by the
:option:`--complete` core CLI option described above.

However, you probably won't need to use that flag yourself: we distribute a
handful of ready-made wrapper scripts aimed at the most common shells like
``bash`` and ``zsh`` (plus others).

These scripts can be automatically generated from Invoke or :ref:`any
Invoke-driven command-line tool <reusing-as-a-binary>`, using
:option:`--print-completion-script`; the printed scripts will contain the
correct binary name(s) for the program generating them.

For example, the following command prints (to stdout) a script which works
for ``zsh``, instructs ``zsh`` to use it for the ``inv`` and ``invoke``
programs, and calls ``invoke --complete`` at runtime to get dynamic
completion information::

    $ invoke --print-completion-script zsh

.. note::
    You'll probably want to source this command or store its output
    somewhere permanently; more on that in the next section.

Similarly, the `Fabric <https://www.fabfile.org>`_ tool inherits from
Invoke, and only has a single binary name (``fab``); if you wanted to get
Fabric completion in ``bash``, you would say::

    $ fab --print-completion-script bash

In the rest of this section, we'll use ``inv`` in examples, but please
remember to replace it with the program you're actually using, if it's not
Invoke itself!

Sourcing the script
-------------------

There are a few ways to utilize the output of the above commands, depending
on your needs, where the program is installed, and your shell:

- The simplest and least disruptive method is to ``source`` the printed
  completion script inline, which doesn't place anything on disk, and will
  only affect the current shell session::

      $ source <(inv --print-completion-script zsh)

- If you've got the program available in your system's global Python
  interpreter (and you're okay with running the program at the startup of
  each shell session - Python's speed is admittedly not its strong point)
  you could add that snippet to your shell's startup file, such as
  ``~/.zshrc`` or ``~/.bashrc``.

- If the program's available globally but you'd prefer to *avoid* running an
  extra Python program at shell startup, you can cache the output of the
  command in its own file; where this file lives is entirely up to you and
  how your shell is configured. For example, you might just drop it into
  your home directory as a hidden file::

      $ inv --print-completion-script zsh > ~/.invoke-completion.sh

  and then perhaps add the following to the end of ``~/.zshrc``::

      source ~/.invoke-completion.sh

  But again, this is entirely up to you and your shell.

  .. note::
      If you're using ``fish``, you *must* use this tactic, as our fish
      completion script is not suitable for direct sourcing. Fish shell
      users should direct the output of the command to a file in the
      ``~/.config/fish/completions/`` directory.

- Finally, if your copy of the needing-completion program is only installed
  in a specific environment like a virtualenv, you can use either of the
  above techniques:

  - Caching the output and referencing it in a global shell startup file
    will still work in this case, as it does not require the program to be
    available when the shell loads -- only when you actually attempt to tab
    complete.
  - Using the ``source <(inv --print-completion-script yourshell)`` approach
    will work *as long as* you place it in some appropriate per-environment
    startup file, which will vary depending on how you manage Python
    environments. For example, if you use ``virtualenvwrapper``, you could
    append the ``source`` line in
    ``/path/to/virtualenv/bin/postactivate``.

Utilizing tab completion itself
-------------------------------

You've ensured that the completion script is active in your environment -
what have you gained?
- By default, tabbing after typing ``inv`` or ``invoke`` will display task
  names from your current directory/project's tasks file.
- Tabbing after typing a dash (``-``) or double dash (``--``) will display
  valid options/flags for the current context: core Invoke options if no
  task names have been typed yet; options for the most recently typed task
  otherwise.
- Tabbing while typing a partial long option will complete matching long
  options, using your shell's native substring completion. E.g. if no task
  names have been typed yet, ``--e`` will offer ``--echo`` as a completion
  option.
- Hitting tab when the most recent typed/completed token is a flag which
  takes a value, will 'fall through' to your shell's native filename
  completion.

  - For example, prior to typing a task name, ``--config`` will complete
    local file paths to assist in filling in a config file.

invoke-2.2.0/sites/shared_conf.py

from datetime import datetime
from os.path import abspath, join, dirname

import alabaster

# Alabaster theme + mini-extension
html_theme_path = [alabaster.get_path()]
extensions = ["alabaster", "sphinx.ext.intersphinx", "sphinx.ext.doctest"]
# Paths relative to invoking conf.py - not this shared file
html_theme = "alabaster"
html_theme_options = {
    "description": "Pythonic task execution",
    "github_user": "pyinvoke",
    "github_repo": "invoke",
    "analytics_id": "UA-18486793-3",
    "travis_button": False,  # No longer on Travis-CI; README buttons link to Circle
    "codecov_button": False,  # Now a README button
    "tidelift_url": "https://tidelift.com/subscription/pkg/pypi-invoke?utm_source=pypi-invoke&utm_medium=referral&utm_campaign=docs",  # noqa
}
html_sidebars = {
    "**": ["about.html", "navigation.html", "searchbox.html", "donate.html"]
}

# Everything intersphinx's to Python
intersphinx_mapping = {"python": ("https://docs.python.org/2.7/", None)}

# Doctest settings
doctest_path = [abspath(join(dirname(__file__), "..", "tests"))]
doctest_global_setup = r"""
from _util import MockSubprocess
"""

# Regular settings
project = "Invoke"
year = datetime.now().year
copyright = "{} Jeff Forcier".format(year)
master_doc = "index"
templates_path = ["_templates"]
exclude_trees = ["_build"]
source_suffix = ".rst"
default_role = "obj"

invoke-2.2.0/sites/www/changelog.rst

=========
Changelog
=========

- :release:`2.2.0 <2023-07-12>`
- :feature:`-` Remove the somewhat inaccurate subclass requirement around
  `~invoke.config.Config`'s ``.clone(into=...)`` constructor call. It was
  broken for certain use cases (such as trying to clone one subclass into a
  sibling subclass, which would yield a ``TypeError``) and is irrelevant if
  one is using the new type annotations.
- :release:`2.1.3 <2023-06-14>`
- :bug:`944` After the release of 2.1, package-style task modules started
  looking in the wrong place for project-level config files (inside one's eg
  ``tasks/`` dir, instead of *next to* that dir) due to a subtlety in the
  new import/discovery mechanism used. This has been fixed. Thanks to Arnaud
  V. and Hunter Kelly for the reports and to Jesse P. Johnson for initial
  debugging/diagnosis.
- :release:`2.1.2 <2023-05-15>`
- :support:`936 backported` Make sure ``py.typed`` is in our packaging
  manifest; without it, users working from a regular installation can't
  perform type checks.
  Thanks to Nikita Sobolev for catch & patch.
- :release:`2.1.1 <2023-05-01>`
- :bug:`934` The `importlib` upgrade in 2.1 had a corner case bug (regarding
  ``from . import`` functionality within package-like task trees) which in
  turn exposed a false-pass in our test suite. Both have now been fixed.
  Thanks to Greg Meyer and Robert J. Berger for the bug reports.
- :release:`2.0.1 <2023-04-29>`
- :bug:`910` Add more rigor around subprocess/runner shutdown to avoid
  spurious exceptions & also fix downstream issues in libraries like Fabric.
  Reported by Orlando Rodríguez.
- :release:`2.1.0 <2023-04-28>`
- :support:`675` Implement `importlib` and deprecate `imp` module. Patches
  provided by Jesse P. Johnson.
- :bug:`376 major` Resolve equality comparison bug for non-collections.
  Patch via Jesse P. Johnson.
- :support:`906` Implement type hints and type checking tests with mypy to
  reduce errors and improve code documentation. Patches by Jesse P. Johnson
  and review by Sam Bull.
- :support:`901 backported` (via :issue:`903`) Tweak test suite ``setup``
  methods to be named ``setup_method`` so pytest stops whining about it.
  Patch via Jesse P. Johnson.
- :release:`2.0.0 <2023-01-16>`
- :support:`-` Remove support for, and imports related to, all Python
  versions less than 3.6 - including Python 2. This also includes updates to
  vendored packages, such as removing ``six`` and upgrading ``lexicon`` to
  the latest version; and also treatment of things like ``Mock`` use within
  `invoke.context.MockContext` (which now expects stdlib's ``unittest.mock``
  instead of hunting for the old standalone ``mock`` library). Thanks to
  various folks for patches related to some of this work, including Jesse P.
  Johnson who supplied multiple PRs whose commits made it in.

  .. warning::
      This change is backwards incompatible in the following scenarios:

      - You use Python <3.6. Shouldn't be an issue as we now specify
        ``python_requires`` in packaging metadata.
      - You call ``invoke.util.encode_output`` manually for some reason.
        (This became a noop under Python 3, so just...remove it!)
      - You use `invoke.context.MockContext`; its ``repeat`` init kwarg
        changed its default value from ``False`` to ``True``. This probably
        won't bite you, but we mention it just in case you somehow relied
        upon the legacy behavior.
      - You subclass `invoke.runners.Runner` and/or have had to interact
        with its ``stop`` or ``stop_timer`` methods. The latter has been
        merged into the former, and if you are overriding ``stop``, you'll
        want to make sure you now call ``super()`` somewhere if you were not
        already.
- :support:`-` ``Task.argspec`` has changed its return value; it now returns
  an `inspect.Signature` derived from that of the task's body callable.

  .. warning::
      This change is backwards incompatible if you were using this method
      directly.
- :release:`1.7.3 <2022-09-30>`
- :support:`- backported` Fix a non-fatal bug in our setup.py
  ``long_description`` generation causing 1.7.0-1.7.2 to have malformed
  description text on PyPI.
- :release:`1.7.2 <2022-09-30>`
- :bug:`876` Refactor CLI parser instantiation such that the
  ``tasks.ignore_unknown_help`` feature (added in 1.7) works when Invoke is
  run in ``--complete`` mode, i.e. in tab-completion scripts.
- :bug:`-` Fix errors thrown when comparing `~invoke.tasks.Task` objects to
  non-Task objects; such comparisons are now always false.
- :release:`1.7.1 <2022-05-11>`
- :bug:`659` Improve behavior under ``nohup``, which causes stdin to become
  an undetectably-unreadable (but otherwise legit) file descriptor.
Previously this led to `OSError` even if you weren't expecting anything on stdin; we now trap this specific case and silently ignore it, allowing execution to continue. Thanks to ``@kingkisskill`` for initial report and to Ryan Stoner for followup and workshopping. - :release:`1.7.0 <2022-03-18>` - :feature:`793` Add a new ``tasks.ignore_unknown_help`` config option for users who hand their tasks centrally-defined argument help dictionaries; it defaults to ``False`` but such users may set it to ``True`` to avoid exceptions. Thanks to ``@Allu2`` for the report. - :support:`-` Switch to using ``yaml.safe_load`` for loading config files. This avoids some warnings under newer PyYAML versions and is also, in a shocking twist, more secure. - :support:`803` Upgrade our vendored PyYAML from 3.11 to 5.4.1; this should both supply a number of security fixes, and address problems loading project-level YAML config files under Python 3.10. Fix via Andreas Rammhold. - :feature:`845` Env vars explicitly supplied to `~invoke.context.Context.sudo` (via its ``env`` kwarg) are now explicitly preserved via ``sudo``'s ``--preserve-env`` argument. Patch courtesy of Benno Rice. - :support:`-` Switch our continuous integration service from Travis-CI to Circle-CI, plus related and necessary updates to various administrative config files, management tasks and metadata. Including but not limited to: - Enhanced PyPI-level metadata/links - Split out tool config data from ``setup.cfg`` - Enhance execution & coverage of unit vs integration tests under CI .. warning:: Due to various factors, this release will **not** include a Python 2-compatible wheel archive. Users of Python 2 can still install from the sdist, and are strongly encouraged to check `the roadmap `_ as the next release will likely be the one that removes Python 2 entirely! - :release:`1.6.0 <2021-07-09>` - :release:`1.5.1 <2021-07-09>` - :support:`- backported` (Mostly) alphabetize the list of ``run()`` params in the runners API docs. Previously they were an unordered mess. - :support:`- backported` Document the ``dry`` keyword argument to ``run``, which had been added in version 1.3 but only documented as a CLI flag; it does also work as a kwarg (and as a config option). - :bug:`751` Don't explode on empty-but-for-whitespace task docstrings. Thanks to Matt Hayden for the report & initial patches. - :feature:`791` Add a new ``run.echo_format`` configuration option allowing control over the format of echoed commands. It defaults to the previously hardcoded value (wrap in ANSI bold) and is thus backwards compatible. Thanks to David JM Emmett for the patch. - :release:`1.5.0 <2020-12-30>` - :feature:`454` (also :issue:`577`/:issue:`658`, via :issue:`583`/:issue:`681`/:issue:`607`) Allow any string-compatible object to be passed to `Context.cd `, enabling use of eg ``pathlib.Path`` instances. Thanks to Jimm Domingo for the original report and Ludovico Bianchi, Mario César, and Floris Lambrechts for patches. - :bug:`409 major` (via :issue:`611`/:issue:`580`) Don't silently discard help text for task arguments whose names happen to contain underscores. Reported by ``@iago1460``, original patches by Hayden Flinner and Floris Lambrechts. - :bug:`398 major` (via :issue:`611`/:issue:`580`) Don't silently ignore task help specifiers which don't actually map to the decorated task's arguments (eg ``@task(help={"foo": "help for foo"})`` wrapping a task without a ``foo`` argument). Reported by Sohaib Farooqi, with original patches by Hayden Flinner and Floris Lambrechts. 
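As a quick sketch of what the two help-related fixes above enable (the task and argument names here are invented for illustration, not taken from the project)::

    from invoke import task

    @task(help={"log_level": "Verbosity of build output."})
    def build(c, log_level="info"):
        """Build the project."""
        print("log level is " + log_level)

``inv --help build`` now shows the ``log_level`` help text despite the underscore in the argument name, and a ``help`` key matching no actual argument is reported as an error instead of being silently dropped (unless ``tasks.ignore_unknown_help`` from the 1.7.0 entry above is enabled).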
- :feature:`197` Allow subcollections to act as the default 'tasks' of their parent collections (via the new ``default`` kwarg to `~invoke.collection.Collection.add_collection`). This means that nontrivial task trees can specify eg "use my test subcollection's default task as the global default task" and similar. Thanks to Tye Wang for the request and initial patch. - :support:`-` Enhanced test coverage in a handful of modules whose coverage was under 90%. - :feature:`-` `~invoke.context.MockContext` now populates its ``NotImplementedError`` exception instances (typically raised when a command is executed which had no pre-prepared result) with the command string that triggered them; this makes it much easier to tell what exactly in a test caused the error. - :feature:`-` `~invoke.context.MockContext` now accepts a few quality-of-life shortcuts as keys and values in its ``run``/``sudo`` arguments: - Keys may be compiled regular expression objects, as well as strings, and will match any calls whose commands match the regex. - Values may be ``True`` or ``False`` as shorthand for otherwise empty `~invoke.runners.Result` objects with exit codes of ``0`` or ``1`` respectively. - Values may also be strings, as shorthand for otherwise empty `~invoke.runners.Result` objects with those strings given as the ``stdout`` argument. - :feature:`441` Add a new ``repeat`` kwarg to `~invoke.context.MockContext` which, when True (default: False) causes stored results for its methods to be yielded repeatedly instead of consumed. Feature request courtesy of ``@SwampFalc``. - :bug:`- major` Immutable iterable result values handed to `~invoke.context.MockContext` would yield errors (due to the use of ``pop()``). The offending logic has been retooled to be more iterator-focused and now works for tuples and etc. - :support:`-` Update the :ref:`testing documentation ` a bit: cleaned up existing examples and added new sections for the other updates in the 1.5 release. - :feature:`700` Automatically populate the ``command`` attribute of `~invoke.runners.Result` objects returned by `~invoke.context.MockContext` methods, with the command string triggering that result. Previously users had to do this by hand or otherwise suffered inaccurate result objects. Thanks to ``@SwampFalc`` for the report & initial patch. - :feature:`-` Upgrade `~invoke.context.MockContext` to wrap its methods in ``Mock`` objects if the ``(unittest.)mock`` library is importable. This makes testing Invoke-using codebases even easier. - :release:`1.4.1 <2020-01-29>` - :release:`1.3.1 <2020-01-29>` - :support:`586 backported` Explicitly strip out ``__pycache__`` (and for good measure, ``.py[co]``, which previously we only stripped from the ``tests/`` folder) in our ``MANIFEST.in``, since at least some earlier releases erroneously included such. Credit to Martijn Pieters for the report and Floris Lambrechts for the patch. - :bug:`660` Fix an issue with `~invoke.run` & friends having intermittent problems at exit time (symptom was typically about the exit code value being ``None`` instead of an integer; often with an exception trace). Thanks to Frank Lazzarini for the report and to the numerous others who provided reproduction cases. - :bug:`518` Close pseudoterminals opened by the `~invoke.runners.Local` class during ``run(..., pty=True)``. Previously, these were only closed incidentally at process shutdown, causing file descriptor leakage in long-running processes. Thanks to Jonathan Paulson for the report. 
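To illustrate the 1.5.0-era `~invoke.context.MockContext` conveniences listed above, a minimal sketch (the command strings and results are invented for the example)::

    import re
    from invoke import MockContext, Result

    c = MockContext(repeat=True, run={
        "true": True,                    # shorthand for Result(exited=0)
        "false": False,                  # shorthand for Result(exited=1)
        "cat VERSION": "1.5.0",          # shorthand for Result(stdout="1.5.0")
        re.compile(r"^git "): Result(),  # regex keys match command strings
    })

    result = c.run("cat VERSION")
    assert result.stdout == "1.5.0"
    assert result.command == "cat VERSION"  # auto-populated, per the above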
- :release:`1.4.0 <2020-01-03>` - :bug:`637 major` A corner case in `~invoke.context.Context.run` caused overridden streams to be unused if those streams were also set to be hidden (eg ``run(command, hide=True, out_stream=StringIO())`` would result in no writes to the ``StringIO`` object). This has been fixed - hiding for a given stream is now ignored if that stream has been set to some non-``None`` (and in the case of ``in_stream``, non-``False``) value. - :bug:`- major` As part of feature work on :issue:`682`, we noticed that the `~invoke.runners.Result` return value from `~invoke.context.Context.run` was inconsistent between dry-run and regular modes; for example, the dry-run version of the object lacked updated values for ``hide``, ``encoding`` and ``env``. This has been fixed. - :feature:`682` (originally reported as :issue:`194`) Add asynchronous behavior to `~invoke.runners.Runner.run`: - Basic asynchronicity, where the method returns as soon as the subprocess has started running, and that return value is an object with methods allowing access to the final result. - "Disowning" subprocesses entirely, which not only returns immediately but also omits background threading, allowing the subprocesses to outlive Invoke's own process. See the updated API docs for the `~invoke.runners.Runner` for details on the new ``asynchronous`` and ``disown`` kwargs enabling this behavior. Thanks to ``@MinchinWeb`` for the original report. - :feature:`-` Never accompanied the top-level singleton `~invoke.run` (which simply wraps an anonymous `~invoke.context.Context`'s ``run`` method) with its logical sibling, `~invoke.sudo` - this has been remedied. - :release:`1.3.0 <2019-08-06>` - :feature:`324` Add basic dry-run support, in the form of a new :option:`--dry` CLI option and matching ``run.dry`` config setting, which causes command runners (eg `~invoke.run`, `Context.run `) to: - Act as if the ``echo`` option has been turned on, printing the command-to-be-run to stdout; - Skip actual subprocess invocation (returning before any of that machinery starts running); - Return a dummy `~invoke.runners.Result` object with 'blank' values (empty stdout/err strings, ``0`` exit code, etc). This allows quickly seeing what a given task or series of tasks might do, without actually running any shell commands (though naturally, any state-modifying Python code will still run). Thanks to Monty Hindman for the feature request and ``@thebjorn`` for the initial patch. - :bug:`384 major` (via :issue:`653`) Modify config file loading so it detects missing-file IOErrors via their ``errno`` attribute instead of their string rendering (eg ``"No such file"``). This should improve compatibility for non-English locales. Thanks to Patrick Massot for the report and Github user ``@cybiere`` for the patch. - :feature:`539` (via :issue:`645`) Add support for command timeouts, i.e. the ability to add an upper bound on how long a call to `~invoke.context.Context.run` may take to execute. Specifically: - A ``timeout`` argument to `~invoke.context.Context.run`. - The ``timeouts.command`` config setting mapping to that argument. - The :option:`-T/--command-timeout <-T>` CLI flag. Thanks to Israel Fruchter for the request & an early version of the patchset. - :bug:`552 major` (also :issue:`553`) Add a new `~invoke.runners.Runner` method, `~invoke.runners.Runner.close_proc_stdin`, and call it when standard input processing detects an EOF. Without this, subprocesses that read their stdin until EOF would block forever, hanging the program. 
Thanks to ``@plockc`` for the report & initial patch. .. note:: This fix only applies when ``pty=False`` (the default); PTYs complicate the situation greatly (but also mean the issue is less likely to occur). - :bug:`557 major` (with assist from :issue:`640`) Fix the `~invoke.context.Context.cd` and `~invoke.context.Context.prefix` context managers so that ``with cd`` and ``with prefix`` correctly revert their state manipulations after they exit, when exceptions occur. Thanks to Jon Walsh and Artur Puzio for their respective patches. - :bug:`466 major` Update the parsing and CLI-program mechanisms so that all core arguments may be given within task CLI contexts; previously this functionality only worked for the ``--help`` flag, and other core arguments given after task names (such as ``--echo``) were silently ignored. - :feature:`-` Allow the configuration system to override which `~invoke.executor.Executor` subclass to use when executing tasks (via an import-oriented string). Specifically, it's now possible to alter execution by distributing such a subclass alongside, for example, a repository-local config file which sets ``tasks.executor_class``; previously, this sort of thing required use of :ref:`custom binaries `. - :release:`1.2.0 <2018-09-13>` - :feature:`301` (via :issue:`414`) Overhaul tab completion mechanisms so users can :ref:`print a completion script ` which automatically matches the emitting binary's configured names (compared to the previous hardcoded scripts, which only worked for ``inv``/``invoke`` by default). Thanks to Nicolas Höning for the foundational patchset. - :release:`1.1.1 <2018-07-31>` - :release:`1.0.2 <2018-07-31>` - :bug:`556` (also `fabric/fabric#1823 `_) Pre-emptively check for an error condition involving an unpicklable config file value (Python config files and imported module objects) and raise a useful exception instead of allowing a confusing ``TypeError`` to bubble up later. Reported by Pham Cong Dinh. - :bug:`559` (also `fabric/fabric#1812 `_) Modify how `~invoke.runners.Runner` performs stdin terminal mode changes, to avoid incorrect terminal state restoration when run concurrently (which could lead to things like terminal echo becoming disabled after the Python process exits). Thanks to Adam Jensen and Nick Timkovich for the detailed bug reports & reproduction assistance. - :release:`1.1.0 <2018-07-12>` - :release:`1.0.1 <2018-07-12>` - :feature:`-` Enhance `~invoke.tasks.Call` with a new method (``clone_data``) and new kwarg to an existing method (``clone`` grew ``with_``) to assist subclassers when extending. - :bug:`270` (also :issue:`551`) ``None`` values in config levels (most commonly caused by empty configuration files) would raise ``AttributeError`` when `~invoke.config.merge_dicts` was used to merge config levels together. This has been fixed. Thanks to Tyler Hoffman and Vlad Frolov for the reports. - :feature:`-` Refactor `~invoke.tasks.Call` internals slightly, exposing some previously internal logic as the ``clone_data`` method; this is useful for client codebases when extending `~invoke.tasks.Call` and friends. - :feature:`-` Remove overzealous argument checking in `@task `, instead just handing any extra kwargs into the task class constructor. The high level behavior for truly invalid kwargs is the same (``TypeError``) but now extending codebases can add kwargs to their versions of ``@task`` without issue. 
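Paired with the ``klass`` kwarg described in the next entry, a sketch of the kind of extension this permits (``TimedTask`` and its ``timer`` kwarg are invented for the example)::

    from invoke import task
    from invoke.tasks import Task

    class TimedTask(Task):
        def __init__(self, *args, **kwargs):
            # Extra kwargs given to @task are handed to us untouched.
            self.timer = kwargs.pop("timer", False)
            super().__init__(*args, **kwargs)

    @task(klass=TimedTask, timer=True)
    def build(c):
        pass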
- :feature:`-` Add a ``klass`` kwarg to `@task ` to allow extending codebases the ability to create their own variants on ``@task``/``Task``. - :bug:`-` Fix up the ``__repr__`` of `~invoke.tasks.Call` to reference dynamic class name instead of hardcoding ``"Call"``; this allows subclasses' ``__repr__`` output to be correct instead of confusing. - :support:`- backported` Fixed some inaccuracies in the API docs around `~invoke.executor.Executor` and its ``core`` kwarg (was erroneously referring to `~invoke.parser.context.ParserContext` instead of `~invoke.parser.parser.ParseResult`). Includes related cleaning-up of docstrings and tests. - :support:`- backported` Apply the `black `_ code formatter to our codebase and our CI configuration. - :support:`- backported` Fix some test-suite-only failures preventing successful testing on Python 3.7 and PyPy3, and move them out of the 'allowed failures' test matrix quarantine now that they pass. - :support:`- backported` Implemented some minor missing tests, such as testing the ``INVOKE_DEBUG`` low-level env var. - :feature:`543` Implemented support for using ``INVOKE_RUNTIME_CONFIG`` env var as an alternate method of supplying a runtime configuration file path (effectively, an env var based version of using the ``-f``/``--config`` option). Feature request via Kevin J. Qiu. - :bug:`528` Around Invoke 0.23 we broke the ability to weave in subcollections via keyword arguments to `~invoke.collection.Collection`, though it primarily manifested as ``NoneType`` related errors during ``inv --list``. This was unintentional and has been fixed. Report submitted by Tuukka Mustonen. - :bug:`-` As part of solving :issue:`528` we found a related bug, where unnamed subcollections also caused issues with ``inv --list --list-format=json``. Specifically, `Collection.serialized ` sorts subcollections by name, which is problematic when that name is ``None``. This is now fixed. - :release:`1.0.0 <2018-05-09>` - :feature:`-` Added the :ref:`--prompt-for-sudo-password ` CLI option for getpass-based up-front prompting of a sensitive configuration value. - :feature:`-` Updated `~invoke.tasks.Task` to mimic the wrapped function's ``__module__`` attribute, allowing for better interaction with things like Sphinx autodoc that attempt to filter out imported objects from a module. - :bug:`- major` Removed an old, unused and untested (but, regrettably, documented and public) method that doesn't seem to be much use: ``invoke.config.Config.paths``. Please reach out if you were actually using it and we may consider adding some form of it back. .. warning:: This is a backwards incompatible change if you were using ``Config.paths``. - :bug:`- major` Tweaked the innards of `~invoke.config.Config`/`~invoke.config.DataProxy` to prevent accessing properties & other attributes' values during ``__setattr__`` (the code in question only needed the names). This should have no noticeable effect on user code (besides a marginal speed increase) but fixed some minor test coverage issues. - :release:`0.23.0 <2018-04-29>` - :bug:`- major` Previously, some error conditions (such as invalid task or collection names being supplied by the user) printed to standard output, instead of standard error. Standard error seems more appropriate here, so this has been fixed. .. warning:: This is backwards incompatible if you were explicitly checking the standard output of the ``inv[oke]`` program for some of these error messages. .. warning:: If your code is manually raising or introspecting instances of `~invoke.exceptions.Exit`, note that its signature has changed from ``Exit(code=0)`` to ``Exit(message=None, code=None)``. (Thus, this will only impact you if you were calling its constructor instead of raising the class object itself.) - :bug:`- major` `~invoke.collection.Collection` had some minor bugs or oversights in how it responds to things like ``repr()``, ``==``; boolean behavior; how docstrings appear when created from a Python module; etc. All are now fixed. If you're not sure whether this affects you, it does not :) - :bug:`- major` Integer-type CLI arguments were not displaying placeholder text in ``--help`` output (i.e. they appeared as ``--myint`` instead of ``--myint=INT``.) This has been fixed. - :feature:`33` Overhaul task listing (formerly just a simple, boolean ``--list``) to make life easier for users with nontrivial task trees: - Limit display to a specific namespace by giving an optional argument to ``--list``, e.g. ``--list build``; - Additional output formats besides the default (now known as ``flat``) such as a nested view with ``--list-format nested`` or script-friendly output with ``--list-format json``. - The default ``flat`` format now sorts a bit differently - the previous algorithm would break up trees of tasks. - Limit listing depth, so it's easier to view only the first level or two (i.e. the overall namespaces) of a large tree, e.g. ``--list --list-depth 1``; Thanks to the many users who submitted various requests under this ticket's umbrella, and to Dave Burkholder in particular for detailed use case analysis & feedback. - :support:`-` (partially re: :issue:`33`) Renamed the ``--root`` CLI flag to ``--search-root``, partly for clarity (:issue:`33` will be adding namespace display-root related flags, which would make ``--root`` ambiguous) and partly for consistency with the config option, which was already named ``search_root``. (The short version of the flag, ``-r``, is unchanged.) .. warning:: This is a backwards incompatible change. To fix, simply use ``--search-root`` anywhere you were previously using ``--root``. - :bug:`516 major` Remove the CLI parser ambiguity rule regarding flag-like tokens which are seen after an optional-value flag (e.g. ``inv task --optionally-takes-a-value --some-other-flag``.) Previously, any flag-like value in such a spot was considered ambiguous and raised a `~invoke.exceptions.ParseError`. Now, the surrounding parse context is used to resolve the ambiguity, and no error is raised. .. warning:: This behavior is backwards incompatible, but only if you had the minority case where users frequently *and erroneously* give otherwise-legitimate flag-like values to optional-value arguments, and you rely on the parse errors to notify them of their mistake. (If you don't understand what this means, don't worry, you almost certainly don't need to care!) - :support:`515` Ported the test suite from `spec `_ (`nose `_) to `pytest-relaxed `_ (`pytest `_) as pytest basically won the test-runner war against nose & has greater mindshare, more shiny toys, etc. - :support:`-` Rename ``invoke.platform`` to ``invoke.terminals``; it was inadvertently shadowing the ``platform`` standard library builtin module. This was not causing any bugs we are aware of, but it is still poor hygiene. .. warning:: This change is technically backwards incompatible. We don't expect many users to import ``invoke.platform`` directly, but if you do, take note.
- :bug:`- major` (partially re: :issue:`449`) Update error message around missing positional arguments so it actually lists them. Includes a minor tweak to the API of `~invoke.parser.context.ParserContext`, namely changing ``needs_positional_arguments`` (bool) to ``missing_positional_arguments`` (list). - :release:`0.22.1 <2018-01-29>` - :bug:`342` Accidentally hardcoded ``Collection`` instead of ``cls`` in `Collection.from_module ` (an alternate constructor and therefore a classmethod.) This made it rather hard to properly subclass `~invoke.collection.Collection`. Report and initial patch courtesy of Luc Saffre. - :support:`433 backported` Add -dev and -nightly style Python versions to our Travis builds. Thanks to ``@SylvainDe`` for the contribution. - :bug:`437` When merging configuration levels together (which uses `copy.copy` by default), pass file objects by reference so they don't get closed. Catch & patch by Paul Healy. - :support:`469 backported` Fix up the :ref:`doc/example ` re: subclassing `~invoke.config.Config`. Credit: ``@Aiky30``. - :bug:`488` Account for additional I/O related ``OSError`` error strings when attempting to capture only this specific subtype of error. This should fix some issues with less common libc implementations such as ``musl`` (as found on e.g. Alpine Linux.) Thanks to Rajitha Perera for the report. - :release:`0.22.0 <2017-11-29>` - :bug:`407 major` (also :issue:`494`, :issue:`67`) Update the default value of the ``run.shell`` config value so that it reflects a Windows-appropriate value (specifically, the ``COMSPEC`` env var or a fallback of ``cmd.exe``) on Windows platforms. This prevents Windows users from being forced to always ship around configuration-level overrides. Thanks to Maciej 'maQ' Kusz for the original patchset, and to ``@thebjorn`` and Garrett Jenkins for providing lots of feedback. - :bug:`- major` Iterable-type CLI args were actually still somewhat broken & were 'eating' values after themselves in the parser stream (thus e.g. preventing parsing of subsequent tasks or flags.) This has been fixed. - :support:`364` Drop Python 2.6 and Python 3.3 support, as these versions now account for only very low percentages of the userbase and are unsupported (or about to be unsupported) by the rest of the ecosystem, including ``pip``. This includes updating documentation & packaging metadata as well as taking advantage of basic syntax additions like set literals/comprehensions (``{1, 2, 3}`` instead of ``set([1, 2, 3])``) and removing positional string argument specifiers (``"{}".format(val)`` instead of ``"{0}".format(val)``). - :release:`0.21.0 <2017-09-18>` - :feature:`132` Implement 'iterable' and 'incrementable' CLI flags, allowing for invocations like ``inv mytask --listy foo --listy bar`` (resulting in a call like ``mytask(listy=['foo', 'bar'])``) or ``inv mytask -vvv`` (resulting in e.g. ``mytask(verbose=3)``). Specifically, these require use of the new :ref:`iterable ` and :ref:`incrementable ` arguments to `@task ` - see those links to the conceptual docs for details. - :release:`0.20.4 <2017-08-14>` - :bug:`-` The behavior of `Config ` when ``lazy=True`` didn't match that described in the API docs, after the recent updates to its lifecycle. (Specifically, any config data given to the constructor was not visible in the resulting instance until ``merge()`` was explicitly called.) This has been fixed, along with other related minor issues.
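A sketch of the corrected ``lazy`` behavior (the override values are invented for the example)::

    from invoke.config import Config

    c = Config(overrides={"run": {"echo": True}}, lazy=True)
    # Constructor-given data is now visible immediately; only the config
    # *file* levels remain unloaded until explicitly requested.
    assert c.run.echo is True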
- :release:`0.20.3 <2017-08-04>` - :bug:`467` (Arguably also a feature, but since it enables behavior users clearly found intuitive, we're considering it a bug.) Split up the parsing machinery of `Program ` and pushed the `Collection `-making out of `Loader `. Combined, this allows us to honor the project-level config file *before* the second (task-oriented) CLI parsing step, instead of after. For example, this means you can turn off ``auto_dash_names`` in your per-project configs and not only in your system or user configs. Report again courtesy of Luke Orland. .. warning:: This is a backwards incompatible change *if* you were subclassing and overriding any of the affected methods in the ``Program`` or ``Loader`` classes. - :release:`0.20.2 <2017-08-02>` - :bug:`465` The ``tasks.auto_dash_names`` config option added in ``0.20.0`` wasn't being fully honored when set to ``False``; this has been fixed. Thanks to Luke Orland for the report. - :release:`0.20.1 <2017-07-27>` - :bug:`-` Fix a broken ``six.moves`` import within ``invoke.util``; was causing ``ImportError`` in environments without an external copy of ``six`` installed. The dangers of one's local and CI environments all pulling down packages that use ``six``! It's everywhere! - :release:`0.20.0 <2017-07-27>` - :feature:`-` (required to support :issue:`310` and :issue:`329`) Break up the `~invoke.config.Config` lifecycle some more, allowing it to gradually load configuration vectors; this allows the CLI machinery (`~invoke.executor.Executor`) to honor configuration settings from config files which impact how CLI parsing and task loading behaves. Specifically, this adds more public ``Config.load_*`` methods, which in tandem with the ``lazy`` kwarg to ``__init__`` (formerly ``defer_post_init``, see below) allow full control over exactly when each config level is loaded. .. warning:: This change may be backwards incompatible if you were using or subclassing the `~invoke.config.Config` class in any of the following ways: - If you were passing ``__init__`` kwargs such as ``project_home`` or ``runtime_path`` and expecting those files to auto-load, they no longer do; you must explicitly call `~invoke.config.Config.load_project` and/or `~invoke.config.Config.load_runtime` explicitly. - The ``defer_post_init`` keyword argument to ``Config.__init__`` has been renamed to ``lazy``, and controls whether system/user config files are auto-loaded. - ``Config.post_init`` has been removed, in favor of explicit/granular use of the ``load_*`` family of methods. - All ``load_*`` methods now call ``Config.merge`` automatically by default (previously, merging was deferred to the end of most config related workflows.) This should only be a problem if your config contents are extremely large (it's an entirely in-memory dict-traversal operation) and can be avoided by specifying ``merge=False`` to any such method. (Note that you must, at some point, call `~invoke.config.Config.merge` in order for the config object to work normally!) - :feature:`310` (also :issue:`455`, :issue:`291`) Allow configuring collection root directory & module name via configuration files (previously, they were only configurable via CLI flags or generating a custom `~invoke.program.Program`.) - :feature:`329` All task and collection names now have underscores turned into dashes automatically, as task parameters have been for some time. This impacts ``--list``, ``--help``, and of course the parser. For details, see :ref:`dashes-vs-underscores`. 
This behavior is controlled by a new config setting, ``tasks.auto_dash_names``, which can be set to ``False`` to go back to the classic behavior. Thanks to Alexander Artemenko for the initial feature request. - :bug:`396 major` ``Collection.add_task(task, aliases=('other', 'names'))`` was listed in the conceptual documentation, but not implemented (technically, it was removed at some point and never reinstated.) It has been (re-)added and now exists. Thanks to ``@jenisys`` for the report. .. warning:: This technically changes argument order for `Collection.add_task `, so be aware if you were using positional arguments! - :bug:`- major` Display of hidden subprocess output when a command execution failed (end-of-session output starting with ``Encountered a bad command exit code!``) was liable to display encoding errors (e.g. ``'ascii' codec can't encode character ...``) when that output was not ASCII-compatible. This problem was previously solved for *non-hidden* (mirrored) subprocess output, but the fix (encode the data with the local encoding) had not been applied to exception display. Now it's applied in both cases. - :feature:`322` Allow users to completely disable mirroring of stdin to subprocesses, by specifying ``False`` for the ``run.in_stream`` config setting and/or keyword argument. This can help prevent problems when running Invoke under systems that have no useful standard input and which otherwise defeat our pty/fileno related detection. - :release:`0.19.0 <2017-06-19>` - :feature:`-` Add `MockContext.set_result_for ` to allow massaging a mock Context's configured results after instantiation. - :release:`0.18.1 <2017-06-07>` - :bug:`-` Update Context internals re: command execution & configuration of runner subclasses, to work better in client libraries such as Fabric 2. .. note:: If you were using the undocumented ``runner`` configuration value added in :issue:`446`, it is now ``runners.local``. .. warning:: This change modifies the internals of methods like `~invoke.context.Context.run` and `~invoke.context.Context.sudo`; users maintaining their own subclasses should be aware of possible breakage. - :release:`0.18.0 <2017-06-02>` - :feature:`446` Implement `~invoke.context.Context.cd` and `~invoke.context.Context.prefix` context managers (as methods on the not-that-one-the-other-one `~invoke.context.Context` class.) These are based on similar functionality in Fabric 1.x. Credit: Ryan P Kilby. - :support:`448` Fix up some config-related tests that have been failing on Windows for some time. Thanks to Ryan P Kilby. - :feature:`205` Allow giving core flags like ``--help`` after tasks to trigger per-task help. Previously, only ``inv --help taskname`` worked. .. note:: Tasks with their own ``--help`` flags won't be able to leverage this feature - the parser will still interpret the flag as being per-task and not global. This may change in the future to simply throw an exception complaining about the ambiguity. (Feedback welcome.) - :feature:`444` Add support for being used as ``python -m invoke `` on Python 2.7 and up. Thanks to Pekka Klärck for the feature request. - :release:`0.17.0 <2017-05-05>` - :bug:`439 major` Avoid placing stdin into bytewise read mode when it looks like Invoke has been placed in the background by a shell's job control system; doing so was causing the shell to pause the Invoke process (e.g. with a message like ``suspended (tty output)``.) Reported by Tuukka Mustonen.
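Rounding out the 0.18.0 ``cd``/``prefix`` entry above, a short sketch of the usage they enable (the path and commands are invented for the example)::

    from invoke import Context

    c = Context()
    with c.cd("/tmp/build"):
        with c.prefix("export STAGE=dev"):
            # Effectively runs: cd /tmp/build && export STAGE=dev && make
            c.run("make")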
- :bug:`425 major` Fix ``Inappropriate ioctl for device`` errors (usually ``OSError``) when running Invoke without a tty-attached stdin (i.e. when run under 'headless' continuous integration systems or simply as e.g. ``inv sometask < /dev/null`` (redirected stdin.) Thanks to Javier Domingo Cansino for the report & Tuukka Mustonen for troubleshooting assistance. - :feature:`-` Add a ``user`` kwarg & config parameter to `Context.sudo `, which corresponds roughly to ``sudo -u ``. - :bug:`440 major` Make sure to skip a call to ``struct``/``ioctl`` on Windows platforms; otherwise certain situations inside ``run`` calls would trigger import errors. Thanks to ``@chrisc11`` for the report. - :release:`0.16.3 <2017-04-18>` - :bug:`-` Even more setup.py related tomfoolery. - :release:`0.16.2 <2017-04-18>` - :bug:`-` Deal with the fact that PyPI's rendering of Restructured Text has no idea about our fancy new use of Sphinx's doctest module. Sob. - :release:`0.16.1 <2017-04-18>` - :bug:`-` Fix a silly typo preventing proper rendering of the packaging ``long_description`` (causing an effectively blank PyPI description.) - :release:`0.16.0 <2017-04-18>` - :feature:`232` Add support for ``.yml``-suffixed config files (in addition to ``.yaml``, ``.json`` and ``.py``.) Thanks to Matthias Lehmann for the original request & Greg Back for an early patch. - :feature:`418` Enhance ability of client libraries to override config filename prefixes. This includes modifications to related functionality, such as how env var prefixes are configured. .. warning:: **This is a backwards incompatible change** if: - you were relying on the ``env_prefix`` keyword argument to `Config.__init__ `; it is now the ``prefix`` or ``env_prefix`` class attribute, depending. - or the kwarg/attribute of the same name in `Program.__init__ `; you should now be subclassing ``Config`` and using its ``env_prefix`` attribute; - or if you were relying on how standalone ``Config`` objects defaulted to having a ``None`` value for ``env_prefix``, and thus loaded env vars without an ``INVOKE_`` style prefix. See new documentation for this functionality at :ref:`customizing-config-defaults` for details. - :feature:`309` Overhaul how task execution contexts/configs are handled, such that all contexts in a session now share the same config object, and thus user modifications are preserved between tasks. This has been done in a manner that should not break things like collection-based config (which may still differ from task to task.) .. warning:: **This is a backwards incompatible change** if you were relying on the post-0.12 behavior of cloning config objects between each task execution. Make sure to investigate if you find tasks affecting one another in unexpected ways! - :support:`-` Fixed some Python 2.6 incompatible string formatting that snuck in recently. - :feature:`-` Switched the order of the first two arguments of `Config.__init__ `, so that the ``overrides`` kwarg becomes the first positional argument. This supports the common use case of making a `Config ` object that honors the system's core/global defaults; previously, because ``defaults`` was the first argument, you'd end up replacing those core defaults instead of merging with them. .. warning:: **This is a backwards incompatible change** if you were creating custom ``Config`` objects via positional, instead of keyword, arguments. It should have no effect otherwise. 
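A sketch of the intended calling pattern under the new argument order (the override values are invented for the example)::

    from invoke.config import Config

    # The first positional argument is now 'overrides', so these values get
    # merged on top of the global defaults rather than replacing them.
    c = Config({"run": {"echo": True}})
    assert c.run.echo is True   # our override
    assert c.run.warn is False  # an untouched global default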
- :feature:`-` `Context.sudo ` no longer prompts the user when the configured sudo password is empty; thus, an empty sudo password and a ``sudo`` program configured to require one will result in an exception. The runtime prompting for a missing password was a temporary holdover from Fabric v1, and in retrospect is undesirable. We may add it back in as an opt-in behavior (probably via subclassing) in the future if anybody misses it. .. warning:: **This is a backwards incompatible change**, if you were relying on ``sudo()`` prompting you for your password (vs configuring it). If you *were* doing that, you can simply switch to ``run("sudo ")`` and respond to the subprocess' sudo prompt by hand instead. - :feature:`-` `Result ` and `UnexpectedExit ` objects now have a more useful ``repr()`` (and in the case of ``UnexpectedExit``, a distinct ``repr()`` from their preexisting ``str()``.) - :bug:`432 major` Tighten application of IO thread ``join`` timeouts (in `run `) to only happen when :issue:`351` appears actually present. Otherwise, slow/overworked IO threads had a chance of being joined before truly reading all data from the subprocess' pipe. - :bug:`430 major` Fallback importing of PyYAML when Invoke has been installed without its vendor directory, was still trying to import the vendorized module names (e.g. ``yaml2`` or ``yaml3`` instead of simply ``yaml``). This has been fixed, thanks to Athmane Madjoudj. - :release:`0.15.0 <2017-02-14>` - :bug:`426 major` `DataProxy ` based classes like `Config ` and `Context ` didn't like being `pickled ` or `copied ` and threw ``RecursionError``. This has been fixed. - :feature:`-` `Config `'s internals got cleaned up somewhat; end users should not see much of a difference, but advanced users or authors of extension code may notice the following: - Direct modification of config data (e.g. ``myconfig.section.subsection.key = 'value'`` in user/task code) is now stored in its own config 'level'/data structure; previously such modifications simply mutated the central, 'merged' config cache. This makes it much easier to determine where a final observed value came from, and prevents accidental data loss. - Ditto for deleted values. - Merging/reconciliation of the config levels now happens automatically when data is loaded or modified, which not only simplifies the object's lifecycle a bit but allows the previous change to function without requiring users to call ``.merge()`` after every modification. - :bug:`- major` Python 3's hashing rules differ from Python 2, specifically: A class that overrides ``__eq__()`` and does not define ``__hash__()`` will have its ``__hash__()`` implicitly set to None. `Config ` (specifically, its foundational class `DataProxy `) only defined ``__eq__`` which, combined with the above behavior, meant that ``Config`` objects appeared to hash successfully on Python 2 but yielded ``TypeErrors`` on Python 3. This has been fixed by explicitly setting ``__hash__ = None`` so that the objects do not hash on either interpreter (there are no good immutable attributes by which to define hashability). - :bug:`- major` Configuration keys named ``config`` were inadvertently exposing the internal dict representation of the containing config object, instead of displaying the actual value stored in that key. (Thus, a set config of ``mycontext.foo.bar.config`` would act as if it was the key/value contents of the ``mycontext.foo.bar`` subtree.) This has been fixed. 
- :feature:`421` Updated `Config.clone ` (and a few other related areas) to replace use of `copy.deepcopy` with a less-rigorous but also less-likely-to-explode recursive dict copier. This prevents frustrating ``TypeErrors`` while still preserving barriers between different tasks' configuration values. - :feature:`-` `Config.clone ` grew a new ``into`` kwarg allowing client libraries with their own `~invoke.config.Config` subclasses to easily "upgrade" vanilla Invoke config objects into their local variety. - :bug:`419 major` Optional parser arguments had a few issues: - The :ref:`conceptual docs about CLI parsing ` mentioned them, but didn't actually show via example how to enable the feature, implying (incorrectly) that they were active always by default. An example has been added. - Even when enabled, they did not function correctly when their default values were of type ``bool``; in this situation, trying to give a value (vs just giving the flag name by itself) caused a parser error. This has been fixed. Thanks to ``@ouroboroscoding`` for the report. - :support:`204` (via :issue:`412`) Fall back to globally-installed copies of our vendored dependencies, if the import from the ``vendor`` tree fails. In normal situations this won't happen, but it allows advanced users or downstream maintainers to nuke ``vendor/`` and prefer explicitly installed packages of e.g. ``six``, ``pyyaml`` or ``fluidity``. Thanks to Athmane Madjoudj for the patch. - :bug:`- major` Fix configuration framework such that nested or dict-like config values may be compared with regular dicts. Previously, doing so caused an ``AttributeError`` (as regular dicts lack a ``.config``). - :bug:`413 major` Update behavior of ``DataProxy`` (used within `~invoke.context.Context` and `~invoke.config.Config`) again, fixing two related issues: - Creating new configuration keys via attribute access wasn't possible: one had to do ``config['foo'] = 'bar'`` because ``config.foo = 'bar'`` would set a real attribute instead of touching configuration. - Supertypes' attributes weren't being considered during the "is this a real attribute on ``self``?" test, leading to different behavior between a nested config-value-as-attribute and a top-level Context/Config one. - :release:`0.14.0 <2016-12-05>` - :bug:`349 major` Display the string representation of `~invoke.exceptions.UnexpectedExit` when handling it inside of `~invoke.program.Program` (including regular ``inv``), if any output was hidden during the ``run`` that generated it. Previously, we only exited with the exception's stored exit code, meaning failures of ``run(..., hide=True)`` commands were unexpectedly silent. (Library-style use of the codebase didn't have this problem, since tracebacks aren't muted.) While implementing this change, we also tweaked the overall display of ``UnexpectedExit`` so it's a bit more consistent & useful: - noting "hey, you ran with ``pty=True``, so there's no stderr"; - showing only the last 10 lines of captured output in the error message (users can, of course, always manually handle the error & access the full thing if desired); - only showing a given stream when it was not already printed to the user's terminal (i.e. if ``hide=False``, no captured output is shown in the error text; if ``hide='stdout'``, only stdout is shown in the error text; etc.) Thanks to Patrick Massot for the original bug report. - :feature:`-` Expose the (normalized) value of `~invoke.runners.Runner.run`'s ``hide`` parameter in its return-value `~invoke.runners.Result` objects. 
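For example (assuming, per the current API docs, that the normalized form is a tuple of stream names)::

    from invoke import run

    r = run("echo hi", hide=True)
    assert set(r.hide) == {"stdout", "stderr"}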
- :bug:`288 major` Address a bug preventing reuse of Invoke as a custom binstub, by moving ``--list`` into the "core args" set of flags present on all Invoke-derived binstubs. Thanks to Jordon Mears for catch & patch. - :bug:`283 major` Fix the concepts/library docs so the example of an explicit ``namespace=`` argument correctly shows wrapping an imported task module in a `~invoke.collection.Collection`. Thanks to ``@zaiste`` for the report. - :bug:`- major` Fix ``DataProxy`` (used within `~invoke.context.Context` and `~invoke.config.Config`) so that real attributes and methods which are shadowed by configuration keys, aren't proxied to the config during regular attribute get/set. (Such config keys are thus required to be accessed via dict-style only, or (on `~invoke.context.Context`) via the explicit ``.config`` attribute.) - :bug:`58 major` Work around bugs in ``select()`` when handling subprocess stream reads, which was causing poor behavior in many nontrivial interactive programs (such as ``vim`` and other fullscreen editors, ``python`` and other REPLs/shells, etc). Such programs should now be largely indistinguishable from their behavior when run directly from a user's shell. - :feature:`406` Update handling of Ctrl-C/``KeyboardInterrupt``, and subprocess exit status pass-through, to be more correct than before: - Submit the interrupt byte sequence ``\x03`` to stdin of all subprocesses, instead of sending ``SIGINT``. - This results in behavior closer to that of truly pressing Ctrl-C when running subprocesses directly; for example, interactive programs like ``vim`` or ``python`` now behave normally instead of prematurely exiting. - Of course, programs that would normally exit on Ctrl-C will still do so! - The exit statuses of subprocesses run with ``pty=True`` are more rigorously checked (using `os.WIFEXITED` and friends), allowing us to surface the real exit values of interrupted programs instead of manually assuming exit code ``130``. - Typically, this will be exit code ``-2``, but it is system dependent. - Other, non-Ctrl-C-driven signal-related exits under PTYs should behave better now as well - previously they could appear to exit ``0``! - Non-subprocess-related ``KeyboardInterrupt`` (i.e. those generated when running top level Python code outside of any ``run`` function calls) will now trigger exit code ``1``, as that is how the Python interpreter typically behaves if you ``KeyboardInterrupt`` it outside of a live REPL. .. warning:: These changes are **backwards incompatible** if you were relying on the "exits ``130``" behavior added in version 0.13, or on the (incorrect) ``SIGINT`` method of killing pty-driven subprocesses on Ctrl-C. - :bug:`- major` Correctly raise ``TypeError`` when unexpected keyword arguments are given to `~invoke.runners.Runner.run`. - :feature:`-` Add a `~invoke.context.MockContext` class for easier testing of user-written tasks and related client code. Includes adding a :ref:`conceptual document on how to test Invoke-using code `. - :feature:`-` Update implementation of `~invoke.runners.Result` so it has default values for all parameters/attributes. This allows it to be more easily used when mocking ``run`` calls in client libraries' tests. .. warning:: This is a backwards incompatible change if you are manually instantiating `~invoke.runners.Result` objects with positional arguments: positional argument order has changed. (Compare the API docs between versions to see exactly how.) 
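For instance, a test double can now be built with only the fields under test (the values here are invented for the example)::

    from invoke import Result

    r = Result(stdout="fake output")
    assert r.ok and r.exited == 0 and r.stderr == ""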
- :feature:`294` Implement `Context.sudo `, which wraps `~invoke.context.Context.run` inside a ``sudo`` command. It is capable of auto-responding to ``sudo``'s password prompt with a configured password, and raises a specific exception (`~invoke.exceptions.AuthFailure`) if that password is rejected. - :feature:`369` Overhaul the autoresponse functionality for `~invoke.run` so it's significantly more extensible, both for its own sake and as part of implementing :issue:`294` (see its own changelog entry for details). .. warning:: This is a backwards incompatible change: the ``responses`` kwarg to ``run()`` is now ``watchers``, and accepts a list of `~invoke.watchers.StreamWatcher` objects (such as `~invoke.watchers.Responder`) instead of a dict. If you were using ``run(..., responses={'pattern': 'response'})`` previously, just update to instead use ``run(..., watchers=[Responder('pattern', 'response')])``. - :bug:`- major` Fix a bug in `Config.clone ` where it was instantiating a new ``Config`` instead of a member of the subclass. - :release:`0.13.0 <2016-06-09>` - :feature:`114` Ripped off the band-aid and removed non-contextualized tasks as an option; all tasks must now be contextualized (defined as ``def mytask(context, ...)`` - see :ref:`defining-and-running-task-functions`) even if not using the context. This simplifies the implementation as well as users' conceptual models. Thanks to Bay Grabowski for the patch. .. warning:: This is a backwards incompatible change! - :bug:`350 major` (also :issue:`274`, :issue:`241`, :issue:`262`, :issue:`242`, :issue:`321`, :issue:`338`) Clean up and reorganize encoding-related parts of the code to avoid some of the more common or egregious encode/decode errors surrounding clearly non-ASCII-compatible text. Bug reports, assistance, feedback and code examples courtesy of Paul Moore, Vlad Frolov, Christian Aichinger, Fotis Gimian, Daniel Nunes, and others. - :bug:`351 major` Protect against ``run`` deadlocks involving exceptions in I/O threads & nontrivial amounts of unread data in the corresponding subprocess pipe(s). This situation should now always result in exceptions instead of hangs. - :feature:`259` (also :issue:`280`) Allow updating (or replacing) subprocess shell environments, via the ``env`` and ``replace_env`` kwargs to `~invoke.runners.Runner.run`. Thanks to Fotis Gimian for the report, ``@philtay`` for an early version of the final patch, and Erich Heine & Vlad Frolov for feedback. - :feature:`67` Added ``shell`` option to `~invoke.runners.Runner.run`, allowing control of the shell used when invoking commands. Previously, ``pty=True`` used ``/bin/bash`` and ``pty=False`` (the default) used ``/bin/sh``; the new unified default value is ``/bin/bash``. Thanks to Jochen Breuer for the report. - :bug:`152 major` (also :issue:`251`, :issue:`331`) Correctly handle ``KeyboardInterrupt`` during `~invoke.runners.Runner.run`, re: both mirroring the interrupt signal to the subprocess *and* capturing the local exception within Invoke's CLI handler (so there's no messy traceback, just exiting with code ``130``). Thanks to Peter Darrow for the report, and to Mika Eloranta & Máté Farkas for early versions of the patchset. - :support:`319` Fixed an issue resulting from :issue:`255` which caused problems with how we generate release wheels (notably, some releases such as 0.12.1 fail when installing from wheels on Python 2). .. note:: As part of this fix, the next release will distribute individual Python 2 and Python 3 wheels instead of one 'universal' wheel.
This change should be transparent to users. Thanks to ``@ojos`` for the initial report and Frazer McLean for some particularly useful feedback. - :release:`0.12.2 <2016-02-07>` - :support:`314 backported` (Partial fix.) Update ``MANIFEST.in`` so source distributions include some missing project-management files (e.g. our internal ``tasks.py``). This makes unpacked sdists more useful for things like running the doc or build tasks. - :bug:`303` Make sure `~invoke.run` waits for its IO worker threads to cleanly exit (such as allowing a ``finally`` block to revert TTY settings) when ``KeyboardInterrupt`` (or similar) aborts execution in the main thread. Thanks to Tony S Yu and Máté Farkas for the report. - :release:`0.12.1 <2016-02-03>` - :bug:`308` Earlier changes to TTY detection & its use in determining features such as stdin pass-through, were insufficient to handle edge cases such as nested Invoke sessions or piped stdin to Invoke processes. This manifested as hangs and ``OSError`` messages about broken pipes. The issue has been fixed by overhauling all related code to use more specific and accurate checks (e.g. examining just ``fileno`` and/or just ``isatty``). Thanks to Tuukka Mustonen and Máté Farkas for the report (and for enduring the subsequent flood of the project maintainer's stream-of-consciousness ticket updates). - :bug:`305` (also :issue:`306`) Fix up some test-suite issues causing failures on Windows/Appveyor. Thanks to Paul Moore. - :bug:`289` Handful of issues, all fallout from :issue:`289`, which failed to make it out the door for 0.12.0. More are on the way but these should address blockers for some users: * Windows support for the new stdin replication functionality (this was totally blocking Windows users, as reported in :issue:`302` - sorry!); * Stdin is now mirrored to stdout when no PTY is present, so you can see what you're typing (plus a new `~invoke.runners.Runner.run` option and config param, ``echo_stdin``, allowing user override of this behavior); * Exposed the stdin read loop's sleep time as `Runner.input_sleep `; * Sped up some tests a bit. - :release:`0.12.0 <2016-01-12>` - :bug:`257 major` Fix a RecursionError under Python 3 due to lack of ``__deepcopy__`` on `~invoke.tasks.Call` objects. Thanks to Markus Zapke-Gründemann for initial report and Máté Farkas for the patch. - :support:`265` Update our Travis config to select its newer build infrastructure and also run on PyPy3. Thanks to Omer Katz. - :support:`254` Add an ``exclude`` option in our ``setup.py`` so setuptools doesn't try loading our vendored PyYAML's Python 2 sub-package under Python 3 (or vice versa - though all reports were from Python 3 users). Thanks to ``@yoshiya0503`` for catch & initial patch. - :feature:`68` Disable Python's bytecode caching by default, as it complicates our typical use case (frequently-changing .py files) and offers little benefit for human-facing startup times. Bytecode caching can be explicitly re-enabled by specifying ``--write-pyc`` at runtime. Thanks to Jochen Breuer for feature request and ``@brutus`` for initial patchset. - :support:`144` Add code-coverage reporting to our CI builds (albeit `CodeCov `_ instead of `coveralls.io `_). Includes rejiggering our project-specific coverage-generating tasks. Thanks to David Baumgold for the original request/PR and to Justin Abrahms for the tipoff re: CodeCov. - :bug:`297 major` Ignore leading and trailing underscores when turning task arguments into CLI flag names. 
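For example, trailing (or leading) underscores used to dodge Python builtins no longer leak into the generated flag names (the task and argument names are invented for the example)::

    from invoke import task

    @task
    def cleanup(c, dict_=False, _verbose=False):
        # Exposed on the CLI as --dict and --verbose.
        pass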
- :bug:`296 major` Don't mutate ``sys.path`` on collection load if task's parent directory is already on ``sys.path``. - :bug:`295 major` Make sure that `~invoke.run`'s ``hide=True`` also disables echoing. Otherwise, "hidden" helper ``run`` calls will still pollute output when run as e.g. ``invoke --echo ...``. - :feature:`289` (also :issue:`263`) Implement :ref:`autoresponding ` for `~invoke.run`. - :support:`-` Removed official Python 3.2 support; sibling projects also did this recently, it's simply not worth the annoyance given the userbase size. - :feature:`228` (partial) Modified and expanded implementation of `~invoke.executor.Executor`, `~invoke.tasks.Task` and `~invoke.tasks.Call` to make implementing task parameterization easier. - :support:`-` Removed the ``-H`` short flag, leaving just ``--hide``. This was done to avoid conflicts with Fabric's host-oriented ``-H`` flag. Favoritism is real! Apologies. .. warning:: This change is backwards incompatible if you used ``-H``. - :feature:`173` Overhauled top level CLI functionality to allow reusing Invoke for distinct binaries, optionally with bundled task namespaces as subcommands. As a side effect, this functionality is now much more extensible to boot. Thanks to Erich Heine for feedback/suggestions during development. .. warning:: This change is backwards incompatible if you imported anything from the ``invoke.cli`` module (which is now rearchitected as `~invoke.program.Program`). It should be transparent to everybody else. .. seealso:: :ref:`reusing-as-a-binary` - :bug:`- major` Fixed a bug in the parser where ``invoke --takes-optional-arg avalue --anotherflag`` was incorrectly considering ``--anotherflag`` to be an ambiguity error (as if ``avalue`` had not been given to ``--takes-optional-arg``.) - :release:`0.11.1 <2015-09-07>` - :support:`- backported` Fix incorrect changelog URL in package metadata. - :release:`0.11.0 <2015-09-07>` - :feature:`-` Add a ``.command`` attribute to `~invoke.runners.Result` to preserve the command executed for post-execution introspection. - :feature:`-` Detect local controlling terminal size (`~invoke.terminals.pty_size`) and apply that information when creating pseudoterminals in `~invoke.run` when ``pty=True``. - :bug:`- major` Display stdout instead of stderr in the ``repr()`` of `~invoke.exceptions.Failure` objects, when a pseudo-terminal was used. Previously, failure display focused on the stderr stream, which is always empty under pseudo-terminals. - :bug:`- major` Correctly handle situations where `sys.stdin` has been replaced with an object lacking ``.fileno`` (e.g., some advanced Python shells, headless code execution tools, etc). Previously, this situation resulted in an ``AttributeError``. - :bug:`- major` Capture & reraise exceptions generated by command execution I/O threads, in the main thread, as a `~invoke.exceptions.ThreadException`. - :feature:`235` Allow custom stream objects to be used in `~invoke.run` calls, instead of the defaults of ``sys.stdout``/``sys.stderr``. .. warning:: This change required a major cleanup/rearchitecture of the command execution implementation. The vendored ``pexpect`` module has been completely removed and the API of the `~invoke.runners.Runner` class has changed dramatically (though **the API for run() itself has not**). Be aware there may be edge-case terminal behaviors which have changed or broken as a result of removing ``pexpect``. Please report these as bugs!
We expect to crib small bits of what ``pexpect`` does but need concrete test cases first. - :bug:`234 major` (also :issue:`243`) Preserve task-module load location when creating explicit collections with `~invoke.collection.Collection.from_module`; when this was not done, project-local config files were not loading correctly. Thanks to ``@brutus`` and Jan Willems for initial report & troubleshooting, and to Greg Back for identifying the fix. - :bug:`237 major` Completion output lacked "inverse" flag names (e.g. ``--no-myoption`` as a boolean negative version of a defaulting-to-True boolean ``myoption``). This has been corrected. - :bug:`239 major` Completion erroneously presented core flags instead of per-task flags when both are present in the invocation being completed (e.g. ``inv --debug my_task -``). This has been fixed. - :bug:`238 major` (partial fix) Update the ``zsh`` completion script to account for use of the ``--collection`` core flag. - :support:`-` Additional rearranging of ``run``/``Runner`` related concerns for improved subclassing, organization, and use in other libraries, including: * Changed the name of the ``runner`` module to ``runners``. * Moved the top level ``run`` function from its original home in ``invoke.runner`` to `invoke.__init__ `, to reflect the fact that it's now simply a convenience wrapper around ``Runner``. * Tweaked the implementation of `~invoke.runners.Runner` so it can reference `~invoke.context.Context` objects (useful for anticipated subclasses). .. warning:: These are backwards incompatible changes if your code was doing any imports from the ``invoke.runner`` module (including especially ``invoke.runner.run``, which is now only ``invoke.run``). Function signatures have **not** changed. - :support:`224` Add a completion script for the ``fish`` shell, courtesy of Jaime Marquínez Ferrándiz. - :release:`0.10.1 <2015-03-17>` - :support:`- backported` Tweak README to reflect recent(-ish) changes in ``pip`` re: users who install the development version via ``pip`` instead of using git. - :release:`0.10.0 <2015-03-17>` - :feature:`104` Add core CLI flag ``--complete`` to support shell tab completion scripts, and add some 'blessed' such scripts for bash (3 and 4) and zsh. Thanks to Ivan Malison and Andrew Roberts for providing discussion & early patchsets. - :support:`-` Reorganize `~invoke.runners.Runner`, `~invoke.runners.Local` and ``invoke.runner.run`` for improved distribution of responsibilities & downstream subclassing. .. warning:: This includes backwards incompatible changes to the API signature of most members of the ``invoke.runner`` module, including ``invoke.runner.run``. (However, in the case of ``invoke.runner.run``, the changes are mostly in the later, optional keyword arguments.) - :feature:`219` Fall back to non-PTY command execution in situations where ``pty=True`` but no PTY appears present. See `~invoke.runners.Local` for details. - :support:`212` Implement basic linting support using ``flake8``, and apply formatting changes to satisfy said linting. As part of this shakeup, also changed all old-style (``%s``) string formatting to new-style (``{0}``). Thanks to Collin Anderson for the foundational patch. - :support:`215` (also :issue:`213`, :issue:`214`) Tweak tests & configuration sections of the code to include Windows compatibility. Thanks to Paul Moore. 
- :bug:`201 major` (also :issue:`211`) Replace the old, first-draft gross monkeypatched Popen code used for ``invoke.runner.run`` with a non-monkeypatched approach that works better on non-POSIX platforms like Windows, and also attempts to handle encoding and locale issues more gracefully (meaning: at all gracefully). Specifically, the new approach uses threading instead of ``select.select``, and performs explicit encoding/decoding based on detected or explicitly expressed encodings. Major thanks to Paul Moore for an enormous amount of testing/experimentation/discussion, as well as the bulk of the code changes themselves. .. warning:: The top level ``invoke.runner.run`` function has had a minor signature change: the sixth positional argument used to be ``runner`` and is now ``encoding`` (with ``runner`` now being the seventh positional argument). - :feature:`147` Drastically overhaul/expand the configuration system to account for multiple configuration levels including (but not limited to) file paths, environment variables, and Python-level constructs (previously the only option). See :ref:`configuration` for details. Thanks to Erich Heine for his copious feedback on this topic. .. warning:: This is technically a backwards incompatible change, though some existing user config-setting code may continue to work as-is. In addition, this system may see further updates before 1.0. - :bug:`191 major` Bypass ``pexpect``'s automatic command splitting to avoid issues running complex nested/quoted commands under a pty. Credit to ``@mijikai`` for noticing the problem. - :bug:`183 major` Task docstrings whose first line started on the same line as the opening quote(s) were incorrectly presented in ``invoke --help <task>``. This has been fixed by using `inspect.getdoc`. Thanks to Pekka Klärck for the catch & suggested fix. - :bug:`180 major` Empty invocation (e.g. just ``invoke`` with no flags or tasks, and when no default task is defined) no longer printed help output, instead complaining about the lack of default task. It now prints help again. Thanks to Brent O'Connor for the catch. - :bug:`175 major` ``autoprint`` did not function correctly for tasks stored in sub-collections; this has been fixed. Credit: Matthias Lehmann. - :release:`0.9.0 <2014-08-26>` - :bug:`165 major` Running ``inv[oke]`` with no task names on a collection containing a default task should (intuitively) have run that default task, but instead did nothing. This has been fixed. - :bug:`167 major` Running the same task multiple times in one CLI session was horribly broken; it works now. Thanks to Erich Heine for the report. - :bug:`119 major` (also :issue:`162`, :issue:`113`) Better handle platform-sensitive operations such as pty size detection or use, either replacing with platform-specific implementations or raising useful exceptions. Thanks to Gabi Davar and (especially) Paul Moore, for feedback & original versions of the final patchset. - :feature:`136` Added the ``autoprint`` flag to `invoke.tasks.Task`/`@task <invoke.tasks.task>`, allowing users to set up tasks which act as both subroutines & "print a result" CLI tasks. Thanks to Matthias Lehmann for the original patch. - :bug:`162 major` Adjust platform-sensitive imports so Windows users don't encounter import-time exceptions. Thanks to Paul Moore for the patch. - :support:`169` Overhaul the Sphinx docs into two trees, one for main project info and one for versioned API docs.
- :bug:`- major` Fixed a sub-case of the already-mostly-fixed :issue:`149` so the error message works usefully even with no explicit collection name given. - :release:`0.8.2 <2014-06-15>` - :bug:`149` Print a useful message to stderr when Invoke can't find the requested collection/tasks file, instead of displaying a traceback. - :bug:`145` Ensure a useful message is displayed (instead of a confusing exception) when listing empty task collections. - :bug:`142` The refactored Loader class failed to account for the behavior of `imp.find_module` when run against packages (vs modules) and was exploding at load time. This has been fixed. Thanks to David Baumgold for catch & patch. - :release:`0.8.1 <2014-06-09>` - :bug:`140` Revert incorrect changes to our ``setup.py`` regarding detection of sub-packages such as the vendor tree & the parser. Also add additional scripting to our Travis-CI config to catch this class of error in future. Thanks to Steven Loria and James Cox for the reports. - :release:`0.8.0 <2014-06-08>` - :feature:`135` (also bugs :issue:`120`, :issue:`123`) Implement post-tasks to match pre-tasks, and allow control over the arguments passed to both (via `invoke.tasks.call`). For details, see :ref:`pre-post-tasks`. .. warning:: Pre-tasks were overhauled a moderate amount to implement this feature; they now require references to **task objects** instead of **task names**. This is a backwards incompatible change. - :support:`25` Trim a bunch of time off the test suite by using mocking and other tools instead of dogfooding a bunch of subprocess spawns. - :bug:`128 major` Positional arguments containing underscores were not exporting to the parser correctly; this has been fixed. Thanks to J. Javier Maestro for catch & patch. - :bug:`121 major` Add missing help output denoting inverse Boolean options (i.e. ``--[no-]foo`` for a ``--foo`` flag whose value defaults to true.) Thanks to Andrew Roberts for catch & patch. - :support:`118` Update the bundled ``six`` plus other minor tweaks to support files. Thanks to Matt Iversen. - :feature:`115` Make it easier to reuse Invoke's primary CLI machinery in other (non-Invoke-distributed) bin-scripts. Thanks to Noah Kantrowitz. - :feature:`110` Add task docstrings' 1st lines to ``--list`` output. Thanks to Hiroki Kiyohara for the original PR (with assists from Robert Read and James Thigpen.) - :support:`117` Tidy up ``setup.py`` a bit, including axing the (broken) `distutils` support. Thanks to Matt Iversen for the original PR & followup discussion. - :feature:`87` (also :issue:`92`) Rework the loader module such that recursive filesystem searching is implemented, and is used instead of searching `sys.path`. This adds the behavior most users expect or are familiar with from Fabric 1 or similar tools; and it avoids nasty surprise collisions with other installed packages containing files named ``tasks.py``. Thanks to Michael Hahn for the original report & PR, and to Matt Iversen for providing the discovery algorithm used in the final version of this change. .. warning:: This is technically a backwards incompatible change (reminder: we're not at 1.0 yet!). You'll only notice if you were relying on adding your tasks module to ``sys.path`` and then calling Invoke elsewhere on the filesystem. - :support:`-` Refactor the `invoke.runners.Runner` module to differentiate what it means to run a command in the abstract, from execution specifics. Top level API is unaffected. 
- :bug:`131 major` Make sure one's local tasks module is always first in ``sys.path``, even if its parent directory was already somewhere else in ``sys.path``. This ensures that local tasks modules never become hidden by third-party ones. Thanks to ``@crccheck`` for the early report and to Dorian Puła for assistance fixing. - :bug:`116 major` Ensure nested config overrides play nicely with default tasks and pre-tasks. - :bug:`127 major` Fill in tasks' exposed ``name`` attribute with body name if explicit name not given. - :feature:`124` Add a ``--debug`` flag to the core parser to enable easier debugging (on top of existing ``INVOKE_DEBUG`` env var.) - :feature:`125` Improve output of Failure exceptions when printed. - :release:`0.7.0 <2014.01.28>` - :feature:`109` Add a ``default`` kwarg to `invoke.collection.Collection.add_task` allowing per-collection control over default tasks. - :feature:`108` Update `invoke.collection.Collection.from_module` to accept useful shorthand arguments for tweaking the `invoke.collection.Collection` objects it creates (e.g. name, configuration.) - :feature:`107` Update configuration merging behavior for more flexible reuse of imported task modules, such as parameterizing multiple copies of a module within a task tree. - :release:`0.6.1 <2013.11.21>` - :bug:`96` Tasks in subcollections which set explicit names (via e.g. ``@task(name='foo')``) were not having those names honored. This is fixed. Thanks to Omer Katz for the report. - :bug:`98` **BACKWARDS INCOMPATIBLE CHANGE!** Configuration merging has been reversed so outer collections' config settings override inner collections. This makes distributing reusable modules significantly less silly. - :release:`0.6.0 <2013.11.21>` - :bug:`86 major` Task arguments named with an underscore broke the help feature; this is now fixed. Thanks to Stéphane Klein for the catch. - :feature:`89` Implemented configuration for distributed task modules: can set config options in `invoke.collection.Collection` objects and they are made available to contextualized tasks. - :release:`0.5.1 <2013.09.15>` - :bug:`81` Fall back to sane defaults for PTY sizes when autodetection gives insane results. Thanks to ``@akitada`` for the patch. - :bug:`83` Fix a bug preventing underscored keyword arguments from working correctly as CLI flags (e.g. ``mytask --my-arg`` would not map back correctly to ``mytask(my_arg=...)``.) Credit: ``@akitada``. - :release:`0.5.0 <2013.08.16>` - :feature:`57` Optional-value flags added - e.g. ``--foo`` tells the parser to set the ``foo`` option value to True; ``--foo myval`` sets the value to "myval". The built-in ``--help`` option now leverages this feature for per-task help (e.g. ``--help`` displays global help, ``--help mytask`` displays help for ``mytask`` only.) - :bug:`55 major` A bug in our vendored copy of ``pexpect`` clashed with a Python 2->3 change in import behavior to prevent Invoke from running on Python 3 unless the ``six`` module was installed in one's environment. This was fixed - our vendored ``pexpect`` now always loads its sibling vendored ``six`` correctly. 
invoke-2.2.0/sites/www/conf.py000066400000000000000000000012771445356551000162770ustar00rootroot00000000000000# Obtain shared config values import sys import os from os.path import abspath, join, dirname sys.path.append(abspath(join(dirname(__file__), ".."))) from shared_conf import * # Releases changelog extension extensions.append("releases") releases_github_path = "pyinvoke/invoke" # Default is 'local' building, but reference the public docs site when building # under RTD. target = join(dirname(__file__), "..", "docs", "_build") if os.environ.get("READTHEDOCS") == "True": target = "https://docs.pyinvoke.org/en/latest/" intersphinx_mapping["docs"] = (target, None) # Sister-site links to documentation html_theme_options["extra_nav_links"] = { "Documentation": "https://docs.pyinvoke.org" } invoke-2.2.0/sites/www/contact.rst000066400000000000000000000023021445356551000171530ustar00rootroot00000000000000Contact ======= You can get in touch with the developer & user community in any of the following ways: * Bug reports and feature requests: first read `contribution-guide.org `_, then check out our `GitHub page `_. * Blog posts: https://bitprophet.org/categories/invoke/ * Twitter: you've got a few options here: * `@bitprophet `_ is the canonical source for updates, but is also the developer's personal account (hint: you can turn off retweets and only see original content!) * `@pyfabric `_ is a much lower-traffic, announcement-only account that also serves the `Fabric `_ project; given how much Fabric is built directly on top of Invoke, many of the posts will be relevant to Invoke-only users. * `@pyinvoke `_ was set up for Invoke-specific announcements, but it only has a dozen followers so we've unfortunately let it languish. Should we automate our release process further, this account may get posts again, and we'll update this page accordingly. invoke-2.2.0/sites/www/development.rst000066400000000000000000000033771445356551000200570ustar00rootroot00000000000000=========== Development =========== Obtaining a source checkout =========================== Our Git repository is maintained on Github at `pyinvoke/invoke`_. Please follow their instructions for cloning (or forking, then cloning, which is best if you intend to contribute back) the repository there. Once downloaded, install the repo itself + its development dependencies by running ``pip install -r dev-requirements.txt``. Submitting bug reports or patches ================================= We follow `contribution-guide.org`_ for all of our development - please `go there`_ for details on submitting patches, which branch(es) to work out of, and so on. Our issue tracker is on `our GitHub page`_. Changelog location ================== Invoke's changelog lives in ``sites/www/changelog.rst`` and is formatted using the `Releases `_ Sphinx plugin. Running management tasks ======================== Invoke uses itself for project management and has a number of tasks you can see with ``inv --list``. Some specific tasks of note: * ``test`` and ``integration``: Runs the primary and integration test suites, respectively. (Most of the time you can ignore ``integration`` - it's mostly for use by CI systems or once-in-a-while sanity checks locally.) * ``www`` and ``docs`` (and their subtasks like ``docs.browse``): Builds the WWW site and the API docs, respectively. Another good resource is to skim our ``.travis.yml`` file for the commands it executes - if submissions don't pass all of those commands to some degree, they won't pass Travis' CI builds either! .. 
_go there: .. _contribution-guide.org: https://contribution-guide.org .. _our GitHub page: .. _pyinvoke/invoke: https://github.com/pyinvoke/invoke invoke-2.2.0/sites/www/faq.rst000066400000000000000000000177311445356551000163030ustar00rootroot00000000000000========================== Frequently asked questions ========================== General project questions ========================= .. _invoke-split-from-fabric: Why was Invoke split off from the `Fabric `_ project? -------------------------------------------------------------------------- Fabric (1.x and earlier) was a hybrid project implementing two feature sets: task execution (organization of task functions, execution of them via CLI, and local shell commands) and high level SSH actions (organization of servers/hosts, remote shell commands, and file transfer). For use cases requiring both feature sets, this arrangement worked well. However, over time it became clear many users only needed one or the other, with local-only users resenting heavy SSH/crypto install requirements, and remote-focused users struggling with API limitations caused by the hybrid codebase. When planning Fabric 2.x, having the "local" feature set as a standalone library made sense, and it seemed plausible to design the SSH component as a separate layer above. Thus, Invoke was created to focus exclusively on local and abstract concerns, leaving Fabric 2.x concerned only with servers and network commands. Fabric 2 leverages many parts of Invoke's API, and allows (but does not require!) use of Invoke's CLI features, allowing multiple use cases (build tool, high level SSH lib, hybrid build/orchestration tool) to coexist without negatively impacting each other. Defining/executing tasks ======================== .. _bad-first-arg: My task's first argument isn't showing up in ``--help``! -------------------------------------------------------- This problem pops up if you forget to define an initial context argument for your task. For example, can you spot the problem in this sample task file? :: from invoke import task @task def build(c, where, clean=False): pass @task def clean(what): pass This task file doesn't cause obvious errors when sanity-checking it with ``inv --list`` or ``inv --help``. However, ``clean`` forgot to set aside its first argument for the context - so Invoke is treating ``what`` as the context argument! This means it doesn't show up in help output or other command-line parsing stages. The command line says my task's first argument is invalid! ---------------------------------------------------------- See :ref:`bad-first-arg` - it's probably the same issue. Running local shell commands (``run``) ====================================== .. _program-behavior-ptys: Why is my command behaving differently under Invoke versus being run by hand? ----------------------------------------------------------------------------- 99% of the time, adding ``pty=True`` to your ``run`` call will make things work as you were expecting. Read on for why this is (and why ``pty=True`` is not the default). Command-line programs often change behavior depending on whether a controlling terminal is present; a common example is the use or disuse of colored output. When the recipient of your output is a human at a terminal, you may want to use color, tailor line length to match terminal width, etc. 
Conversely, when your output is being sent to another program (shell pipe, CI server, file, etc) color escape codes and other terminal-specific behaviors can result in unwanted garbage. Invoke's use cases span both of the above - sometimes you only want data displayed directly, sometimes you only want to capture it as a string; often you want both. Because of this, there is no "correct" default behavior re: use of a pseudo-terminal - some large chunk of use cases will be inconvenienced either way. For use cases which don't care, direct invocation without a pseudo-terminal is faster & cleaner, so it is the default. Calling Python or Python scripts prints all the output at the end of the run! ----------------------------------------------------------------------------- The symptom is easy to spot - you're running a command that takes a few seconds or more to execute, it usually prints lines of text as it goes, but via `~invoke.run` nothing appears to happen at first, and then all the output prints once it's done executing. This is usually due to Python - the "inner" Python executable you're invoking, not the one Invoke is running under - performing unwanted buffering of its output streams. It does this when it thinks it's being called in a non-interactive fashion. The fix is to force Invoke to run the command in a pseudoterminal by saying ``pty=True`` (e.g. ``run("python foo", pty=True)``). Alternately, since both Invoke and the inner command are Python, you could try loading the inner Python module directly in your Invoke-using code, and call whichever methods its command-line stub is using - instead of using `~invoke.run`. This can often have other benefits too. .. _stdin-not-tty: Why do I sometimes see ``err: stdin: is not a tty``? ---------------------------------------------------- See :ref:`program-behavior-ptys` - the same root cause (lack of a PTY by default) is probably what's going on. In some cases (such as via the Fabric library) it's happening because a shell's login files are calling programs that require a PTY (e.g. ``biff`` or ``mesg``) so make sure to look there if the actual foreground command doesn't seem at fault. Everything just exits silently after I run a command! ----------------------------------------------------- Double check the command's exit code! By default, receiving nonzero exit codes at the end of a `~invoke.run` call will result in Invoke halting execution & exiting with that same code. Some programs (pylint, Nagios check scripts, etc) use exit codes to indicate non-fatal status, which can be confusing. The solution here is to add ``warn=True`` to your `~invoke.run` call, which disables the automatic exit behavior. Then you can check the result's ``.exited`` attribute by hand to determine if it truly succeeded. The auto-responder functionality isn't working for my password prompts! ----------------------------------------------------------------------- Some programs write password prompts or other output *directly* to the local terminal (the operating-system-level TTY device), bypassing the usual stdout/stderr streams. For example, this is exactly what `the stdlib's getpass module ` does, if you're calling a program that happens to be written in Python. When this happens, we're powerless, because all we get to see is the subprocess' regular output streams. Thankfully, the solution is usually easy: just add ``pty=True`` to your `~invoke.run` call. 
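For example, here is a minimal sketch combining ``pty=True`` with Invoke's ``Responder`` watcher (the command name, prompt text, and password below are made up for illustration)::

    from invoke import Responder, run

    # Hypothetical tool that writes "Password:" straight to the terminal.
    respond = Responder(pattern=r"Password:", response="mypassword\n")
    run("deploy-tool --push", watchers=[respond], pty=True)
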
Forcing use of an explicit pseudo-terminal usually tricks these kinds of programs into writing prompts to stderr. I'm getting ``IOError: Inappropriate ioctl for device`` when I run commands! ---------------------------------------------------------------------------- This error typically means some code in your project or its dependencies has replaced one of the process streams (``sys.stdin``, ``sys.stdout`` or ``sys.stderr``) with an object that isn't actually hooked up to a terminal, but which pretends that it is. For example, test runners or build systems often do this. 99% of the time, this pops up for stdin only, in which case you may be able to work around it by specifying ``in_stream=False`` to `~invoke.run` (note: ``False``, **not** ``None``!) Gory details ~~~~~~~~~~~~ Technically, what's happened is that the object handed to Invoke's command executor as e.g. ``run('command', in_stream=xxx)`` (or ``out_stream`` or etc; and these all default to the ``sys`` members listed above) implements a ``fileno`` method that is not returning the ID of a real terminal file descriptor. Breaking the contract in this way is what's leading Invoke to do things the OS doesn't like. We're always trying to make this detection smarter; if upgrading to the latest version of Invoke doesn't fix the problem for you, please submit a bug report including details about the values and types of ``sys.stdin/stdout/stderr``. Hopefully we'll find another heuristic we can use! invoke-2.2.0/sites/www/index.rst000066400000000000000000000053611445356551000166370ustar00rootroot00000000000000.. include:: ../../README.rst This website covers project information for Invoke such as the changelog, contribution guidelines, development roadmap, news/blog, and so forth. Detailed usage and API documentation can be found at our code documentation site, `docs.pyinvoke.org `_. Please see below for a high level intro, or the navigation on the left for the rest of the site content. What is Invoke? --------------- * Like Ruby's Rake tool and Invoke's own predecessor Fabric 1.x, it provides a clean, high level API for running shell commands and defining/organizing task functions from a ``tasks.py`` file: .. code-block:: python from invoke import task @task def clean(c, docs=False, bytecode=False, extra=''): patterns = ['build'] if docs: patterns.append('docs/_build') if bytecode: patterns.append('**/*.pyc') if extra: patterns.append(extra) for pattern in patterns: c.run("rm -rf {}".format(pattern)) @task def build(c, docs=False): c.run("python setup.py build") if docs: c.run("sphinx-build docs docs/_build") * From GNU Make, it inherits an emphasis on minimal boilerplate for common patterns and the ability to run multiple tasks in a single invocation:: $ invoke clean build * Where Fabric 1.x considered the command-line approach the default mode of use, Invoke (and tools built on it) are equally at home embedded in your own Python code or a REPL: .. testsetup:: blurb fakeout = """ Hello, this is pip Installing is fun Fake output is fake Successfully installed invocations-0.13.0 pep8-1.5.7 spec-1.3.1 """ proc = MockSubprocess(out=fakeout, exit=0) .. testcleanup:: blurb proc.stop() .. 
doctest:: blurb >>> from invoke import run >>> cmd = "pip install -r requirements.txt" >>> result = run(cmd, hide=True, warn=True) >>> print(result.ok) True >>> print(result.stdout.splitlines()[-1]) Successfully installed invocations-0.13.0 pep8-1.5.7 spec-1.3.1 * Following the lead of most Unix CLI applications, it offers a traditional flag-based style of command-line parsing, deriving flag names and value types from task signatures (optionally, of course!):: $ invoke clean --docs --bytecode build --docs --extra='**/*.pyo' $ invoke clean -d -b build --docs -e '**/*.pyo' $ invoke clean -db build -de '**/*.pyo' * Like many of its predecessors, it offers advanced features as well -- namespacing, task aliasing, before/after hooks, parallel execution and more. .. toctree:: :hidden: changelog FAQs installing development prior-art contact invoke-2.2.0/sites/www/installing.rst000066400000000000000000000013611445356551000176700ustar00rootroot00000000000000========== Installing ========== Basic installation ================== The recommended way to get Invoke is to **install the latest stable release** via `pip `_:: $ pip install invoke We currently support **Python 3.6+**. Users still on Python 3.5 or older are urged to upgrade. As long as you have a supported Python interpreter, **there are no other dependencies**. Invoke is pure-Python, and contains copies of its few dependencies within its source tree. .. note:: See `this blog post `_ for background on our decision to vendorize dependencies. .. seealso:: :doc:`development` for details on source control checkouts / unstable versions. invoke-2.2.0/sites/www/prior-art.rst000066400000000000000000000047511445356551000174510ustar00rootroot00000000000000========= Prior art ========= Why another task-running/subprocess-spawning Python library? As usual, the short answer is "there were already great 80-90% solutions out there, but none that fit our needs 100%." Specifically: - **Multiple tasks at once** - almost no other Python command-line oriented libraries allow for invocations like:: runner --core-opts task1 --task1-opts task2 --task2-opts and the few that do have half-baked implementations of the feature or are lacking in other ways. - **Ability to mirror and capture subprocess output simultaneously** (in addition to everything flowing from that, like the ability to transparently auto-respond) - the standard library's ``subprocess`` can't do this and most other tools choose one or the other, or have other tradeoffs such as not supporting (or *only* supporting!) pseudoterminals. - **Simplicity** - tools that try to do many things often suffer for it due to lack of focus. We wanted to build something clean and simple that just did one thing (ok...two things) well. - **Customizability/control** - Invoke was designed to work well with (and be a foundation for) other tools such as `Fabric `_'s second version, and we felt that the work needed to adapt existing tools towards this goal would impede progress. Some of the pre-existing solutions in this space in the Python world include: - `Argh `_: One of the more appealing options, but being built on argparse it doesn't support the multi-task invocation we needed. Also has its own "prior art" list which is worth your time. - `Baker `_: Nice and simple, but unfortunately too much so for our needs. - `Paver `_: Tries to do too much, clunky API, user-hostile error messages, multi-task feature existed but was lacking. - `Argparse `_: The modern gold standard for CLI parsing (albeit without command execution). 
Unfortunately, we were unable to get multiple tasks working despite lots of experimentation. Multiple tasks with their own potentially overlapping argument names, simply doesn't mesh with how ``argparse`` thinks about the command line. - `Click `_: is actually not pre-existing (Invoke's first public releases predate Click by a number of years) but it deserves mention anyway, as it's become popular in this particular niche. invoke-2.2.0/tasks.py000066400000000000000000000077171445356551000145310ustar00rootroot00000000000000import os from typing import TYPE_CHECKING, Optional from invoke import Collection, task, Exit from invocations import ci, checks from invocations.docs import docs, www, sites, watch_docs from invocations.pytest import coverage as coverage_, test as test_ from invocations.packaging import vendorize, release if TYPE_CHECKING: from invoke import Context @task def test( c: "Context", verbose: bool = False, color: bool = True, capture: str = "no", module: Optional[str] = None, k: Optional[str] = None, x: bool = False, opts: str = "", pty: bool = True, ) -> None: """ Run pytest. See `invocations.pytest.test` for details. This is a simple wrapper around the abovementioned task, which makes a couple minor defaults changes appropriate for this particular test suite, such as: - setting ``capture=no`` instead of ``capture=sys``, as we do a very large amount of subprocess IO testing that even the ``sys`` capture screws up - setting ``verbose=False`` because we have a large number of tests and skipping verbose output by default is a ~20% time savings.) """ # TODO: update test suite to use c.config.run.in_stream = False globally. # somehow. test_( c, verbose=verbose, color=color, capture=capture, module=module, k=k, x=x, opts=opts, pty=pty, ) # TODO: replace with invocations' once the "call truly local tester" problem is # solved (see other TODOs). For now this is just a copy/paste/modify. @task(help=test.help) # type: ignore def integration( c: "Context", opts: Optional[str] = None, pty: bool = True ) -> None: """ Run the integration test suite. May be slow! """ # Abort if no default shell on this system - implies some unusual dev # environment. Certain entirely-standalone tests will fail w/o it, even if # tests honoring config overrides (like the unit-test suite) don't. shell = c.config.global_defaults()["run"]["shell"] if not c.run("which {}".format(shell), hide=True, warn=True): err = "No {} on this system - cannot run integration tests! Try a container?" # noqa raise Exit(err.format(shell)) opts = opts or "" opts += " integration/" test(c, opts=opts, pty=pty) @task def coverage( c: "Context", report: str = "term", opts: str = "", codecov: bool = False ) -> None: """ Run pytest in coverage mode. See `invocations.pytest.coverage` for details. """ # Use our own test() instead of theirs. # Also add integration test so this always hits both. # (Not regression, since that's "weird" / doesn't really hit any new # coverage points) coverage_( c, report=report, opts=opts, tester=test, additional_testers=[integration], codecov=codecov, ) @task def regression(c: "Context", jobs: int = 8) -> None: """ Run an expensive, hard-to-test-in-pytest run() regression checker. :param int jobs: Number of jobs to run, in total. Ideally num of CPUs. 
""" os.chdir("integration/_support") cmd = "seq {} | parallel -n0 --halt=now,fail=1 inv -c regression check" c.run(cmd.format(jobs)) ns = Collection( test, coverage, integration, regression, vendorize, release, www, docs, sites, watch_docs, ci, checks.blacken, checks, ) ns.configure( { "blacken": { # Skip vendor, build dirs when blackening. # TODO: this is making it seem like I really do want an explicit # arg/conf-opt in the blacken task for "excluded paths"...ha "find_opts": "-and -not \( -path './invoke/vendor*' -or -path './build*' \)" # noqa }, "packaging": { "wheel": True, "check_desc": True, "changelog_file": os.path.join( www.configuration()["sphinx"]["source"], "changelog.rst" ), }, } ) invoke-2.2.0/tests/000077500000000000000000000000001445356551000141605ustar00rootroot00000000000000invoke-2.2.0/tests/_support/000077500000000000000000000000001445356551000160335ustar00rootroot00000000000000invoke-2.2.0/tests/_support/alias_sorting.py000066400000000000000000000001161445356551000212410ustar00rootroot00000000000000from invoke import task @task(aliases=("z", "a")) def toplevel(c): pass invoke-2.2.0/tests/_support/autoprint.py000066400000000000000000000005551445356551000204370ustar00rootroot00000000000000from invoke.tasks import task from invoke.collection import Collection @task def nope(c): return "You can't see this" @task(autoprint=True) def yup(c): return "It's alive!" @task(pre=[yup]) def pre_check(c): pass @task(post=[yup]) def post_check(c): pass sub = Collection("sub", yup) ns = Collection(nope, yup, pre_check, post_check, sub) invoke-2.2.0/tests/_support/branch/000077500000000000000000000000001445356551000172705ustar00rootroot00000000000000invoke-2.2.0/tests/_support/branch/explicit.py000066400000000000000000000001111445356551000214540ustar00rootroot00000000000000from invoke import task @task def lyrics(c): print("Don't swear!") invoke-2.2.0/tests/_support/branch/tasks.py000066400000000000000000000001261445356551000207660ustar00rootroot00000000000000from invoke import task @task def alt_root(c): print("Down with the alt-root!") invoke-2.2.0/tests/_support/configs/000077500000000000000000000000001445356551000174635ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/all-four/000077500000000000000000000000001445356551000212045ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/all-four/invoke.json000066400000000000000000000000561445356551000233730ustar00rootroot00000000000000{"json-only": "whee", "shared": "json-value"} invoke-2.2.0/tests/_support/configs/all-four/invoke.py000066400000000000000000000000541445356551000230500ustar00rootroot00000000000000shared = "python-value" python_only = "heh" invoke-2.2.0/tests/_support/configs/all-four/invoke.yaml000066400000000000000000000000501445356551000233560ustar00rootroot00000000000000'yaml-only': "yup" shared: "yaml-value" invoke-2.2.0/tests/_support/configs/all-four/invoke.yml000066400000000000000000000000461445356551000232220ustar00rootroot00000000000000'yml-only': "yup" shared: "yml-value" invoke-2.2.0/tests/_support/configs/collection.py000066400000000000000000000002551445356551000221720ustar00rootroot00000000000000from invoke import ctask, Collection @ctask def go(c): c.run("false") # Ensures a kaboom if mocking fails ns = Collection(go) ns.configure({"run": {"echo": True}}) invoke-2.2.0/tests/_support/configs/echo.yaml000066400000000000000000000000221445356551000212570ustar00rootroot00000000000000run: echo: true 
invoke-2.2.0/tests/_support/configs/json-and-python/000077500000000000000000000000001445356551000225135ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/json-and-python/invoke.json000066400000000000000000000000561445356551000247020ustar00rootroot00000000000000{"json-only": "whee", "shared": "json-value"} invoke-2.2.0/tests/_support/configs/json-and-python/invoke.py000066400000000000000000000000541445356551000243570ustar00rootroot00000000000000shared = "python-value" python_only = "heh" invoke-2.2.0/tests/_support/configs/json/000077500000000000000000000000001445356551000204345ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/json/invoke.json000066400000000000000000000000511445356551000226160ustar00rootroot00000000000000{"outer": {"inner": {"hooray": "json"}}} invoke-2.2.0/tests/_support/configs/nested/000077500000000000000000000000001445356551000207455ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/nested/invoke.yaml000066400000000000000000000000431445356551000231210ustar00rootroot00000000000000outer: inner: hooray: "yaml" invoke-2.2.0/tests/_support/configs/no-dedupe.yaml000066400000000000000000000000271445356551000222260ustar00rootroot00000000000000tasks: dedupe: false invoke-2.2.0/tests/_support/configs/no-echo.yaml000066400000000000000000000000231445356551000216720ustar00rootroot00000000000000run: echo: false invoke-2.2.0/tests/_support/configs/package/000077500000000000000000000000001445356551000210565ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/package/invoke.yml000066400000000000000000000000461445356551000230740ustar00rootroot00000000000000outer: inner: hooray: "package" invoke-2.2.0/tests/_support/configs/package/tasks/000077500000000000000000000000001445356551000222035ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/package/tasks/__init__.py000066400000000000000000000001341445356551000243120ustar00rootroot00000000000000from invoke import task @task def mytask(c): assert c.outer.inner.hooray == "package" invoke-2.2.0/tests/_support/configs/python/000077500000000000000000000000001445356551000210045ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/python/invoke.py000066400000000000000000000000501445356551000226440ustar00rootroot00000000000000outer = {"inner": {"hooray": "python"}} invoke-2.2.0/tests/_support/configs/runtime.py000066400000000000000000000001311445356551000215130ustar00rootroot00000000000000from invoke import task @task def mytask(c): assert c.outer.inner.hooray == "yaml" invoke-2.2.0/tests/_support/configs/three-of-em/000077500000000000000000000000001445356551000215735ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/three-of-em/invoke.json000066400000000000000000000000561445356551000237620ustar00rootroot00000000000000{"json-only": "whee", "shared": "json-value"} invoke-2.2.0/tests/_support/configs/three-of-em/invoke.py000066400000000000000000000000541445356551000234370ustar00rootroot00000000000000shared = "python-value" python_only = "heh" invoke-2.2.0/tests/_support/configs/three-of-em/invoke.yml000066400000000000000000000000461445356551000236110ustar00rootroot00000000000000'yml-only': "yup" shared: "yml-value" invoke-2.2.0/tests/_support/configs/underscores/000077500000000000000000000000001445356551000220175ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/underscores/invoke.yaml000066400000000000000000000000401445356551000241700ustar00rootroot00000000000000tasks: auto_dash_names: false 
invoke-2.2.0/tests/_support/configs/underscores/tasks.py000066400000000000000000000001041445356551000235110ustar00rootroot00000000000000from invoke import task @task def i_have_underscores(c): pass invoke-2.2.0/tests/_support/configs/yaml/000077500000000000000000000000001445356551000204255ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/yaml/explicit.py000066400000000000000000000001771445356551000226250ustar00rootroot00000000000000from invoke import task, Collection @task def mytask(c): assert c.outer.inner.hooray == "yaml" ns = Collection(mytask) invoke-2.2.0/tests/_support/configs/yaml/invoke.yaml000066400000000000000000000000431445356551000226010ustar00rootroot00000000000000outer: inner: hooray: "yaml" invoke-2.2.0/tests/_support/configs/yaml/tasks.py000066400000000000000000000001311445356551000221170ustar00rootroot00000000000000from invoke import task @task def mytask(c): assert c.outer.inner.hooray == "yaml" invoke-2.2.0/tests/_support/configs/yml/000077500000000000000000000000001445356551000202645ustar00rootroot00000000000000invoke-2.2.0/tests/_support/configs/yml/explicit.py000066400000000000000000000001761445356551000224630ustar00rootroot00000000000000from invoke import task, Collection @task def mytask(c): assert c.outer.inner.hooray == "yml" ns = Collection(mytask) invoke-2.2.0/tests/_support/configs/yml/invoke.yml000066400000000000000000000000421445356551000222760ustar00rootroot00000000000000outer: inner: hooray: "yml" invoke-2.2.0/tests/_support/configs/yml/tasks.py000066400000000000000000000001301445356551000217550ustar00rootroot00000000000000from invoke import task @task def mytask(c): assert c.outer.inner.hooray == "yml" invoke-2.2.0/tests/_support/contextualized.py000066400000000000000000000006131445356551000214470ustar00rootroot00000000000000from invoke import task @task def go(c): return c @task def check_warn(c): # default: False assert c.config.run.warn is True @task def check_pty(c): # default: False assert c.config.run.pty is True @task def check_hide(c): # default: None assert c.config.run.hide == "both" @task def check_echo(c): # default: False assert c.config.run.echo is True invoke-2.2.0/tests/_support/custom_executor.py000066400000000000000000000000711445356551000216330ustar00rootroot00000000000000from unittest.mock import Mock CustomExecutor = Mock() invoke-2.2.0/tests/_support/debugging.py000066400000000000000000000001431445356551000203360ustar00rootroot00000000000000from invoke import task from invoke.util import debug @task def foo(c): debug("my-sentinel") invoke-2.2.0/tests/_support/decorator_multi_default.py000066400000000000000000000001641445356551000233060ustar00rootroot00000000000000from invoke.tasks import task @task(default=True) def foo(c): pass @task(default=True) def biz(c): pass invoke-2.2.0/tests/_support/decorators.py000066400000000000000000000016271445356551000205600ustar00rootroot00000000000000from invoke.tasks import task @task(aliases=("bar", "otherbar")) def foo(c): """ Foo the bar. 
""" pass @task def foo2(c): """ Foo the bar: example code Added in 1.0 """ pass @task def foo3(c): """Foo the other bar: example code Added in 1.1 """ pass @task(default=True) def biz(c): pass @task(help={"why": "Motive", "who": "Who to punch"}) def punch(c, who, why): pass @task(positional=["pos"]) def one_positional(c, pos, nonpos): pass @task(positional=["pos1", "pos2"]) def two_positionals(c, pos1, pos2, nonpos): pass @task def implicit_positionals(c, pos1, pos2, nonpos=None): pass @task(optional=["myopt"]) def optional_values(c, myopt): pass @task(iterable=["mylist"]) def iterable_values(c, mylist=None): pass @task(incrementable=["verbose"]) def incrementable_values(c, verbose=None): pass invoke-2.2.0/tests/_support/deeper_ns_list.py000066400000000000000000000003021445356551000213770ustar00rootroot00000000000000from invoke import task, Collection @task def toplevel(c): pass @task def subtask(c): pass ns = Collection( toplevel, Collection("a", subtask, Collection("nother", subtask)) ) invoke-2.2.0/tests/_support/depth_first.py000066400000000000000000000007771445356551000207330ustar00rootroot00000000000000from invoke import task @task def clean_html(c): print("Cleaning HTML") @task def clean_tgz(c): print("Cleaning .tar.gz files") @task(clean_html, clean_tgz) def clean(c): print("Cleaned everything") @task def makedirs(c): print("Making directories") @task(clean, makedirs) def build(c): print("Building") @task def pretest(c): print("Preparing for testing") @task(pretest) def test(c): print("Testing") @task(build, post=[test]) def deploy(c): print("Deploying") invoke-2.2.0/tests/_support/docstrings.py000066400000000000000000000004241445356551000205640ustar00rootroot00000000000000from invoke import task @task def no_docstring(c): pass @task def one_line(c): """foo""" @task def two_lines(c): """foo bar """ @task def leading_whitespace(c): """ foo """ @task(aliases=("a", "b")) def with_aliases(c): """foo""" invoke-2.2.0/tests/_support/empty.py000066400000000000000000000000071445356551000175400ustar00rootroot00000000000000# Yup. invoke-2.2.0/tests/_support/empty_subcollection.py000066400000000000000000000001711445356551000224670ustar00rootroot00000000000000from invoke import task, Collection @task def dummy(c): pass ns = Collection(dummy, Collection("subcollection")) invoke-2.2.0/tests/_support/explicit_root.py000066400000000000000000000004101445356551000212640ustar00rootroot00000000000000""" EXPLICIT LYRICS """ from invoke import task, Collection @task(aliases=["other_top"]) def top_level(c): pass @task(aliases=["other_sub"], default=True) def sub_task(c): pass sub = Collection("sub_level", sub_task) ns = Collection(top_level, sub) invoke-2.2.0/tests/_support/foo.py000066400000000000000000000003421445356551000171670ustar00rootroot00000000000000from invoke.tasks import task @task def mytask(c): pass @task def basic_arg(c, arg="val"): pass @task def multiple_args(c, arg1="val1", otherarg="val2"): pass @task def basic_bool(c, mybool=True): pass invoke-2.2.0/tests/_support/has_modules.py000066400000000000000000000000431445356551000207050ustar00rootroot00000000000000# Not picklable! 
import os # noqa invoke-2.2.0/tests/_support/ignoreme/000077500000000000000000000000001445356551000176405ustar00rootroot00000000000000invoke-2.2.0/tests/_support/ignoreme/ignoremetoo/000077500000000000000000000000001445356551000221675ustar00rootroot00000000000000invoke-2.2.0/tests/_support/ignoreme/ignoremetoo/.no000066400000000000000000000000001445356551000225720ustar00rootroot00000000000000invoke-2.2.0/tests/_support/integration.py000066400000000000000000000012321445356551000207260ustar00rootroot00000000000000""" A semi-integration-test style fixture spanning multiple feature examples. If we're being honest, though, the new 'tree' fixture package is a lot bigger. """ from invoke.tasks import task @task def print_foo(c): print("foo") @task def print_name(c, name): print(name) @task def print_underscored_arg(c, my_option): print(my_option) @task def foo(c): print("foo") @task(foo) def bar(c): print("bar") @task def post2(c): print("post2") @task(post=[post2]) def post1(c): print("post1") @task(foo, bar, post=[post1, post2]) def biz(c): print("biz") @task(bar, foo, post=[post2, post1]) def boz(c): print("boz") invoke-2.2.0/tests/_support/namespacing.py000066400000000000000000000002751445356551000206760ustar00rootroot00000000000000from invoke import Collection, task, call from subspace import module @task def top_pre(c): pass @task(call(top_pre)) def toplevel(c): pass ns = Collection(module, toplevel) invoke-2.2.0/tests/_support/nontrivial_docstrings.py000066400000000000000000000006511445356551000230330ustar00rootroot00000000000000from invoke import task @task def no_docstring(c): pass @task def task_one(c): """ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam id dictum risus. Nulla lorem justo, sagittis in volutpat eget """ @task def task_two(c): """ Nulla eget ultrices ante. Curabitur sagittis commodo posuere. Duis dapibus facilisis, lacus et dapibus rutrum, lectus turpis egestas dui """ invoke-2.2.0/tests/_support/oops.py000066400000000000000000000000541445356551000173640ustar00rootroot00000000000000import modulethatdoesnotexistohnoes # noqa invoke-2.2.0/tests/_support/package/000077500000000000000000000000001445356551000174265ustar00rootroot00000000000000invoke-2.2.0/tests/_support/package/__init__.py000066400000000000000000000001151445356551000215340ustar00rootroot00000000000000from invoke import Collection from . import module ns = Collection(module) invoke-2.2.0/tests/_support/package/module.py000066400000000000000000000000701445356551000212620ustar00rootroot00000000000000from invoke import task @task def mytask(c): pass invoke-2.2.0/tests/_support/simple_ns_list.py000066400000000000000000000002621445356551000214310ustar00rootroot00000000000000from invoke import task, Collection @task def z_toplevel(c): pass @task def subtask(c): pass ns = Collection(z_toplevel, Collection("a", Collection("b", subtask))) invoke-2.2.0/tests/_support/subcollection_task_name.py000066400000000000000000000001251445356551000232720ustar00rootroot00000000000000from invoke import task @task(name="explicit_name") def implicit_name(c): pass invoke-2.2.0/tests/_support/subspace/000077500000000000000000000000001445356551000176405ustar00rootroot00000000000000invoke-2.2.0/tests/_support/subspace/__init__.py000066400000000000000000000001151445356551000217460ustar00rootroot00000000000000from invoke import Collection from . 
import module ns = Collection(module) invoke-2.2.0/tests/_support/subspace/module.py000066400000000000000000000000701445356551000214740ustar00rootroot00000000000000from invoke import task @task def mytask(c): pass invoke-2.2.0/tests/_support/sudo_prompt.py000066400000000000000000000002351445356551000207600ustar00rootroot00000000000000from invoke import task @task def expect_config(c): password = c.config.sudo.password assert password == "mypassword", "Got {!r}".format(password) invoke-2.2.0/tests/_support/tasks.py000066400000000000000000000001401445356551000175250ustar00rootroot00000000000000from invoke.tasks import task @task def foo(c): print("Hm") @task def noop(c): pass invoke-2.2.0/tests/_support/tree.json000066400000000000000000000100621445356551000176640ustar00rootroot00000000000000{ "name": "tree", "help": null, "tasks": [ { "name": "shell", "aliases": ["ipython"], "help": "Load a REPL with project state already set up." }, { "name": "test", "aliases": ["run-tests"], "help": "Run the test suite with baked-in args." } ], "default": "test", "collections": [ { "name": null, "help": "Tasks for compiling static code and assets.", "tasks": [ { "name": "all", "aliases": ["everything"], "help": "Build all necessary artifacts." }, { "name": "c-ext", "aliases": ["ext"], "help": "Build our internal C extension." }, { "name": "zap", "aliases": [], "help": "A silly way to clean." } ], "default": "all", "collections": [ { "name": "docs", "help": "Tasks for managing Sphinx docs.", "tasks": [ { "name": "all", "aliases": [], "help": "Build all doc formats." }, { "name": "html", "aliases": [], "help": "Build HTML output only." }, { "name": "pdf", "aliases": [], "help": "Build PDF output only." } ], "default": "all", "collections": [] }, { "name": "python", "help": "PyPI/etc distribution artifacts.", "tasks": [ { "name": "all", "aliases": [], "help": "Build all Python packages." }, { "name": "sdist", "aliases": [], "help": "Build classic style tar.gz." }, { "name": "wheel", "aliases": [], "help": "Build a wheel." } ], "default": "all", "collections": [] } ] }, { "name": "deploy", "help": "How to deploy our code and configs.", "tasks": [ { "name": "db", "aliases": ["db-servers"], "help": "Deploy to our database servers." }, { "name": "everywhere", "aliases": [], "help": "Deploy to all targets." }, { "name": "web", "aliases": [], "help": "Update and bounce the webservers." } ], "default": "everywhere", "collections": [] }, { "name": "provision", "help": "System setup code.", "tasks": [ { "name": "db", "aliases": [], "help": "Stand up one or more DB servers." }, { "name": "web", "aliases": [], "help": "Stand up a Web server." } ], "default": null, "collections": [] } ] } invoke-2.2.0/tests/_support/tree/000077500000000000000000000000001445356551000167725ustar00rootroot00000000000000invoke-2.2.0/tests/_support/tree/__init__.py000066400000000000000000000012151445356551000211020ustar00rootroot00000000000000from invoke import task, Collection from . import build, deploy, provision @task(aliases=["ipython"]) def shell(c): "Load a REPL with project state already set up." pass @task(aliases=["run_tests"], default=True) def test(c): "Run the test suite with baked-in args." pass # NOTE: using build's internal collection directly as a way of ensuring a # corner case (collection 'named' via local kwarg) gets tested for --list. # NOTE: Docstring cloning in effect to preserve the final organic looking # result... 
localbuild = build.ns localbuild.__doc__ = build.__doc__ ns = Collection(shell, test, deploy, provision, build=localbuild) invoke-2.2.0/tests/_support/tree/build/000077500000000000000000000000001445356551000200715ustar00rootroot00000000000000invoke-2.2.0/tests/_support/tree/build/__init__.py000066400000000000000000000006451445356551000222070ustar00rootroot00000000000000"Tasks for compiling static code and assets." from invoke import task, Collection from . import docs, python @task(name="all", aliases=["everything"], default=True) def all_(c): "Build all necessary artifacts." pass @task(aliases=["ext"]) def c_ext(c): "Build our internal C extension." pass @task def zap(c): "A silly way to clean." pass ns = Collection(all_, c_ext, zap, docs, python) invoke-2.2.0/tests/_support/tree/build/docs.py000066400000000000000000000004221445356551000213710ustar00rootroot00000000000000"Tasks for managing Sphinx docs." from invoke import task, Collection @task(name="all", default=True) def all_(c): "Build all doc formats." pass @task def html(c): "Build HTML output only." pass @task def pdf(c): "Build PDF output only." pass invoke-2.2.0/tests/_support/tree/build/python.py000066400000000000000000000004261445356551000217660ustar00rootroot00000000000000"PyPI/etc distribution artifacts." from invoke import task, Collection @task(name="all", default=True) def all_(c): "Build all Python packages." pass @task def sdist(c): "Build classic style tar.gz." pass @task def wheel(c): "Build a wheel." pass invoke-2.2.0/tests/_support/tree/deploy.py000066400000000000000000000004551445356551000206440ustar00rootroot00000000000000"How to deploy our code and configs." from invoke import task @task(default=True) def everywhere(c): "Deploy to all targets." pass @task(aliases=["db_servers"]) def db(c): "Deploy to our database servers." pass @task def web(c): "Update and bounce the webservers." pass invoke-2.2.0/tests/_support/tree/provision.py000066400000000000000000000002531445356551000213740ustar00rootroot00000000000000"System setup code." from invoke import task @task def db(c): "Stand up one or more DB servers." pass @task def web(c): "Stand up a Web server." pass invoke-2.2.0/tests/_util.py000066400000000000000000000231341445356551000156510ustar00rootroot00000000000000import os import sys from io import BytesIO from functools import wraps try: import termios except ImportError: # Not available on Windows termios = None from contextlib import contextmanager from unittest.mock import patch, Mock from pytest import skip from pytest_relaxed import trap from invoke import Program, Runner from invoke.terminals import WINDOWS support = os.path.join(os.path.dirname(__file__), "_support") ROOT = os.path.abspath(os.path.sep) def skip_if_windows(fn): @wraps(fn) def wrapper(*args, **kwargs): if WINDOWS: skip() return fn(*args, **kwargs) return wrapper @contextmanager def support_path(): sys.path.insert(0, support) try: yield finally: sys.path.pop(0) def load(name): with support_path(): imported = __import__(name) return imported def support_file(subpath): with open(os.path.join(support, subpath)) as fd: return fd.read() @trap def run(invocation, program=None, invoke=True): """ Run ``invocation`` via ``program``, returning output stream captures. ``program`` defaults to ``Program()``. To skip automatically assuming the argv under test starts with ``"invoke "``, say ``invoke=False``. :returns: Two-tuple of ``stdout, stderr`` strings. 
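For instance, an illustrative sketch (assuming the default ``Program`` and standard task discovery)::

        stdout, stderr = run("--list")  # under the hood, runs "invoke --list"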
""" if program is None: program = Program() if invoke: invocation = "invoke {}".format(invocation) program.run(invocation, exit=False) return sys.stdout.getvalue(), sys.stderr.getvalue() def expect( invocation, out=None, err=None, program=None, invoke=True, test=None ): """ Run ``invocation`` via ``program`` and expect resulting output to match. May give one or both of ``out``/``err`` (but not neither). ``program`` defaults to ``Program()``. To skip automatically assuming the argv under test starts with ``"invoke "``, say ``invoke=False``. To customize the operator used for testing (default: equality), use ``test`` (which should be an assertion wrapper of some kind). """ stdout, stderr = run(invocation, program, invoke) # Perform tests if out is not None: if test: test(stdout, out) else: assert out == stdout if err is not None: if test: test(stderr, err) else: assert err == stderr # Guard against silent failures; since we say exit=False this is the only # real way to tell if stuff died in a manner we didn't expect. elif stderr: assert False, "Unexpected stderr: {}".format(stderr) return stdout, stderr class MockSubprocess: def __init__(self, out="", err="", exit=0, isatty=None, autostart=True): self.out_file = BytesIO(out.encode()) self.err_file = BytesIO(err.encode()) self.exit = exit self.isatty = isatty if autostart: self.start() def start(self): # Start patchin' self.popen = patch("invoke.runners.Popen") Popen = self.popen.start() self.read = patch("os.read") read = self.read.start() self.sys_stdin = patch("sys.stdin", new_callable=BytesIO) sys_stdin = self.sys_stdin.start() # Setup mocks process = Popen.return_value process.returncode = self.exit process.stdout.fileno.return_value = 1 process.stderr.fileno.return_value = 2 # If requested, mock isatty to fake out pty detection if self.isatty is not None: sys_stdin.isatty = Mock(return_value=self.isatty) def fakeread(fileno, count): fd = {1: self.out_file, 2: self.err_file}[fileno] return fd.read(count) read.side_effect = fakeread # Return the Popen mock as it's sometimes wanted inside tests return Popen def stop(self): self.popen.stop() self.read.stop() self.sys_stdin.stop() def mock_subprocess(out="", err="", exit=0, isatty=None, insert_Popen=False): def decorator(f): @wraps(f) # We have to include a @patch here to trick pytest into ignoring # the wrapped test's sometimes-there, sometimes-not mock_Popen arg. (It # explicitly "skips ahead" past what it perceives as patch args, even # though in our case those are not applying to the test function!) # Doesn't matter what we patch as long as it doesn't # actually get in our way. @patch("invoke.runners.pty") def wrapper(*args, **kwargs): proc = MockSubprocess( out=out, err=err, exit=exit, isatty=isatty, autostart=False ) Popen = proc.start() args = list(args) args.pop() # Pop the dummy patch if insert_Popen: args.append(Popen) try: f(*args, **kwargs) finally: proc.stop() return wrapper return decorator def mock_pty( out="", err="", exit=0, isatty=None, trailing_error=None, skip_asserts=False, insert_os=False, be_childish=False, os_close_error=False, ): # Windows doesn't have ptys, so all the pty tests should be # skipped anyway... 
if WINDOWS: return skip_if_windows def decorator(f): import fcntl ioctl_patch = patch("invoke.runners.fcntl.ioctl", wraps=fcntl.ioctl) @wraps(f) @patch("invoke.runners.pty") @patch("invoke.runners.os") @ioctl_patch def wrapper(*args, **kwargs): args = list(args) pty, os, ioctl = args.pop(), args.pop(), args.pop() # Don't actually fork, but pretend we did (with "our" pid differing # depending on be_childish) & give 'parent fd' of 3 (typically, # first allocated non-stdin/out/err FD) pty.fork.return_value = (12345 if be_childish else 0), 3 # We don't really need to care about waiting since not truly # forking/etc, so here we just return a nonzero "pid" + sentinel # wait-status value (used in some tests about WIFEXITED etc) os.waitpid.return_value = None, Mock(name="exitstatus") # Either or both of these may get called, depending... os.WEXITSTATUS.return_value = exit os.WTERMSIG.return_value = exit # If requested, mock isatty to fake out pty detection if isatty is not None: os.isatty.return_value = isatty out_file = BytesIO(out.encode()) err_file = BytesIO(err.encode()) def fakeread(fileno, count): fd = {3: out_file, 2: err_file}[fileno] ret = fd.read(count) # If asked, fake a Linux-platform trailing I/O error. if not ret and trailing_error: raise trailing_error return ret os.read.side_effect = fakeread if os_close_error: os.close.side_effect = IOError if insert_os: args.append(os) # Do the thing!!! f(*args, **kwargs) # Short-circuit if we raised an error in fakeread() if trailing_error: return # Sanity checks to make sure the stuff we mocked, actually got ran! pty.fork.assert_called_with() # Skip rest of asserts if we pretended to be the child if be_childish: return # Expect a get, and then later set, of terminal window size assert ioctl.call_args_list[0][0][1] == termios.TIOCGWINSZ assert ioctl.call_args_list[1][0][1] == termios.TIOCSWINSZ if not skip_asserts: for name in ("execve", "waitpid"): assert getattr(os, name).called # Ensure at least one of the exit status getters was called assert os.WEXITSTATUS.called or os.WTERMSIG.called # Ensure something closed the pty FD os.close.assert_called_once_with(3) return wrapper return decorator class _Dummy(Runner): """ Dummy runner subclass that does minimum work required to execute run(). It also serves as a convenient basic API checker; failure to update it to match the current Runner API will cause TypeErrors, NotImplementedErrors, and similar. """ # Neuter the input loop sleep, so tests aren't slow (at the expense of CPU, # which isn't a problem for testing). input_sleep = 0 def start(self, command, shell, env, timeout=None): pass def read_proc_stdout(self, num_bytes): return "" def read_proc_stderr(self, num_bytes): return "" def _write_proc_stdin(self, data): pass def close_proc_stdin(self): pass @property def process_is_finished(self): return True def returncode(self): return 0 @property def timed_out(self): return False # Dummy command that will blow up if it ever truly hits a real shell. _ = "nope" # Runner that fakes ^C during subprocess exec class _KeyboardInterruptingRunner(_Dummy): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._interrupted = False # Trigger KeyboardInterrupt during wait() def wait(self): if not self._interrupted: self._interrupted = True raise KeyboardInterrupt # But also, after that has been done, pretend subprocess shutdown happened # (or we will loop forever). 
    # NOTE: @property here mirrors _Dummy.process_is_finished above, since
    # Runner consults this as an attribute, not via a method call.
    @property
def process_is_finished(self): return self._interrupted class OhNoz(Exception): pass invoke-2.2.0/tests/cli.py000066400000000000000000000123611445356551000153040ustar00rootroot00000000000000from invoke.collection import Collection from invoke.parser import Parser from invoke.tasks import task class CLIParsing: """ High level parsing tests """ def setup_method(self): @task(positional=[], iterable=["my_list"], incrementable=["verbose"]) def my_task( c, mystring, s, boolean=False, b=False, v=False, long_name=False, true_bool=True, _leading_underscore=False, trailing_underscore_=False, my_list=None, verbose=0, ): pass @task(aliases=["my_task27"]) def my_task2(c): pass @task(default=True) def my_task3(c, mystring): pass @task def my_task4(c, clean=False, browse=False): pass @task(aliases=["other"], default=True) def sub_task(c): pass sub_coll = Collection("sub_coll", sub_task) self.c = Collection(my_task, my_task2, my_task3, my_task4, sub_coll) def _parser(self): return Parser(self.c.to_contexts()) def _parse(self, argstr): return self._parser().parse_argv(argstr.split()) def _compare(self, invoke, flagname, value): invoke = "my-task " + invoke result = self._parse(invoke) assert result[0].args[flagname].value == value def _compare_names(self, given, real): assert self._parse(given)[0].name == real def underscored_flags_can_be_given_as_dashed(self): self._compare("--long-name", "long_name", True) def leading_underscores_are_ignored(self): self._compare("--leading-underscore", "_leading_underscore", True) def trailing_underscores_are_ignored(self): self._compare("--trailing-underscore", "trailing_underscore_", True) def inverse_boolean_flags(self): self._compare("--no-true-bool", "true_bool", False) def namespaced_task(self): self._compare_names("sub-coll.sub-task", "sub-coll.sub-task") def aliases(self): self._compare_names("my-task27", "my-task2") def subcollection_aliases(self): self._compare_names("sub-coll.other", "sub-coll.sub-task") def subcollection_default_tasks(self): self._compare_names("sub-coll", "sub-coll.sub-task") def boolean_args(self): "my-task --boolean" self._compare("--boolean", "boolean", True) def flag_then_space_then_value(self): "my-task --mystring foo" self._compare("--mystring foo", "mystring", "foo") def flag_then_equals_sign_then_value(self): "my-task --mystring=foo" self._compare("--mystring=foo", "mystring", "foo") def short_boolean_flag(self): "my-task -b" self._compare("-b", "b", True) def short_flag_then_space_then_value(self): "my-task -s value" self._compare("-s value", "s", "value") def short_flag_then_equals_sign_then_value(self): "my-task -s=value" self._compare("-s=value", "s", "value") def short_flag_with_adjacent_value(self): "my-task -svalue" r = self._parse("my-task -svalue") assert r[0].args.s.value == "value" def _flag_value_task(self, value): r = self._parse("my-task -s {} my-task2".format(value)) assert len(r) == 2 assert r[0].name == "my-task" assert r[0].args.s.value == value assert r[1].name == "my-task2" def flag_value_then_task(self): "my-task -s value my-task2" self._flag_value_task("value") def flag_value_same_as_task_name(self): "my-task -s my-task2 my-task2" self._flag_value_task("my-task2") def three_tasks_with_args(self): "my-task --boolean my-task3 --mystring foo my-task2" r = self._parse("my-task --boolean my-task3 --mystring foo my-task2") assert len(r) == 3 assert [x.name for x in r] == ["my-task", "my-task3", "my-task2"] assert r[0].args.boolean.value assert r[1].args.mystring.value == "foo" def 
tasks_with_duplicately_named_kwargs(self): "my-task --mystring foo my-task3 --mystring bar" r = self._parse("my-task --mystring foo my-task3 --mystring bar") assert r[0].name == "my-task" assert r[0].args.mystring.value == "foo" assert r[1].name == "my-task3" assert r[1].args.mystring.value == "bar" def multiple_short_flags_adjacent(self): "my-task -bv (and inverse)" for args in ("-bv", "-vb"): r = self._parse("my-task {}".format(args)) a = r[0].args assert a.b.value assert a.v.value def list_type_flag_can_be_given_N_times_building_a_list(self): "my-task --my-list foo --my-list bar" # Test both the singular and plural cases, just to be safe. self._compare("--my-list foo", "my-list", ["foo"]) self._compare("--my-list foo --my-list bar", "my-list", ["foo", "bar"]) def incrementable_type_flag_can_be_used_as_a_switch_or_counter(self): "my-task -v, -vv, -vvvvv etc, except with explicit --verbose" self._compare("", "verbose", 0) self._compare("--verbose", "verbose", 1) self._compare("--verbose --verbose --verbose", "verbose", 3) invoke-2.2.0/tests/collection.py000066400000000000000000001013451445356551000166710ustar00rootroot00000000000000import operator from functools import reduce from pytest import raises from invoke.collection import Collection from invoke.tasks import task, Task from _util import load, support_path @task def _mytask(c): print("woo!") def _func(c): pass class Collection_: class init: "__init__" def can_accept_task_varargs(self): "can accept tasks as *args" @task def task1(c): pass @task def task2(c): pass c = Collection(task1, task2) assert "task1" in c assert "task2" in c def can_accept_collections_as_varargs_too(self): sub = Collection("sub") ns = Collection(sub) assert ns.collections["sub"] == sub def kwargs_act_as_name_args_for_given_objects(self): sub = Collection() @task def task1(c): pass ns = Collection(loltask=task1, notsub=sub) assert ns["loltask"] == task1 assert ns.collections["notsub"] == sub def initial_string_arg_acts_as_name(self): sub = Collection("sub") ns = Collection(sub) assert ns.collections["sub"] == sub def initial_string_arg_meshes_with_varargs_and_kwargs(self): @task def task1(c): pass @task def task2(c): pass sub = Collection("sub") ns = Collection("root", task1, sub, sometask=task2) for x, y in ( (ns.name, "root"), (ns["task1"], task1), (ns.collections["sub"], sub), (ns["sometask"], task2), ): assert x == y def accepts_load_path_kwarg(self): assert Collection().loaded_from is None assert Collection(loaded_from="a/path").loaded_from == "a/path" def accepts_auto_dash_names_kwarg(self): assert Collection().auto_dash_names is True assert Collection(auto_dash_names=False).auto_dash_names is False class useful_special_methods: def _meh(self): @task def task1(c): pass @task def task2(c): pass @task def task3(c): pass submeh = Collection("submeh", task3) return Collection("meh", task1, task2, submeh) def setup_method(self): self.c = self._meh() def repr_(self): "__repr__" expected = "" assert expected == repr(self.c) def equality_consists_of_name_tasks_and_collections(self): # Truly equal assert self.c == self._meh() # Same contents, different name == not equal diffname = self._meh() diffname.name = "notmeh" assert diffname != self.c # And a sanity check that we didn't forget __ne__...cuz that # definitely happened at one point assert not diffname == self.c # Same name, same tasks, different collections == not equal diffcols = self._meh() del diffcols.collections["submeh"] assert diffcols != self.c # Same name, different tasks, same collections == not 
equal difftasks = self._meh() del difftasks.tasks["task1"] assert difftasks != self.c def boolean_is_equivalent_to_tasks_and_or_collections(self): # No tasks or colls? Empty/false assert not Collection() # Tasks but no colls? True @task def foo(c): pass assert Collection(foo) # Colls but no tasks: True assert Collection(foo=Collection(foo)) # TODO: whether a tree that is not "empty" but has nothing BUT # other empty collections in it, should be true or false, is kinda # questionable - but since it would result in no usable task names, # let's say it's False. (Plus this lets us just use .task_names as # the shorthand impl...) assert not Collection(foo=Collection()) class from_module: def setup_method(self): self.c = Collection.from_module(load("integration")) class parameters: def setup_method(self): self.mod = load("integration") self.from_module = Collection.from_module def name_override(self): assert self.from_module(self.mod).name == "integration" override = self.from_module(self.mod, name="not-integration") assert override.name == "not-integration" def inline_configuration(self): # No configuration given, none gotten assert self.from_module(self.mod).configuration() == {} # Config kwarg given is reflected when config obtained coll = self.from_module(self.mod, config={"foo": "bar"}) assert coll.configuration() == {"foo": "bar"} def name_and_config_simultaneously(self): # Test w/ posargs to enforce ordering, just for safety. c = self.from_module(self.mod, "the name", {"the": "config"}) assert c.name == "the name" assert c.configuration() == {"the": "config"} def auto_dash_names_passed_to_constructor(self): # Sanity assert self.from_module(self.mod).auto_dash_names is True # Test coll = self.from_module(self.mod, auto_dash_names=False) assert coll.auto_dash_names is False def adds_tasks(self): assert "print-foo" in self.c def derives_collection_name_from_module_name(self): assert self.c.name == "integration" def copies_docstring_from_module(self): expected = "A semi-integration-test style fixture spanning multiple feature examples." # noqa # Checking the first line is sufficient. assert self.c.__doc__.strip().split("\n")[0] == expected def works_great_with_subclassing(self): class MyCollection(Collection): pass c = MyCollection.from_module(load("integration")) assert isinstance(c, MyCollection) def submodule_names_are_stripped_to_last_chunk(self): with support_path(): from package import module c = Collection.from_module(module) assert module.__name__ == "package.module" assert c.name == "module" assert "mytask" in c # Sanity def honors_explicit_collections(self): coll = Collection.from_module(load("explicit_root")) assert "top-level" in coll.tasks assert "sub-level" in coll.collections # The real key test assert "sub-task" not in coll.tasks def allows_tasks_with_explicit_names_to_override_bound_name(self): coll = Collection.from_module(load("subcollection_task_name")) assert "explicit-name" in coll.tasks # not 'implicit_name' def returns_unique_Collection_objects_for_same_input_module(self): # Ignoring self.c for now, just in case it changes later. 
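            # (Identity, not equality, is the point below: from_module()
            # must build a fresh Collection per call, so that mutating one
            # result -- e.g. via configure() -- cannot leak into another.)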
# First, a module with no root NS mod = load("integration") c1 = Collection.from_module(mod) c2 = Collection.from_module(mod) assert c1 is not c2 # Now one *with* a root NS (which was previously buggy) mod2 = load("explicit_root") c3 = Collection.from_module(mod2) c4 = Collection.from_module(mod2) assert c3 is not c4 class explicit_root_ns: def setup_method(self): mod = load("explicit_root") mod.ns.configure( { "key": "builtin", "otherkey": "yup", "subconfig": {"mykey": "myvalue"}, } ) mod.ns.name = "builtin_name" self.unchanged = Collection.from_module(mod) self.changed = Collection.from_module( mod, name="override_name", config={ "key": "override", "subconfig": {"myotherkey": "myothervalue"}, }, ) def inline_config_with_root_namespaces_overrides_builtin(self): assert self.unchanged.configuration()["key"] == "builtin" assert self.changed.configuration()["key"] == "override" def inline_config_overrides_via_merge_not_replacement(self): assert "otherkey" in self.changed.configuration() def config_override_merges_recursively(self): subconfig = self.changed.configuration()["subconfig"] assert subconfig["mykey"] == "myvalue" def inline_name_overrides_root_namespace_object_name(self): assert self.unchanged.name == "builtin-name" assert self.changed.name == "override-name" def root_namespace_object_name_overrides_module_name(self): # Duplicates part of previous test for explicitness' sake. # I.e. proves that the name doesn't end up 'explicit_root'. assert self.unchanged.name == "builtin-name" def docstring_still_copied_from_module(self): expected = "EXPLICIT LYRICS" assert self.unchanged.__doc__.strip() == expected assert self.changed.__doc__.strip() == expected class add_task: def setup_method(self): self.c = Collection() def associates_given_callable_with_given_name(self): self.c.add_task(_mytask, "foo") assert self.c["foo"] == _mytask def uses_function_name_as_implicit_name(self): self.c.add_task(_mytask) assert "_mytask" in self.c def prefers_name_kwarg_over_task_name_attr(self): self.c.add_task(Task(_func, name="notfunc"), name="yesfunc") assert "yesfunc" in self.c assert "notfunc" not in self.c def prefers_task_name_attr_over_function_name(self): self.c.add_task(Task(_func, name="notfunc")) assert "notfunc" in self.c assert "_func" not in self.c def raises_ValueError_if_no_name_found(self): # Can't use a lambda here as they are technically real functions. 
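            # (That is: a lambda still carries a __name__ -- '<lambda>' --
            # so Task could derive a name from it; a bare callable instance
            # like the one below has no such attribute.)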
class Callable: def __call__(self, ctx): pass with raises(ValueError): self.c.add_task(Task(Callable())) def raises_ValueError_on_multiple_defaults(self): t1 = Task(_func, default=True) t2 = Task(_func, default=True) self.c.add_task(t1, "foo") with raises( ValueError, match=r"'bar' cannot be the default because 'foo' already is!", ): self.c.add_task(t2, "bar") def raises_ValueError_if_task_added_mirrors_subcollection_name(self): self.c.add_collection(Collection("sub")) with raises(ValueError): self.c.add_task(_mytask, "sub") def allows_specifying_task_defaultness(self): self.c.add_task(_mytask, default=True) assert self.c.default == "_mytask" def specifying_default_False_overrides_task_setting(self): @task(default=True) def its_me(c): pass self.c.add_task(its_me, default=False) assert self.c.default is None def allows_specifying_aliases(self): self.c.add_task(_mytask, aliases=("task1", "task2")) assert self.c["_mytask"] is self.c["task1"] is self.c["task2"] def aliases_are_merged(self): @task(aliases=("foo", "bar")) def biz(c): pass # NOTE: using tuple above and list below to ensure no type problems self.c.add_task(biz, aliases=["baz", "boz"]) for x in ("foo", "bar", "biz", "baz", "boz"): assert self.c[x] is self.c["biz"] class add_collection: def setup_method(self): self.c = Collection() def adds_collection_as_subcollection_of_self(self): c2 = Collection("foo") self.c.add_collection(c2) assert "foo" in self.c.collections def can_take_module_objects(self): self.c.add_collection(load("integration")) assert "integration" in self.c.collections def allows_specifying_defaultness(self): collection = Collection("foo") self.c.add_collection(collection, default=True) assert self.c.default == collection.name def raises_ValueError_if_collection_without_name(self): # Aka non-root collections must either have an explicit name given # via kwarg, have a name attribute set, or be a module with # __name__ defined. root = Collection() sub = Collection() with raises(ValueError): root.add_collection(sub) def raises_ValueError_if_collection_named_same_as_task(self): self.c.add_task(_mytask, "sub") with raises(ValueError): self.c.add_collection(Collection("sub")) def raises_ValueError_on_multiple_defaults(self): t1 = Task(_func, default=True) self.c.add_task(t1, "foo") collection = Collection("bar") with raises( ValueError, match=r"'bar' cannot be the default because 'foo' already is!", ): self.c.add_collection(collection, default=True) class getitem: "__getitem__" def setup_method(self): self.c = Collection() def finds_own_tasks_by_name(self): # TODO: duplicates an add_task test above, fix? 
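            # (This class as a whole walks __getitem__'s lookup order: own
            # task names, dotted subcollection paths, aliases, and -- for
            # the empty string -- default tasks.)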
self.c.add_task(_mytask, "foo") assert self.c["foo"] is _mytask def finds_subcollection_tasks_by_dotted_name(self): sub = Collection("sub") sub.add_task(_mytask) self.c.add_collection(sub) assert self.c["sub._mytask"] is _mytask def honors_aliases_in_own_tasks(self): task = Task(_func, aliases=["bar"]) self.c.add_task(task, "foo") assert self.c["bar"] is task def honors_subcollection_task_aliases(self): self.c.add_collection(load("decorators")) assert "decorators.bar" in self.c def honors_own_default_task_with_no_args(self): task = Task(_func, default=True) self.c.add_task(task) assert self.c[""] is task def honors_own_default_subcollection(self): task = Task(_func, default=True) sub = Collection("sub") sub.add_task(task, default=True) self.c.add_collection(sub, default=True) assert self.c[""] is task def honors_subcollection_default_tasks_on_subcollection_name(self): sub = Collection.from_module(load("decorators")) self.c.add_collection(sub) # Sanity assert self.c["decorators.biz"] is sub["biz"] # Real test assert self.c["decorators"] is self.c["decorators.biz"] def raises_ValueError_for_no_name_and_no_default(self): with raises(ValueError): self.c[""] def ValueError_for_empty_subcol_task_name_and_no_default(self): self.c.add_collection(Collection("whatever")) with raises(ValueError): self.c["whatever"] class to_contexts: def setup_method(self): @task def mytask(c, text, boolean=False, number=5): print(text) @task(aliases=["mytask27"]) def mytask2(c): pass @task(aliases=["othertask"], default=True) def subtask(c): pass sub = Collection("sub", subtask) self.c = Collection(mytask, mytask2, sub) self.contexts = self.c.to_contexts() alias_tups = [list(x.aliases) for x in self.contexts] self.aliases = reduce(operator.add, alias_tups, []) # Focus on 'mytask' as it has the more interesting sig self.context = [x for x in self.contexts if x.name == "mytask"][0] def returns_iterable_of_Contexts_corresponding_to_tasks(self): assert self.context.name == "mytask" assert len(self.contexts) == 3 class auto_dash_names: def context_names_automatically_become_dashed(self): @task def my_task(c): pass contexts = Collection(my_task).to_contexts() assert contexts[0].name == "my-task" def percolates_to_subcollection_tasks(self): @task def outer_task(c): pass @task def inner_task(c): pass coll = Collection(outer_task, inner=Collection(inner_task)) contexts = coll.to_contexts() expected = {"outer-task", "inner.inner-task"} assert {x.name for x in contexts} == expected def percolates_to_subcollection_names(self): @task def my_task(c): pass coll = Collection(inner_coll=Collection(my_task)) contexts = coll.to_contexts() assert contexts[0].name == "inner-coll.my-task" def aliases_are_dashed_too(self): @task(aliases=["hi_im_underscored"]) def whatever(c): pass contexts = Collection(whatever).to_contexts() assert "hi-im-underscored" in contexts[0].aliases def leading_and_trailing_underscores_are_not_affected(self): @task def _what_evers_(c): pass @task def _inner_cooler_(c): pass inner = Collection("inner", _inner_cooler_) contexts = Collection(_what_evers_, inner).to_contexts() expected = {"_what-evers_", "inner._inner-cooler_"} assert {x.name for x in contexts} == expected def _nested_underscores(self, auto_dash_names=None): @task(aliases=["other_name"]) def my_task(c): pass @task(aliases=["other_inner"]) def inner_task(c): pass # NOTE: explicitly not giving kwarg to subcollection; this # tests that the top-level namespace performs the inverse # transformation when necessary. 
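                # (Concretely: with auto_dash_names left at the default
                # True, 'inner_task' surfaces as 'inner-coll.inner-task';
                # passing False preserves 'inner_coll.inner_task', as the
                # assertions below verify.)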
sub = Collection("inner_coll", inner_task) return Collection( my_task, sub, auto_dash_names=auto_dash_names ) def honors_init_setting_on_topmost_namespace(self): coll = self._nested_underscores(auto_dash_names=False) contexts = coll.to_contexts() names = ["my_task", "inner_coll.inner_task"] aliases = [["other_name"], ["inner_coll.other_inner"]] assert sorted(x.name for x in contexts) == sorted(names) assert sorted(x.aliases for x in contexts) == sorted(aliases) def transforms_are_applied_to_explicit_module_namespaces(self): # Symptom when bug present: Collection.to_contexts() dies # because it iterates over .task_names (transformed) and then # tries to use results to access __getitem__ (no auto # transform...because in all other situations, task structure # keys are already transformed; but this wasn't the case for # from_module() with explicit 'ns' objects!) namespace = self._nested_underscores() class FakeModule: __name__ = "my_module" ns = namespace coll = Collection.from_module( FakeModule(), auto_dash_names=False ) # NOTE: underscores, not dashes expected = {"my_task", "inner_coll.inner_task"} assert {x.name for x in coll.to_contexts()} == expected def allows_flaglike_access_via_flags(self): assert "--text" in self.context.flags def positional_arglist_preserves_order_given(self): @task(positional=("second", "first")) def mytask(c, first, second, third): pass coll = Collection() coll.add_task(mytask) c = coll.to_contexts()[0] expected = [c.args["second"], c.args["first"]] assert c.positional_args == expected def exposes_namespaced_task_names(self): assert "sub.subtask" in [x.name for x in self.contexts] def exposes_namespaced_task_aliases(self): assert "sub.othertask" in self.aliases def exposes_subcollection_default_tasks(self): assert "sub" in self.aliases def exposes_aliases(self): assert "mytask27" in self.aliases class task_names: def setup_method(self): self.c = Collection.from_module(load("explicit_root")) def returns_all_task_names_including_subtasks(self): names = set(self.c.task_names.keys()) assert names == {"top-level", "sub-level.sub-task"} def includes_aliases_and_defaults_as_values(self): names = self.c.task_names assert names["top-level"] == ["other-top"] subtask_names = names["sub-level.sub-task"] assert subtask_names == ["sub-level.other-sub", "sub-level"] class configuration: "Configuration methods" def setup_method(self): self.root = Collection() self.task = Task(_func, name="task") def basic_set_and_get(self): self.root.configure({"foo": "bar"}) assert self.root.configuration() == {"foo": "bar"} def configure_performs_merging(self): self.root.configure({"foo": "bar"}) assert self.root.configuration()["foo"] == "bar" self.root.configure({"biz": "baz"}) assert set(self.root.configuration().keys()), {"foo" == "biz"} def configure_merging_is_recursive_for_nested_dicts(self): self.root.configure({"foo": "bar", "biz": {"baz": "boz"}}) self.root.configure({"biz": {"otherbaz": "otherboz"}}) c = self.root.configuration() assert c["biz"]["baz"] == "boz" assert c["biz"]["otherbaz"] == "otherboz" def configure_allows_overwriting(self): self.root.configure({"foo": "one"}) assert self.root.configuration()["foo"] == "one" self.root.configure({"foo": "two"}) assert self.root.configuration()["foo"] == "two" def call_returns_dict(self): assert self.root.configuration() == {} self.root.configure({"foo": "bar"}) assert self.root.configuration() == {"foo": "bar"} def access_merges_from_subcollections(self): inner = Collection("inner", self.task) inner.configure({"foo": "bar"}) 
self.root.configure({"biz": "baz"}) # With no inner collection assert set(self.root.configuration().keys()) == {"biz"} # With inner collection self.root.add_collection(inner) keys = set(self.root.configuration("inner.task").keys()) assert keys == {"foo", "biz"} def parents_overwrite_children_in_path(self): inner = Collection("inner", self.task) inner.configure({"foo": "inner"}) self.root.add_collection(inner) # Before updating root collection's config, reflects inner assert self.root.configuration("inner.task")["foo"] == "inner" self.root.configure({"foo": "outer"}) # After, reflects outer (since that now overrides) assert self.root.configuration("inner.task")["foo"] == "outer" def sibling_subcollections_ignored(self): inner = Collection("inner", self.task) inner.configure({"foo": "hi there"}) inner2 = Collection("inner2", Task(_func, name="task2")) inner2.configure({"foo": "nope"}) root = Collection(inner, inner2) assert root.configuration("inner.task")["foo"] == "hi there" assert root.configuration("inner2.task2")["foo"] == "nope" def subcollection_paths_may_be_dotted(self): leaf = Collection("leaf", self.task) leaf.configure({"key": "leaf-value"}) middle = Collection("middle", leaf) root = Collection("root", middle) config = root.configuration("middle.leaf.task") assert config == {"key": "leaf-value"} def invalid_subcollection_paths_result_in_KeyError(self): # Straight up invalid with raises(KeyError): Collection("meh").configuration("nope.task") # Exists but wrong level (should be 'root.task', not just # 'task') inner = Collection("inner", self.task) with raises(KeyError): Collection("root", inner).configuration("task") def keys_dont_have_to_exist_in_full_path(self): # Kinda duplicates earlier stuff; meh # Key only stored on leaf leaf = Collection("leaf", self.task) leaf.configure({"key": "leaf-value"}) middle = Collection("middle", leaf) root = Collection("root", middle) config = root.configuration("middle.leaf.task") assert config == {"key": "leaf-value"} # Key stored on mid + leaf but not root middle.configure({"key": "whoa"}) assert root.configuration("middle.leaf.task") == {"key": "whoa"} class subcollection_from_path: def top_level_path(self): collection = Collection.from_module(load("tree")) build = collection.collections["build"] assert collection.subcollection_from_path("build") is build def nested_path(self): collection = Collection.from_module(load("tree")) docs = collection.collections["build"].collections["docs"] assert collection.subcollection_from_path("build.docs") is docs def invalid_path(self): # This is really just testing Lexicon/dict behavior but w/e, good # to be explicit, esp if we ever want this to become Exit or # another custom exception. (For now most/all callers manually # catch KeyError and raise Exit just to keep most Exit use high up # in the stack...) 
with raises(KeyError): collection = Collection.from_module(load("tree")) collection.subcollection_from_path("lol.whatever.man") class serialized: def empty_collection(self): expected = dict( name=None, help=None, tasks=[], default=None, collections=[] ) assert expected == Collection().serialized() def empty_named_collection(self): expected = dict( name="foo", help=None, tasks=[], default=None, collections=[] ) assert expected == Collection("foo").serialized() def empty_named_docstringed_collection(self): expected = dict( name="foo", help="Hi doc", tasks=[], default=None, collections=[], ) coll = Collection("foo") coll.__doc__ = "Hi doc" assert expected == coll.serialized() def name_docstring_default_and_tasks(self): expected = dict( name="deploy", help="How to deploy our code and configs.", tasks=[ dict( name="db", help="Deploy to our database servers.", aliases=["db-servers"], ), dict( name="everywhere", help="Deploy to all targets.", aliases=[], ), dict( name="web", help="Update and bounce the webservers.", aliases=[], ), ], default="everywhere", collections=[], ) with support_path(): from tree import deploy coll = Collection.from_module(deploy) assert expected == coll.serialized() def name_docstring_default_tasks_and_collections(self): docs = dict( name="docs", help="Tasks for managing Sphinx docs.", tasks=[ dict( name="all", help="Build all doc formats.", aliases=[] ), dict( name="html", help="Build HTML output only.", aliases=[] ), dict( name="pdf", help="Build PDF output only.", aliases=[] ), ], default="all", collections=[], ) python = dict( name="python", help="PyPI/etc distribution artifacts.", tasks=[ dict( name="all", help="Build all Python packages.", aliases=[], ), dict( name="sdist", help="Build classic style tar.gz.", aliases=[], ), dict(name="wheel", help="Build a wheel.", aliases=[]), ], default="all", collections=[], ) expected = dict( name="build", help="Tasks for compiling static code and assets.", tasks=[ dict( name="all", help="Build all necessary artifacts.", aliases=["everything"], ), dict( name="c-ext", help="Build our internal C extension.", aliases=["ext"], ), dict(name="zap", help="A silly way to clean.", aliases=[]), ], default="all", collections=[docs, python], ) with support_path(): from tree import build coll = Collection.from_module(build) assert expected == coll.serialized() def unnamed_subcollections(self): subcoll = Collection() named_subcoll = Collection("hello") # We're binding to name 'subcoll', but subcoll itself has no .name # attribute/value, which is what's being tested. When bug present, # that fact will cause serialized() to die on sorted() when # comparing to named_subcoll (which has a string name). root = Collection(named_subcoll, subcoll=subcoll) expected = dict( name=None, default=None, help=None, tasks=[], collections=[ # Expect anonymous first since we sort them as if their # name was the empty string. 
dict( tasks=[], collections=[], name=None, default=None, help=None, ), dict( tasks=[], collections=[], name="hello", default=None, help=None, ), ], ) assert expected == root.serialized() invoke-2.2.0/tests/completion.py000066400000000000000000000170071445356551000167100ustar00rootroot00000000000000import os import sys from invoke import Program, task, Collection import pytest from _util import expect, trap, ROOT pytestmark = pytest.mark.usefixtures("integration") @trap def _complete(invocation, collection=None, **kwargs): colstr = "" if collection: colstr = "-c {}".format(collection) command = "inv --complete {0} -- inv {0} {1}".format(colstr, invocation) Program(**kwargs).run(command, exit=False) return sys.stdout.getvalue() # TODO: remove in favor of direct asserts, needs non shite way of getting at # stderr instead of just stdout. def _assert_contains(haystack, needle): assert needle in haystack class CompletionScriptPrinter: """ Printing the completion script """ def setup_method(self): self.prev_cwd = os.getcwd() # Chdir to system root to (hopefully) avoid any tasks.py. This will # prove that --print-completion-script works w/o nearby tasks. os.chdir(ROOT) def teardown_method(self): os.chdir(self.prev_cwd) def only_accepts_certain_shells(self): expect( "--print-completion-script", err="needed value and was not given one", test=_assert_contains, ) expect( "--print-completion-script bla", # NOTE: this needs updating when the real world changes, just like # eg our --help output tests. That's OK & better than just # reimplementing the code under test here. err='Completion for shell "bla" not supported (options are: bash, fish, zsh).', # noqa test=_assert_contains, ) def prints_for_custom_binary_names(self): out, err = expect( "myapp --print-completion-script zsh", program=Program(binary_names=["mya", "myapp"]), invoke=False, ) # Combines some sentinels from vanilla test, with checks that it's # really replacing 'invoke' with desired binary names assert "_complete_mya() {" in out assert "invoke" not in out assert " mya myapp" in out def default_binary_names_is_completing_argv_0(self): out, err = expect( "someappname --print-completion-script zsh", program=Program(binary_names=None), invoke=False, ) assert "_complete_someappname() {" in out assert " someappname" in out def bash_works(self): out, err = expect( "someappname --print-completion-script bash", invoke=False ) assert "_complete_someappname() {" in out assert "complete -F" in out for line in out.splitlines(): if line.startswith("complete -F"): assert line.endswith(" someappname") def fish_works(self): out, err = expect( "someappname --print-completion-script fish", invoke=False ) assert "function __complete_someappname" in out assert "complete --command someappname" in out class ShellCompletion: """ Shell tab-completion behavior """ def no_input_means_just_task_names(self): expect("-c simple_ns_list --complete", out="z-toplevel\na.b.subtask\n") def custom_binary_name_completes(self): expect( "myapp -c integration --complete -- ba", program=Program(binary="myapp"), invoke=False, out="bar", test=_assert_contains, ) def aliased_custom_binary_name_completes(self): for used_binary in ("my", "myapp"): expect( "{0} -c integration --complete -- ba".format(used_binary), program=Program(binary="my[app]"), invoke=False, out="bar", test=_assert_contains, ) def no_input_with_no_tasks_yields_empty_response(self): expect("-c empty --complete", out="") def task_name_completion_includes_aliases(self): for name in ("z\n", "toplevel"): assert name in 
_complete("", "alias_sorting") def top_level_with_dash_means_core_options(self): output = _complete("-") # No point mirroring all core options, just spot check a few for flag in ("--no-dedupe", "-d", "--debug", "-V", "--version"): assert "{}\n".format(flag) in output def bare_double_dash_shows_only_long_core_options(self): output = _complete("--") assert "--no-dedupe" in output assert "-V" not in output def task_names_only_complete_other_task_names(self): # Because only tokens starting with a dash should result in options. assert "print-name" in _complete("print-foo", "integration") def task_name_completion_includes_tasks_already_seen(self): # Because it's valid to call the same task >1 time. assert "print-foo" in _complete("print-foo", "integration") def per_task_flags_complete_with_single_dashes(self): for flag in ("--name", "-n"): assert flag in _complete("print-name -", "integration") def per_task_flags_complete_with_double_dashes(self): output = _complete("print-name --", "integration") assert "--name" in output assert "-n\n" not in output # newline because -n is in --name def flag_completion_includes_inverse_booleans(self): output = _complete("basic-bool -", "foo") assert "--no-mybool" in output def tasks_with_positional_args_complete_with_flags(self): # Because otherwise completing them is invalid anyways. # NOTE: this currently duplicates another test because this test cares # about a specific detail. output = _complete("print-name --", "integration") assert "--name" in output def core_flags_taking_values_have_no_completion_output(self): # So the shell's default completion is available. assert _complete("-f") == "" def per_task_flags_taking_values_have_no_completion_output(self): assert _complete("basic-arg --arg", "foo") == "" def core_bool_flags_have_task_name_completion(self): assert "mytask" in _complete("--echo", "foo") def per_task_bool_flags_have_task_name_completion(self): assert "mytask" in _complete("basic-bool --mybool", "foo") def core_partial_or_invalid_flags_print_all_flags(self): for flag in ("--echo", "--complete"): for given in ("--e", "--nope"): assert flag in _complete(given) def per_task_partial_or_invalid_flags_print_all_flags(self): for flag in ("--arg1", "--otherarg"): for given in ("--ar", "--nope"): completion = _complete("multiple-args {}".format(given), "foo") assert flag in completion @trap def completion_given_parser_by_program(self): # Trial case: means honoring config ignore_unknown_help option # (i.e. not exploding when any task in the target collection has unused # help keys; doesn't actually need the completion invocation to include # said task!) 
# NOTE: subclassing to mimic real world report case, and for ease of # shoving in a config override @task(help=dict(lol="nope")) def noboomplz(c): pass ns = Collection(noboomplz) class MyProgram(Program): def create_config(self): super().create_config() self.config.tasks.ignore_unknown_help = True MyProgram(namespace=ns).run("inv --complete -- inv noboom", exit=False) assert sys.stdout.getvalue().strip() == "noboomplz" invoke-2.2.0/tests/concurrency.py000066400000000000000000000062751445356551000170760ustar00rootroot00000000000000from queue import Queue from invoke.util import ExceptionWrapper, ExceptionHandlingThread as EHThread # TODO: rename class ExceptionHandlingThread_: class via_target: def setup_method(self): def worker(q): q.put(7) self.worker = worker def base_case(self): queue = Queue() t = EHThread(target=self.worker, args=[queue]) t.start() t.join() assert queue.get(block=False) == 7 assert queue.empty() def catches_exceptions(self): # Induce exception by submitting a bad queue obj t = EHThread(target=self.worker, args=[None]) t.start() t.join() wrapper = t.exception() assert isinstance(wrapper, ExceptionWrapper) assert wrapper.kwargs == {"args": [None], "target": self.worker} assert wrapper.type == AttributeError assert isinstance(wrapper.value, AttributeError) def exhibits_is_dead_flag(self): # Spin up a thread that will except internally (can't put() on a # None object) t = EHThread(target=self.worker, args=[None]) t.start() t.join() # Excepted -> it's dead assert t.is_dead # Spin up a happy thread that can exit peacefully (it's not "dead", # though...maybe we should change that terminology) t = EHThread(target=self.worker, args=[Queue()]) t.start() t.join() # Not dead, just uh...sleeping? assert not t.is_dead class via_subclassing: def setup_method(self): class MyThread(EHThread): def __init__(self, *args, **kwargs): self.queue = kwargs.pop("queue") super().__init__(*args, **kwargs) def _run(self): self.queue.put(7) self.klass = MyThread def base_case(self): queue = Queue() t = self.klass(queue=queue) t.start() t.join() assert queue.get(block=False) == 7 assert queue.empty() def catches_exceptions(self): # Induce exception by submitting a bad queue obj t = self.klass(queue=None) t.start() t.join() wrapper = t.exception() assert isinstance(wrapper, ExceptionWrapper) assert wrapper.kwargs == {} assert wrapper.type == AttributeError assert isinstance(wrapper.value, AttributeError) def exhibits_is_dead_flag(self): # Spin up a thread that will except internally (can't put() on a # None object) t = self.klass(queue=None) t.start() t.join() # Excepted -> it's dead assert t.is_dead # Spin up a happy thread that can exit peacefully (it's not "dead", # though...maybe we should change that terminology) t = self.klass(queue=Queue()) t.start() t.join() # Not dead, just uh...sleeping? 
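            # (is_dead is only True for threads that terminated via an
            # unhandled exception; one that simply ran to completion, like
            # this one, reports False.)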
assert not t.is_dead invoke-2.2.0/tests/config.py000066400000000000000000001260401445356551000160020ustar00rootroot00000000000000import pickle import os from os.path import join from unittest.mock import patch, call, Mock import pytest from pytest_relaxed import raises from invoke import config as config_mod # for accessing mocks from invoke.runners import Local from invoke.config import Config from invoke.exceptions import ( AmbiguousEnvVar, UncastableEnvVar, UnknownFileType, UnpicklableConfigMember, ) from _util import skip_if_windows, support pytestmark = pytest.mark.usefixtures("integration") CONFIGS_PATH = "configs" TYPES = ("yaml", "yml", "json", "python") def _load(kwarg, type_, **kwargs): path = join(CONFIGS_PATH, type_ + "/") kwargs[kwarg] = path return Config(**kwargs) class Config_: class class_attrs: # TODO: move all other non-data-bearing kwargs to this mode class prefix: def defaults_to_invoke(self): assert Config().prefix == "invoke" @patch.object(Config, "_load_yaml") def informs_config_filenames(self, load_yaml): class MyConf(Config): prefix = "other" MyConf(system_prefix="dir/") load_yaml.assert_any_call("dir/other.yaml") def informs_env_var_prefix(self): os.environ["OTHER_FOO"] = "bar" class MyConf(Config): prefix = "other" c = MyConf(defaults={"foo": "notbar"}) c.load_shell_env() assert c.foo == "bar" class file_prefix: def defaults_to_None(self): assert Config().file_prefix is None @patch.object(Config, "_load_yaml") def informs_config_filenames(self, load_yaml): class MyConf(Config): file_prefix = "other" MyConf(system_prefix="dir/") load_yaml.assert_any_call("dir/other.yaml") class env_prefix: def defaults_to_None(self): assert Config().env_prefix is None def informs_env_vars_loaded(self): os.environ["OTHER_FOO"] = "bar" class MyConf(Config): env_prefix = "other" c = MyConf(defaults={"foo": "notbar"}) c.load_shell_env() assert c.foo == "bar" class global_defaults: @skip_if_windows def basic_settings(self): # Just a catchall for what the baseline config settings should # be...for some reason we're not actually capturing all of these # reliably (even if their defaults are often implied by the tests # which override them, e.g. runner tests around warn=True, etc). expected = { "run": { "asynchronous": False, "disown": False, "dry": False, "echo": False, "echo_format": "\033[1;37m{command}\033[0m", "echo_stdin": None, "encoding": None, "env": {}, "err_stream": None, "fallback": True, "hide": None, "in_stream": None, "out_stream": None, "pty": False, "replace_env": False, "shell": "/bin/bash", "warn": False, "watchers": [], }, "runners": {"local": Local}, "sudo": { "password": None, "prompt": "[sudo] password: ", "user": None, }, "tasks": { "auto_dash_names": True, "collection_name": "tasks", "dedupe": True, "executor_class": None, "ignore_unknown_help": False, "search_root": None, }, "timeouts": {"command": None}, } assert Config.global_defaults() == expected class init: "__init__" def can_be_empty(self): assert Config().__class__ == Config # derp @patch.object(Config, "_load_yaml") def configure_global_location_prefix(self, load_yaml): # This is a bit funky but more useful than just replicating the # same test farther down? Config(system_prefix="meh/") load_yaml.assert_any_call("meh/invoke.yaml") @skip_if_windows @patch.object(Config, "_load_yaml") def default_system_prefix_is_etc(self, load_yaml): # TODO: make this work on Windows somehow without being a total # tautology? heh. 
Config() load_yaml.assert_any_call("/etc/invoke.yaml") @patch.object(Config, "_load_yaml") def configure_user_location_prefix(self, load_yaml): Config(user_prefix="whatever/") load_yaml.assert_any_call("whatever/invoke.yaml") @patch.object(Config, "_load_yaml") def default_user_prefix_is_homedir_plus_dot(self, load_yaml): Config() # NOTE: expects autouse fixture which patches expanduser # Make sure we called expanduser() with tilde syntax config_mod.expanduser.assert_any_call("~/.invoke.yaml") # Make sure result of that call was passed into load_yaml load_yaml.assert_any_call(config_mod.expanduser("~/.invoke.yaml")) @patch.object(Config, "_load_yaml") def configure_project_location(self, load_yaml): Config(project_location="someproject").load_project() load_yaml.assert_any_call(join("someproject", "invoke.yaml")) @patch.object(Config, "_load_yaml") def configure_runtime_path(self, load_yaml): Config(runtime_path="some/path.yaml").load_runtime() load_yaml.assert_any_call("some/path.yaml") def accepts_defaults_dict_kwarg(self): c = Config(defaults={"super": "low level"}) assert c.super == "low level" def overrides_dict_is_first_posarg(self): c = Config({"new": "data", "run": {"hide": True}}) assert c.run.hide is True # default is False assert c.run.warn is False # in global defaults, untouched assert c.new == "data" # data only present at overrides layer def overrides_dict_is_also_a_kwarg(self): c = Config(overrides={"run": {"hide": True}}) assert c.run.hide is True @patch.object(Config, "load_system") @patch.object(Config, "load_user") @patch.object(Config, "merge") def system_and_user_files_loaded_automatically( self, merge, load_u, load_s ): Config() load_s.assert_called_once_with(merge=False) load_u.assert_called_once_with(merge=False) merge.assert_called_once_with() @patch.object(Config, "load_system") @patch.object(Config, "load_user") def can_defer_loading_system_and_user_files(self, load_u, load_s): config = Config(lazy=True) assert not load_s.called assert not load_u.called # Make sure default levels are still in place! (When bug present, # i.e. merge() never called, config appears effectively empty.) assert config.run.echo is False class basic_API: "Basic API components" def can_be_used_directly_after_init(self): # No load() here... 
c = Config({"lots of these": "tests look similar"}) assert c["lots of these"] == "tests look similar" def allows_dict_and_attr_access(self): # TODO: combine with tests for Context probably c = Config({"foo": "bar"}) assert c.foo == "bar" assert c["foo"] == "bar" def nested_dict_values_also_allow_dual_access(self): # TODO: ditto c = Config({"foo": "bar", "biz": {"baz": "boz"}}) # Sanity check - nested doesn't somehow kill simple top level assert c.foo == "bar" assert c["foo"] == "bar" # Actual check assert c.biz.baz == "boz" assert c["biz"]["baz"] == "boz" assert c.biz["baz"] == "boz" assert c["biz"].baz == "boz" def attr_access_has_useful_error_msg(self): c = Config() try: c.nope except AttributeError as e: expected = """ No attribute or config key found for 'nope' Valid keys: ['run', 'runners', 'sudo', 'tasks', 'timeouts'] Valid real attributes: ['clear', 'clone', 'env_prefix', 'file_prefix', 'from_data', 'global_defaults', 'load_base_conf_files', 'load_collection', 'load_defaults', 'load_overrides', 'load_project', 'load_runtime', 'load_shell_env', 'load_system', 'load_user', 'merge', 'pop', 'popitem', 'prefix', 'set_project_location', 'set_runtime_path', 'setdefault', 'update'] """.strip() # noqa assert str(e) == expected else: assert False, "Didn't get an AttributeError on bad key!" def subkeys_get_merged_not_overwritten(self): # Ensures nested keys merge deeply instead of shallowly. defaults = {"foo": {"bar": "baz"}} overrides = {"foo": {"notbar": "notbaz"}} c = Config(defaults=defaults, overrides=overrides) assert c.foo.notbar == "notbaz" assert c.foo.bar == "baz" def is_iterable_like_dict(self): c = Config(defaults={"a": 1, "b": 2}) assert set(c.keys()) == {"a", "b"} assert set(list(c)) == {"a", "b"} def supports_readonly_dict_protocols(self): # Use single-keypair dict to avoid sorting problems in tests. 
c = Config(defaults={"foo": "bar"}) c2 = Config(defaults={"foo": "bar"}) assert "foo" in c assert "foo" in c2 # mostly just to trigger loading :x assert c == c2 assert len(c) == 1 assert c.get("foo") == "bar" assert list(c.items()) == [("foo", "bar")] assert list(c.keys()) == ["foo"] assert list(c.values()) == ["bar"] class runtime_loading_of_defaults_and_overrides: def defaults_can_be_given_via_method(self): c = Config() assert "foo" not in c c.load_defaults({"foo": "bar"}) assert c.foo == "bar" def defaults_can_skip_merging(self): c = Config() c.load_defaults({"foo": "bar"}, merge=False) assert "foo" not in c c.merge() assert c.foo == "bar" def overrides_can_be_given_via_method(self): c = Config(defaults={"foo": "bar"}) assert c.foo == "bar" # defaults level c.load_overrides({"foo": "notbar"}) assert c.foo == "notbar" # overrides level def overrides_can_skip_merging(self): c = Config() c.load_overrides({"foo": "bar"}, merge=False) assert "foo" not in c c.merge() assert c.foo == "bar" class deletion_methods: def pop(self): # Root c = Config(defaults={"foo": "bar"}) assert c.pop("foo") == "bar" assert c == {} # With the default arg assert c.pop("wut", "fine then") == "fine then" # Leaf (different key to avoid AmbiguousMergeError) c.nested = {"leafkey": "leafval"} assert c.nested.pop("leafkey") == "leafval" assert c == {"nested": {}} def delitem(self): "__delitem__" c = Config(defaults={"foo": "bar"}) del c["foo"] assert c == {} c.nested = {"leafkey": "leafval"} del c.nested["leafkey"] assert c == {"nested": {}} def delattr(self): "__delattr__" c = Config(defaults={"foo": "bar"}) del c.foo assert c == {} c.nested = {"leafkey": "leafval"} del c.nested.leafkey assert c == {"nested": {}} def clear(self): c = Config(defaults={"foo": "bar"}) c.clear() assert c == {} c.nested = {"leafkey": "leafval"} c.nested.clear() assert c == {"nested": {}} def popitem(self): c = Config(defaults={"foo": "bar"}) assert c.popitem() == ("foo", "bar") assert c == {} c.nested = {"leafkey": "leafval"} assert c.nested.popitem() == ("leafkey", "leafval") assert c == {"nested": {}} class modification_methods: def setitem(self): c = Config(defaults={"foo": "bar"}) c["foo"] = "notbar" assert c.foo == "notbar" del c["foo"] c["nested"] = {"leafkey": "leafval"} assert c == {"nested": {"leafkey": "leafval"}} def setdefault(self): c = Config({"foo": "bar", "nested": {"leafkey": "leafval"}}) assert c.setdefault("foo") == "bar" assert c.nested.setdefault("leafkey") == "leafval" assert c.setdefault("notfoo", "notbar") == "notbar" assert c.notfoo == "notbar" nested = c.nested.setdefault("otherleaf", "otherval") assert nested == "otherval" assert c.nested.otherleaf == "otherval" def update(self): c = Config( defaults={"foo": "bar", "nested": {"leafkey": "leafval"}} ) # Regular update(dict) c.update({"foo": "notbar"}) assert c.foo == "notbar" c.nested.update({"leafkey": "otherval"}) assert c.nested.leafkey == "otherval" # Apparently allowed but wholly useless c.update() expected = {"foo": "notbar", "nested": {"leafkey": "otherval"}} assert c == expected # Kwarg edition c.update(foo="otherbar") assert c.foo == "otherbar" # Iterator of 2-tuples edition c.nested.update( [("leafkey", "yetanotherval"), ("newleaf", "turnt")] ) assert c.nested.leafkey == "yetanotherval" assert c.nested.newleaf == "turnt" def reinstatement_of_deleted_values_works_ok(self): # Sounds like a stupid thing to test, but when we have to track # deletions and mutations manually...it's an easy thing to overlook c = Config(defaults={"foo": "bar"}) assert c.foo == 
"bar" del c["foo"] # Sanity checks assert "foo" not in c assert len(c) == 0 # Put it back again...as a different value, for funsies c.foo = "formerly bar" # And make sure it stuck assert c.foo == "formerly bar" def deleting_parent_keys_of_deleted_keys_subsumes_them(self): c = Config({"foo": {"bar": "biz"}}) del c.foo["bar"] del c.foo # Make sure we didn't somehow still end up with {'foo': {'bar': # None}} assert c._deletions == {"foo": None} def supports_mutation_via_attribute_access(self): c = Config({"foo": "bar"}) assert c.foo == "bar" c.foo = "notbar" assert c.foo == "notbar" assert c["foo"] == "notbar" def supports_nested_mutation_via_attribute_access(self): c = Config({"foo": {"bar": "biz"}}) assert c.foo.bar == "biz" c.foo.bar = "notbiz" assert c.foo.bar == "notbiz" assert c["foo"]["bar"] == "notbiz" def real_attrs_and_methods_win_over_attr_proxying(self): # Setup class MyConfig(Config): myattr = None def mymethod(self): return 7 c = MyConfig({"myattr": "foo", "mymethod": "bar"}) # By default, attr and config value separate assert c.myattr is None assert c["myattr"] == "foo" # After a setattr, same holds true c.myattr = "notfoo" assert c.myattr == "notfoo" assert c["myattr"] == "foo" # Method and config value separate assert callable(c.mymethod) assert c.mymethod() == 7 assert c["mymethod"] == "bar" # And same after setattr def monkeys(): return 13 c.mymethod = monkeys assert c.mymethod() == 13 assert c["mymethod"] == "bar" def config_itself_stored_as_private_name(self): # I.e. one can refer to a key called 'config', which is relatively # commonplace (e.g. .myservice.config -> a config file # contents or path or etc) c = Config() c["foo"] = {"bar": "baz"} c["whatever"] = {"config": "myconfig"} assert c.foo.bar == "baz" assert c.whatever.config == "myconfig" def inherited_real_attrs_also_win_over_config_keys(self): class MyConfigParent(Config): parent_attr = 17 class MyConfig(MyConfigParent): pass c = MyConfig() assert c.parent_attr == 17 c.parent_attr = 33 oops = "Oops! Looks like config won over real attr!" assert "parent_attr" not in c, oops assert c.parent_attr == 33 c["parent_attr"] = "fifteen" assert c.parent_attr == 33 assert c["parent_attr"] == "fifteen" def nonexistent_attrs_can_be_set_to_create_new_top_level_configs(self): # I.e. some_config.foo = 'bar' is like some_config['foo'] = 'bar'. # When this test breaks it usually means some_config.foo = 'bar' # sets a regular attribute - and the configuration itself is never # touched! c = Config() c.some_setting = "some_value" assert c["some_setting"] == "some_value" def nonexistent_attr_setting_works_nested_too(self): c = Config() c.a_nest = {} assert c["a_nest"] == {} c.a_nest.an_egg = True assert c["a_nest"]["an_egg"] def string_display(self): "__str__ and friends" config = Config(defaults={"foo": "bar"}) assert repr(config) == "" def merging_does_not_wipe_user_modifications_or_deletions(self): c = Config({"foo": {"bar": "biz"}, "error": True}) c.foo.bar = "notbiz" del c["error"] assert c["foo"]["bar"] == "notbiz" assert "error" not in c c.merge() # Will be back to 'biz' if user changes don't get saved on their # own (previously they are just mutations on the cached central # config) assert c["foo"]["bar"] == "notbiz" # And this would still be here, too assert "error" not in c class config_file_loading: "Configuration file loading" def system_global(self): "Systemwide conf files" # NOTE: using lazy=True to avoid autoloading so we can prove # load_system() works. 
for type_ in TYPES: config = _load("system_prefix", type_, lazy=True) assert "outer" not in config config.load_system() assert config.outer.inner.hooray == type_ def system_can_skip_merging(self): config = _load("system_prefix", "yml", lazy=True) assert "outer" not in config._system assert "outer" not in config config.load_system(merge=False) # Test that we loaded into the per-level dict, but not the # central/merged config. assert "outer" in config._system assert "outer" not in config def user_specific(self): "User-specific conf files" # NOTE: using lazy=True to avoid autoloading so we can prove # load_user() works. for type_ in TYPES: config = _load("user_prefix", type_, lazy=True) assert "outer" not in config config.load_user() assert config.outer.inner.hooray == type_ def user_can_skip_merging(self): config = _load("user_prefix", "yml", lazy=True) assert "outer" not in config._user assert "outer" not in config config.load_user(merge=False) # Test that we loaded into the per-level dict, but not the # central/merged config. assert "outer" in config._user assert "outer" not in config def project_specific(self): "Local-to-project conf files" for type_ in TYPES: c = Config(project_location=join(CONFIGS_PATH, type_)) assert "outer" not in c c.load_project() assert c.outer.inner.hooray == type_ def project_can_skip_merging(self): config = Config( project_location=join(CONFIGS_PATH, "yml"), lazy=True ) assert "outer" not in config._project assert "outer" not in config config.load_project(merge=False) # Test that we loaded into the per-level dict, but not the # central/merged config. assert "outer" in config._project assert "outer" not in config def loads_no_project_specific_file_if_no_project_location_given(self): c = Config() assert c._project_path is None c.load_project() assert list(c._project.keys()) == [] defaults = ["tasks", "run", "runners", "sudo", "timeouts"] assert set(c.keys()) == set(defaults) def project_location_can_be_set_after_init(self): c = Config() assert "outer" not in c c.set_project_location(join(CONFIGS_PATH, "yml")) c.load_project() assert c.outer.inner.hooray == "yml" def runtime_conf_via_cli_flag(self): c = Config(runtime_path=join(CONFIGS_PATH, "yaml", "invoke.yaml")) c.load_runtime() assert c.outer.inner.hooray == "yaml" def runtime_can_skip_merging(self): path = join(CONFIGS_PATH, "yaml", "invoke.yaml") config = Config(runtime_path=path, lazy=True) assert "outer" not in config._runtime assert "outer" not in config config.load_runtime(merge=False) # Test that we loaded into the per-level dict, but not the # central/merged config. assert "outer" in config._runtime assert "outer" not in config @raises(UnknownFileType) def unknown_suffix_in_runtime_path_raises_useful_error(self): c = Config(runtime_path=join(CONFIGS_PATH, "screw.ini")) c.load_runtime() def python_modules_dont_load_special_vars(self): "Python modules don't load special vars" # Borrow another test's Python module. c = _load("system_prefix", "python") # Sanity test that lowercase works assert c.outer.inner.hooray == "python" # Real test that builtins, etc are stripped out for special in ("builtins", "file", "package", "name", "doc"): assert "__{}__".format(special) not in c def python_modules_except_usefully_on_unpicklable_modules(self): # Re: #556; when bug present, a TypeError pops up instead (granted, # at merge time, but we want it to raise ASAP, so we're testing the # intended new behavior: raising at config load time. 
c = Config() c.set_runtime_path(join(support, "has_modules.py")) expected = r"'os' is a module.*giving a tasks file.*mistake" with pytest.raises(UnpicklableConfigMember, match=expected): c.load_runtime(merge=False) @patch("invoke.config.debug") def nonexistent_files_are_skipped_and_logged(self, mock_debug): c = Config() c._load_yml = Mock(side_effect=IOError(2, "aw nuts")) c.set_runtime_path("is-a.yml") # Triggers use of _load_yml c.load_runtime() mock_debug.assert_any_call("Didn't see any is-a.yml, skipping.") @raises(IOError) def non_missing_file_IOErrors_are_raised(self): c = Config() c._load_yml = Mock(side_effect=IOError(17, "uh, what?")) c.set_runtime_path("is-a.yml") # Triggers use of _load_yml c.load_runtime() class collection_level_config_loading: def performed_explicitly_and_directly(self): # TODO: do we want to update the other levels to allow 'direct' # loading like this, now that they all have explicit methods? c = Config() assert "foo" not in c c.load_collection({"foo": "bar"}) assert c.foo == "bar" def merging_can_be_deferred(self): c = Config() assert "foo" not in c._collection assert "foo" not in c c.load_collection({"foo": "bar"}, merge=False) assert "foo" in c._collection assert "foo" not in c class comparison_and_hashing: def comparison_looks_at_merged_config(self): c1 = Config(defaults={"foo": {"bar": "biz"}}) # Empty defaults to suppress global_defaults c2 = Config(defaults={}, overrides={"foo": {"bar": "biz"}}) assert c1 is not c2 assert c1._defaults != c2._defaults assert c1 == c2 def allows_comparison_with_real_dicts(self): c = Config({"foo": {"bar": "biz"}}) assert c["foo"] == {"bar": "biz"} @raises(TypeError) def is_explicitly_not_hashable(self): hash(Config()) class env_vars: "Environment variables" def base_case_defaults_to_INVOKE_prefix(self): os.environ["INVOKE_FOO"] = "bar" c = Config(defaults={"foo": "notbar"}) c.load_shell_env() assert c.foo == "bar" def non_predeclared_settings_do_not_get_consumed(self): os.environ["INVOKE_HELLO"] = "is it me you're looking for?" 
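# (Illustrative aside: load_shell_env only consumes env vars whose keys
# are pre-declared somewhere in the config, and casts the string to the
# declared default's type -- see the type_casting tests below. Sketch:)
#   os.environ["INVOKE_FOO"] = "5"
#   cfg = Config(defaults={"foo": 0})
#   cfg.load_shell_env()
#   assert cfg.foo == 5  # "5" cast to int; undeclared INVOKE_* vars ignored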
c = Config() c.load_shell_env() assert "HELLO" not in c assert "hello" not in c def underscores_top_level(self): os.environ["INVOKE_FOO_BAR"] = "biz" c = Config(defaults={"foo_bar": "notbiz"}) c.load_shell_env() assert c.foo_bar == "biz" def underscores_nested(self): os.environ["INVOKE_FOO_BAR"] = "biz" c = Config(defaults={"foo": {"bar": "notbiz"}}) c.load_shell_env() assert c.foo.bar == "biz" def both_types_of_underscores_mixed(self): os.environ["INVOKE_FOO_BAR_BIZ"] = "baz" c = Config(defaults={"foo_bar": {"biz": "notbaz"}}) c.load_shell_env() assert c.foo_bar.biz == "baz" @raises(AmbiguousEnvVar) def ambiguous_underscores_dont_guess(self): os.environ["INVOKE_FOO_BAR"] = "biz" c = Config(defaults={"foo_bar": "wat", "foo": {"bar": "huh"}}) c.load_shell_env() class type_casting: def strings_replaced_with_env_value(self): os.environ["INVOKE_FOO"] = "myvalue" c = Config(defaults={"foo": "myoldvalue"}) c.load_shell_env() assert c.foo == "myvalue" assert isinstance(c.foo, str) def None_replaced(self): os.environ["INVOKE_FOO"] = "something" c = Config(defaults={"foo": None}) c.load_shell_env() assert c.foo == "something" def booleans(self): for input_, result in ( ("0", False), ("1", True), ("", False), ("meh", True), ("false", True), ): os.environ["INVOKE_FOO"] = input_ c = Config(defaults={"foo": bool()}) c.load_shell_env() assert c.foo == result def boolean_type_inputs_with_non_boolean_defaults(self): for input_ in ("0", "1", "", "meh", "false"): os.environ["INVOKE_FOO"] = input_ c = Config(defaults={"foo": "bar"}) c.load_shell_env() assert c.foo == input_ def numeric_types_become_casted(self): tests = [ (int, "5", 5), (float, "5.5", 5.5), ] for old, new_, result in tests: os.environ["INVOKE_FOO"] = new_ c = Config(defaults={"foo": old()}) c.load_shell_env() assert c.foo == result def arbitrary_types_work_too(self): os.environ["INVOKE_FOO"] = "whatever" class Meh: def __init__(self, thing=None): pass old_obj = Meh() c = Config(defaults={"foo": old_obj}) c.load_shell_env() assert isinstance(c.foo, Meh) assert c.foo is not old_obj class uncastable_types: @raises(UncastableEnvVar) def _uncastable_type(self, default): os.environ["INVOKE_FOO"] = "stuff" c = Config(defaults={"foo": default}) c.load_shell_env() def lists(self): self._uncastable_type(["a", "list"]) def tuples(self): self._uncastable_type(("a", "tuple")) class hierarchy: "Config hierarchy in effect" # # NOTE: most of these just leverage existing test fixtures (which live # in their own directories & have differing values for the 'hooray' # key), since we normally don't need more than 2-3 different file # locations for any one test. 
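# (Summary sketch of the precedence these tests pin down, lowest to
# highest: defaults -> collection -> system -> user -> project -> shell
# env -> runtime file -> CLI overrides. E.g., reusing the fixtures below:)
#   c = Config(
#       system_prefix=join(CONFIGS_PATH, "yaml/"),
#       user_prefix=join(CONFIGS_PATH, "json/"),
#   )
#   assert c.outer.inner.hooray == "json"  # user level beats system level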
# def collection_overrides_defaults(self): c = Config(defaults={"nested": {"setting": "default"}}) c.load_collection({"nested": {"setting": "collection"}}) assert c.nested.setting == "collection" def systemwide_overrides_collection(self): c = Config(system_prefix=join(CONFIGS_PATH, "yaml/")) c.load_collection({"outer": {"inner": {"hooray": "defaults"}}}) assert c.outer.inner.hooray == "yaml" def user_overrides_systemwide(self): c = Config( system_prefix=join(CONFIGS_PATH, "yaml/"), user_prefix=join(CONFIGS_PATH, "json/"), ) assert c.outer.inner.hooray == "json" def user_overrides_collection(self): c = Config(user_prefix=join(CONFIGS_PATH, "json/")) c.load_collection({"outer": {"inner": {"hooray": "defaults"}}}) assert c.outer.inner.hooray == "json" def project_overrides_user(self): c = Config( user_prefix=join(CONFIGS_PATH, "json/"), project_location=join(CONFIGS_PATH, "yaml"), ) c.load_project() assert c.outer.inner.hooray == "yaml" def project_overrides_systemwide(self): c = Config( system_prefix=join(CONFIGS_PATH, "json/"), project_location=join(CONFIGS_PATH, "yaml"), ) c.load_project() assert c.outer.inner.hooray == "yaml" def project_overrides_collection(self): c = Config(project_location=join(CONFIGS_PATH, "yaml")) c.load_project() c.load_collection({"outer": {"inner": {"hooray": "defaults"}}}) assert c.outer.inner.hooray == "yaml" def env_vars_override_project(self): os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env" c = Config(project_location=join(CONFIGS_PATH, "yaml")) c.load_project() c.load_shell_env() assert c.outer.inner.hooray == "env" def env_vars_override_user(self): os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env" c = Config(user_prefix=join(CONFIGS_PATH, "yaml/")) c.load_shell_env() assert c.outer.inner.hooray == "env" def env_vars_override_systemwide(self): os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env" c = Config(system_prefix=join(CONFIGS_PATH, "yaml/")) c.load_shell_env() assert c.outer.inner.hooray == "env" def env_vars_override_collection(self): os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env" c = Config() c.load_collection({"outer": {"inner": {"hooray": "defaults"}}}) c.load_shell_env() assert c.outer.inner.hooray == "env" def runtime_overrides_env_vars(self): os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env" c = Config(runtime_path=join(CONFIGS_PATH, "json", "invoke.json")) c.load_runtime() c.load_shell_env() assert c.outer.inner.hooray == "json" def runtime_overrides_project(self): c = Config( runtime_path=join(CONFIGS_PATH, "json", "invoke.json"), project_location=join(CONFIGS_PATH, "yaml"), ) c.load_runtime() c.load_project() assert c.outer.inner.hooray == "json" def runtime_overrides_user(self): c = Config( runtime_path=join(CONFIGS_PATH, "json", "invoke.json"), user_prefix=join(CONFIGS_PATH, "yaml/"), ) c.load_runtime() assert c.outer.inner.hooray == "json" def runtime_overrides_systemwide(self): c = Config( runtime_path=join(CONFIGS_PATH, "json", "invoke.json"), system_prefix=join(CONFIGS_PATH, "yaml/"), ) c.load_runtime() assert c.outer.inner.hooray == "json" def runtime_overrides_collection(self): c = Config(runtime_path=join(CONFIGS_PATH, "json", "invoke.json")) c.load_collection({"outer": {"inner": {"hooray": "defaults"}}}) c.load_runtime() assert c.outer.inner.hooray == "json" def cli_overrides_override_all(self): "CLI-driven overrides win vs all other layers" # TODO: expand into more explicit tests like the above? 
meh c = Config( overrides={"outer": {"inner": {"hooray": "overrides"}}}, runtime_path=join(CONFIGS_PATH, "json", "invoke.json"), ) c.load_runtime() assert c.outer.inner.hooray == "overrides" def yaml_prevents_yml_json_or_python(self): c = Config(system_prefix=join(CONFIGS_PATH, "all-four/")) assert "json-only" not in c assert "python_only" not in c assert "yml-only" not in c assert "yaml-only" in c assert c.shared == "yaml-value" def yml_prevents_json_or_python(self): c = Config(system_prefix=join(CONFIGS_PATH, "three-of-em/")) assert "json-only" not in c assert "python_only" not in c assert "yml-only" in c assert c.shared == "yml-value" def json_prevents_python(self): c = Config(system_prefix=join(CONFIGS_PATH, "json-and-python/")) assert "python_only" not in c assert "json-only" in c assert c.shared == "json-value" class clone: def preserves_basic_members(self): c1 = Config( defaults={"key": "default"}, overrides={"key": "override"}, system_prefix="global", user_prefix="user", project_location="project", runtime_path="runtime.yaml", ) c2 = c1.clone() # NOTE: expecting identical defaults also implicitly tests that # clone() passes in defaults= instead of doing an empty init + # copy. (When that is not the case, we end up with # global_defaults() being rerun and re-added to _defaults...) assert c2._defaults == c1._defaults assert c2._defaults is not c1._defaults assert c2._overrides == c1._overrides assert c2._overrides is not c1._overrides assert c2._system_prefix == c1._system_prefix assert c2._user_prefix == c1._user_prefix assert c2._project_prefix == c1._project_prefix assert c2.prefix == c1.prefix assert c2.file_prefix == c1.file_prefix assert c2.env_prefix == c1.env_prefix assert c2._runtime_path == c1._runtime_path def preserves_merged_config(self): c = Config( defaults={"key": "default"}, overrides={"key": "override"} ) assert c.key == "override" assert c._defaults["key"] == "default" c2 = c.clone() assert c2.key == "override" assert c2._defaults["key"] == "default" assert c2._overrides["key"] == "override" def preserves_file_data(self): c = Config(system_prefix=join(CONFIGS_PATH, "yaml/")) assert c.outer.inner.hooray == "yaml" c2 = c.clone() assert c2.outer.inner.hooray == "yaml" assert c2._system == {"outer": {"inner": {"hooray": "yaml"}}} @patch.object( Config, "_load_yaml", return_value={"outer": {"inner": {"hooray": "yaml"}}}, ) def does_not_reload_file_data(self, load_yaml): path = join(CONFIGS_PATH, "yaml/") c = Config(system_prefix=path) c2 = c.clone() assert c2.outer.inner.hooray == "yaml" # Crummy way to say "only got called with this specific invocation # one time" (since assert_called_once_with gets mad about other # invocations w/ different args) calls = load_yaml.call_args_list my_call = call("{}invoke.yaml".format(path)) try: calls.remove(my_call) assert my_call not in calls except ValueError: err = "{} not found in {} even once!" assert False, err.format(my_call, calls) def preserves_env_data(self): os.environ["INVOKE_FOO"] = "bar" c = Config(defaults={"foo": "notbar"}) c.load_shell_env() c2 = c.clone() assert c2.foo == "bar" def works_correctly_when_subclassed(self): # Because sometimes, implementation #1 is really naive!
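# (Hedged sketch of the clone() contract the surrounding tests assert:
# equal merged data, distinct identity, per-level dicts copied rather
# than shared:)
#   c2 = c1.clone()
#   assert c2 == c1 and c2 is not c1
#   assert c2._defaults == c1._defaults and c2._defaults is not c1._defaults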
class MyConfig(Config): pass c = MyConfig() assert isinstance(c, MyConfig) # sanity c2 = c.clone() assert isinstance(c2, MyConfig) # actual test class into_kwarg: "'into' kwarg" def is_not_required(self): c = Config(defaults={"meh": "okay"}) c2 = c.clone() assert c2.meh == "okay" def resulting_clones_are_typed_as_new_class(self): class MyConfig(Config): pass c = Config() c2 = c.clone(into=MyConfig) assert type(c2) is MyConfig def non_conflicting_values_are_merged(self): # NOTE: this is really just basic clone behavior. class MyConfig(Config): @staticmethod def global_defaults(): orig = Config.global_defaults() orig["new"] = {"data": "ohai"} return orig c = Config(defaults={"other": {"data": "hello"}}) c["runtime"] = {"modification": "sup"} c2 = c.clone(into=MyConfig) # New default data from MyConfig present assert c2.new.data == "ohai" # As well as old default data from the cloned instance assert c2.other.data == "hello" # And runtime user mods from the cloned instance assert c2.runtime.modification == "sup" def does_not_deepcopy(self): c = Config( defaults={ # Will merge_dicts happily "oh": {"dear": {"god": object()}}, # And shallow-copy compound values "shallow": {"objects": ["copy", "okay"]}, # Will preserve references to the innermost dict, sadly. Not # much we can do without incurring deepcopy problems (or # reimplementing it entirely) "welp": {"cannot": ["have", {"everything": "we want"}]}, } ) c2 = c.clone() # Basic identity assert c is not c2, "Clone had same identity as original!" # Dicts get recreated assert c.oh is not c2.oh, "Top level key had same identity!" assert ( c.oh.dear is not c2.oh.dear ), "Midlevel key had same identity!" # noqa # Basic values get copied err = "Leaf object() had same identity!" assert c.oh.dear.god is not c2.oh.dear.god, err assert c.shallow.objects == c2.shallow.objects err = "Shallow list had same identity!" assert c.shallow.objects is not c2.shallow.objects, err # Deeply nested non-dict objects are still problematic, oh well err = "Huh, a deeply nested dict-in-a-list had different identity?" assert c.welp.cannot[1] is c2.welp.cannot[1], err err = "Huh, a deeply nested dict-in-a-list value had different identity?" # noqa assert ( c.welp.cannot[1]["everything"] is c2.welp.cannot[1]["everything"] ), err # noqa def can_be_pickled(self): c = Config(overrides={"foo": {"bar": {"biz": ["baz", "buzz"]}}}) c2 = pickle.loads(pickle.dumps(c)) assert c == c2 assert c is not c2 assert c.foo.bar.biz is not c2.foo.bar.biz # NOTE: merge_dicts has its own very low level unit tests in its own file invoke-2.2.0/tests/conftest.py000066400000000000000000000055171445356551000163660ustar00rootroot00000000000000import logging import os import sys import termios import pytest from unittest.mock import patch from _util import support # Set up icecream globally for convenience. from icecream import install install() # pytest seems to tweak logging such that Invoke's debug logs go to stderr, # which is then hella spammy if one is using --capture=no (which one must in # order to test low level terminal IO stuff, as we do!) # So, we explicitly turn default logging back down. # NOTE: no real better place to put this than here # TODO: see if we can use modern pytest's logging functionality to remove the # need for this, now that pytest-relaxed was modernized logging.basicConfig(level=logging.INFO) @pytest.fixture(autouse=True) def fake_user_home(): # Ignore any real user homedir for purpose of testing.
# This allows, for example, a user who has real Invoke configs in their # homedir to still run the test suite safely. # TODO: this is still a bit of a kludge & doesn't solve systemwide configs with patch("invoke.config.expanduser", side_effect=lambda x: x): yield @pytest.fixture def reset_environ(): """ Resets `os.environ` to its prior state after the fixtured test finishes. """ old_environ = os.environ.copy() yield os.environ.clear() os.environ.update(old_environ) @pytest.fixture def chdir_support(): # Always do things relative to tests/_support os.chdir(support) yield # Chdir back to project root to avoid problems os.chdir(os.path.join(os.path.dirname(__file__), "..")) @pytest.fixture def clean_sys_modules(): """ Attempt to nix any imports incurred by the test, to prevent state bleed. In some cases this prevents outright errors (eg a test accidentally relying on another's import of a task tree in the support folder) and in others it's required because we're literally testing runtime imports. """ snapshot = sys.modules.copy() yield # Iterate over another copy to avoid ye olde mutate-during-iterate problem # NOTE: cannot simply 'sys.modules = snapshot' as that is warned against for name, module in sys.modules.copy().items(): # Delete anything newly added (imported) if name not in snapshot: del sys.modules[name] # Overwrite anything that was modified (the easy version...) sys.modules.update(snapshot) @pytest.fixture def integration(reset_environ, chdir_support, clean_sys_modules): yield @pytest.fixture def mock_termios(): with patch("invoke.terminals.termios") as mocked: # Ensure mocked termios has 'real' values for constants...otherwise # doing bit arithmetic on Mocks kinda defeats the point. mocked.ECHO = termios.ECHO mocked.ICANON = termios.ICANON mocked.VMIN = termios.VMIN mocked.VTIME = termios.VTIME yield mocked invoke-2.2.0/tests/context.py000066400000000000000000000717601445356551000162310ustar00rootroot00000000000000import os import pickle import re import sys from unittest.mock import patch, Mock, call from pytest_relaxed import trap from pytest import skip, raises, mark from invoke import ( AuthFailure, Context, Config, FailingResponder, ResponseNotAccepted, StreamWatcher, MockContext, Result, ) from _util import mock_subprocess, _Dummy local_path = "invoke.config.Local" _escaped_prompt = re.escape(Config().sudo.prompt) class Context_: class init: "__init__" def takes_optional_config_arg(self): # Meh-tastic doesn't-barf tests. MEH. 
Context() Context(config={"foo": "bar"}) class methods_exposed: def _expect_attr(self, attr): c = Context() assert hasattr(c, attr) and callable(getattr(c, attr)) class run: # NOTE: actual behavior of command running is tested in runners.py def exists(self): self._expect_attr("run") @patch(local_path) def defaults_to_Local(self, Local): c = Context() c.run("foo") assert Local.mock_calls == [call(c), call().run("foo")] def honors_runner_config_setting(self): runner_class = Mock() config = Config({"runners": {"local": runner_class}}) c = Context(config) c.run("foo") assert runner_class.mock_calls == [call(c), call().run("foo")] def sudo(self): self._expect_attr("sudo") class configuration_proxy: "Dict-like proxy for self.config" def setup_method(self): config = Config(defaults={"foo": "bar", "biz": {"baz": "boz"}}) self.c = Context(config=config) def direct_access_allowed(self): assert self.c.config.__class__ == Config assert self.c.config["foo"] == "bar" assert self.c.config.foo == "bar" def config_attr_may_be_overwritten_at_runtime(self): new_config = Config(defaults={"foo": "notbar"}) self.c.config = new_config assert self.c.foo == "notbar" def getitem(self): "___getitem__" assert self.c["foo"] == "bar" assert self.c["biz"]["baz"] == "boz" def getattr(self): "__getattr__" assert self.c.foo == "bar" assert self.c.biz.baz == "boz" def get(self): assert self.c.get("foo") == "bar" assert self.c.get("nope", "wut") == "wut" assert self.c.biz.get("nope", "hrm") == "hrm" def pop(self): assert self.c.pop("foo") == "bar" assert self.c.pop("foo", "notbar") == "notbar" assert self.c.biz.pop("baz") == "boz" def popitem(self): assert self.c.biz.popitem() == ("baz", "boz") del self.c["biz"] assert self.c.popitem() == ("foo", "bar") assert self.c.config == {} def del_(self): "del" del self.c["foo"] del self.c["biz"]["baz"] assert self.c.biz == {} del self.c["biz"] assert self.c.config == {} def clear(self): self.c.biz.clear() assert self.c.biz == {} self.c.clear() assert self.c.config == {} def setdefault(self): assert self.c.setdefault("foo") == "bar" assert self.c.biz.setdefault("baz") == "boz" assert self.c.setdefault("notfoo", "notbar") == "notbar" assert self.c.notfoo == "notbar" assert self.c.biz.setdefault("otherbaz", "otherboz") == "otherboz" assert self.c.biz.otherbaz == "otherboz" def update(self): self.c.update({"newkey": "newval"}) assert self.c["newkey"] == "newval" assert self.c.foo == "bar" self.c.biz.update(otherbaz="otherboz") assert self.c.biz.otherbaz == "otherboz" class cwd: def setup_method(self): self.c = Context() def simple(self): self.c.command_cwds = ["a", "b"] assert self.c.cwd == os.path.join("a", "b") def nested_absolute_path(self): self.c.command_cwds = ["a", "/b", "c"] assert self.c.cwd == os.path.join("/b", "c") def multiple_absolute_paths(self): self.c.command_cwds = ["a", "/b", "c", "/d", "e"] assert self.c.cwd == os.path.join("/d", "e") def home(self): self.c.command_cwds = ["a", "~b", "c"] assert self.c.cwd == os.path.join("~b", "c") class cd: _escaped_prompt = re.escape(Config().sudo.prompt) @patch(local_path) def should_apply_to_run(self, Local): runner = Local.return_value c = Context() with c.cd("foo"): c.run("whoami") cmd = "cd foo && whoami" assert runner.run.called, "run() never called runner.run()!" 
assert runner.run.call_args[0][0] == cmd @patch(local_path) def should_apply_to_sudo(self, Local): runner = Local.return_value c = Context() with c.cd("foo"): c.sudo("whoami") cmd = "sudo -S -p '[sudo] password: ' cd foo && whoami" assert runner.run.called, "sudo() never called runner.run()!" assert runner.run.call_args[0][0] == cmd @patch(local_path) def should_occur_before_prefixes(self, Local): runner = Local.return_value c = Context() with c.prefix("source venv"): with c.cd("foo"): c.run("whoami") cmd = "cd foo && source venv && whoami" assert runner.run.called, "run() never called runner.run()!" assert runner.run.call_args[0][0] == cmd @patch(local_path) def should_use_finally_to_revert_changes_on_exceptions(self, Local): class Oops(Exception): pass runner = Local.return_value c = Context() try: with c.cd("foo"): c.run("whoami") assert runner.run.call_args[0][0] == "cd foo && whoami" raise Oops except Oops: pass c.run("ls") # When bug present, this would be "cd foo && ls" assert runner.run.call_args[0][0] == "ls" @patch(local_path) def cd_should_accept_any_stringable_object(self, Local): class Path: def __init__(self, value): self.value = value def __str__(self): return self.value runner = Local.return_value c = Context() with c.cd(Path("foo")): c.run("whoami") cmd = "cd foo && whoami" assert runner.run.call_args[0][0] == cmd class prefix: @patch(local_path) def prefixes_should_apply_to_run(self, Local): runner = Local.return_value c = Context() with c.prefix("cd foo"): c.run("whoami") cmd = "cd foo && whoami" assert runner.run.called, "run() never called runner.run()!" assert runner.run.call_args[0][0] == cmd @patch(local_path) def prefixes_should_apply_to_sudo(self, Local): runner = Local.return_value c = Context() with c.prefix("cd foo"): c.sudo("whoami") cmd = "sudo -S -p '[sudo] password: ' cd foo && whoami" assert runner.run.called, "sudo() never called runner.run()!" assert runner.run.call_args[0][0] == cmd @patch(local_path) def nesting_should_retain_order(self, Local): runner = Local.return_value c = Context() with c.prefix("cd foo"): with c.prefix("cd bar"): c.run("whoami") cmd = "cd foo && cd bar && whoami" assert ( runner.run.called ), "run() never called runner.run()!" # noqa assert runner.run.call_args[0][0] == cmd c.run("whoami") cmd = "cd foo && whoami" assert runner.run.called, "run() never called runner.run()!" assert runner.run.call_args[0][0] == cmd # also test that prefixes do not persist c.run("whoami") cmd = "whoami" assert runner.run.called, "run() never called runner.run()!" assert runner.run.call_args[0][0] == cmd @patch(local_path) def should_use_finally_to_revert_changes_on_exceptions(self, Local): class Oops(Exception): pass runner = Local.return_value c = Context() try: with c.prefix("cd foo"): c.run("whoami") assert runner.run.call_args[0][0] == "cd foo && whoami" raise Oops except Oops: pass c.run("ls") # When bug present, this would be "cd foo && ls" assert runner.run.call_args[0][0] == "ls" class sudo: @patch(local_path) def prefixes_command_with_sudo(self, Local): runner = Local.return_value Context().sudo("whoami") # NOTE: implicitly tests default sudo.prompt conf value cmd = "sudo -S -p '[sudo] password: ' whoami" assert runner.run.called, "sudo() never called runner.run()!" 
assert runner.run.call_args[0][0] == cmd @patch(local_path) def optional_user_argument_adds_u_and_H_flags(self, Local): runner = Local.return_value Context().sudo("whoami", user="rando") cmd = "sudo -S -p '[sudo] password: ' -H -u rando whoami" assert runner.run.called, "sudo() never called runner.run()!" assert runner.run.call_args[0][0] == cmd @patch(local_path) def honors_config_for_user_value(self, Local): runner = Local.return_value config = Config(overrides={"sudo": {"user": "rando"}}) Context(config=config).sudo("whoami") cmd = "sudo -S -p '[sudo] password: ' -H -u rando whoami" assert runner.run.call_args[0][0] == cmd @patch(local_path) def user_kwarg_wins_over_config(self, Local): runner = Local.return_value config = Config(overrides={"sudo": {"user": "rando"}}) Context(config=config).sudo("whoami", user="calrissian") cmd = "sudo -S -p '[sudo] password: ' -H -u calrissian whoami" assert runner.run.call_args[0][0] == cmd @trap @mock_subprocess() def echo_hides_extra_sudo_flags(self): skip() # see TODO in sudo() re: clean output display config = Config(overrides={"runner": _Dummy}) Context(config=config).sudo("nope", echo=True) output = sys.stdout.getvalue() sys.__stderr__.write(repr(output) + "\n") assert "-S" not in output assert Context().sudo.prompt not in output assert "sudo nope" in output @patch(local_path) def honors_config_for_prompt_value(self, Local): runner = Local.return_value config = Config(overrides={"sudo": {"prompt": "FEED ME: "}}) Context(config=config).sudo("whoami") cmd = "sudo -S -p 'FEED ME: ' whoami" assert runner.run.call_args[0][0] == cmd def prompt_value_is_properly_shell_escaped(self): # I.e. setting it to "here's johnny!" doesn't explode. # NOTE: possibly best to tie into issue #2 skip() @patch(local_path) def explicit_env_vars_are_preserved(self, Local): runner = Local.return_value Context().sudo( "whoami", env={"GRATUITOUS_ENVIRONMENT_VARIABLE": "arbitrary value"}, ) assert ( "--preserve-env='GRATUITOUS_ENVIRONMENT_VARIABLE'" in runner.run.call_args[0][0] ) def _expect_responses(self, expected, config=None, kwargs=None): """ Execute mocked sudo(), expecting watchers= kwarg in its run(). * expected: list of 2-tuples of FailingResponder prompt/response * config: Config object, if an overridden one is needed * kwargs: sudo() kwargs, if needed """ if kwargs is None: kwargs = {} Local = Mock() runner = Local.return_value context = Context(config=config) if config else Context() context.config.runners.local = Local context.sudo("whoami", **kwargs) # Tease out the interesting bits - pattern/response - ignoring the # sentinel, etc for now. prompt_responses = [ (watcher.pattern, watcher.response) for watcher in runner.run.call_args[1]["watchers"] ] assert prompt_responses == expected def autoresponds_with_password_kwarg(self): # NOTE: technically duplicates the unitty test(s) in watcher tests. 
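# (Illustrative sketch of the machinery under test: sudo() wires up a
# FailingResponder that watches the stream for the configured prompt and
# replies with the password plus a newline. Roughly -- the exact sentinel
# string here is an assumption:)
#   from invoke.watchers import FailingResponder
#   responder = FailingResponder(
#       pattern=_escaped_prompt,
#       response="secret\n",
#       sentinel="Sorry, try again.\n",  # assumed failure marker
#   )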
expected = [(_escaped_prompt, "secret\n")] self._expect_responses(expected, kwargs={"password": "secret"}) def honors_configured_sudo_password(self): config = Config(overrides={"sudo": {"password": "secret"}}) expected = [(_escaped_prompt, "secret\n")] self._expect_responses(expected, config=config) def sudo_password_kwarg_wins_over_config(self): config = Config(overrides={"sudo": {"password": "notsecret"}}) kwargs = {"password": "secret"} expected = [(_escaped_prompt, "secret\n")] self._expect_responses(expected, config=config, kwargs=kwargs) class auto_response_merges_with_other_responses: def setup_method(self): class DummyWatcher(StreamWatcher): def submit(self, stream): pass self.watcher_klass = DummyWatcher @patch(local_path) def kwarg_only_adds_to_kwarg(self, Local): runner = Local.return_value context = Context() watcher = self.watcher_klass() context.sudo("whoami", watchers=[watcher]) # When sudo() called w/ user-specified watchers, we add ours to # that list watchers = runner.run.call_args[1]["watchers"] # Will raise ValueError if not in the list watchers.remove(watcher) # Only remaining item in list should be our sudo responder assert len(watchers) == 1 assert isinstance(watchers[0], FailingResponder) assert watchers[0].pattern == _escaped_prompt @patch(local_path) def config_only(self, Local): runner = Local.return_value # Set a config-driven list of watchers watcher = self.watcher_klass() overrides = {"run": {"watchers": [watcher]}} config = Config(overrides=overrides) Context(config=config).sudo("whoami") # Expect that sudo() extracted that config value & put it into # the kwarg level. (See comment in sudo() about why...) watchers = runner.run.call_args[1]["watchers"] # Will raise ValueError if not in the list watchers.remove(watcher) # Only remaining item in list should be our sudo responder assert len(watchers) == 1 assert isinstance(watchers[0], FailingResponder) assert watchers[0].pattern == _escaped_prompt @patch(local_path) def config_use_does_not_modify_config(self, Local): runner = Local.return_value watcher = self.watcher_klass() overrides = {"run": {"watchers": [watcher]}} config = Config(overrides=overrides) Context(config=config).sudo("whoami") # Here, 'watchers' is _the same object_ as was passed into # run(watchers=...). watchers = runner.run.call_args[1]["watchers"] # We want to make sure that what's in the config we just # generated, is untouched by the manipulation done inside # sudo(). # First, that they aren't the same obj err = "Found sudo() reusing config watchers list directly!" assert watchers is not config.run.watchers, err # And that the list is as it was before (i.e. it is not both # our watcher and the sudo()-added one) err = "Our config watchers list was modified!" assert config.run.watchers == [watcher], err @patch(local_path) def both_kwarg_and_config(self, Local): runner = Local.return_value # Set a config-driven list of watchers conf_watcher = self.watcher_klass() overrides = {"run": {"watchers": [conf_watcher]}} config = Config(overrides=overrides) # AND supply a DIFFERENT kwarg-driven list of watchers kwarg_watcher = self.watcher_klass() Context(config=config).sudo("whoami", watchers=[kwarg_watcher]) # Expect that the kwarg watcher and our internal one were the # final result. watchers = runner.run.call_args[1]["watchers"] # Will raise ValueError if not in the list. .remove() uses # identity testing, so two instances of self.watcher_klass will # be different values here. 
watchers.remove(kwarg_watcher) # Only remaining item in list should be our sudo responder assert len(watchers) == 1 assert conf_watcher not in watchers # Extra sanity assert isinstance(watchers[0], FailingResponder) assert watchers[0].pattern == _escaped_prompt @patch(local_path) def passes_through_other_run_kwargs(self, Local): runner = Local.return_value Context().sudo( "whoami", echo=True, warn=False, hide=True, encoding="ascii" ) assert runner.run.called, "sudo() never called runner.run()!" kwargs = runner.run.call_args[1] assert kwargs["echo"] is True assert kwargs["warn"] is False assert kwargs["hide"] is True assert kwargs["encoding"] == "ascii" @patch(local_path) def returns_run_result(self, Local): runner = Local.return_value expected = runner.run.return_value result = Context().sudo("whoami") err = "sudo() did not return run()'s return value!" assert result is expected, err @mock_subprocess(out="something", exit=None) def raises_auth_failure_when_failure_detected(self): with patch("invoke.context.FailingResponder") as klass: unacceptable = Mock(side_effect=ResponseNotAccepted) klass.return_value.submit = unacceptable excepted = False try: config = Config(overrides={"sudo": {"password": "nope"}}) Context(config=config).sudo("meh", hide=True) except AuthFailure as e: # Basic sanity checks; most of this is really tested in # Runner tests. assert e.result.exited is None expected = "The password submitted to prompt '[sudo] password: ' was rejected." # noqa assert str(e) == expected excepted = True # Can't use except/else as that masks other real exceptions, # such as incorrectly unhandled ThreadErrors if not excepted: assert False, "Did not raise AuthFailure!" def can_be_pickled(self): c = Context() c.foo = {"bar": {"biz": ["baz", "buzz"]}} c2 = pickle.loads(pickle.dumps(c)) assert c == c2 assert c is not c2 assert c.foo.bar.biz is not c2.foo.bar.biz class MockContext_: def init_still_acts_like_superclass_init(self): # No required args assert isinstance(MockContext().config, Config) config = Config(overrides={"foo": "bar"}) # Posarg assert MockContext(config).config is config # Kwarg assert MockContext(config=config).config is config def non_config_init_kwargs_used_as_return_values_for_methods(self): c = MockContext(run=Result("some output")) assert c.run("doesn't mattress").stdout == "some output" def return_value_kwargs_can_take_iterables_too(self): c = MockContext(run=(Result("some output"), Result("more!"))) assert c.run("doesn't mattress").stdout == "some output" assert c.run("still doesn't mattress").stdout == "more!" 
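# (Usage sketch collecting the MockContext result shapes exercised around
# this point; all three forms appear in the surrounding tests:)
#   mc = MockContext(run={"ls": Result("file.txt")})  # command -> Result map
#   mc = MockContext(run=[Result("a"), Result("b")])  # iterable, in order
#   mc = MockContext(run=True)                        # bool/str shorthand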
def return_value_kwargs_may_be_command_string_maps(self): c = MockContext(run={"foo": Result("bar")}) assert c.run("foo").stdout == "bar" def return_value_map_kwargs_may_take_iterables_too(self): c = MockContext(run={"foo": (Result("bar"), Result("biz"))}) assert c.run("foo").stdout == "bar" assert c.run("foo").stdout == "biz" def regexen_return_value_map_keys_match_on_command(self): c = MockContext( run={"string": Result("yup"), re.compile(r"foo.*"): Result("bar")} ) assert c.run("string").stdout == "yup" assert c.run("foobar").stdout == "bar" class boolean_result_shorthand: def as_singleton_args(self): assert MockContext(run=True).run("anything").ok assert not MockContext(run=False).run("anything", warn=True).ok def as_iterables(self): mc = MockContext(run=[True, False]) assert mc.run("anything").ok assert not mc.run("anything", warn=True).ok def as_dict_values(self): mc = MockContext(run=dict(foo=True, bar=False)) assert mc.run("foo").ok assert not mc.run("bar", warn=True).ok class string_result_shorthand: def as_singleton_args(self): assert MockContext(run="foo").run("anything").stdout == "foo" def as_iterables(self): mc = MockContext(run=["definition", "of", "insanity"]) assert mc.run("anything").stdout == "definition" assert mc.run("anything").stdout == "of" assert mc.run("anything").stdout == "insanity" def as_dict_values(self): mc = MockContext(run=dict(foo="foo", bar="bar")) assert mc.run("foo").stdout == "foo" assert mc.run("bar").stdout == "bar" class commands_injected_into_Result: @mark.parametrize( "kwargs", (dict(), dict(command=""), dict(command=None)) ) def when_not_set_or_falsey(self, kwargs): c = MockContext(run={"foo": Result("bar", **kwargs)}) assert c.run("foo").command == "foo" def does_not_occur_when_truthy(self): # Not sure why you'd want this but whatevs! c = MockContext(run={"foo": Result("bar", command="nope")}) assert c.run("foo").command == "nope" # not "bar" def methods_with_no_kwarg_values_raise_NotImplementedError(self): with raises(NotImplementedError): MockContext().run("onoz I did not anticipate this would happen") def does_not_consume_results_by_default(self): mc = MockContext( run=dict( singleton=True, # will repeat wassup=Result("yo"), # ditto iterable=[Result("tick"), Result("tock")], # will not ), ) assert mc.run("singleton").ok assert mc.run("singleton").ok # not consumed assert mc.run("wassup").ok assert mc.run("wassup").ok # not consumed assert mc.run("iterable").stdout == "tick" assert mc.run("iterable").stdout == "tock" assert mc.run("iterable").stdout == "tick" # not consumed assert mc.run("iterable").stdout == "tock" def consumes_singleton_results_when_repeat_False(self): mc = MockContext( repeat=False, run=dict( singleton=True, wassup=Result("yo"), iterable=[Result("tick"), Result("tock")], ), ) assert mc.run("singleton").ok with raises(NotImplementedError): # was consumed mc.run("singleton") assert mc.run("wassup").ok with raises(NotImplementedError): # was consumed mc.run("wassup") assert mc.run("iterable").stdout == "tick" assert mc.run("iterable").stdout == "tock" with raises(NotImplementedError): # was consumed assert mc.run("iterable") def sudo_also_covered(self): c = MockContext(sudo=Result(stderr="super duper")) assert c.sudo("doesn't mattress").stderr == "super duper" try: MockContext().sudo("meh") except NotImplementedError as e: assert str(e) == "meh" else: assert False, "Did not get a NotImplementedError for sudo!" 
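# (Sketch of the repeat= semantics tested next: repeat=True, the default,
# replays configured results forever; repeat=False consumes each result
# once and then raises NotImplementedError, like any unconfigured command:)
#   mc = MockContext(run=Result("once"), repeat=False)
#   mc.run("x")  # returns the Result
#   mc.run("x")  # now raises NotImplementedError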
class exhausted_nonrepeating_return_values_also_raise_NotImplementedError: def _expect_NotImplementedError(self, context): context.run("something") try: context.run("something") except NotImplementedError as e: assert str(e) == "something" else: assert False, "Didn't raise NotImplementedError" def single_value(self): self._expect_NotImplementedError( MockContext(run=Result("meh"), repeat=False) ) def iterable(self): self._expect_NotImplementedError( MockContext(run=[Result("meh")], repeat=False) ) def mapping_to_single_value(self): self._expect_NotImplementedError( MockContext(run={"something": Result("meh")}, repeat=False) ) def mapping_to_iterable(self): self._expect_NotImplementedError( MockContext(run={"something": [Result("meh")]}, repeat=False) ) def unexpected_kwarg_type_yields_TypeError(self): with raises(TypeError): MockContext(run=123) class can_modify_return_value_maps_after_instantiation: class non_dict_type_instantiation_values_yield_TypeErrors: class no_stored_result: def run(self): mc = MockContext() with raises(TypeError): mc.set_result_for("run", "whatever", Result("bar")) def sudo(self): mc = MockContext() with raises(TypeError): mc.set_result_for("sudo", "whatever", Result("bar")) class single_result: def run(self): mc = MockContext(run=Result("foo")) with raises(TypeError): mc.set_result_for("run", "whatever", Result("bar")) def sudo(self): mc = MockContext(sudo=Result("foo")) with raises(TypeError): mc.set_result_for("sudo", "whatever", Result("bar")) class iterable_result: def run(self): mc = MockContext(run=[Result("foo")]) with raises(TypeError): mc.set_result_for("run", "whatever", Result("bar")) def sudo(self): mc = MockContext(sudo=[Result("foo")]) with raises(TypeError): mc.set_result_for("sudo", "whatever", Result("bar")) def run(self): mc = MockContext(run={"foo": Result("bar")}) assert mc.run("foo").stdout == "bar" mc.set_result_for("run", "foo", Result("biz")) assert mc.run("foo").stdout == "biz" def sudo(self): mc = MockContext(sudo={"foo": Result("bar")}) assert mc.sudo("foo").stdout == "bar" mc.set_result_for("sudo", "foo", Result("biz")) assert mc.sudo("foo").stdout == "biz" def wraps_run_and_sudo_with_Mock(self, clean_sys_modules): sys.modules["mock"] = None # legacy sys.modules["unittest.mock"] = Mock(Mock=Mock) # buffalo buffalo mc = MockContext( run={"foo": Result("bar")}, sudo={"foo": Result("bar")} ) assert isinstance(mc.run, Mock) assert isinstance(mc.sudo, Mock) invoke-2.2.0/tests/executor.py000066400000000000000000000301771445356551000164000ustar00rootroot00000000000000from unittest.mock import Mock import pytest from invoke import Collection, Config, Context, Executor, Task, call, task from invoke.parser import ParserContext, ParseResult from _util import expect # TODO: why does this not work as a decorator? probably relaxed's fault - but # how? 
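# (Hedged sketch of the pre/post wiring the fixtures below construct: Task
# accepts pre=/post= lists and Executor runs them around the requested
# task, deduping repeated tasks by default:)
#   t1 = Task(Mock())
#   t2 = Task(Mock(), pre=[t1])
#   Executor(collection=Collection(t1=t1, t2=t2)).execute("t2")  # t1, then t2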
pytestmark = pytest.mark.usefixtures("integration") class Executor_: def setup_method(self): self.task1 = Task(Mock(return_value=7)) self.task2 = Task(Mock(return_value=10), pre=[self.task1]) self.task3 = Task(Mock(), pre=[self.task1]) self.task4 = Task(Mock(return_value=15), post=[self.task1]) self.contextualized = Task(Mock()) coll = Collection() coll.add_task(self.task1, name="task1") coll.add_task(self.task2, name="task2") coll.add_task(self.task3, name="task3") coll.add_task(self.task4, name="task4") coll.add_task(self.contextualized, name="contextualized") self.executor = Executor(collection=coll) class init: "__init__" def allows_collection_and_config(self): coll = Collection() conf = Config() e = Executor(collection=coll, config=conf) assert e.collection is coll assert e.config is conf def uses_blank_config_by_default(self): e = Executor(collection=Collection()) assert isinstance(e.config, Config) def can_grant_access_to_core_arg_parse_result(self): c = ParseResult([ParserContext(name="mytask")]) e = Executor(collection=Collection(), core=c) assert e.core is c # Sanity test of real-world access/usage assert len(e.core) == 1 assert e.core[0].name == "mytask" assert len(e.core[0].args) == 0 def core_arg_parse_result_defaults_to_None(self): assert Executor(collection=Collection()).core is None class execute: def base_case(self): self.executor.execute("task1") assert self.task1.body.called def kwargs(self): k = {"foo": "bar"} self.executor.execute(("task1", k)) args = self.task1.body.call_args[0] kwargs = self.task1.body.call_args[1] assert isinstance(args[0], Context) assert len(args) == 1 assert kwargs["foo"] == "bar" def contextualized_tasks_are_given_parser_context_arg(self): self.executor.execute("contextualized") args = self.contextualized.body.call_args[0] assert len(args) == 1 assert isinstance(args[0], Context) def default_tasks_called_when_no_tasks_specified(self): # NOTE: when no tasks AND no default, Program will print global # help. We just won't do anything at all, which is fine for now. 
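# (Illustrative sketch of the default-task fallback asserted below;
# 'mytask' is just a placeholder task object:)
#   coll = Collection()
#   coll.add_task(mytask, name="mytask", default=True)
#   Executor(collection=coll).execute()  # no names given -> runs mytask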
task = Task(Mock("default-task")) coll = Collection() coll.add_task(task, name="mytask", default=True) executor = Executor(collection=coll) executor.execute() args = task.body.call_args[0] assert isinstance(args[0], Context) assert len(args) == 1 class basic_pre_post: "basic pre/post task functionality" def pre_tasks(self): self.executor.execute("task2") assert self.task1.body.call_count == 1 def post_tasks(self): self.executor.execute("task4") assert self.task1.body.call_count == 1 def calls_default_to_empty_args_always(self): pre_body, post_body = Mock(), Mock() t1 = Task(pre_body) t2 = Task(post_body) t3 = Task(Mock(), pre=[t1], post=[t2]) e = Executor(collection=Collection(t1=t1, t2=t2, t3=t3)) e.execute(("t3", {"something": "meh"})) for body in (pre_body, post_body): args = body.call_args[0] assert len(args) == 1 assert isinstance(args[0], Context) def _call_objs(self): # Setup pre_body, post_body = Mock(), Mock() t1 = Task(pre_body) t2 = Task(post_body) t3 = Task( Mock(), pre=[call(t1, 5, foo="bar")], post=[call(t2, 7, biz="baz")], ) c = Collection(t1=t1, t2=t2, t3=t3) e = Executor(collection=c) e.execute("t3") # Pre-task asserts args, kwargs = pre_body.call_args assert kwargs == {"foo": "bar"} assert isinstance(args[0], Context) assert args[1] == 5 # Post-task asserts args, kwargs = post_body.call_args assert kwargs == {"biz": "baz"} assert isinstance(args[0], Context) assert args[1] == 7 def call_objs_play_well_with_context_args(self): self._call_objs() class deduping_and_chaining: def chaining_is_depth_first(self): expect( "-c depth_first deploy", out=""" Cleaning HTML Cleaning .tar.gz files Cleaned everything Making directories Building Deploying Preparing for testing Testing """.lstrip(), ) def _expect(self, args, expected): expect("-c integration {}".format(args), out=expected.lstrip()) class adjacent_hooks: def deduping(self): self._expect( "biz", """ foo bar biz post1 post2 """, ) def no_deduping(self): self._expect( "--no-dedupe biz", """ foo foo bar biz post1 post2 post2 """, ) class non_adjacent_hooks: def deduping(self): self._expect( "boz", """ foo bar boz post2 post1 """, ) def no_deduping(self): self._expect( "--no-dedupe boz", """ foo bar foo boz post2 post1 post2 """, ) # AKA, a (foo) (foo -> bar) scenario arising from foo + bar class adjacent_top_level_tasks: def deduping(self): self._expect( "foo bar", """ foo bar """, ) def no_deduping(self): self._expect( "--no-dedupe foo bar", """ foo foo bar """, ) # AKA (foo -> bar) (foo) class non_adjacent_top_level_tasks: def deduping(self): self._expect( "foo bar", """ foo bar """, ) def no_deduping(self): self._expect( "--no-dedupe foo bar", """ foo foo bar """, ) def deduping_treats_different_calls_to_same_task_differently(self): body = Mock() t1 = Task(body) pre = [call(t1, 5), call(t1, 7), call(t1, 5)] t2 = Task(Mock(), pre=pre) c = Collection(t1=t1, t2=t2) e = Executor(collection=c) e.execute("t2") # Does not call the second t1(5) param_list = [] for body_call in body.call_args_list: assert isinstance(body_call[0][0], Context) param_list.append(body_call[0][1]) assert set(param_list) == {5, 7} class collection_driven_config: "Collection-driven config concerns" def hands_collection_configuration_to_context(self): @task def mytask(c): assert c.my_key == "value" c = Collection(mytask) c.configure({"my_key": "value"}) Executor(collection=c).execute("mytask") def hands_task_specific_configuration_to_context(self): @task def mytask(c): assert c.my_key == "value" @task def othertask(c): assert c.my_key == "othervalue" inner1 
= Collection("inner1", mytask) inner1.configure({"my_key": "value"}) inner2 = Collection("inner2", othertask) inner2.configure({"my_key": "othervalue"}) c = Collection(inner1, inner2) e = Executor(collection=c) e.execute("inner1.mytask", "inner2.othertask") def subcollection_config_works_with_default_tasks(self): @task(default=True) def mytask(c): assert c.my_key == "value" # Sets up a task "known as" sub.mytask which may be called as # just 'sub' due to being default. sub = Collection("sub", mytask=mytask) sub.configure({"my_key": "value"}) main = Collection(sub=sub) # Execute via collection default 'task' name. Executor(collection=main).execute("sub") class returns_return_value_of_specified_task: def base_case(self): assert self.executor.execute("task1") == {self.task1: 7} def with_pre_tasks(self): result = self.executor.execute("task2") assert result == {self.task1: 7, self.task2: 10} def with_post_tasks(self): result = self.executor.execute("task4") assert result == {self.task1: 7, self.task4: 15} class autoprinting: def defaults_to_off_and_no_output(self): expect("-c autoprint nope", out="") def prints_return_value_to_stdout_when_on(self): expect("-c autoprint yup", out="It's alive!\n") def prints_return_value_to_stdout_when_on_and_in_collection(self): expect("-c autoprint sub.yup", out="It's alive!\n") def does_not_fire_on_pre_tasks(self): expect("-c autoprint pre-check", out="") def does_not_fire_on_post_tasks(self): expect("-c autoprint post-check", out="") class inter_task_context_and_config_sharing: def context_is_new_but_config_is_same(self): @task def task1(c): return c @task def task2(c): return c coll = Collection(task1, task2) ret = Executor(collection=coll).execute("task1", "task2") c1 = ret[task1] c2 = ret[task2] assert c1 is not c2 # TODO: eventually we may want to change this again, as long as the # effective values within the config are still matching...? Ehh assert c1.config is c2.config def new_config_data_is_preserved_between_tasks(self): @task def task1(c): c.foo = "bar" # NOTE: returned for test inspection, not as mechanism of # sharing data! return c @task def task2(c): return c coll = Collection(task1, task2) ret = Executor(collection=coll).execute("task1", "task2") c2 = ret[task2] assert "foo" in c2.config assert c2.foo == "bar" def config_mutation_is_preserved_between_tasks(self): @task def task1(c): c.config.run.echo = True # NOTE: returned for test inspection, not as mechanism of # sharing data! return c @task def task2(c): return c coll = Collection(task1, task2) ret = Executor(collection=coll).execute("task1", "task2") c2 = ret[task2] assert c2.config.run.echo is True def config_deletion_is_preserved_between_tasks(self): @task def task1(c): del c.config.run.echo # NOTE: returned for test inspection, not as mechanism of # sharing data! 
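# (Aside, mirroring context_is_new_but_config_is_same above: each task
# gets a fresh Context but they all share one Config, which is why
# mutations like this one persist into later tasks:)
#   ret = Executor(collection=coll).execute("task1", "task2")
#   assert ret[task1] is not ret[task2]            # new Context per task
#   assert ret[task1].config is ret[task2].config  # same Config object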
return c @task def task2(c): return c coll = Collection(task1, task2) ret = Executor(collection=coll).execute("task1", "task2") c2 = ret[task2] assert "echo" not in c2.config.run invoke-2.2.0/tests/init.py000066400000000000000000000077251445356551000155100ustar00rootroot00000000000000import re from unittest.mock import patch import invoke import invoke.collection import invoke.exceptions import invoke.tasks import invoke.program class Init: "__init__" def dunder_version_info(self): assert hasattr(invoke, "__version_info__") ver = invoke.__version_info__ assert isinstance(ver, tuple) assert all(isinstance(x, int) for x in ver) def dunder_version(self): assert hasattr(invoke, "__version__") ver = invoke.__version__ assert isinstance(ver, str) assert re.match(r"\d+\.\d+\.\d+", ver) def dunder_version_looks_generated_from_dunder_version_info(self): # Meh. ver_part = invoke.__version__.split(".")[0] ver_info_part = invoke.__version_info__[0] assert ver_part == str(ver_info_part) class exposes_bindings: def task_decorator(self): assert invoke.task is invoke.tasks.task def task_class(self): assert invoke.Task is invoke.tasks.Task def collection_class(self): assert invoke.Collection is invoke.collection.Collection def context_class(self): assert invoke.Context is invoke.context.Context def mock_context_class(self): assert invoke.MockContext is invoke.context.MockContext def config_class(self): assert invoke.Config is invoke.config.Config def pty_size_function(self): assert invoke.pty_size is invoke.terminals.pty_size def local_class(self): assert invoke.Local is invoke.runners.Local def runner_class(self): assert invoke.Runner is invoke.runners.Runner def promise_class(self): assert invoke.Promise is invoke.runners.Promise def failure_class(self): assert invoke.Failure is invoke.runners.Failure def exceptions(self): # Meh for obj in vars(invoke.exceptions).values(): if isinstance(obj, type) and issubclass(obj, BaseException): top_level = getattr(invoke, obj.__name__) real = getattr(invoke.exceptions, obj.__name__) assert top_level is real def runner_result(self): assert invoke.Result is invoke.runners.Result def watchers(self): assert invoke.StreamWatcher is invoke.watchers.StreamWatcher assert invoke.Responder is invoke.watchers.Responder assert invoke.FailingResponder is invoke.watchers.FailingResponder def program(self): assert invoke.Program is invoke.program.Program def filesystemloader(self): assert invoke.FilesystemLoader is invoke.loader.FilesystemLoader def argument(self): assert invoke.Argument is invoke.parser.Argument def parsercontext(self): assert invoke.ParserContext is invoke.parser.ParserContext def parser(self): assert invoke.Parser is invoke.parser.Parser def parseresult(self): assert invoke.ParseResult is invoke.parser.ParseResult def executor(self): assert invoke.Executor is invoke.executor.Executor def call(self): assert invoke.call is invoke.tasks.call def Call(self): # Starting to think we shouldn't bother with lowercase-c call... 
assert invoke.Call is invoke.tasks.Call class offers_singletons: @patch("invoke.Context") def run(self, Context): result = invoke.run("foo", bar="biz") ctx = Context.return_value ctx.run.assert_called_once_with("foo", bar="biz") assert result is ctx.run.return_value @patch("invoke.Context") def sudo(self, Context): result = invoke.sudo("foo", bar="biz") ctx = Context.return_value ctx.sudo.assert_called_once_with("foo", bar="biz") assert result is ctx.sudo.return_value invoke-2.2.0/tests/loader.py000066400000000000000000000105201445356551000157760ustar00rootroot00000000000000import os import sys from importlib.util import spec_from_file_location from types import ModuleType from pathlib import Path from pytest import raises from invoke import Config from invoke.loader import Loader, FilesystemLoader as FSLoader from invoke.exceptions import CollectionNotFound from _util import support class _BasicLoader(Loader): """ Tests top level Loader behavior with basic finder stub. Used when we want to make sure we're testing Loader.load and not e.g. FilesystemLoader's specific implementation. """ def find(self, name): path = os.path.join(support, name) if os.path.exists(f"{path}.py"): path = f"{path}.py" elif os.path.exists(path): path = os.path.join(path, "__init__.py") spec = spec_from_file_location(name, path) return spec class Loader_: def exhibits_default_config_object(self): loader = _BasicLoader() assert isinstance(loader.config, Config) assert loader.config.tasks.collection_name == "tasks" def returns_module_and_location(self): mod, path = _BasicLoader().load("namespacing") assert isinstance(mod, ModuleType) assert path == support def may_configure_config_via_constructor(self): config = Config({"tasks": {"collection_name": "mytasks"}}) loader = _BasicLoader(config=config) assert loader.config.tasks.collection_name == "mytasks" def adds_module_parent_dir_to_sys_path(self): # Crummy doesn't-explode test. _BasicLoader().load("namespacing") def doesnt_duplicate_parent_dir_addition(self): _BasicLoader().load("namespacing") _BasicLoader().load("namespacing") # If the bug is present, this will be 2 at least (and often more, since # other tests will pollute it (!). assert sys.path.count(support) == 1 def can_load_package(self): loader = _BasicLoader() # Load itself doesn't explode (tests 'from . 
import xxx' internally) mod, enclosing_dir = loader.load("package") # Properties of returned values look as expected # (enclosing dir is always the one above the module-or-package) assert enclosing_dir == support assert mod.__file__ == str(Path(support) / "package" / "__init__.py") def load_name_defaults_to_config_tasks_collection_name(self): "load() name defaults to config.tasks.collection_name" class MockLoader(_BasicLoader): def find(self, name): # Sanity assert name == "simple_ns_list" return super().find(name) config = Config({"tasks": {"collection_name": "simple_ns_list"}}) loader = MockLoader(config=config) # More sanity: expect simple_ns_list.py (not tasks.py) mod, path = loader.load() assert mod.__file__ == os.path.join(support, "simple_ns_list.py") class FilesystemLoader_: def setup_method(self): self.loader = FSLoader(start=support) def discovery_start_point_defaults_to_cwd(self): assert FSLoader().start == os.getcwd() def exposes_start_point_as_attribute(self): assert FSLoader().start == os.getcwd() def start_point_is_configurable_via_kwarg(self): start = "/tmp" assert FSLoader(start=start).start == start def start_point_is_configurable_via_config(self): config = Config({"tasks": {"search_root": "nowhere"}}) assert FSLoader(config=config).start == "nowhere" def raises_CollectionNotFound_if_not_found(self): with raises(CollectionNotFound): self.loader.load("nope") def raises_ImportError_if_found_collection_cannot_be_imported(self): # Instead of masking with a CollectionNotFound with raises(ModuleNotFoundError): self.loader.load("oops") # TODO: Need CollectionImportError here def searches_towards_root_of_filesystem(self): # Loaded while root is in same dir as .py directly = self.loader.load("foo") # Loaded while root is multiple dirs deeper than the .py deep = os.path.join(support, "ignoreme", "ignoremetoo") indirectly = FSLoader(start=deep).load("foo") assert directly[0].__file__ == indirectly[0].__file__ assert directly[0].__spec__ == indirectly[0].__spec__ assert directly[1] == indirectly[1] invoke-2.2.0/tests/merge_dicts.py000066400000000000000000000107061445356551000170230ustar00rootroot00000000000000from pytest import raises from invoke.config import merge_dicts, copy_dict, AmbiguousMergeError class merge_dicts_: # NOTE: don't usually like doing true unit tests of low level plumbing - # prefer to infer it's all working by examining higher level behavior - but # sometimes it's necessary to more easily stamp out certain bugs. def merging_data_onto_empty_dict(self): d1 = {} d2 = {"foo": "bar"} merge_dicts(d1, d2) assert d1 == d2 def updating_with_None_acts_like_merging_empty_dict(self): # When bug present, AttributeError is raised on a None.items() d1 = {"my": "data"} d2 = None merge_dicts(d1, d2) assert d1 == {"my": "data"} def orthogonal_data_merges(self): d1 = {"foo": "bar"} d2 = {"biz": "baz"} merge_dicts(d1, d2) assert d1 == {"foo": "bar", "biz": "baz"} def updates_arg_values_win(self): d1 = {"foo": "bar"} d2 = {"foo": "notbar"} merge_dicts(d1, d2) assert d1 == {"foo": "notbar"} def non_dict_type_mismatch_overwrites_ok(self): d1 = {"foo": "bar"} d2 = {"foo": [1, 2, 3]} merge_dicts(d1, d2) assert d1 == {"foo": [1, 2, 3]} def merging_dict_into_nondict_raises_error(self): # TODO: or...should it?! If a user really wants to take a pre-existing # config path and make it 'deeper' by overwriting e.g. a string with a # dict of strings (or whatever)...should they be allowed to? 
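# (Hedged sketch of the merge_dicts contract these tests cover: it
# mutates its first argument in place, recurses into nested dicts, and
# refuses ambiguous dict <-> non-dict merges:)
#   base = {"a": 1, "nested": {"x": 1}}
#   merge_dicts(base, {"nested": {"y": 2}})
#   assert base == {"a": 1, "nested": {"x": 1, "y": 2}}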
d1 = {"foo": "bar"} d2 = {"foo": {"uh": "oh"}} with raises(AmbiguousMergeError): merge_dicts(d1, d2) def merging_nondict_into_dict_raises_error(self): d1 = {"foo": {"uh": "oh"}} d2 = {"foo": "bar"} with raises(AmbiguousMergeError): merge_dicts(d1, d2) def nested_leaf_values_merge_ok(self): d1 = {"foo": {"bar": {"biz": "baz"}}} d2 = {"foo": {"bar": {"biz": "notbaz"}}} merge_dicts(d1, d2) assert d1 == {"foo": {"bar": {"biz": "notbaz"}}} def mixed_branch_levels_merges_ok(self): d1 = {"foo": {"bar": {"biz": "baz"}}, "meh": 17, "myown": "ok"} d2 = {"foo": {"bar": {"biz": "notbaz"}}, "meh": 25} merge_dicts(d1, d2) expected = { "foo": {"bar": {"biz": "notbaz"}}, "meh": 25, "myown": "ok", } assert d1 == expected def dict_value_merges_are_not_references(self): core = {} coll = {"foo": {"bar": {"biz": "coll value"}}} proj = {"foo": {"bar": {"biz": "proj value"}}} # Initial merge - when bug present, this sets core['foo'] to the entire # 'foo' dict in 'proj' as a reference - meaning it 'links' back to the # 'proj' dict whenever other things are merged into it merge_dicts(core, proj) assert core == {"foo": {"bar": {"biz": "proj value"}}} assert proj["foo"]["bar"]["biz"] == "proj value" # Identity tests can also prove the bug early assert ( core["foo"] is not proj["foo"] ), "Core foo is literally proj foo!" # noqa # Subsequent merge - just overwrites leaf values this time (thus no # real change, but this is what real config merge code does, so why # not) merge_dicts(core, proj) assert core == {"foo": {"bar": {"biz": "proj value"}}} assert proj["foo"]["bar"]["biz"] == "proj value" # The problem merge - when bug present, core['foo'] references 'foo' # inside 'proj', so this ends up tweaking "core" but it actually # affects "proj" as well! merge_dicts(core, coll) # Expect that the core dict got the update from 'coll'... assert core == {"foo": {"bar": {"biz": "coll value"}}} # BUT that 'proj' remains UNTOUCHED assert proj["foo"]["bar"]["biz"] == "proj value" def merge_file_types_by_reference(self): with open(__file__) as fd: d1 = {} d2 = {"foo": fd} merge_dicts(d1, d2) assert d1["foo"].closed is False class copy_dict_: def returns_deep_copy_of_given_dict(self): # NOTE: not actual deepcopy... 
source = {"foo": {"bar": {"biz": "baz"}}} copy = copy_dict(source) assert copy["foo"]["bar"] == source["foo"]["bar"] assert copy["foo"]["bar"] is not source["foo"]["bar"] copy["foo"]["bar"]["biz"] = "notbaz" assert source["foo"]["bar"]["biz"] == "baz" invoke-2.2.0/tests/parser_argument.py000066400000000000000000000160131445356551000177310ustar00rootroot00000000000000from pytest import skip, raises from invoke.parser import Argument class Argument_: class init: "__init__" def may_take_names_list(self): names = ("--foo", "-f") a = Argument(names=names) # herp a derp for name in names: assert name in a.names def may_take_name_arg(self): assert "-b" in Argument(name="-b").names def must_get_at_least_one_name(self): with raises(TypeError): Argument() def default_arg_is_name_not_names(self): assert "b" in Argument("b").names def can_declare_positional(self): assert Argument(name="foo", positional=True).positional is True def positional_is_False_by_default(self): assert Argument(name="foo").positional is False def can_set_attr_name_to_control_name_attr(self): a = Argument("foo", attr_name="bar") assert a.name == "bar" # not 'foo' class repr: "__repr__" def shows_useful_info(self): arg = Argument(names=("name", "nick1", "nick2")) expected = "<Argument: {} ({})>".format("name", "nick1, nick2") assert repr(arg) == expected def does_not_show_nickname_parens_if_no_nicknames(self): assert repr(Argument("name")) == "<Argument: name>" def shows_positionalness(self): arg = Argument("name", positional=True) assert repr(arg) == "<Argument: name *>" def shows_optionalness(self): arg = Argument("name", optional=True) assert repr(arg) == "<Argument: name ?>" def positionalness_and_optionalness_stick_together(self): # TODO: but do these even make sense on the same argument? For now, # best to have a nonsensical test than a missing one... arg = Argument("name", optional=True, positional=True) assert repr(arg) == "<Argument: name *?>" def shows_kind_if_not_str(self): assert repr(Argument("age", kind=int)) == "<Argument: age [int]>" def all_the_things_together(self): arg = Argument( names=("meh", "m"), kind=int, optional=True, positional=True ) assert repr(arg) == "<Argument: meh (m) [int] *?>" class kind_kwarg: "'kind' kwarg" def is_optional(self): Argument(name="a") Argument(name="b", kind=int) def defaults_to_str(self): assert Argument("a").kind == str def non_bool_implies_value_needed(self): assert Argument(name="a", kind=int).takes_value assert Argument(name="b", kind=str).takes_value assert Argument(name="c", kind=list).takes_value def bool_implies_no_value_needed(self): assert not Argument(name="a", kind=bool).takes_value def bool_implies_default_False_not_None(self): # Right now, parsing a bool flag not given results in None # TODO: may want more nuance here -- False when a --no-XXX flag is # given, True if --XXX, None if not seen?
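# (Purely illustrative sketch of that hypothetical tri-state mapping, not current behavior: --XXX given -> True; --no-XXX given -> False; flag absent -> None.)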
# Only makes sense if we add automatic --no-XXX stuff (think # ./configure) skip() def may_validate_on_set(self): with raises(ValueError): Argument("a", kind=int).value = "five" def list_implies_initial_value_of_empty_list(self): assert Argument("mylist", kind=list).value == [] class names: def returns_tuple_of_all_names(self): assert Argument(names=("--foo", "-b")).names == ("--foo", "-b") assert Argument(name="--foo").names == ("--foo",) def is_normalized_to_a_tuple(self): assert isinstance(Argument(names=("a", "b")).names, tuple) class name: def returns_first_name(self): assert Argument(names=("a", "b")).name == "a" class nicknames: def returns_rest_of_names(self): assert Argument(names=("a", "b")).nicknames == ("b",) class takes_value: def True_by_default(self): assert Argument(name="a").takes_value def False_if_kind_is_bool(self): assert not Argument(name="-b", kind=bool).takes_value class value_set: "value=" def available_as_dot_raw_value(self): "available as .raw_value" a = Argument("a") a.value = "foo" assert a.raw_value == "foo" def untransformed_appears_as_dot_value(self): "untransformed, appears as .value" a = Argument("a", kind=str) a.value = "foo" assert a.value == "foo" def transformed_appears_as_dot_value_with_original_as_raw_value(self): "transformed, modified value is .value, original is .raw_value" a = Argument("a", kind=int) a.value = "5" assert a.value == 5 assert a.raw_value == "5" def list_kind_triggers_append_instead_of_overwrite(self): # TODO: when put this way it makes the API look pretty strange; # maybe a sign we should switch to explicit setter methods # (selected on kind, perhaps) instead of using an implicit setter a = Argument("mylist", kind=list) assert a.value == [] a.value = "val1" assert a.value == ["val1"] a.value = "val2" assert a.value == ["val1", "val2"] def incrementable_True_triggers_increment_of_default(self): a = Argument("verbose", kind=int, default=0, incrementable=True) assert a.value == 0 # NOTE: parser currently just goes "Argument.takes_value is false? # Gonna stuff True/False in there." So this looks pretty silly out # of context (as with list-types above.) 
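# (Net effect under this setup, for context: each bare repetition of the flag bumps the int, so e.g. "-vvv" against a default of 0 would parse to verbose == 3.)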
a.value = True assert a.value == 1 for _ in range(4): a.value = True assert a.value == 5 class value: def returns_default_if_not_set(self): a = Argument("a", default=25) assert a.value == 25 class raw_value: def is_None_when_no_value_was_actually_seen(self): a = Argument("a", kind=int) assert a.raw_value is None class got_value: def non_list_kind_tests_for_None_value(self): arg = Argument("a") assert not arg.got_value arg.value = "something" assert arg.got_value def list_kind_test_for_empty_list_value(self): arg = Argument("a", kind=list) assert not arg.got_value arg.value = "append-me" assert arg.got_value class set_value: def casts_by_default(self): a = Argument("a", kind=int) a.set_value("5") assert a.value == 5 def allows_setting_value_without_casting(self): a = Argument("a", kind=int) a.set_value("5", cast=False) assert a.value == "5" invoke-2.2.0/tests/parser_context.py000066400000000000000000000275241445356551000176040ustar00rootroot00000000000000import copy from pytest import raises from invoke.parser import Argument, Context from invoke.tasks import task from invoke.collection import Collection class Context_: "ParserContext" # meh def may_have_a_name(self): c = Context(name="taskname") assert c.name == "taskname" def may_have_aliases(self): c = Context(name="realname", aliases=("othername", "yup")) assert "othername" in c.aliases def may_give_arg_list_at_init_time(self): a1 = Argument("foo") a2 = Argument("bar") c = Context(name="name", args=(a1, a2)) assert c.args["foo"] is a1 # TODO: reconcile this sort of test organization with the .flags oriented # tests within 'add_arg'. Some of this behavior is technically driven by # add_arg. class args: def setup_method(self): self.c = Context( args=( Argument("foo"), Argument(names=("bar", "biz")), Argument("baz", attr_name="wat"), ) ) def exposed_as_dict(self): assert "foo" in self.c.args.keys() def exposed_as_Lexicon(self): assert self.c.args.bar == self.c.args["bar"] def args_dict_includes_all_arg_names(self): for x in ("foo", "bar", "biz"): assert x in self.c.args def argument_attr_names_appear_in_args_but_not_flags(self): # Both appear as "Python-facing" args for x in ("baz", "wat"): assert x in self.c.args # But attr_name is for Python access only and isn't shown to the # parser. 
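# (So Python code reads ctx.args.wat while CLI users still type --baz; the attr_name never becomes a flag.)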
assert "wat" not in self.c.flags class add_arg: def setup_method(self): self.c = Context() def can_take_Argument_instance(self): a = Argument(names=("foo",)) self.c.add_arg(a) assert self.c.args["foo"] is a def can_take_name_arg(self): self.c.add_arg("foo") assert "foo" in self.c.args def can_take_kwargs_for_single_Argument(self): self.c.add_arg(names=("foo", "bar")) assert "foo" in self.c.args and "bar" in self.c.args def raises_ValueError_on_duplicate(self): self.c.add_arg(names=("foo", "bar")) with raises(ValueError): self.c.add_arg(name="bar") def adds_flaglike_name_to_dot_flags(self): "adds flaglike name to .flags" self.c.add_arg("foo") assert "--foo" in self.c.flags def adds_all_names_to_dot_flags(self): "adds all names to .flags" self.c.add_arg(names=("foo", "bar")) assert "--foo" in self.c.flags assert "--bar" in self.c.flags def adds_true_bools_to_inverse_flags(self): self.c.add_arg(name="myflag", default=True, kind=bool) assert "--myflag" in self.c.flags assert "--no-myflag" in self.c.inverse_flags assert self.c.inverse_flags["--no-myflag"] == "--myflag" def inverse_flags_works_right_with_task_driven_underscored_names(self): # Use a Task here instead of creating a raw argument, we're partly # testing Task.get_arguments()' transform of underscored names # here. Yes that makes this an integration test, but it's nice to # test it here at this level & not just in cli tests. @task def mytask(c, underscored_option=True): pass self.c.add_arg(mytask.get_arguments()[0]) flags = self.c.inverse_flags["--no-underscored-option"] assert flags == "--underscored-option" def turns_single_character_names_into_short_flags(self): self.c.add_arg("f") assert "-f" in self.c.flags assert "--f" not in self.c.flags def adds_positional_args_to_positional_args(self): self.c.add_arg(name="pos", positional=True) assert self.c.positional_args[0].name == "pos" def positional_args_empty_when_none_given(self): assert len(self.c.positional_args) == 0 def positional_args_filled_in_order(self): self.c.add_arg(name="pos1", positional=True) assert self.c.positional_args[0].name == "pos1" self.c.add_arg(name="abc", positional=True) assert self.c.positional_args[1].name == "abc" def positional_arg_modifications_affect_args_copy(self): self.c.add_arg(name="hrm", positional=True) assert self.c.args["hrm"].value == self.c.positional_args[0].value self.c.positional_args[0].value = 17 assert self.c.args["hrm"].value == self.c.positional_args[0].value class deepcopy: "__deepcopy__" def setup_method(self): self.arg = Argument("--boolean") self.orig = Context( name="mytask", args=(self.arg,), aliases=("othername",) ) self.new = copy.deepcopy(self.orig) def returns_correct_copy(self): assert self.new is not self.orig assert self.new.name == "mytask" assert "othername" in self.new.aliases def includes_arguments(self): assert len(self.new.args) == 1 assert self.new.args["--boolean"] is not self.arg def modifications_to_copied_arguments_do_not_touch_originals(self): new_arg = self.new.args["--boolean"] new_arg.value = True assert new_arg.value assert not self.arg.value class help_for: def setup_method(self): # Normal, non-task/collection related Context self.vanilla = Context( args=(Argument("foo"), Argument("bar", help="bar the baz")) ) # Task/Collection generated Context # (will expose flags n such) @task(help={"otherarg": "other help"}, optional=["optval"]) def mytask(c, myarg, otherarg, optval, intval=5): pass col = Collection(mytask) self.tasked = col.to_contexts()[0] def raises_ValueError_for_non_flag_values(self): with 
raises(ValueError): self.vanilla.help_for("foo") def vanilla_no_helpstr(self): assert self.vanilla.help_for("--foo") == ("--foo=STRING", "") def vanilla_with_helpstr(self): result = self.vanilla.help_for("--bar") assert result == ("--bar=STRING", "bar the baz") def task_driven_with_helpstr(self): result = self.tasked.help_for("--otherarg") assert result == ("-o STRING, --otherarg=STRING", "other help") # Yes, the next 3 tests are identical in form, but technically they # test different behaviors. HERPIN' AN' DERPIN' def task_driven_no_helpstr(self): result = self.tasked.help_for("--myarg") assert result == ("-m STRING, --myarg=STRING", "") def short_form_before_long_form(self): result = self.tasked.help_for("--myarg") assert result == ("-m STRING, --myarg=STRING", "") def equals_sign_for_long_form_only(self): result = self.tasked.help_for("--myarg") assert result == ("-m STRING, --myarg=STRING", "") def kind_to_placeholder_map(self): # Strings helpfor = self.tasked.help_for("--myarg") assert helpfor == ("-m STRING, --myarg=STRING", "") # Ints helpfor = self.tasked.help_for("--intval") assert helpfor == ("-i INT, --intval=INT", "") # TODO: others def shortflag_inputs_work_too(self): m = self.tasked.help_for("-m") myarg = self.tasked.help_for("--myarg") assert m == myarg def optional_values_use_brackets(self): result = self.tasked.help_for("--optval") assert result == ("-p [STRING], --optval[=STRING]", "") def underscored_args(self): c = Context(args=(Argument("i_have_underscores", help="yup"),)) result = c.help_for("--i-have-underscores") assert result == ("--i-have-underscores=STRING", "yup") def true_default_args(self): c = Context(args=(Argument("truthy", kind=bool, default=True),)) assert c.help_for("--truthy") == ("--[no-]truthy", "") class help_tuples: def returns_list_of_help_tuples(self): # Walks own list of flags/args, ensures resulting map to help_for() # TODO: consider redoing help_for to be more flexible on input -- # arg value or flag; or even Argument objects. ? @task(help={"otherarg": "other help"}) def mytask(c, myarg, otherarg): pass c = Collection(mytask).to_contexts()[0] expected = [c.help_for("--myarg"), c.help_for("--otherarg")] assert c.help_tuples() == expected def _assert_order(self, name_tuples, expected_flag_order): c = Context(args=[Argument(names=x) for x in name_tuples]) expected = [c.help_for(x) for x in expected_flag_order] assert c.help_tuples() == expected def sorts_alphabetically_by_shortflag_first(self): # Where shortflags exist, they take precedence self._assert_order( [("zarg", "a"), ("arg", "z")], ["--zarg", "--arg"] ) def case_ignored_during_sorting(self): self._assert_order( [("a",), ("B",)], # In raw cmp() uppercase would come before lowercase, # and we'd get ['-B', '-a'] ["-a", "-B"], ) def lowercase_wins_when_values_identical_otherwise(self): self._assert_order([("V",), ("v",)], ["-v", "-V"]) def sorts_alphabetically_by_longflag_when_no_shortflag(self): # Where no shortflag, sorts by longflag self._assert_order( [("otherarg",), ("longarg",)], ["--longarg", "--otherarg"] ) def sorts_heterogenous_help_output_with_longflag_only_options_first( self, ): # noqa # When both of the above mix, long-flag-only options come first. 
# E.g.: # --alpha # --beta # -a, --aaagh # -b, --bah # -c self._assert_order( [("c",), ("a", "aaagh"), ("b", "bah"), ("beta",), ("alpha",)], ["--alpha", "--beta", "-a", "-b", "-c"], ) def mixed_corelike_options(self): self._assert_order( [ ("V", "version"), ("c", "collection"), ("h", "help"), ("l", "list"), ("r", "root"), ], ["-c", "-h", "-l", "-r", "-V"], ) class missing_positional_args: def represents_positional_args_missing_values(self): arg1 = Argument("arg1", positional=True) arg2 = Argument("arg2", positional=False) arg3 = Argument("arg3", positional=True) c = Context(name="foo", args=(arg1, arg2, arg3)) assert c.missing_positional_args == [arg1, arg3] c.positional_args[0].value = "wat" assert c.missing_positional_args == [arg3] c.positional_args[1].value = "hrm" assert c.missing_positional_args == [] class str: "__str__" def with_no_args_output_is_simple(self): assert str(Context("foo")) == "<parser/Context 'foo'>" def args_show_as_repr(self): string = str(Context("bar", args=[Argument("arg1")])) assert ( string == "<parser/Context 'bar': {'arg1': <Argument: arg1>}>" ) # noqa invoke-2.2.0/tests/parser_parser.py000066400000000000000000000542141445356551000174100ustar00rootroot00000000000000from pytest import raises from invoke.parser import Parser, Context, Argument, ParseError class Parser_: def can_take_initial_context(self): c = Context() p = Parser(initial=c) assert p.initial == c def can_take_initial_and_other_contexts(self): c1 = Context("foo") c2 = Context("bar") p = Parser(initial=Context(), contexts=[c1, c2]) assert p.contexts["foo"] == c1 assert p.contexts["bar"] == c2 def can_take_just_other_contexts(self): c = Context("foo") p = Parser(contexts=[c]) assert p.contexts["foo"] == c def can_take_just_contexts_as_non_keyword_arg(self): c = Context("foo") p = Parser([c]) assert p.contexts["foo"] == c def raises_ValueError_for_unnamed_Contexts_in_contexts(self): with raises(ValueError): Parser(initial=Context(), contexts=[Context()]) def raises_error_for_context_name_clashes(self): with raises(ValueError): Parser(contexts=(Context("foo"), Context("foo"))) def raises_error_for_context_alias_and_name_clashes(self): with raises(ValueError): Parser((Context("foo", aliases=("bar",)), Context("bar"))) def raises_error_for_context_name_and_alias_clashes(self): # I.e. inverse of the above, which is a different code path.
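# (Sketch of this direction's collision: "foo" is already taken as a primary context name, then a later context tries to claim "foo" as an alias.)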
with raises(ValueError): Parser((Context("foo"), Context("bar", aliases=("foo",)))) def takes_ignore_unknown_kwarg(self): Parser(ignore_unknown=True) def ignore_unknown_defaults_to_False(self): assert Parser().ignore_unknown is False class parse_argv: def parses_sys_argv_style_list_of_strings(self): "parses sys.argv-style list of strings" # Doesn't-blow-up tests FTL mytask = Context(name="mytask") mytask.add_arg("arg") p = Parser(contexts=[mytask]) p.parse_argv(["mytask", "--arg", "value"]) def returns_only_contexts_mentioned(self): task1 = Context("mytask") task2 = Context("othertask") result = Parser((task1, task2)).parse_argv(["othertask"]) assert len(result) == 1 assert result[0].name == "othertask" def raises_error_if_unknown_contexts_found(self): with raises(ParseError): Parser().parse_argv(["foo", "bar"]) def unparsed_does_not_share_state(self): r = Parser(ignore_unknown=True).parse_argv(["self"]) assert r.unparsed == ["self"] r2 = Parser(ignore_unknown=True).parse_argv(["contained"]) assert r.unparsed == ["self"] # NOT ['self', 'contained'] assert r2.unparsed == ["contained"] # NOT ['self', 'contained'] def ignore_unknown_returns_unparsed_argv_instead(self): r = Parser(ignore_unknown=True).parse_argv(["foo", "bar", "--baz"]) assert r.unparsed == ["foo", "bar", "--baz"] def ignore_unknown_does_not_mutate_rest_of_argv(self): p = Parser([Context("ugh")], ignore_unknown=True) r = p.parse_argv(["ugh", "what", "-nowai"]) # NOT: ['what', '-n', '-w', '-a', '-i'] assert r.unparsed == ["what", "-nowai"] def always_includes_initial_context_if_one_was_given(self): # Even if no core/initial flags were seen t1 = Context("t1") init = Context() result = Parser((t1,), initial=init).parse_argv(["t1"]) assert result[0].name is None assert result[1].name == "t1" def returned_contexts_are_in_order_given(self): t1, t2 = Context("t1"), Context("t2") r = Parser((t1, t2)).parse_argv(["t2", "t1"]) assert [x.name for x in r] == ["t2", "t1"] def returned_context_member_arguments_contain_given_values(self): c = Context("mytask", args=(Argument("boolean", kind=bool),)) result = Parser((c,)).parse_argv(["mytask", "--boolean"]) assert result[0].args["boolean"].value is True def inverse_bools_get_set_correctly(self): arg = Argument("myarg", kind=bool, default=True) c = Context("mytask", args=(arg,)) r = Parser((c,)).parse_argv(["mytask", "--no-myarg"]) assert r[0].args["myarg"].value is False def arguments_which_take_values_get_defaults_overridden_correctly( self, ): # noqa args = (Argument("arg", kind=str), Argument("arg2", kind=int)) c = Context("mytask", args=args) argv = ["mytask", "--arg", "myval", "--arg2", "25"] result = Parser((c,)).parse_argv(argv) assert result[0].args["arg"].value == "myval" assert result[0].args["arg2"].value == 25 def returned_arguments_not_given_contain_default_values(self): # I.e. a Context with args A and B, invoked with no mention of B, # should result in B existing in the result, with its default value # intact, and not e.g. None, or the arg not existing. 
a = Argument("name", kind=str) b = Argument("age", default=7) c = Context("mytask", args=(a, b)) Parser((c,)).parse_argv(["mytask", "--name", "blah"]) assert c.args["age"].value == 7 def returns_remainder(self): "returns -- style remainder string chunk" r = Parser((Context("foo"),)).parse_argv( ["foo", "--", "bar", "biz"] ) assert r.remainder == "bar biz" def clones_initial_context(self): a = Argument("foo", kind=bool) assert a.value is None c = Context(args=(a,)) p = Parser(initial=c) assert p.initial is c r = p.parse_argv(["--foo"]) assert p.initial is c c2 = r[0] assert c2 is not c a2 = c2.args["foo"] assert a2 is not a assert a.value is None assert a2.value is True def clones_noninitial_contexts(self): a = Argument("foo") assert a.value is None c = Context(name="mytask", args=(a,)) p = Parser(contexts=(c,)) assert p.contexts["mytask"] is c r = p.parse_argv(["mytask", "--foo", "val"]) assert p.contexts["mytask"] is c c2 = r[0] assert c2 is not c a2 = c2.args["foo"] assert a2 is not a assert a.value is None assert a2.value == "val" class parsing_errors: def setup_method(self): self.p = Parser([Context(name="foo", args=[Argument("bar")])]) def missing_flag_values_raise_ParseError(self): with raises(ParseError): self.p.parse_argv(["foo", "--bar"]) def attaches_context_to_ParseErrors(self): try: self.p.parse_argv(["foo", "--bar"]) except ParseError as e: assert e.context is not None def attached_context_is_None_outside_contexts(self): try: Parser().parse_argv(["wat"]) except ParseError as e: assert e.context is None class positional_arguments: def _basic(self): arg = Argument("pos", positional=True) mytask = Context(name="mytask", args=[arg]) return Parser(contexts=[mytask]) def single_positional_arg(self): r = self._basic().parse_argv(["mytask", "posval"]) assert r[0].args["pos"].value == "posval" def omitted_positional_arg_raises_ParseError(self): try: self._basic().parse_argv(["mytask"]) except ParseError as e: expected = "'mytask' did not receive required positional arguments: 'pos'" # noqa assert str(e) == expected else: assert False, "Did not raise ParseError!" def omitted_positional_args_raises_ParseError(self): try: arg = Argument("pos", positional=True) arg2 = Argument("morepos", positional=True) mytask = Context(name="mytask", args=[arg, arg2]) Parser(contexts=[mytask]).parse_argv(["mytask"]) except ParseError as e: expected = "'mytask' did not receive required positional arguments: 'pos', 'morepos'" # noqa assert str(e) == expected else: assert False, "Did not raise ParseError!" 
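# (Below: a required positional arg greedily consumes the next token even when that token matches another known context name, so "mytask lolwut" binds "lolwut" to 'pos' instead of invoking a second task.)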
def positional_args_eat_otherwise_valid_context_names(self): mytask = Context( "mytask", args=[ Argument("pos", positional=True), Argument("nonpos", default="default"), ], ) Context("lolwut") result = Parser([mytask]).parse_argv(["mytask", "lolwut"]) r = result[0] assert r.args["pos"].value == "lolwut" assert r.args["nonpos"].value == "default" assert len(result) == 1 # Not 2 def positional_args_can_still_be_given_as_flags(self): # AKA "positional args can come anywhere in the context" pos1 = Argument("pos1", positional=True) pos2 = Argument("pos2", positional=True) nonpos = Argument("nonpos", positional=False, default="lol") mytask = Context("mytask", args=[pos1, pos2, nonpos]) assert mytask.positional_args == [pos1, pos2] r = Parser([mytask]).parse_argv( [ "mytask", "--nonpos", "wut", "--pos2", "pos2val", "pos1val", ] )[0] assert r.args["pos1"].value == "pos1val" assert r.args["pos2"].value == "pos2val" assert r.args["nonpos"].value == "wut" class equals_signs: def _compare(self, argname, invoke, value): c = Context("mytask", args=(Argument(argname, kind=str),)) r = Parser((c,)).parse_argv(["mytask", invoke]) assert r[0].args[argname].value == value def handles_equals_style_long_flags(self): self._compare("foo", "--foo=bar", "bar") def handles_equals_style_short_flags(self): self._compare("f", "-f=bar", "bar") def does_not_require_escaping_equals_signs_in_value(self): self._compare("f", "-f=biz=baz", "biz=baz") def handles_multiple_boolean_flags_per_context(self): c = Context( "mytask", args=(Argument("foo", kind=bool), Argument("bar", kind=bool)), ) r = Parser([c]).parse_argv(["mytask", "--foo", "--bar"]) a = r[0].args assert a.foo.value is True assert a.bar.value is True class optional_arg_values: def setup_method(self): self.parser = self._parser() def _parser(self, arguments=None): if arguments is None: arguments = ( Argument( names=("foo", "f"), optional=True, default="mydefault" ), ) self.context = Context("mytask", args=arguments) self.parser = Parser([self.context]) return self.parser def _parse(self, argstr, parser=None): parser = parser or self.parser return parser.parse_argv(["mytask"] + argstr.split()) def _expect(self, argstr, expected, parser=None): result = self._parse(argstr, parser) assert result[0].args.foo.value == expected def no_value_becomes_True_not_default_value(self): self._expect("--foo", True) self._expect("-f", True) def value_given_gets_preserved_normally(self): for argstr in ( "--foo whatever", "--foo=whatever", "-f whatever", "-f=whatever", ): self._expect(argstr, "whatever") def not_given_at_all_uses_default_value(self): self._expect("", "mydefault") class ambiguity_sanity_checks: def _test_for_ambiguity(self, invoke, parser=None): msg = "is ambiguous" try: self._parse(invoke, parser or self.parser) # Expected result except ParseError as e: assert msg in str(e) # No exception occurred at all? Bollocks. else: assert False # Any other exceptions will naturally cause failure here. def unfilled_posargs(self): p = self._parser( ( Argument("foo", optional=True), Argument("bar", positional=True), ) ) self._test_for_ambiguity("--foo uhoh", p) def no_ambiguity_if_option_val_already_given(self): p = self._parser( ( Argument("foo", optional=True), Argument("bar", kind=bool), ) ) # This should NOT raise a ParseError. result = self._parse("--foo hello --bar", p) assert result[0].args["foo"].value == "hello" assert result[0].args["bar"].value is True def valid_argument_is_NOT_ambiguous(self): # The one exception that proves the rule? 
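# (Why this is safe: '--bar' is a known flag that takes a value, so the parser can commit to "--foo saw no value -> True" without guessing; e.g. "--foo --bar barval" parses cleanly below.)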
self._parser((Argument("foo", optional=True), Argument("bar"))) for form in ("--bar barval", "--bar=barval"): result = self._parse("--foo {}".format(form)) assert len(result) == 1 args = result[0].args assert args["foo"].value is True assert args["bar"].value == "barval" def valid_flaglike_argument_is_NOT_ambiguous(self): # The OTHER exception that proves the rule? self._parser( ( Argument("foo", optional=True), Argument("bar", kind=bool), ) ) result = self._parse("--foo --bar") assert len(result) == 1 args = result[0].args assert args["foo"].value is True assert args["bar"].value is True def invalid_flaglike_value_is_stored_as_value(self): self._parser((Argument("foo", optional=True),)) result = self._parse("--foo --bar") assert result[0].args["foo"].value == "--bar" def task_name(self): # mytask --foo myothertask c1 = Context("mytask", args=(Argument("foo", optional=True),)) c2 = Context("othertask") p = Parser([c1, c2]) self._test_for_ambiguity("--foo othertask", p) class list_type_arguments: "list-type (iterable) arguments" def _parse(self, *args): c = Context("mytask", args=(Argument("mylist", kind=list),)) argv = ["mytask"] + list(args) return Parser([c]).parse_argv(argv)[0].args.mylist.value def can_be_given_no_times_resulting_in_default_empty_list(self): assert self._parse() == [] def given_once_becomes_single_item_list(self): assert self._parse("--mylist", "foo") == ["foo"] def given_N_times_becomes_list_of_len_N(self): expected = ["foo", "bar", "biz"] got = self._parse( "--mylist", "foo", "--mylist", "bar", "--mylist", "biz" ) assert got == expected def iterables_work_correctly_outside_a_vacuum(self): # Undetected bug where I was primarily focused on the -vvv use # case...'normal' incrementables never left 'waiting for value' # state in the parser! so _subsequent_ task names & such never got # parsed right, always got appended to the list. c = Context("mytask", args=[Argument("mylist", kind=list)]) c2 = Context("othertask") argv = [ "mytask", "--mylist", "val", "--mylist", "val2", "othertask", ] result = Parser([c, c2]).parse_argv(argv) # When bug present, result only has one context (for 'mytask') and # its 'mylist' consists of ['val', 'val2', 'othertask']. (the # middle '--mylist' was handled semi-correctly.) 
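# (Healthy behavior, for contrast: a list-type flag consumes exactly one value per occurrence, leaving 'othertask' free to parse as a second context.)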
mylist = result[0].args.mylist.value assert mylist == ["val", "val2"] contexts = len(result) err = "Got {} parse context results instead of 2!".format(contexts) assert contexts == 2, err assert result[1].name == "othertask" class task_repetition: def is_happy_to_handle_same_task_multiple_times(self): task1 = Context("mytask") result = Parser((task1,)).parse_argv(["mytask", "mytask"]) assert len(result) == 2 for x in result: assert x.name == "mytask" def task_args_work_correctly(self): task1 = Context("mytask", args=(Argument("meh"),)) result = Parser((task1,)).parse_argv( ["mytask", "--meh", "mehval1", "mytask", "--meh", "mehval2"] ) assert result[0].args.meh.value == "mehval1" assert result[1].args.meh.value == "mehval2" class per_task_core_flags: class general: def _echo(self): return Argument("echo", kind=bool, default=False) def core_flags_work_normally_when_no_conflict(self): # Initial parse context with an --echo, plus a no-args task initial = Context(args=[self._echo()]) task1 = Context("mytask") parser = Parser(initial=initial, contexts=[task1]) # Call with --echo in the per-task context, expect the core # context got updated (vs an error) result = parser.parse_argv(["mytask", "--echo"]) assert result[0].args.echo.value is True def when_conflict_per_task_args_win_out(self): # Initial parse context with an --echo, plus task w/ same initial = Context(args=[self._echo()]) task1 = Context("mytask", args=[self._echo()]) parser = Parser(initial=initial, contexts=[task1]) # Call with --echo in the per-task context, expect the task # context got updated, and not core. result = parser.parse_argv(["mytask", "--echo"]) assert result[0].args.echo.value is False assert result[1].args.echo.value is True def value_requiring_core_flags_also_work_correctly(self): "value-requiring core flags also work correctly" initial = Context(args=[Argument("hide")]) task1 = Context("mytask") parser = Parser(initial=initial, contexts=[task1]) result = parser.parse_argv(["mytask", "--hide", "both"]) assert result[0].args.hide.value == "both" class edge_cases: def core_bool_but_per_task_string(self): # Initial parse context with bool --hide, and a task with a # regular (string) --hide initial = Context( args=[Argument("hide", kind=bool, default=False)] ) task1 = Context("mytask", args=[Argument("hide")]) parser = Parser(initial=initial, contexts=[task1]) # Expect that, because the task's version wins, we're able to # call it with a value. (If there were weird bugs where the # core flag informed the parsing, this would fail.) result = parser.parse_argv(["mytask", "--hide", "both"]) assert result[0].args.hide.value is False assert result[1].args.hide.value == "both" class help_treats_context_name_as_its_value: def by_itself_base_case(self): task1 = Context("mytask") init = Context(args=[Argument("help", optional=True)]) parser = Parser(initial=init, contexts=[task1]) result = parser.parse_argv(["mytask", "--help"]) assert len(result) == 2 assert result[0].args.help.value == "mytask" assert "help" not in result[1].args def other_tokens_afterwards_raise_parse_errors(self): # NOTE: this is because of the special-casing where we supply # the task name as the value when the flag is literally named # "help". 
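# (In other words: "--help" already consumed the task name as its value, so a trailing "foobar" has nowhere to land and must raise.)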
task1 = Context("mytask") init = Context(args=[Argument("help", optional=True)]) parser = Parser(initial=init, contexts=[task1]) with raises(ParseError, match=r".*foobar.*"): parser.parse_argv(["mytask", "--help", "foobar"]) class ParseResult_: "ParseResult" def setup_method(self): self.context = Context( "mytask", args=(Argument("foo", kind=str), Argument("bar")) ) argv = ["mytask", "--foo", "foo-val", "--", "my", "remainder"] self.result = Parser((self.context,)).parse_argv(argv) def acts_as_a_list_of_parsed_contexts(self): assert len(self.result) == 1 assert self.result[0].name == "mytask" def exhibits_remainder_attribute(self): assert self.result.remainder == "my remainder" invoke-2.2.0/tests/program.py000066400000000000000000001557371445356551000162230ustar00rootroot00000000000000import json import os import sys from io import BytesIO from pathlib import Path from invoke.util import Lexicon from unittest.mock import patch, Mock, ANY import pytest from pytest import skip from pytest_relaxed import trap from invoke import ( Argument, Collection, Config, Executor, Exit, FilesystemLoader, ParserContext, ParseResult, Program, Result, Task, UnexpectedExit, ) from invoke import main from invoke.util import cd from invoke.config import merge_dicts from _util import ( ROOT, expect, load, run, skip_if_windows, support_file, support_path, support, ) pytestmark = pytest.mark.usefixtures("integration") class Program_: class init: "__init__" def may_specify_version(self): assert Program(version="1.2.3").version == "1.2.3" def default_version_is_unknown(self): assert Program().version == "unknown" def may_specify_namespace(self): foo = load("foo") assert Program(namespace=foo).namespace is foo def may_specify_name(self): assert Program(name="Myapp").name == "Myapp" def may_specify_binary(self): assert Program(binary="myapp").binary == "myapp" def loader_class_defaults_to_FilesystemLoader(self): assert Program().loader_class is FilesystemLoader def may_specify_loader_class(self): klass = object() assert Program(loader_class=klass).loader_class == klass def executor_class_defaults_to_Executor(self): assert Program().executor_class is Executor def may_specify_executor_class(self): klass = object() assert Program(executor_class=klass).executor_class == klass def config_class_defaults_to_Config(self): assert Program().config_class is Config def may_specify_config_class(self): klass = object() assert Program(config_class=klass).config_class == klass class miscellaneous: "miscellaneous behaviors" def debug_flag_activates_logging(self): # Have to patch our logger to get in before logcapture kicks in. 
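# (Patching invoke.util.debug itself, rather than inspecting captured log records, keeps the assertion independent of whatever log capture the test harness installs first.)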
with patch("invoke.util.debug") as debug: Program().run("invoke -d -c debugging foo") debug.assert_called_with("my-sentinel") def debug_honored_as_env_var_too(self, reset_environ): os.environ["INVOKE_DEBUG"] = "1" with patch("invoke.util.debug") as debug: # NOTE: no use of -d/--debug Program().run("invoke -c debugging foo") debug.assert_called_with("my-sentinel") def bytecode_skipped_by_default(self): expect("-c foo mytask") assert sys.dont_write_bytecode def write_pyc_explicitly_enables_bytecode_writing(self): expect("--write-pyc -c foo mytask") assert not sys.dont_write_bytecode class normalize_argv: @patch("invoke.program.sys") def defaults_to_sys_argv(self, mock_sys): argv = ["inv", "--version"] mock_sys.argv = argv p = Program() p.print_version = Mock() p.run(exit=False) p.print_version.assert_called() def uses_a_list_unaltered(self): p = Program() p.print_version = Mock() p.run(["inv", "--version"], exit=False) p.print_version.assert_called() def splits_a_string(self): p = Program() p.print_version = Mock() p.run("inv --version", exit=False) p.print_version.assert_called() class name: def defaults_to_capitalized_binary_when_None(self): expect("myapp --version", out="Myapp unknown\n", invoke=False) def benefits_from_binary_absolute_behavior(self): "benefits from binary()'s absolute path behavior" expect( "/usr/local/bin/myapp --version", out="Myapp unknown\n", invoke=False, ) def uses_overridden_value_when_given(self): p = Program(name="NotInvoke") expect("--version", out="NotInvoke unknown\n", program=p) class binary: def defaults_to_argv_when_None(self): stdout, _ = run("myapp --help", invoke=False) assert "myapp [--core-opts]" in stdout def uses_overridden_value_when_given(self): stdout, _ = run( "myapp --help", invoke=False, program=Program(binary="nope") ) assert "nope [--core-opts]" in stdout @trap def use_binary_basename_when_invoked_absolutely(self): Program().run("/usr/local/bin/myapp --help", exit=False) stdout = sys.stdout.getvalue() assert "myapp [--core-opts]" in stdout assert "/usr/local/bin" not in stdout class called_as: # NOTE: these tests are meh due to Program's lifecycle design # (attributes get modified during run(), such as things based on # observed argv). It's not great, but, whatever. @trap def is_the_whole_deal_when_just_a_name(self): p = Program() p.run("whatever --help", exit=False) assert p.called_as == "whatever" @trap def is_basename_when_given_a_path(self): p = Program() p.run("/usr/local/bin/whatever --help", exit=False) assert p.called_as == "whatever" class binary_names: # NOTE: this is currently only used for completion stuff, so we use # that to test. TODO: maybe make this more unit-y... def defaults_to_argv_when_None(self): stdout, _ = run("foo --print-completion-script zsh", invoke=False) assert " foo" in stdout def can_be_given_directly(self): program = Program(binary_names=["foo", "bar"]) stdout, _ = run( "foo --print-completion-script zsh", invoke=False, program=program, ) assert " foo bar" in stdout class print_version: def displays_name_and_version(self): expect( "--version", program=Program(name="MyProgram", version="0.1.0"), out="MyProgram 0.1.0\n", ) class initial_context: def contains_truly_core_arguments_regardless_of_namespace_value(self): # Spot check. See integration-style --help tests for full argument # checkup. 
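# (The loop below exercises both a default namespace and an explicit empty Collection, since truly-core flags must survive either code path.)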
for program in (Program(), Program(namespace=Collection())): for arg in ("--complete", "--debug", "--warn-only", "--list"): stdout, _ = run("--help", program=program) assert arg in stdout def null_namespace_triggers_task_related_args(self): program = Program(namespace=None) for arg in program.task_args(): stdout, _ = run("--help", program=program) assert arg.name in stdout def non_null_namespace_does_not_trigger_task_related_args(self): for arg in Program().task_args(): program = Program(namespace=Collection(mytask=Task(Mock()))) stdout, _ = run("--help", program=program) assert arg.name not in stdout class load_collection: def complains_when_default_collection_not_found(self): # NOTE: assumes system under test has no tasks.py in root. Meh. with cd(ROOT): expect("-l", err="Can't find any collection named 'tasks'!\n") def complains_when_explicit_collection_not_found(self): expect( "-c huhwhat -l", err="Can't find any collection named 'huhwhat'!\n", ) @trap def uses_loader_class_given(self): klass = Mock(side_effect=FilesystemLoader) Program(loader_class=klass).run("myapp --help foo", exit=False) klass.assert_called_with(start=ANY, config=ANY) def config_location_correct_for_package_type_task_trees(self): with cd(Path(support) / "configs" / "package"): expect("mytask") # will assert if config not loaded right class execute: def uses_executor_class_given(self): klass = Mock() Program(executor_class=klass).run("myapp foo", exit=False) klass.assert_called_with(ANY, ANY, ANY) klass.return_value.execute.assert_called_with(ANY) def executor_class_may_be_overridden_via_configured_string(self): class ExecutorOverridingConfig(Config): @staticmethod def global_defaults(): defaults = Config.global_defaults() path = "custom_executor.CustomExecutor" merge_dicts(defaults, {"tasks": {"executor_class": path}}) return defaults mock = load("custom_executor").CustomExecutor p = Program(config_class=ExecutorOverridingConfig) p.run("myapp noop", exit=False) mock.assert_called() assert mock.return_value.execute.called def executor_is_given_access_to_core_args_and_remainder(self): klass = Mock() cmd = "myapp -e foo -- myremainder" Program(executor_class=klass).run(cmd, exit=False) core = klass.call_args[0][2] assert core[0].args["echo"].value assert core.remainder == "myremainder" class core_args: def returns_core_args_list(self): # Mostly so we encode the explicitly-doc'd public API member in tests. # Spot checks good enough, --help tests include the full deal. core_args = Program().core_args() core_arg_names = [x.names[0] for x in core_args] for name in ("complete", "help", "pty", "version"): assert name in core_arg_names # Also make sure it's a list for easier tweaking/appending assert isinstance(core_args, list) class args_property: def shorthand_for_self_core_args(self): "is shorthand for self.core[0].args" p = Program() p.run("myapp -e noop", exit=False) args = p.args assert isinstance(args, Lexicon) assert args.echo.value is True class core_args_from_task_contexts: # NOTE: many of these use Program.args in lieu of Program.core[0], for # convenience, tho also because initially the behavior was _in_ .args def core_context_gets_updated_with_core_flags_from_tasks(self): # Part of #466.
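# (Scenario under test: "-e" arrives before the task name, the classic core-flag position, while "--hide both" arrives after it; both should surface on the core parse context.)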
p = Program() p.run("myapp -e noop --hide both", exit=False) # Was given in core assert p.args.echo.value is True # Was given in per-task assert p.args.hide.value == "both" def copying_from_task_context_does_not_set_empty_list_values(self): # Less of an issue for scalars, but for list-type args, naively doing # .value = (the other context's value) actually ends up creating a # list-of-lists. p = Program() # Set up core-args parser context with an iterable arg that hasn't # seen any value yet def filename_args(): return [Argument("filename", kind=list)] p.core = ParseResult([ParserContext(args=filename_args())]) # And a core-via-tasks context with a copy of that same arg, which # also hasn't seen any value yet p.core_via_tasks = ParserContext(args=filename_args()) # Now the behavior of .args can be tested as desired assert p.args["filename"].value == [] # Not [[]]! def copying_from_task_context_does_not_overwrite_good_values(self): # Another subcase, also mostly applying to list types: core context # got a useful value, nothing was found in the per-task context; # when a naive 'is not None' check is used, this overwrites the # good value with an empty list. # (Other types tend to not have this problem because their ._value # is always None when not set. TODO: maybe this should be # considered incorrect behavior for list type args?) def make_arg(): return Argument("filename", kind=list) p = Program() # Core arg, which got a value arg = make_arg() arg.value = "some-file" # appends to list p.core = ParseResult([ParserContext(args=[arg])]) # Set core-via-tasks version to vanilla/blank/empty-list version p.core_via_tasks = ParserContext(args=[make_arg()]) # Call .args, expect that the initial value was not overwritten assert p.args.filename.value == ["some-file"] class run: # NOTE: some of these are integration-style tests, but they are still # fast tests (so not needing to go into the integration suite) and # touch on transformations to the command line that occur above, or # around, the actual parser classes/methods (thus not being suitable # for the parser's own unit tests). def seeks_and_loads_tasks_module_by_default(self): expect("foo", out="Hm\n") def does_not_seek_tasks_module_if_namespace_was_given(self): expect( "foo", err="No idea what 'foo' is!\n", program=Program(namespace=Collection("blank")), ) def explicit_namespace_works_correctly(self): # Regression-ish test re #288 ns = Collection.from_module(load("integration")) expect("print-foo", out="foo\n", program=Program(namespace=ns)) def allows_explicit_task_module_specification(self): expect("-c integration print-foo", out="foo\n") def handles_task_arguments(self): expect("-c integration print-name --name inigo", out="inigo\n") def can_change_collection_search_root(self): for flag in ("-r", "--search-root"): expect( "{} branch/ alt-root".format(flag), out="Down with the alt-root!\n", ) def can_change_collection_search_root_with_explicit_module_name(self): for flag in ("-r", "--search-root"): expect( "{} branch/ -c explicit lyrics".format(flag), out="Don't swear!\n", ) @trap @patch("invoke.program.sys.exit") def ParseErrors_display_message_and_exit_1(self, mock_exit): p = Program() # Run with a definitely-parser-angering incorrect input; the fact # that this line doesn't raise an exception (and thus fail the # test) is what we're testing... nah = "nopenotvalidsorry" p.run("myapp {}".format(nah)) # Expect that we did print the core body of the ParseError (e.g. # "no idea what foo is!") and exit 1. (Intent is to display that # info w/o a full traceback, basically.)
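# (So the checks below are simply: stderr got exactly the one-line ParseError message, and the process exited 1, with no traceback text mixed in.)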
stderr = sys.stderr.getvalue() assert stderr == "No idea what '{}' is!\n".format(nah) mock_exit.assert_called_with(1) @trap @patch("invoke.program.sys.exit") def UnexpectedExit_exits_with_code_when_no_hiding(self, mock_exit): p = Program() oops = UnexpectedExit( Result(command="meh", exited=17, hide=tuple()) ) p.execute = Mock(side_effect=oops) p.run("myapp foo") # Expect NO repr printed, because stdout/err were not hidden, so we # don't want to add extra annoying verbosity - we want to be more # Make-like here. assert sys.stderr.getvalue() == "" # But we still exit with expected code (vs e.g. 1 or 0) mock_exit.assert_called_with(17) @trap @patch("invoke.program.sys.exit") def shows_UnexpectedExit_str_when_streams_hidden(self, mock_exit): p = Program() oops = UnexpectedExit( Result( command="meh", exited=54, stdout="things!", stderr="ohnoz!", encoding="utf-8", hide=("stdout", "stderr"), ) ) p.execute = Mock(side_effect=oops) p.run("myapp foo") # Expect repr() of exception prints to stderr # NOTE: this partially duplicates a test in runners.py; whatever. stderr = sys.stderr.getvalue() expected = """Encountered a bad command exit code! Command: 'meh' Exit code: 54 Stdout: things! Stderr: ohnoz! """ assert stderr == expected # And exit with expected code (vs e.g. 1 or 0) mock_exit.assert_called_with(54) @trap @patch("invoke.program.sys.exit") def UnexpectedExit_str_encodes_stdout_and_err(self, mock_exit): p = Program() oops = UnexpectedExit( Result( command="meh", exited=54, stdout="this is not ascii: \u1234", stderr="this is also not ascii: \u4321", encoding="utf-8", hide=("stdout", "stderr"), ) ) p.execute = Mock(side_effect=oops) p.run("myapp foo") # NOTE: using explicit binary ASCII here, & accessing raw # getvalue() of the faked sys.stderr (spec.trap auto-decodes it # normally) to have a not-quite-tautological test. otherwise we'd # just be comparing unicode to unicode. shrug? expected = b"""Encountered a bad command exit code! 
Command: 'meh' Exit code: 54 Stdout: this is not ascii: \xe1\x88\xb4 Stderr: this is also not ascii: \xe4\x8c\xa1 """ got = BytesIO.getvalue(sys.stderr) assert got == expected class Exit_: @patch("invoke.program.sys.exit") def defaults_to_exiting_0(self, mock_exit): p = Program() p.execute = Mock(side_effect=Exit()) p.run("myapp foo") mock_exit.assert_called_once_with(0) @trap @patch("invoke.program.sys.exit") def prints_message_exiting_1_if_message_given(self, mock_exit): p = Program() p.execute = Mock(side_effect=Exit("onoz")) p.run("myapp foo") mock_exit.assert_called_once_with(1) assert sys.stderr.getvalue() == "onoz\n" @trap @patch("invoke.program.sys.exit") def may_explicitly_supply_code_with_message(self, mock_exit): p = Program() p.execute = Mock(side_effect=Exit("onoz", code=17)) p.run("myapp foo") mock_exit.assert_called_once_with(17) assert sys.stderr.getvalue() == "onoz\n" @trap @patch("invoke.program.sys.exit") def may_explicitly_supply_code_without_message(self, mock_exit): p = Program() p.execute = Mock(side_effect=Exit(code=17)) p.run("myapp foo") mock_exit.assert_called_once_with(17) assert sys.stderr.getvalue() == "" def should_show_core_usage_on_core_parse_failures(self): skip() def should_show_context_usage_on_context_parse_failures(self): skip() @trap @patch("invoke.program.sys.exit") def turns_KeyboardInterrupt_into_exit_code_1(self, mock_exit): p = Program() p.execute = Mock(side_effect=KeyboardInterrupt) p.run("myapp -c foo mytask") mock_exit.assert_called_with(1) class help_: "--help" class core: def empty_invocation_with_no_default_task_prints_help(self): stdout, _ = run("-c foo") assert "Core options:" in stdout # TODO: On Windows, we don't get a pty, so we don't get a # guaranteed terminal size of 80x24. Skip for now, but maybe # a suitable fix would be to just strip all whitespace from the # returned and expected values before testing. Then terminal # size is ignored. @skip_if_windows def core_help_option_prints_core_help(self): # TODO: change dynamically based on parser contents? # e.g. no core args == no [--core-opts], # no tasks == no task stuff? # NOTE: test will trigger default pty size of 80x24, so the # below string is formatted appropriately. # TODO: add more unit-y tests for specific behaviors: # * fill terminal w/ columns + spacing # * line-wrap help text in its own column expected = """ Usage: inv[oke] [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts] Core options: --complete Print tab-completion candidates for given parse remainder. --hide=STRING Set default value of run()'s 'hide' kwarg. --no-dedupe Disable task deduplication. --print-completion-script=STRING Print the tab-completion script for your preferred shell (bash|zsh|fish). --prompt-for-sudo-password Prompt user at start of session for the sudo.password config value. --write-pyc Enable creation of .pyc files. -c STRING, --collection=STRING Specify collection name to load. -d, --debug Enable debug output. -D INT, --list-depth=INT When listing tasks, only show the first INT levels. -e, --echo Echo executed commands before running. -f STRING, --config=STRING Runtime configuration file to use. -F STRING, --list-format=STRING Change the display format used when listing tasks. Should be one of: flat (default), nested, json. -h [STRING], --help[=STRING] Show core or per-task help and exit. -l [STRING], --list[=STRING] List available tasks, optionally limited to a namespace. -p, --pty Use a pty when executing shell commands. 
-r STRING, --search-root=STRING Change root directory used for finding task modules. -R, --dry Echo commands instead of running. -T INT, --command-timeout=INT Specify a global command execution timeout, in seconds. -V, --version Show version and exit. -w, --warn-only Warn, instead of failing, when shell commands fail. """.lstrip() for flag in ["-h", "--help"]: expect(flag, out=expected, program=main.program) def bundled_namespace_help_includes_subcommand_listing(self): t1, t2 = Task(Mock()), Task(Mock()) coll = Collection(task1=t1, task2=t2) p = Program(namespace=coll) # Spot checks for expected bits, so we don't have to change # this every time core args change. for expected in ( # Usage line changes somewhat "Usage: myapp [--core-opts] [--subcommand-opts] ...\n", # noqa # Core options are still present "Core options:\n", "--echo", # Subcommands are listed "Subcommands:\n", " task1", " task2", ): stdout, _ = run("myapp --help", program=p, invoke=False) assert expected in stdout def core_help_doesnt_get_mad_if_loading_fails(self): # Expects no tasks.py in root of FS with cd(ROOT): stdout, _ = run("--help") assert "Usage: " in stdout class per_task: "per-task" def prints_help_for_task_only(self): expected = """ Usage: invoke [--core-opts] punch [--options] [other tasks here ...] Docstring: none Options: -h STRING, --why=STRING Motive -w STRING, --who=STRING Who to punch """.lstrip() for flag in ["-h", "--help"]: expect("-c decorators {} punch".format(flag), out=expected) def works_for_unparameterized_tasks(self): expected = """ Usage: invoke [--core-opts] biz [other tasks here ...] Docstring: none Options: none """.lstrip() expect("-c decorators -h biz", out=expected) def honors_program_binary(self): stdout, _ = run( "-c decorators -h biz", program=Program(binary="notinvoke") ) assert "Usage: notinvoke" in stdout def displays_docstrings_if_given(self): expected = """ Usage: invoke [--core-opts] foo [other tasks here ...] Docstring: Foo the bar. Options: none """.lstrip() expect("-c decorators -h foo", out=expected) def dedents_correctly(self): expected = """ Usage: invoke [--core-opts] foo2 [other tasks here ...] Docstring: Foo the bar: example code Added in 1.0 Options: none """.lstrip() expect("-c decorators -h foo2", out=expected) def dedents_correctly_for_alt_docstring_style(self): expected = """ Usage: invoke [--core-opts] foo3 [other tasks here ...] Docstring: Foo the other bar: example code Added in 1.1 Options: none """.lstrip() expect("-c decorators -h foo3", out=expected) def exits_after_printing(self): # TODO: find & test the other variants of this error case, such # as core --help not exiting, --list not exiting, etc expected = """ Usage: invoke [--core-opts] punch [--options] [other tasks here ...] 
Docstring: none Options: -h STRING, --why=STRING Motive -w STRING, --who=STRING Who to punch """.lstrip() expect("-c decorators -h punch --list", out=expected) def complains_if_given_invalid_task_name(self): expect("-h this", err="No idea what 'this' is!\n") class task_list: "--list" def _listing(self, lines): return """ Available tasks: {} """.format( "\n".join(" " + x for x in lines) ).lstrip() def _list_eq(self, collection, listing): cmd = "-c {} --list".format(collection) expect(cmd, out=self._listing(listing)) def simple_output(self): expected = self._listing( ( "bar", "biz", "boz", "foo", "post1", "post2", "print-foo", "print-name", "print-underscored-arg", ) ) for flag in ("-l", "--list"): expect("-c integration {}".format(flag), out=expected) def namespacing(self): self._list_eq("namespacing", ("toplevel", "module.mytask")) def top_level_tasks_listed_first(self): self._list_eq("simple_ns_list", ("z-toplevel", "a.b.subtask")) def aliases_sorted_alphabetically(self): self._list_eq("alias_sorting", ("toplevel (a, z)",)) def default_tasks(self): # sub-ns default task display as "real.name (collection name)" self._list_eq( "explicit_root", ( "top-level (other-top)", "sub-level.sub-task (sub-level, sub-level.other-sub)", ), ) def docstrings_shown_alongside(self): self._list_eq( "docstrings", ( "leading-whitespace foo", "no-docstring", "one-line foo", "two-lines foo", "with-aliases (a, b) foo", ), ) def docstrings_are_wrapped_to_terminal_width(self): self._list_eq( "nontrivial_docstrings", ( "no-docstring", "task-one Lorem ipsum dolor sit amet, consectetur adipiscing elit.\n Nullam id dictum", # noqa "task-two Nulla eget ultrices ante. Curabitur sagittis commodo posuere.\n Duis dapibus", # noqa ), ) def empty_collections_say_no_tasks(self): expect( "-c empty -l", err="No tasks found in collection 'empty'!\n" ) def nontrivial_trees_are_sorted_by_namespace_and_depth(self): # By using a larger sample, we can guard against unintuitive # behaviors arising from the above simple unit style tests. E.g. # earlier implementations 'broke up' collections that had more than # 2 levels of depth, because they displayed all 2nd-level tasks # before any 3rd-level ones. # The code must square that concern against "show shallow tasks # before deep ones" (vs straight up alpha sorting) expected = """Available tasks: shell (ipython) Load a REPL with project state already set up. test (run-tests) Run the test suite with baked-in args. build.all (build, build.everything) Build all necessary artifacts. build.c-ext (build.ext) Build our internal C extension. build.zap A silly way to clean. build.docs.all (build.docs) Build all doc formats. build.docs.html Build HTML output only. build.docs.pdf Build PDF output only. build.python.all (build.python) Build all Python packages. build.python.sdist Build classic style tar.gz. build.python.wheel Build a wheel. deploy.db (deploy.db-servers) Deploy to our database servers. deploy.everywhere (deploy) Deploy to all targets. deploy.web Update and bounce the webservers. provision.db Stand up one or more DB servers. provision.web Stand up a Web server. Default task: test """ stdout, _ = run("-c tree --list") assert expected == stdout class namespace_limiting: def argument_limits_display_to_given_namespace(self): stdout, _ = run("-c tree --list build") expected = """Available 'build' tasks: .all (.everything) Build all necessary artifacts. .c-ext (.ext) Build our internal C extension. .zap A silly way to clean. .docs.all (.docs) Build all doc formats. 
.docs.html Build HTML output only. .docs.pdf Build PDF output only. .python.all (.python) Build all Python packages. .python.sdist Build classic style tar.gz. .python.wheel Build a wheel. Default 'build' task: .all """ assert expected == stdout def argument_may_be_a_nested_namespace(self): stdout, _ = run("-c tree --list build.docs") expected = """Available 'build.docs' tasks: .all Build all doc formats. .html Build HTML output only. .pdf Build PDF output only. Default 'build.docs' task: .all """ assert expected == stdout def empty_namespaces_say_no_tasks_in_namespace(self): # In other words, outer namespace may not be empty, but the # inner one is - this should act just like when there is no # namespace explicitly requested and there's no tasks. # TODO: should the name in the error message be the fully # qualified one instead? expect( "-c empty_subcollection -l subcollection", err="No tasks found in collection 'subcollection'!\n", # noqa ) def invalid_namespaces_exit_with_message(self): expect( "-c empty -l nope", err="Sub-collection 'nope' not found!\n", ) class depth_limiting: def limits_display_to_given_depth(self): # Base case: depth=1 aka "show me the namespaces" expected = """Available tasks (depth=1): shell (ipython) Load a REPL with project state already set up. test (run-tests) Run the test suite with baked-in args. build [3 tasks, 2 collections] Tasks for compiling static code and assets. deploy [3 tasks] How to deploy our code and configs. provision [2 tasks] System setup code. Default task: test """ stdout, _ = run("-c tree --list -F flat --list-depth 1") assert expected == stdout def non_base_case(self): # Middle case: depth=2 expected = """Available tasks (depth=2): shell (ipython) Load a REPL with project state already set up. test (run-tests) Run the test suite with baked-in args. build.all (build, build.everything) Build all necessary artifacts. build.c-ext (build.ext) Build our internal C extension. build.zap A silly way to clean. build.docs [3 tasks] Tasks for managing Sphinx docs. build.python [3 tasks] PyPI/etc distribution artifacts. deploy.db (deploy.db-servers) Deploy to our database servers. deploy.everywhere (deploy) Deploy to all targets. deploy.web Update and bounce the webservers. provision.db Stand up one or more DB servers. provision.web Stand up a Web server. Default task: test """ stdout, _ = run("-c tree --list --list-depth=2") assert expected == stdout def depth_can_be_deeper_than_real_depth(self): # Edge case: depth > actual depth = same as no depth arg expected = """Available tasks (depth=5): shell (ipython) Load a REPL with project state already set up. test (run-tests) Run the test suite with baked-in args. build.all (build, build.everything) Build all necessary artifacts. build.c-ext (build.ext) Build our internal C extension. build.zap A silly way to clean. build.docs.all (build.docs) Build all doc formats. build.docs.html Build HTML output only. build.docs.pdf Build PDF output only. build.python.all (build.python) Build all Python packages. build.python.sdist Build classic style tar.gz. build.python.wheel Build a wheel. deploy.db (deploy.db-servers) Deploy to our database servers. deploy.everywhere (deploy) Deploy to all targets. deploy.web Update and bounce the webservers. provision.db Stand up one or more DB servers. provision.web Stand up a Web server. 
Default task: test """ stdout, _ = run("-c tree --list --list-depth=5") assert expected == stdout def works_with_explicit_namespace(self): expected = """Available 'build' tasks (depth=1): .all (.everything) Build all necessary artifacts. .c-ext (.ext) Build our internal C extension. .zap A silly way to clean. .docs [3 tasks] Tasks for managing Sphinx docs. .python [3 tasks] PyPI/etc distribution artifacts. Default 'build' task: .all """ stdout, _ = run("-c tree --list build --list-depth=1") assert expected == stdout def short_flag_is_D(self): expected = """Available tasks (depth=1): shell (ipython) Load a REPL with project state already set up. test (run-tests) Run the test suite with baked-in args. build [3 tasks, 2 collections] Tasks for compiling static code and assets. deploy [3 tasks] How to deploy our code and configs. provision [2 tasks] System setup code. Default task: test """ stdout, _ = run("-c tree --list --list-format=flat -D 1") assert expected == stdout def depth_of_zero_is_same_as_max_depth(self): expected = """Available tasks: shell (ipython) Load a REPL with project state already set up. test (run-tests) Run the test suite with baked-in args. build.all (build, build.everything) Build all necessary artifacts. build.c-ext (build.ext) Build our internal C extension. build.zap A silly way to clean. build.docs.all (build.docs) Build all doc formats. build.docs.html Build HTML output only. build.docs.pdf Build PDF output only. build.python.all (build.python) Build all Python packages. build.python.sdist Build classic style tar.gz. build.python.wheel Build a wheel. deploy.db (deploy.db-servers) Deploy to our database servers. deploy.everywhere (deploy) Deploy to all targets. deploy.web Update and bounce the webservers. provision.db Stand up one or more DB servers. provision.web Stand up a Web server. Default task: test """ stdout, _ = run("-c tree --list --list-format=flat -D 0") assert expected == stdout class format: def flat_is_legacy_default_format(self): # Sanity test that --list --list-format=flat is the same as the # old "just --list". expected = """Available tasks: shell (ipython) Load a REPL with project state already set up. test (run-tests) Run the test suite with baked-in args. build.all (build, build.everything) Build all necessary artifacts. build.c-ext (build.ext) Build our internal C extension. build.zap A silly way to clean. build.docs.all (build.docs) Build all doc formats. build.docs.html Build HTML output only. build.docs.pdf Build PDF output only. build.python.all (build.python) Build all Python packages. build.python.sdist Build classic style tar.gz. build.python.wheel Build a wheel. deploy.db (deploy.db-servers) Deploy to our database servers. deploy.everywhere (deploy) Deploy to all targets. deploy.web Update and bounce the webservers. provision.db Stand up one or more DB servers. provision.web Stand up a Web server. Default task: test """ stdout, _ = run("-c tree --list --list-format=flat") assert expected == stdout class nested: def base_case(self): expected = """Available tasks ('*' denotes collection defaults): shell (ipython) Load a REPL with project state already set up. test* (run-tests) Run the test suite with baked-in args. build Tasks for compiling static code and assets. .all* (.everything) Build all necessary artifacts. .c-ext (.ext) Build our internal C extension. .zap A silly way to clean. .docs Tasks for managing Sphinx docs. .all* Build all doc formats. .html Build HTML output only. .pdf Build PDF output only. 
.python PyPI/etc distribution artifacts. .all* Build all Python packages. .sdist Build classic style tar.gz. .wheel Build a wheel. deploy How to deploy our code and configs. .db (.db-servers) Deploy to our database servers. .everywhere* Deploy to all targets. .web Update and bounce the webservers. provision System setup code. .db Stand up one or more DB servers. .web Stand up a Web server. Default task: test """ stdout, _ = run("-c tree -l -F nested") assert expected == stdout def honors_namespace_arg_to_list(self): stdout, _ = run("-c tree --list build -F nested") expected = """Available 'build' tasks ('*' denotes collection defaults): .all* (.everything) Build all necessary artifacts. .c-ext (.ext) Build our internal C extension. .zap A silly way to clean. .docs Tasks for managing Sphinx docs. .all* Build all doc formats. .html Build HTML output only. .pdf Build PDF output only. .python PyPI/etc distribution artifacts. .all* Build all Python packages. .sdist Build classic style tar.gz. .wheel Build a wheel. Default 'build' task: .all """ assert expected == stdout def honors_depth_arg(self): expected = """Available tasks (depth=2; '*' denotes collection defaults): shell (ipython) Load a REPL with project state already set up. test* (run-tests) Run the test suite with baked-in args. build Tasks for compiling static code and assets. .all* (.everything) Build all necessary artifacts. .c-ext (.ext) Build our internal C extension. .zap A silly way to clean. .docs [3 tasks] Tasks for managing Sphinx docs. .python [3 tasks] PyPI/etc distribution artifacts. deploy How to deploy our code and configs. .db (.db-servers) Deploy to our database servers. .everywhere* Deploy to all targets. .web Update and bounce the webservers. provision System setup code. .db Stand up one or more DB servers. .web Stand up a Web server. Default task: test """ stdout, _ = run("-c tree -l -F nested --list-depth 2") assert expected == stdout def depth_arg_deeper_than_real_depth(self): expected = """Available tasks (depth=5; '*' denotes collection defaults): shell (ipython) Load a REPL with project state already set up. test* (run-tests) Run the test suite with baked-in args. build Tasks for compiling static code and assets. .all* (.everything) Build all necessary artifacts. .c-ext (.ext) Build our internal C extension. .zap A silly way to clean. .docs Tasks for managing Sphinx docs. .all* Build all doc formats. .html Build HTML output only. .pdf Build PDF output only. .python PyPI/etc distribution artifacts. .all* Build all Python packages. .sdist Build classic style tar.gz. .wheel Build a wheel. deploy How to deploy our code and configs. .db (.db-servers) Deploy to our database servers. .everywhere* Deploy to all targets. .web Update and bounce the webservers. provision System setup code. .db Stand up one or more DB servers. .web Stand up a Web server. Default task: test """ stdout, _ = run("-c tree -l -F nested --list-depth 5") assert expected == stdout def all_possible_options(self): expected = """Available 'build' tasks (depth=1; '*' denotes collection defaults): .all* (.everything) Build all necessary artifacts. .c-ext (.ext) Build our internal C extension. .zap A silly way to clean. .docs [3 tasks] Tasks for managing Sphinx docs. .python [3 tasks] PyPI/etc distribution artifacts. Default 'build' task: .all """ stdout, _ = run("-c tree -l build -F nested -D1") assert expected == stdout # TODO: having these in each format smells like a POSSIBLY good # use for parameterized tests... 
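            # A hypothetical sketch of that idea (assumes plain pytest
            # parametrization cooperates with pytest-relaxed's class-based
            # collection; names below are illustrative, not part of the
            # suite):
            #
            #   @pytest.mark.parametrize("fmt", ["flat", "nested", "json"])
            #   def empty_namespace_messages(fmt):
            #       expect(
            #           "-c empty_subcollection -l subcollection -F " + fmt,
            #           err="No tasks found in collection 'subcollection'!\n",
            #       )
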
def empty_namespaces_say_no_tasks_in_namespace(self): expect( "-c empty_subcollection -l subcollection -F nested", err="No tasks found in collection 'subcollection'!\n", ) def invalid_namespaces_exit_with_message(self): expect( "-c empty -l nope -F nested", err="Sub-collection 'nope' not found!\n", ) class json: def setup_method(self): # Stored expected data as an actual JSON file cuz it's big # & looks like crap if inlined. Plus by round-tripping it # we remove the pretty-printing. Win-win? self.tree = json.loads(support_file("tree.json")) self.by_name = { x["name"]: x for x in self.tree["collections"] } def base_case(self): stdout, _ = run("-c tree --list --list-format=json") assert self.tree == json.loads(stdout) def honors_namespace_arg_to_list(self): stdout, _ = run("-c tree --list deploy --list-format=json") expected = self.by_name["deploy"] assert expected == json.loads(stdout) def does_not_honor_depth_arg(self): _, stderr = run("-c tree -l --list-format json -D 2") expected = "The --list-depth option is not supported with JSON format!\n" # noqa assert expected == stderr def does_not_honor_depth_arg_even_with_namespace(self): _, stderr = run("-c tree -l build -F json -D 2") expected = "The --list-depth option is not supported with JSON format!\n" # noqa assert expected == stderr # TODO: should an empty-but-valid namespace in JSON format # actually just be an empty dict instead? Let's stay consistent # with the other formats for now, but... def empty_namespaces_say_no_tasks_in_namespace(self): expect( "-c empty_subcollection -l subcollection -F nested", err="No tasks found in collection 'subcollection'!\n", # noqa ) # NOTE: this should probably still exit with a message even if # the previous test re: valid-but-empty is determined to want a # non-error behavior. def invalid_namespaces_exit_with_message(self): expect( "-c empty -l nope -F nested", err="Sub-collection 'nope' not found!\n", ) class run_options: "run() related CLI flags affect 'run' config values" def _test_flag(self, flag, key, value=True): p = Program() p.execute = Mock() # neuter p.run("inv {} foo".format(flag)) assert p.config.run[key] == value def warn_only(self): self._test_flag("-w", "warn") def pty(self): self._test_flag("-p", "pty") def hide(self): self._test_flag("--hide both", "hide", value="both") def echo(self): self._test_flag("-e", "echo") def timeout(self): for flag in ("-T", "--command-timeout"): p = Program() p.execute = Mock() # neuter p.run("inv {} 5 foo".format(flag)) assert p.config.timeouts.command == 5 class configuration: "Configuration-related concerns" def _klass(self): # Pauper's mock that can honor .tasks.collection_name (Loader # looks in the config for this by default.) instance_mock = Mock( tasks=Mock(collection_name="whatever", search_root="meh") ) return Mock(return_value=instance_mock) @trap def config_class_init_kwarg_is_honored(self): klass = self._klass() Program(config_class=klass).run("myapp foo", exit=False) # Don't care about actual args... assert len(klass.call_args_list) == 1 @trap def config_attribute_is_memoized(self): klass = self._klass() # Can't .config without .run (meh); .run calls .config once. p = Program(config_class=klass) p.run("myapp foo", exit=False) assert klass.call_count == 1 # Second access should use cached value p.config assert klass.call_count == 1 # NOTE: these tests all rely on the invoked tasks to perform the # necessary asserts. # TODO: can probably tighten these up to assert things about # Program.config instead? 
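    # One possible tightened spelling (illustrative sketch only; assumes the
    # loaded values remain visible on Program.config after run()):
    #
    #   @trap
    #   def project_conf_values_visible_on_program_config(self):
    #       p = Program()
    #       with cd(os.path.join("configs", "yaml")):
    #           p.run("inv -c explicit mytask", exit=False)
    #       assert p.config.tasks.collection_name is not None
    #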
def per_project_config_files_are_loaded_before_task_parsing(self): # Relies on auto_dash_names being loaded at project-conf level; # fixes #467; when bug present, project conf is loaded _after_ # attempt to parse tasks, causing explosion when i_have_underscores # is only sent to parser as i-have-underscores. with cd(os.path.join("configs", "underscores")): expect("i_have_underscores") def per_project_config_files_load_with_explicit_ns(self): # Re: #234 with cd(os.path.join("configs", "yaml")): expect("-c explicit mytask") class runtime_config_file: def can_be_set_via_cli_option(self): with cd("configs"): expect("-c runtime -f yaml/invoke.yaml mytask") def can_be_set_via_env(self, reset_environ): os.environ["INVOKE_RUNTIME_CONFIG"] = "yaml/invoke.yaml" with cd("configs"): expect("-c runtime mytask") def cli_option_wins_over_env(self, reset_environ): # Set env var to load the JSON config instead of the YAML one, # which contains a "json" string internally. os.environ["INVOKE_RUNTIME_CONFIG"] = "json/invoke.json" with cd("configs"): # But run the default test task, which expects a "yaml" # string. If the env var won, this would explode. expect("-c runtime -f yaml/invoke.yaml mytask") def tasks_dedupe_honors_configuration(self): # Kinda-sorta duplicates some tests in executor.py, but eh. with cd("configs"): # Runtime conf file expect( "-c integration -f no-dedupe.yaml biz", out=""" foo foo bar biz post1 post2 post2 """.lstrip(), ) # Flag beats runtime expect( "-c integration -f dedupe.yaml --no-dedupe biz", out=""" foo foo bar biz post1 post2 post2 """.lstrip(), ) # * debug (top level?) # * hide (run.hide...lol) # * pty (run.pty) # * warn (run.warn) def env_vars_load_with_prefix(self, monkeypatch): monkeypatch.setenv("INVOKE_RUN_ECHO", "1") expect("-c contextualized check-echo") def env_var_prefix_can_be_overridden(self, monkeypatch): monkeypatch.setenv("MYAPP_RUN_HIDE", "both") # This forces the execution stuff, including Executor, to run # NOTE: it's not really possible to rework the impl so this test is # cleaner - tasks require per-task/per-collection config, which can # only be realized at the time a given task is to be executed. # Unless we overhaul the Program/Executor relationship so Program # does more of the heavy lifting re: task lookup/load/etc... # NOTE: check-hide will kaboom if its context's run.hide is not set # to True (default False). 
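        # For orientation: with env_prefix "MYAPP", the environment variable
        # MYAPP_RUN_HIDE="both" maps onto the config path run.hide, i.e.
        # roughly the shell-level equivalent of:
        #
        #   MYAPP_RUN_HIDE=both inv -c contextualized check-hide
        #
        # (illustration only; the code below does the same thing via
        # monkeypatch plus a custom Config subclass.)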
class MyConf(Config): env_prefix = "MYAPP" p = Program(config_class=MyConf) p.run("inv -c contextualized check-hide") class other_behavior: @patch("invoke.program.getpass.getpass") def sudo_prompt_up_front(self, getpass): getpass.return_value = "mypassword" # Task under test makes expectations re: sudo config (doesn't # actually even sudo, sudo's use of config is tested in Config # tests) with support_path(): try: Program().run( "inv --prompt-for-sudo-password -c sudo_prompt expect-config" # noqa ) except SystemExit as e: # If inner call failed, we'll already have seen its output, # and this will just ensure we ourselves are marked failed assert e.code == 0 # Sanity check that getpass spat out desired prompt prompt = "Desired 'sudo.password' config value: " getpass.assert_called_once_with(prompt) invoke-2.2.0/tests/runners.py000066400000000000000000002116701445356551000162350ustar00rootroot00000000000000import errno import os import signal import struct import sys import termios import threading import types from io import StringIO from io import BytesIO from itertools import chain, repeat from pytest import raises, skip from pytest_relaxed import trap from unittest.mock import patch, Mock, call from invoke import ( CommandTimedOut, Config, Context, Failure, Local, Promise, Responder, Result, Runner, StreamWatcher, SubprocessPipeError, ThreadException, UnexpectedExit, WatcherError, ) from invoke.runners import default_encoding from invoke.terminals import WINDOWS from _util import ( mock_subprocess, mock_pty, skip_if_windows, _Dummy, _KeyboardInterruptingRunner, OhNoz, _, ) class _RaisingWatcher(StreamWatcher): def submit(self, stream): raise WatcherError("meh") class _GenericException(Exception): pass class _GenericExceptingRunner(_Dummy): def wait(self): raise _GenericException def _run(*args, **kwargs): klass = kwargs.pop("klass", _Dummy) settings = kwargs.pop("settings", {}) context = Context(config=Config(overrides=settings)) return klass(context).run(*args, **kwargs) def _runner(out="", err="", **kwargs): klass = kwargs.pop("klass", _Dummy) runner = klass(Context(config=Config(overrides=kwargs))) if "exits" in kwargs: runner.returncode = Mock(return_value=kwargs.pop("exits")) out_file = BytesIO(out.encode()) err_file = BytesIO(err.encode()) runner.read_proc_stdout = out_file.read runner.read_proc_stderr = err_file.read return runner def _expect_platform_shell(shell): if WINDOWS: assert shell.endswith("cmd.exe") else: assert shell == "/bin/bash" def _make_tcattrs(cc_is_ints=True, echo=False): # Set up the control character sub-array; it's technically platform # dependent so we need to be dynamic. # NOTE: setting this up so we can test both potential values for # the 'cc' members...docs say ints, reality says one-byte # bytestrings... cc_base = [None] * (max(termios.VMIN, termios.VTIME) + 1) cc_ints, cc_bytes = cc_base.copy(), cc_base.copy() cc_ints[termios.VMIN], cc_ints[termios.VTIME] = 1, 0 cc_bytes[termios.VMIN], cc_bytes[termios.VTIME] = b"\x01", b"\x00" # Set tcgetattr to look like it's already cbroken... attrs = [ # iflag, oflag, cflag - don't care None, None, None, # lflag needs to have ECHO and ICANON unset ~(termios.ECHO | termios.ICANON), # ispeed, ospeed - don't care None, None, # cc - care about its VMIN and VTIME members. 
cc_ints if cc_is_ints else cc_bytes, ] # Undo the ECHO unset if caller wants this to look like a non-cbroken term if echo: attrs[3] = attrs[3] | termios.ECHO return attrs class _TimingOutRunner(_Dummy): @property def timed_out(self): return True class Runner_: _stop_methods = ["generate_result", "stop"] # NOTE: these copies of _run and _runner form the base case of "test Runner # subclasses via self._run/_runner helpers" functionality. See how e.g. # Local_ uses the same approach but bakes in the dummy class used. def _run(self, *args, **kwargs): return _run(*args, **kwargs) def _runner(self, *args, **kwargs): return _runner(*args, **kwargs) def _mock_stdin_writer(self): """ Return new _Dummy subclass whose write_proc_stdin() method is a mock. """ class MockedStdin(_Dummy): pass MockedStdin.write_proc_stdin = Mock() return MockedStdin class init: "__init__" def takes_a_context_instance(self): c = Context() assert Runner(c).context == c def context_instance_is_required(self): with raises(TypeError): Runner() class run: def handles_invalid_kwargs_like_any_other_function(self): try: self._run(_, nope_noway_nohow="as if") except TypeError as e: assert "got an unexpected keyword argument" in str(e) else: assert False, "Invalid run() kwarg didn't raise TypeError" class warn: def honors_config(self): runner = self._runner(run={"warn": True}, exits=1) # Doesn't raise Failure -> all good runner.run(_) def kwarg_beats_config(self): runner = self._runner(run={"warn": False}, exits=1) # Doesn't raise Failure -> all good runner.run(_, warn=True) def does_not_apply_to_watcher_errors(self): runner = self._runner(out="stuff") try: watcher = _RaisingWatcher() runner.run(_, watchers=[watcher], warn=True, hide=True) except Failure as e: assert isinstance(e.reason, WatcherError) else: assert False, "Did not raise Failure for WatcherError!" 
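
        # For reference: the try/except/else-assert-False pattern above is
        # this suite's idiom for "the call MUST raise"; a rough pytest
        # equivalent (sketch only) would be:
        #
        #   with raises(Failure) as info:
        #       runner.run(_, watchers=[watcher], warn=True, hide=True)
        #   assert isinstance(info.value.reason, WatcherError)
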
        def does_not_apply_to_timeout_errors(self):
            with raises(CommandTimedOut):
                self._runner(klass=_TimingOutRunner).run(
                    _, timeout=1, warn=True
                )

    class hide:
        @trap
        def honors_config(self):
            runner = self._runner(out="stuff", run={"hide": True})
            r = runner.run(_)
            assert r.stdout == "stuff"
            assert sys.stdout.getvalue() == ""

        @trap
        def kwarg_beats_config(self):
            runner = self._runner(out="stuff")
            r = runner.run(_, hide=True)
            assert r.stdout == "stuff"
            assert sys.stdout.getvalue() == ""

    class pty:
        def pty_defaults_to_off(self):
            assert self._run(_).pty is False

        def honors_config(self):
            runner = self._runner(run={"pty": True})
            assert runner.run(_).pty is True

        def kwarg_beats_config(self):
            runner = self._runner(run={"pty": False})
            assert runner.run(_, pty=True).pty is True

    class shell:
        def defaults_to_bash_or_cmdexe_when_pty_True(self):
            _expect_platform_shell(self._run(_, pty=True).shell)

        def defaults_to_bash_or_cmdexe_when_pty_False(self):
            _expect_platform_shell(self._run(_, pty=False).shell)

        def may_be_overridden(self):
            assert self._run(_, shell="/bin/zsh").shell == "/bin/zsh"

        def may_be_configured(self):
            runner = self._runner(run={"shell": "/bin/tcsh"})
            assert runner.run(_).shell == "/bin/tcsh"

        def kwarg_beats_config(self):
            runner = self._runner(run={"shell": "/bin/tcsh"})
            assert runner.run(_, shell="/bin/zsh").shell == "/bin/zsh"

    class env:
        def defaults_to_os_environ(self):
            assert self._run(_).env == os.environ

        def updates_when_dict_given(self):
            expected = dict(os.environ, FOO="BAR")
            assert self._run(_, env={"FOO": "BAR"}).env == expected

        def replaces_when_replace_env_True(self):
            env = self._run(_, env={"JUST": "ME"}, replace_env=True).env
            assert env == {"JUST": "ME"}

        def config_can_be_used(self):
            env = self._run(_, settings={"run": {"env": {"FOO": "BAR"}}}).env
            assert env == dict(os.environ, FOO="BAR")

        def kwarg_wins_over_config(self):
            settings = {"run": {"env": {"FOO": "BAR"}}}
            kwarg = {"FOO": "NOTBAR"}
            foo = self._run(_, settings=settings, env=kwarg).env["FOO"]
            assert foo == "NOTBAR"

    class return_value:
        def return_code(self):
            """
            Result has .return_code (and .exited) containing exit code int
            """
            runner = self._runner(exits=17)
            r = runner.run(_, warn=True)
            assert r.return_code == 17
            assert r.exited == 17

        def ok_attr_indicates_success(self):
            runner = self._runner()
            assert runner.run(_).ok is True  # default dummy retval is 0

        def ok_attr_indicates_failure(self):
            runner = self._runner(exits=1)
            assert runner.run(_, warn=True).ok is False

        def failed_attr_indicates_success(self):
            runner = self._runner()
            assert runner.run(_).failed is False  # default dummy retval is 0

        def failed_attr_indicates_failure(self):
            runner = self._runner(exits=1)
            assert runner.run(_, warn=True).failed is True

        @trap
        def stdout_attribute_contains_stdout(self):
            runner = self._runner(out="foo")
            assert runner.run(_).stdout == "foo"
            assert sys.stdout.getvalue() == "foo"

        @trap
        def stderr_attribute_contains_stderr(self):
            runner = self._runner(err="foo")
            assert runner.run(_).stderr == "foo"
            assert sys.stderr.getvalue() == "foo"

        def whether_pty_was_used(self):
            assert self._run(_).pty is False
            assert self._run(_, pty=True).pty is True

        def command_executed(self):
            assert self._run(_).command == _

        def shell_used(self):
            _expect_platform_shell(self._run(_).shell)

        def hide_param_exposed_and_normalized(self):
            # hide=True normalizes to the full ('stdout', 'stderr') tuple
            assert self._run(_, hide=True).hide == ("stdout", "stderr")
            assert self._run(_, hide=False).hide == tuple()
            assert self._run(_, hide="stderr").hide == ("stderr",)

    class command_echoing:
        @trap
        def off_by_default(self):
            self._run("my command")
assert sys.stdout.getvalue() == "" @trap def enabled_via_kwarg(self): self._run("my command", echo=True) assert "my command" in sys.stdout.getvalue() @trap def enabled_via_config(self): self._run("yup", settings={"run": {"echo": True}}) assert "yup" in sys.stdout.getvalue() @trap def kwarg_beats_config(self): self._run("yup", echo=True, settings={"run": {"echo": False}}) assert "yup" in sys.stdout.getvalue() @trap def uses_ansi_bold(self): self._run("my command", echo=True) # TODO: vendor & use a color module assert sys.stdout.getvalue() == "\x1b[1;37mmy command\x1b[0m\n" @trap def uses_custom_format(self): self._run( "my command", echo=True, settings={"run": {"echo_format": "AA{command}ZZ"}}, ) assert sys.stdout.getvalue() == "AAmy commandZZ\n" class dry_running: @trap def sets_echo_to_True(self): self._run("what up", settings={"run": {"dry": True}}) assert "what up" in sys.stdout.getvalue() @trap def short_circuits_with_dummy_result(self): runner = self._runner(run={"dry": True}) # Using the call to self.start() in _run_body() as a sentinel for # all the work beyond it. runner.start = Mock() result = runner.run(_) assert not runner.start.called assert isinstance(result, Result) assert result.command == _ assert result.stdout == "" assert result.stderr == "" assert result.exited == 0 assert result.pty is False class encoding: # NOTE: these tests just check what Runner.encoding ends up as; it's # difficult/impossible to mock string objects themselves to see what # .decode() is being given :( # # TODO: consider using truly "nonstandard"-encoded byte sequences as # fixtures, encoded with something that isn't compatible with UTF-8 # (UTF-7 kinda is, so...) so we can assert that the decoded string is # equal to its Unicode equivalent. # # Use UTF-7 as a valid encoding unlikely to be a real default derived # from test-runner's locale.getpreferredencoding() def defaults_to_encoding_method_result(self): # Setup runner = self._runner() encoding = "UTF-7" runner.default_encoding = Mock(return_value=encoding) # Execution & assertion runner.run(_) runner.default_encoding.assert_called_with() assert runner.encoding == "UTF-7" def honors_config(self): c = Context(Config(overrides={"run": {"encoding": "UTF-7"}})) runner = _Dummy(c) runner.default_encoding = Mock(return_value="UTF-not-7") runner.run(_) assert runner.encoding == "UTF-7" def honors_kwarg(self): skip() def uses_locale_module_for_default_encoding(self): # Actually testing this highly OS/env specific stuff is very # error-prone; so we degrade to just testing expected function # calls for now :( with patch("invoke.runners.locale") as fake_locale: fake_locale.getdefaultlocale.return_value = ("meh", "UHF-8") fake_locale.getpreferredencoding.return_value = "FALLBACK" assert self._runner().default_encoding() == "FALLBACK" def falls_back_to_defaultlocale_when_preferredencoding_is_None(self): with patch("invoke.runners.locale") as fake_locale: fake_locale.getdefaultlocale.return_value = (None, None) fake_locale.getpreferredencoding.return_value = "FALLBACK" assert self._runner().default_encoding() == "FALLBACK" class output_hiding: @trap def _expect_hidden(self, hide, expect_out="", expect_err=""): self._runner(out="foo", err="bar").run(_, hide=hide) assert sys.stdout.getvalue() == expect_out assert sys.stderr.getvalue() == expect_err def both_hides_everything(self): self._expect_hidden("both") def True_hides_everything(self): self._expect_hidden(True) def out_only_hides_stdout(self): self._expect_hidden("out", expect_out="", expect_err="bar") def 
err_only_hides_stderr(self): self._expect_hidden("err", expect_out="foo", expect_err="") def accepts_stdout_alias_for_out(self): self._expect_hidden("stdout", expect_out="", expect_err="bar") def accepts_stderr_alias_for_err(self): self._expect_hidden("stderr", expect_out="foo", expect_err="") def None_hides_nothing(self): self._expect_hidden(None, expect_out="foo", expect_err="bar") def False_hides_nothing(self): self._expect_hidden(False, expect_out="foo", expect_err="bar") def unknown_vals_raises_ValueError(self): with raises(ValueError): self._run(_, hide="wat?") def unknown_vals_mention_value_given_in_error(self): value = "penguinmints" try: self._run(_, hide=value) except ValueError as e: msg = "Error from run(hide=xxx) did not tell user what the bad value was!" # noqa msg += "\nException msg: {}".format(e) assert value in str(e), msg else: assert ( False ), "run() did not raise ValueError for bad hide= value" # noqa def does_not_affect_capturing(self): assert self._runner(out="foo").run(_, hide=True).stdout == "foo" @trap def overrides_echoing(self): self._runner().run("invisible", hide=True, echo=True) assert "invisible" not in sys.stdout.getvalue() class output_stream_overrides: @trap def out_defaults_to_sys_stdout(self): "out_stream defaults to sys.stdout" self._runner(out="sup").run(_) assert sys.stdout.getvalue() == "sup" @trap def err_defaults_to_sys_stderr(self): "err_stream defaults to sys.stderr" self._runner(err="sup").run(_) assert sys.stderr.getvalue() == "sup" @trap def out_can_be_overridden(self): "out_stream can be overridden" out = StringIO() self._runner(out="sup").run(_, out_stream=out) assert out.getvalue() == "sup" assert sys.stdout.getvalue() == "" @trap def overridden_out_is_never_hidden(self): out = StringIO() self._runner(out="sup").run(_, out_stream=out, hide=True) assert out.getvalue() == "sup" assert sys.stdout.getvalue() == "" @trap def err_can_be_overridden(self): "err_stream can be overridden" err = StringIO() self._runner(err="sup").run(_, err_stream=err) assert err.getvalue() == "sup" assert sys.stderr.getvalue() == "" @trap def overridden_err_is_never_hidden(self): err = StringIO() self._runner(err="sup").run(_, err_stream=err, hide=True) assert err.getvalue() == "sup" assert sys.stderr.getvalue() == "" @trap def pty_defaults_to_sys(self): self._runner(out="sup").run(_, pty=True) assert sys.stdout.getvalue() == "sup" @trap def pty_out_can_be_overridden(self): out = StringIO() self._runner(out="yo").run(_, pty=True, out_stream=out) assert out.getvalue() == "yo" assert sys.stdout.getvalue() == "" class output_stream_handling: # Mostly corner cases, generic behavior's covered above def writes_and_flushes_to_stdout(self): out = Mock(spec=StringIO) self._runner(out="meh").run(_, out_stream=out) out.write.assert_called_once_with("meh") out.flush.assert_called_once_with() def writes_and_flushes_to_stderr(self): err = Mock(spec=StringIO) self._runner(err="whatever").run(_, err_stream=err) err.write.assert_called_once_with("whatever") err.flush.assert_called_once_with() class input_stream_handling: # NOTE: actual autoresponder tests are elsewhere. These just test that # stdin works normally & can be overridden. @patch("invoke.runners.sys.stdin", StringIO("Text!")) def defaults_to_sys_stdin(self): # Execute w/ runner class that has a mocked stdin_writer klass = self._mock_stdin_writer() self._runner(klass=klass).run(_, out_stream=StringIO()) # Check that mocked writer was called w/ the data from our patched # sys.stdin. 
            # NOTE: this also tests that non-fileno-bearing streams read/write
            # 1 byte at a time. See farther-down test for fileno-bearing stdin
            calls = list(map(lambda x: call(x), "Text!"))
            klass.write_proc_stdin.assert_has_calls(calls, any_order=False)

        def can_be_overridden(self):
            klass = self._mock_stdin_writer()
            in_stream = StringIO("Hey, listen!")
            self._runner(klass=klass).run(
                _, in_stream=in_stream, out_stream=StringIO()
            )
            # stdin mirroring occurs char-by-char
            calls = list(map(lambda x: call(x), "Hey, listen!"))
            klass.write_proc_stdin.assert_has_calls(calls, any_order=False)

        def can_be_disabled_entirely(self):
            # Mock handle_stdin so we can assert it's not even called
            class MockedHandleStdin(_Dummy):
                pass

            MockedHandleStdin.handle_stdin = Mock()
            self._runner(klass=MockedHandleStdin).run(
                _, in_stream=False  # vs None or a stream
            )
            assert not MockedHandleStdin.handle_stdin.called

        @patch("invoke.util.debug")
        def exceptions_get_logged(self, mock_debug):
            # Make write_proc_stdin asplode
            klass = self._mock_stdin_writer()
            klass.write_proc_stdin.side_effect = OhNoz("oh god why")
            # Execute with some stdin to trigger that asplode (but skip the
            # actual bubbled-up raising of it so we can check things out)
            try:
                stdin = StringIO("non-empty")
                self._runner(klass=klass).run(_, in_stream=stdin)
            except ThreadException:
                pass
            # Assert debug() was called w/ expected format
            # TODO: make the debug call a method on ExceptionHandlingThread,
            # then make thread class configurable somewhere in Runner, and pass
            # in a customized ExceptionHandlingThread that has a Mock for that
            # method?
            # NOTE: splitting into a few asserts to work around python 3.7
            # change re: trailing comma, which kills ability to just statically
            # assert the entire string. Sigh. Also I'm too lazy to regex.
            msg = mock_debug.call_args[0][0]
            assert "Encountered exception OhNoz" in msg
            assert "'oh god why'" in msg
            assert "in thread for 'handle_stdin'" in msg

        def EOF_triggers_closing_of_proc_stdin(self):
            class Fake(_Dummy):
                pass

            Fake.close_proc_stdin = Mock()
            self._runner(klass=Fake).run(_, in_stream=StringIO("what?"))
            Fake.close_proc_stdin.assert_called_once_with()

        def EOF_does_not_close_proc_stdin_when_pty_True(self):
            class Fake(_Dummy):
                pass

            Fake.close_proc_stdin = Mock()
            self._runner(klass=Fake).run(
                _, in_stream=StringIO("what?"), pty=True
            )
            assert not Fake.close_proc_stdin.called

        @patch("invoke.runners.sys.stdin")
        def EBADF_on_stdin_read_ignored(self, fake_stdin):
            # Issue #659: nohup is a jerk
            fake_stdin.read.side_effect = OSError(errno.EBADF, "Ugh")
            # No boom == fixed
            self._runner().run(_)

        @patch("invoke.runners.sys.stdin")
        def non_EBADF_on_stdin_read_not_ignored(self, fake_stdin):
            # Issue #659: nohup is a jerk, inverse case
            eio = OSError(errno.EIO, "lol")
            fake_stdin.read.side_effect = eio
            with raises(ThreadException) as info:
                self._runner().run(_)
            assert info.value.exceptions[0].value is eio

    class failure_handling:
        def fast_failures(self):
            with raises(UnexpectedExit):
                self._runner(exits=1).run(_)

        def non_1_return_codes_still_act_as_failure(self):
            r = self._runner(exits=17).run(_, warn=True)
            assert r.failed is True

        class Failure_repr:
            def defaults_to_just_class_and_command(self):
                expected = "<Failure: cmd='oh hecc'>"
                assert repr(Failure(Result(command="oh hecc"))) == expected

            def subclasses_may_add_more_kv_pairs(self):
                class TotalFailure(Failure):
                    def _repr(self, **kwargs):
                        return super()._repr(mood="dejected")

                expected = "<TotalFailure: cmd='onoz' mood=dejected>"
                assert repr(TotalFailure(Result(command="onoz"))) == expected

        class UnexpectedExit_repr:
            def similar_to_just_the_result_repr(self):
                try:
                    self._runner(exits=23).run(_)
                except UnexpectedExit as e:
                    expected = "<UnexpectedExit: cmd='{}' exited=23>"
                    assert repr(e) == expected.format(_)

        class UnexpectedExit_str:
            def setup_method(self):
                def lines(prefix):
                    prefixed = "\n".join(
                        "{} {}".format(prefix, x) for x in range(1, 26)
                    )
                    return prefixed + "\n"

                self._stdout = lines("stdout")
                self._stderr = lines("stderr")

            @trap
            def displays_command_and_exit_code_by_default(self):
                try:
                    self._runner(
                        exits=23, out=self._stdout, err=self._stderr
                    ).run(_)
                except UnexpectedExit as e:
                    expected = """Encountered a bad command exit code!

Command: '{}'

Exit code: 23

Stdout: already printed

Stderr: already printed

"""
                    assert str(e) == expected.format(_)
                else:
                    assert False, "Failed to raise UnexpectedExit!"

            @trap
            def does_not_display_stderr_when_pty_True(self):
                try:
                    self._runner(
                        exits=13, out=self._stdout, err=self._stderr
                    ).run(_, pty=True)
                except UnexpectedExit as e:
                    expected = """Encountered a bad command exit code!

Command: '{}'

Exit code: 13

Stdout: already printed

Stderr: n/a (PTYs have no stderr)

"""
                    assert str(e) == expected.format(_)

            @trap
            def pty_stderr_message_wins_over_hidden_stderr(self):
                try:
                    self._runner(
                        exits=1, out=self._stdout, err=self._stderr
                    ).run(_, pty=True, hide=True)
                except UnexpectedExit as e:
                    r = str(e)
                    assert "Stderr: n/a (PTYs have no stderr)" in r
                    assert "Stderr: already printed" not in r

            @trap
            def explicit_hidden_stream_tail_display(self):
                # All the permutations of what's displayed when, are in
                # subsequent test, which does 'x in y' assertions; this one
                # here ensures the actual format of the display (newlines, etc)
                # is as desired.
                try:
                    self._runner(
                        exits=77, out=self._stdout, err=self._stderr
                    ).run(_, hide=True)
                except UnexpectedExit as e:
                    expected = """Encountered a bad command exit code!

Command: '{}'

Exit code: 77

Stdout:

stdout 16
stdout 17
stdout 18
stdout 19
stdout 20
stdout 21
stdout 22
stdout 23
stdout 24
stdout 25

Stderr:

stderr 16
stderr 17
stderr 18
stderr 19
stderr 20
stderr 21
stderr 22
stderr 23
stderr 24
stderr 25

"""
                    assert str(e) == expected.format(_)

            @trap
            def displays_tails_of_streams_only_when_hidden(self):
                def oops(msg, r, hide):
                    return "{}! hide={}; str output:\n\n{}".format(
                        msg, hide, r
                    )

                for hide, expect_out, expect_err in (
                    (False, False, False),
                    (True, True, True),
                    ("stdout", True, False),
                    ("stderr", False, True),
                    ("both", True, True),
                ):
                    try:
                        self._runner(
                            exits=1, out=self._stdout, err=self._stderr
                        ).run(_, hide=hide)
                    except UnexpectedExit as e:
                        r = str(e)
                        # Expect that the top of output is never displayed
                        err = oops("Too much stdout found", r, hide)
                        assert "stdout 15" not in r, err
                        err = oops("Too much stderr found", r, hide)
                        assert "stderr 15" not in r, err
                        # Expect to see tail of stdout if we expected it
                        if expect_out:
                            err = oops("Didn't see stdout", r, hide)
                            assert "stdout 16" in r, err
                        # Expect to see tail of stderr if we expected it
                        if expect_err:
                            err = oops("Didn't see stderr", r, hide)
                            assert "stderr 16" in r, err
                    else:
                        assert False, "Failed to raise UnexpectedExit!"

        def _regular_error(self):
            self._runner(exits=1).run(_)

        def _watcher_error(self):
            klass = self._mock_stdin_writer()
            # Exited=None because real procs will have no useful .returncode()
            # result if they're aborted partway via an exception.
            runner = self._runner(klass=klass, out="stuff", exits=None)
            runner.run(_, watchers=[_RaisingWatcher()], hide=True)

        # TODO: may eventually turn into having Runner raise distinct Failure
        # subclasses itself, at which point `reason` would probably go away.
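        # Sketch of that hypothetical future shape (not implemented; the
        # subclass name below is invented for illustration): run() would
        # raise dedicated Failure subclasses directly, letting callers write
        #
        #   try:
        #       runner.run(_)
        #   except WatcherFailure:  # hypothetical
        #       ...
        #
        # instead of switching on the 'reason' attribute exercised below.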
class reason: def is_None_for_regular_nonzero_exits(self): try: self._regular_error() except Failure as e: assert e.reason is None else: assert False, "Failed to raise Failure!" def is_None_for_custom_command_exits(self): # TODO: when we implement 'exitcodes 1 and 2 are actually OK' skip() def is_exception_when_WatcherError_raised_internally(self): try: self._watcher_error() except Failure as e: assert isinstance(e.reason, WatcherError) else: assert False, "Failed to raise Failure!" # TODO: should these move elsewhere, eg to Result specific test file? # TODO: *is* there a nice way to split into multiple Response and/or # Failure subclasses? Given the split between "returned as a value when # no problem" and "raised as/attached to an exception when problem", # possibly not - complicates how the APIs need to be adhered to. class wrapped_result: def most_attrs_are_always_present(self): attrs = ("command", "shell", "env", "stdout", "stderr", "pty") for method in (self._regular_error, self._watcher_error): try: method() except Failure as e: for attr in attrs: assert getattr(e.result, attr) is not None else: assert False, "Did not raise Failure!" class shell_exit_failure: def exited_is_integer(self): try: self._regular_error() except Failure as e: assert isinstance(e.result.exited, int) else: assert False, "Did not raise Failure!" def ok_bool_etc_are_falsey(self): try: self._regular_error() except Failure as e: assert e.result.ok is False assert e.result.failed is True assert not bool(e.result) assert not e.result else: assert False, "Did not raise Failure!" def stringrep_notes_exit_status(self): try: self._regular_error() except Failure as e: assert "exited with status 1" in str(e.result) else: assert False, "Did not raise Failure!" class watcher_failure: def exited_is_None(self): try: self._watcher_error() except Failure as e: exited = e.result.exited err = "Expected None, got {!r}".format(exited) assert exited is None, err def ok_and_bool_still_are_falsey(self): try: self._watcher_error() except Failure as e: assert e.result.ok is False assert e.result.failed is True assert not bool(e.result) assert not e.result else: assert False, "Did not raise Failure!" def stringrep_lacks_exit_status(self): try: self._watcher_error() except Failure as e: assert "exited with status" not in str(e.result) expected = "not fully executed due to watcher error" assert expected in str(e.result) else: assert False, "Did not raise Failure!" class threading: # NOTE: see also the more generic tests in concurrency.py def errors_within_io_thread_body_bubble_up(self): class Oops(_Dummy): def handle_stdout(self, **kwargs): raise OhNoz() def handle_stderr(self, **kwargs): raise OhNoz() runner = Oops(Context()) try: runner.run("nah") except ThreadException as e: # Expect two separate OhNoz objects on 'e' assert len(e.exceptions) == 2 for tup in e.exceptions: assert isinstance(tup.value, OhNoz) assert isinstance(tup.traceback, types.TracebackType) assert tup.type == OhNoz # TODO: test the arguments part of the tuple too. It's pretty # implementation-specific, though, so possibly not worthwhile. else: assert False, "Did not raise ThreadException as expected!" def io_thread_errors_str_has_details(self): class Oops(_Dummy): def handle_stdout(self, **kwargs): raise OhNoz() runner = Oops(Context()) try: runner.run("nah") except ThreadException as e: message = str(e) # Just make sure salient bits appear present, vs e.g. default # representation happening instead. 
assert "Saw 1 exceptions within threads" in message assert "{'kwargs': " in message assert "Traceback (most recent call last):\n\n" in message assert "OhNoz" in message else: assert False, "Did not raise ThreadException as expected!" class watchers: # NOTE: it's initially tempting to consider using mocks or stub # Responder instances for many of these, but it really doesn't save # appreciable runtime or code read/write time. # NOTE: these strictly test interactions between # StreamWatcher/Responder and their host Runner; Responder-only tests # are in tests/watchers.py. def nothing_is_written_to_stdin_by_default(self): # NOTE: technically if some goofus ran the tests by hand and mashed # keys while doing so...this would fail. LOL? # NOTE: this test seems not too useful but is a) a sanity test and # b) guards against e.g. breaking the autoresponder such that it # responds to "" or "\n" or etc. klass = self._mock_stdin_writer() self._runner(klass=klass).run(_) assert not klass.write_proc_stdin.called def _expect_response(self, **kwargs): """ Execute a run() w/ ``watchers`` set from ``responses``. Any other ``**kwargs`` given are passed direct to ``_runner()``. :returns: The mocked ``write_proc_stdin`` method of the runner. """ watchers = [ Responder(pattern=key, response=value) for key, value in kwargs.pop("responses").items() ] kwargs["klass"] = klass = self._mock_stdin_writer() runner = self._runner(**kwargs) runner.run(_, watchers=watchers, hide=True) return klass.write_proc_stdin def watchers_responses_get_written_to_proc_stdin(self): self._expect_response( out="the house was empty", responses={"empty": "handed"} ).assert_called_once_with("handed") def multiple_hits_yields_multiple_responses(self): holla = call("how high?") self._expect_response( out="jump, wait, jump, wait", responses={"jump": "how high?"} ).assert_has_calls([holla, holla]) def chunk_sizes_smaller_than_patterns_still_work_ok(self): klass = self._mock_stdin_writer() klass.read_chunk_size = 1 # < len('jump') responder = Responder("jump", "how high?") runner = self._runner(klass=klass, out="jump, wait, jump, wait") runner.run(_, watchers=[responder], hide=True) holla = call("how high?") # Responses happened, period. klass.write_proc_stdin.assert_has_calls([holla, holla]) # And there weren't duplicates! assert len(klass.write_proc_stdin.call_args_list) == 2 def both_out_and_err_are_scanned(self): bye = call("goodbye") # Would only be one 'bye' if only scanning stdout self._expect_response( out="hello my name is inigo", err="hello how are you", responses={"hello": "goodbye"}, ).assert_has_calls([bye, bye]) def multiple_patterns_works_as_expected(self): calls = [call("betty"), call("carnival")] self._expect_response( out="beep boop I am a robot", responses={"boop": "betty", "robot": "carnival"}, ).assert_has_calls(calls, any_order=True) def multiple_patterns_across_both_streams(self): responses = { "boop": "betty", "robot": "carnival", "Destroy": "your ego", "humans": "are awful", } calls = map(lambda x: call(x), responses.values()) # CANNOT assume order due to simultaneous streams. 
# If we didn't say any_order=True we could get race condition fails self._expect_response( out="beep boop, I am a robot", err="Destroy all humans!", responses=responses, ).assert_has_calls(calls, any_order=True) def honors_watchers_config_option(self): klass = self._mock_stdin_writer() responder = Responder("my stdout", "and my axe") runner = self._runner( out="this is my stdout", # yielded stdout klass=klass, # mocked stdin writer run={"watchers": [responder]}, # ends up as config override ) runner.run(_, hide=True) klass.write_proc_stdin.assert_called_once_with("and my axe") def kwarg_overrides_config(self): # TODO: how to handle use cases where merging, not overriding, is # the expected/unsurprising default? probably another config-only # (not kwarg) setting, e.g. run.merge_responses? # TODO: now that this stuff is list, not dict, based, it should be # easier...BUT how to handle removal of defaults from config? Maybe # just document to be careful using the config as it won't _be_ # overridden? (Users can always explicitly set the config to be # empty-list if they want kwargs to be the entire set of # watchers...right?) klass = self._mock_stdin_writer() conf = Responder("my stdout", "and my axe") kwarg = Responder("my stdout", "and my body spray") runner = self._runner( out="this is my stdout", # yielded stdout klass=klass, # mocked stdin writer run={"watchers": [conf]}, # ends up as config override ) runner.run(_, hide=True, watchers=[kwarg]) klass.write_proc_stdin.assert_called_once_with("and my body spray") class io_sleeping: # NOTE: there's an explicit CPU-measuring test in the integration suite # which ensures the *point* of the sleeping - avoiding CPU hogging - is # actually functioning. These tests below just unit-test the mechanisms # around the sleep functionality (ensuring they are visible and can be # altered as needed). def input_sleep_attribute_defaults_to_hundredth_of_second(self): assert Runner(Context()).input_sleep == 0.01 @mock_subprocess() def subclasses_can_override_input_sleep(self): class MyRunner(_Dummy): input_sleep = 0.007 with patch("invoke.runners.time") as mock_time: MyRunner(Context()).run( _, in_stream=StringIO("foo"), out_stream=StringIO(), # null output to not pollute tests ) # Just make sure the first few sleeps all look good. Can't know # exact length of list due to stdin worker hanging out til end of # process. Still worth testing more than the first tho. assert mock_time.sleep.call_args_list[:3] == [call(0.007)] * 3 class stdin_mirroring: def _test_mirroring(self, expect_mirroring, **kwargs): # Setup fake_in = "I'm typing!" output = Mock() input_ = StringIO(fake_in) input_is_pty = kwargs.pop("in_pty", None) class MyRunner(_Dummy): def should_echo_stdin(self, input_, output): # Fake result of isatty() test here and only here; if we do # this farther up, it will affect stuff trying to run # termios & such, which is harder to mock successfully. 
if input_is_pty is not None: input_.isatty = lambda: input_is_pty return super().should_echo_stdin(input_, output) # Execute basic command with given parameters self._run( _, klass=MyRunner, in_stream=input_, out_stream=output, **kwargs ) # Examine mocked output stream to see if it was mirrored to if expect_mirroring: calls = output.write.call_args_list assert calls == list(map(lambda x: call(x), fake_in)) assert len(output.flush.call_args_list) == len(fake_in) # Or not mirrored to else: assert output.write.call_args_list == [] def when_pty_is_True_no_mirroring_occurs(self): self._test_mirroring(pty=True, expect_mirroring=False) def when_pty_is_False_we_write_in_stream_back_to_out_stream(self): self._test_mirroring(pty=False, in_pty=True, expect_mirroring=True) def mirroring_is_skipped_when_our_input_is_not_a_tty(self): self._test_mirroring(in_pty=False, expect_mirroring=False) def mirroring_can_be_forced_on(self): self._test_mirroring( # Subprocess pty normally disables echoing pty=True, # But then we forcibly enable it echo_stdin=True, # And expect it to happen expect_mirroring=True, ) def mirroring_can_be_forced_off(self): # Make subprocess pty False, stdin tty True, echo_stdin False, # prove no mirroring self._test_mirroring( # Subprocess lack of pty normally enables echoing pty=False, # Provided the controlling terminal _is_ a tty in_pty=True, # But then we forcibly disable it echo_stdin=False, # And expect it to not happen expect_mirroring=False, ) def mirroring_honors_configuration(self): self._test_mirroring( pty=False, in_pty=True, settings={"run": {"echo_stdin": False}}, expect_mirroring=False, ) @trap @skip_if_windows @patch("invoke.runners.sys.stdin") @patch("invoke.terminals.fcntl.ioctl") @patch("invoke.terminals.os") @patch("invoke.terminals.termios") @patch("invoke.terminals.tty") @patch("invoke.terminals.select") # NOTE: the no-fileno edition is handled at top of this local test # class, in the base case test. def reads_FIONREAD_bytes_from_stdin_when_fileno( self, select, tty, termios, mock_os, ioctl, stdin ): # Set stdin up as a file-like buffer which passes has_fileno stdin.fileno.return_value = 17 # arbitrary stdin_data = list("boo!") def fakeread(n): # Why is there no slice version of pop()? data = stdin_data[:n] del stdin_data[:n] return "".join(data) stdin.read.side_effect = fakeread # Without mocking this, we'll always get errors checking the above # bogus fileno() mock_os.tcgetpgrp.return_value = None # Ensure select() only spits back stdin one time, despite there # being multiple bytes to read (this at least partly fakes behavior # from issue #58) select.select.side_effect = chain( [([stdin], [], [])], repeat(([], [], [])) ) # Have ioctl yield our multiple number of bytes when called with # FIONREAD def fake_ioctl(fd, cmd, buf): # This works since each mocked attr will still be its own mock # object with a distinct 'is' identity. 
if cmd is termios.FIONREAD: return struct.pack("h", len(stdin_data)) ioctl.side_effect = fake_ioctl # Set up our runner as one w/ mocked stdin writing (simplest way to # assert how the reads & writes are happening) klass = self._mock_stdin_writer() self._runner(klass=klass).run(_) klass.write_proc_stdin.assert_called_once_with("boo!") class character_buffered_stdin: @skip_if_windows @patch("invoke.terminals.tty") def setcbreak_called_on_tty_stdins(self, mock_tty, mock_termios): mock_termios.tcgetattr.return_value = _make_tcattrs(echo=True) self._run(_) mock_tty.setcbreak.assert_called_with(sys.stdin) @skip_if_windows @patch("invoke.terminals.tty") def setcbreak_not_called_on_non_tty_stdins(self, mock_tty): self._run(_, in_stream=StringIO()) assert not mock_tty.setcbreak.called @skip_if_windows @patch("invoke.terminals.tty") @patch("invoke.terminals.os") def setcbreak_not_called_if_process_not_foregrounded( self, mock_os, mock_tty ): # Re issue #439. mock_os.getpgrp.return_value = 1337 mock_os.tcgetpgrp.return_value = 1338 self._run(_) assert not mock_tty.setcbreak.called # Sanity mock_os.tcgetpgrp.assert_called_once_with(sys.stdin.fileno()) @skip_if_windows @patch("invoke.terminals.tty") def tty_stdins_have_settings_restored_by_default( self, mock_tty, mock_termios ): # Get already-cbroken attrs since that's an easy way to get the # right format/layout attrs = _make_tcattrs(echo=True) mock_termios.tcgetattr.return_value = attrs self._run(_) # Ensure those old settings are being restored mock_termios.tcsetattr.assert_called_once_with( sys.stdin, mock_termios.TCSADRAIN, attrs ) @skip_if_windows @patch("invoke.terminals.tty") # stub def tty_stdins_have_settings_restored_on_KeyboardInterrupt( self, mock_tty, mock_termios ): # This test is re: GH issue #303 sentinel = _make_tcattrs(echo=True) mock_termios.tcgetattr.return_value = sentinel # Don't actually bubble up the KeyboardInterrupt... try: self._run(_, klass=_KeyboardInterruptingRunner) except KeyboardInterrupt: pass # Did we restore settings?! mock_termios.tcsetattr.assert_called_once_with( sys.stdin, mock_termios.TCSADRAIN, sentinel ) @skip_if_windows @patch("invoke.terminals.tty") def setcbreak_not_called_if_terminal_seems_already_cbroken( self, mock_tty, mock_termios ): # Proves #559, sorta, insofar as it only passes when the fixed # behavior is in place. (Proving the old bug is hard as it is race # condition reliant; the new behavior sidesteps that entirely.) # Test both bytes and ints versions of CC values, since docs # disagree with at least some platforms' realities on that. 
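            # (Hence the cc_is_ints switch on _make_tcattrs, exercised both
            # ways below.)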
            for is_ints in (True, False):
                mock_termios.tcgetattr.return_value = _make_tcattrs(
                    cc_is_ints=is_ints
                )
                self._run(_)
                # Ensure tcsetattr and setcbreak were never called
                assert not mock_tty.setcbreak.called
                assert not mock_termios.tcsetattr.called

    class send_interrupt:
        def _run_with_mocked_interrupt(self, klass):
            runner = klass(Context())
            runner.send_interrupt = Mock()
            try:
                runner.run(_)
            except _GenericException:
                pass
            return runner

        def called_on_KeyboardInterrupt(self):
            runner = self._run_with_mocked_interrupt(
                _KeyboardInterruptingRunner
            )
            assert runner.send_interrupt.called

        def not_called_for_other_exceptions(self):
            runner = self._run_with_mocked_interrupt(_GenericExceptingRunner)
            assert not runner.send_interrupt.called

        def sends_escape_byte_sequence(self):
            for pty in (True, False):
                runner = _KeyboardInterruptingRunner(Context())
                mock_stdin = Mock()
                runner.write_proc_stdin = mock_stdin
                runner.run(_, pty=pty)
                mock_stdin.assert_called_once_with("\x03")

    class timeout:
        def start_timer_called_with_config_value(self):
            runner = self._runner(timeouts={"command": 7})
            runner.start_timer = Mock()
            assert runner.context.config.timeouts.command == 7
            runner.run(_)
            runner.start_timer.assert_called_once_with(7)

        def run_kwarg_honored(self):
            runner = self._runner()
            runner.start_timer = Mock()
            assert runner.context.config.timeouts.command is None
            runner.run(_, timeout=3)
            runner.start_timer.assert_called_once_with(3)

        def kwarg_wins_over_config(self):
            runner = self._runner(timeouts={"command": 7})
            runner.start_timer = Mock()
            assert runner.context.config.timeouts.command == 7
            runner.run(_, timeout=3)
            runner.start_timer.assert_called_once_with(3)

        def raises_CommandTimedOut_with_timeout_info(self):
            runner = self._runner(
                klass=_TimingOutRunner, timeouts={"command": 7}
            )
            with raises(CommandTimedOut) as info:
                runner.run(_)
            assert info.value.timeout == 7
            _repr = "<CommandTimedOut: cmd='nope' timeout=7>"
            assert repr(info.value) == _repr
            expected = """
Command did not complete within 7 seconds!

Command: 'nope' Stdout: already printed Stderr: already printed """.lstrip() assert str(info.value) == expected @patch("invoke.runners.threading.Timer") def start_timer_gives_its_timer_the_kill_method(self, Timer): runner = self._runner() runner.start_timer(30) Timer.assert_called_once_with(30, runner.kill) def _mocked_timer(self): runner = self._runner() runner._timer = Mock() return runner def run_always_stops_timer(self): runner = _GenericExceptingRunner(Context()) runner._timer = Mock() with raises(_GenericException): runner.run(_) runner._timer.cancel.assert_called_once_with() def timer_aliveness_is_test_of_timing_out(self): # Might be redundant, but easy enough to unit test runner = Runner(Context()) runner._timer = Mock() runner._timer.is_alive.return_value = False assert runner.timed_out runner._timer.is_alive.return_value = True assert not runner.timed_out def timeout_specified_but_no_timer_means_no_exception(self): # Weird corner case but worth testing runner = Runner(Context()) runner._timer = None assert not runner.timed_out class stop: def always_runs_no_matter_what(self): runner = _GenericExceptingRunner(context=Context()) runner.stop = Mock() with raises(_GenericException): runner.run(_) runner.stop.assert_called_once_with() def cancels_timer(self): runner = self._runner() runner._timer = Mock() runner.stop() runner._timer.cancel.assert_called_once_with() class asynchronous: def returns_Promise_immediately_and_finishes_on_join(self): # Dummy subclass with controllable process_is_finished flag class _Finisher(_Dummy): _finished = False @property def process_is_finished(self): return self._finished runner = _Finisher(Context()) # Set up mocks and go runner.start = Mock() for method in self._stop_methods: setattr(runner, method, Mock()) result = runner.run(_, asynchronous=True) # Got a Promise (its attrs etc are in its own test subsuite) assert isinstance(result, Promise) # Started, but did not stop (as would've happened for disown) assert runner.start.called for method in self._stop_methods: assert not getattr(runner, method).called # Set proc completion flag to truthy and join() runner._finished = True result.join() for method in self._stop_methods: assert getattr(runner, method).called @trap def hides_output(self): # Run w/ faux subproc stdout/err data, but async self._runner(out="foo", err="bar").run(_, asynchronous=True).join() # Expect that default out/err streams did not get printed to. assert sys.stdout.getvalue() == "" assert sys.stderr.getvalue() == "" def does_not_forward_stdin(self): class MockedHandleStdin(_Dummy): pass MockedHandleStdin.handle_stdin = Mock() runner = self._runner(klass=MockedHandleStdin) runner.run(_, asynchronous=True).join() # As with the main test for setting this to False, we know that # when stdin is disabled, the handler is never even called (no # thread is created for it). assert not MockedHandleStdin.handle_stdin.called def leaves_overridden_streams_alone(self): # NOTE: technically a duplicate test of the generic tests for #637 # re: intersect of hide and overridden streams. But that's an # implementation detail so this is still valuable. 
klass = self._mock_stdin_writer() out, err, in_ = StringIO(), StringIO(), StringIO("hallo") runner = self._runner(out="foo", err="bar", klass=klass) runner.run( _, asynchronous=True, out_stream=out, err_stream=err, in_stream=in_, ).join() assert out.getvalue() == "foo" assert err.getvalue() == "bar" assert klass.write_proc_stdin.called # lazy class disown: @patch.object(threading.Thread, "start") def starts_and_returns_None_but_does_nothing_else(self, thread_start): runner = Runner(Context()) runner.start = Mock() not_called = self._stop_methods + ["wait"] for method in not_called: setattr(runner, method, Mock()) result = runner.run(_, disown=True) # No Result object! assert result is None # Subprocess kicked off assert runner.start.called # No timer or IO threads started assert not thread_start.called # No wait or shutdown related Runner methods called for method in not_called: assert not getattr(runner, method).called def cannot_be_given_alongside_asynchronous(self): with raises(ValueError) as info: self._runner().run(_, asynchronous=True, disown=True) sentinel = "Cannot give both 'asynchronous' and 'disown'" assert sentinel in str(info.value) class _FastLocal(Local): # Neuter this for same reason as in _Dummy above input_sleep = 0 class Local_: def _run(self, *args, **kwargs): return _run(*args, **dict(kwargs, klass=_FastLocal)) def _runner(self, *args, **kwargs): return _runner(*args, **dict(kwargs, klass=_FastLocal)) class stop: @mock_subprocess() def calls_super(self): # Re #910 runner = self._runner() runner._timer = Mock() # twiddled by parent class stop() runner.run(_) runner._timer.cancel.assert_called_once_with() class pty: @mock_pty() def when_pty_True_we_use_pty_fork_and_os_exec(self): "when pty=True, we use pty.fork and os.exec*" self._run(_, pty=True) # @mock_pty's asserts check os/pty calls for us. @mock_pty(insert_os=True) def _expect_exit_check(self, exited, mock_os): if exited: expected_check = mock_os.WIFEXITED expected_get = mock_os.WEXITSTATUS unexpected_check = mock_os.WIFSIGNALED unexpected_get = mock_os.WTERMSIG else: expected_check = mock_os.WIFSIGNALED expected_get = mock_os.WTERMSIG unexpected_check = mock_os.WIFEXITED unexpected_get = mock_os.WEXITSTATUS expected_check.return_value = True unexpected_check.return_value = False self._run(_, pty=True) exitstatus = mock_os.waitpid.return_value[1] expected_get.assert_called_once_with(exitstatus) assert not unexpected_get.called def pty_uses_WEXITSTATUS_if_WIFEXITED(self): self._expect_exit_check(True) def pty_uses_WTERMSIG_if_WIFSIGNALED(self): self._expect_exit_check(False) @mock_pty(insert_os=True) def WTERMSIG_result_turned_negative_to_match_subprocess(self, mock_os): mock_os.WIFEXITED.return_value = False mock_os.WIFSIGNALED.return_value = True mock_os.WTERMSIG.return_value = 2 assert self._run(_, pty=True, warn=True).exited == -2 @mock_pty() def pty_is_set_to_controlling_terminal_size(self): self._run(_, pty=True) # @mock_pty's asserts check the TIOC[GS]WINSZ calls for us def warning_only_fires_once(self): # I.e. if implementation checks pty-ness >1 time, only one warning # is emitted. This is kinda implementation-specific, but... skip() @patch("invoke.runners.sys") def replaced_stdin_objects_dont_explode(self, mock_sys): # Replace sys.stdin with an object lacking .isatty(), which # normally causes an AttributeError unless we are being careful. mock_sys.stdin = object() # Test. If bug is present, this will error. 
runner = Local(Context()) assert runner.should_use_pty(pty=True, fallback=True) is False @mock_pty(trailing_error=OSError("Input/output error")) def spurious_OSErrors_handled_gracefully(self): # Doesn't-blow-up test. self._run(_, pty=True) @mock_pty(trailing_error=OSError("I/O error")) def other_spurious_OSErrors_handled_gracefully(self): # Doesn't-blow-up test. self._run(_, pty=True) @mock_pty(trailing_error=OSError("wat")) def non_spurious_OSErrors_bubble_up(self): try: self._run(_, pty=True) except ThreadException as e: e = e.exceptions[0] assert e.type == OSError assert str(e.value) == "wat" @mock_pty(os_close_error=True) def stop_mutes_errors_on_pty_close(self): # Another doesn't-blow-up test, this time around os.close() of the # pty itself (due to os_close_error=True) self._run(_, pty=True) class fallback: @mock_pty(isatty=False) def can_be_overridden_by_kwarg(self): self._run(_, pty=True, fallback=False) # @mock_pty's asserts will be mad if pty-related os/pty calls # didn't fire, so we're done. @mock_pty(isatty=False) def can_be_overridden_by_config(self): self._runner(run={"fallback": False}).run(_, pty=True) # @mock_pty's asserts will be mad if pty-related os/pty calls # didn't fire, so we're done. @trap @mock_subprocess(isatty=False) def affects_result_pty_value(self, *mocks): assert self._run(_, pty=True).pty is False @mock_pty(isatty=False) def overridden_fallback_affects_result_pty_value(self): assert self._run(_, pty=True, fallback=False).pty is True class shell: @mock_pty(insert_os=True) def defaults_to_bash_or_cmdexe_when_pty_True(self, mock_os): # NOTE: yea, windows can't run pty is true, but this is really # testing config behavior, so...meh self._run(_, pty=True) _expect_platform_shell(mock_os.execve.call_args_list[0][0][0]) @mock_subprocess(insert_Popen=True) def defaults_to_bash_or_cmdexe_when_pty_False(self, mock_Popen): self._run(_, pty=False) _expect_platform_shell( mock_Popen.call_args_list[0][1]["executable"] ) @mock_pty(insert_os=True) def may_be_overridden_when_pty_True(self, mock_os): self._run(_, pty=True, shell="/bin/zsh") assert mock_os.execve.call_args_list[0][0][0] == "/bin/zsh" @mock_subprocess(insert_Popen=True) def may_be_overridden_when_pty_False(self, mock_Popen): self._run(_, pty=False, shell="/bin/zsh") assert mock_Popen.call_args_list[0][1]["executable"] == "/bin/zsh" class env: # NOTE: update-vs-replace semantics are tested 'purely' up above in # regular Runner tests. 
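        # A hedged sketch of the semantics covered here (illustrative only,
        # not part of the original suite): by default `env` is merged into a
        # copy of the parent environment, while passing `replace_env=True`
        # makes the given mapping the child's *entire* environment:
        #
        #   from invoke import Context
        #   c = Context()
        #   c.run("env", env={"FOO": "BAR"})  # copy of os.environ, plus FOO
        #   c.run("env", env={"FOO": "BAR"}, replace_env=True)  # FOO only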
        @mock_subprocess(insert_Popen=True)
        def uses_Popen_kwarg_for_pty_False(self, mock_Popen):
            self._run(_, pty=False, env={"FOO": "BAR"})
            expected = dict(os.environ, FOO="BAR")
            env = mock_Popen.call_args_list[0][1]["env"]
            assert env == expected

        @mock_pty(insert_os=True)
        def uses_execve_for_pty_True(self, mock_os):
            type(mock_os).environ = {"OTHERVAR": "OTHERVAL"}
            self._run(_, pty=True, env={"FOO": "BAR"})
            expected = {"OTHERVAR": "OTHERVAL", "FOO": "BAR"}
            env = mock_os.execve.call_args_list[0][0][2]
            assert env == expected

    class close_proc_stdin:
        def raises_SubprocessPipeError_when_pty_in_use(self):
            with raises(SubprocessPipeError):
                runner = Local(Context())
                runner.using_pty = True
                runner.close_proc_stdin()

        def closes_process_stdin(self):
            runner = Local(Context())
            runner.process = Mock()
            runner.using_pty = False
            runner.close_proc_stdin()
            runner.process.stdin.close.assert_called_once_with()

    class timeout:
        @patch("invoke.runners.os")
        def kill_uses_self_pid_when_pty(self, mock_os):
            runner = self._runner()
            runner.using_pty = True
            runner.pid = 50
            runner.kill()
            mock_os.kill.assert_called_once_with(50, signal.SIGKILL)

        @patch("invoke.runners.os")
        def kill_uses_self_process_pid_when_not_pty(self, mock_os):
            runner = self._runner()
            runner.using_pty = False
            runner.process = Mock(pid=30)
            runner.kill()
            mock_os.kill.assert_called_once_with(30, signal.SIGKILL)


class Result_:
    def nothing_is_required(self):
        Result()

    def first_posarg_is_stdout(self):
        assert Result("foo").stdout == "foo"

    def command_defaults_to_empty_string(self):
        assert Result().command == ""

    def shell_defaults_to_empty_string(self):
        assert Result().shell == ""

    def encoding_defaults_to_local_default_encoding(self):
        assert Result().encoding == default_encoding()

    def env_defaults_to_empty_dict(self):
        assert Result().env == {}

    def stdout_defaults_to_empty_string(self):
        assert Result().stdout == ""

    def stderr_defaults_to_empty_string(self):
        assert Result().stderr == ""

    def exited_defaults_to_zero(self):
        assert Result().exited == 0

    def pty_defaults_to_False(self):
        assert Result().pty is False

    def repr_contains_useful_info(self):
        assert repr(Result(command="foo")) == "<Result cmd='foo' exited=0>"

    class tail:
        def setup_method(self):
            self.sample = "\n".join(str(x) for x in range(25))

        def returns_last_10_lines_of_given_stream_plus_whitespace(self):
            expected = """

15
16
17
18
19
20
21
22
23
24"""
            assert Result(stdout=self.sample).tail("stdout") == expected

        def line_count_is_configurable(self):
            expected = """

23
24"""
            tail = Result(stdout=self.sample).tail("stdout", count=2)
            assert tail == expected

        def works_for_stderr_too(self):
            # Dumb test is dumb, but whatever
            expected = """

23
24"""
            tail = Result(stderr=self.sample).tail("stderr", count=2)
            assert tail == expected


class Promise_:
    def exposes_read_only_run_params(self):
        runner = _runner()
        promise = runner.run(
            _, pty=True, encoding="utf-17", shell="sea", asynchronous=True
        )
        assert promise.command == _
        assert promise.pty is True
        assert promise.encoding == "utf-17"
        assert promise.shell == "sea"
        assert not hasattr(promise, "stdout")
        assert not hasattr(promise, "stderr")

    class join:
        # NOTE: high level Runner lifecycle mechanics of join() (re: wait(),
        # process_is_finished() etc) are tested in main suite.
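        # Illustrative, hedged usage sketch (not an official example): with
        # asynchronous=True, run() returns a Promise immediately; a later
        # join() blocks, re-raises any captured exception, and hands back the
        # final Result:
        #
        #   promise = Context().run("make all", asynchronous=True)
        #   ...  # other work happens while the subprocess runs
        #   result = promise.join()  # Result on success; raises Failure etc.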
def returns_Result_on_success(self): result = _runner().run(_, asynchronous=True).join() assert isinstance(result, Result) # Sanity assert result.command == _ assert result.exited == 0 def raises_main_thread_exception_on_kaboom(self): runner = _runner(klass=_GenericExceptingRunner) with raises(_GenericException): runner.run(_, asynchronous=True).join() def raises_subthread_exception_on_their_kaboom(self): class Kaboom(_Dummy): def handle_stdout(self, **kwargs): raise OhNoz() runner = _runner(klass=Kaboom) promise = runner.run(_, asynchronous=True) with raises(ThreadException) as info: promise.join() assert isinstance(info.value.exceptions[0].value, OhNoz) def raises_Failure_on_failure(self): runner = _runner(exits=1) promise = runner.run(_, asynchronous=True) with raises(Failure): promise.join() class context_manager: def calls_join_or_wait_on_close_of_block(self): promise = _runner().run(_, asynchronous=True) promise.join = Mock() with promise: pass promise.join.assert_called_once_with() def yields_self(self): promise = _runner().run(_, asynchronous=True) with promise as value: assert value is promise invoke-2.2.0/tests/task.py000066400000000000000000000415641445356551000155060ustar00rootroot00000000000000from unittest.mock import Mock from pytest import raises, skip from invoke import Context, Config, task, Task, Call, Collection from invoke import FilesystemLoader as Loader from _util import support # # NOTE: Most Task tests use @task as it's the primary interface and is a very # thin wrapper around Task itself. This way we don't have to write 2x tests for # both Task and @task. Meh :) # def _func(c): pass class task_: "@task" def _load(self, name): mod, _ = self.loader.load(name) return Collection.from_module(mod) def setup_method(self): self.loader = Loader(start=support) self.vanilla = self._load("decorators") def allows_access_to_wrapped_object(self): def lolcats(c): pass assert task(lolcats).body == lolcats def allows_alias_specification(self): assert self.vanilla["foo"] == self.vanilla["bar"] def allows_multiple_aliases(self): assert self.vanilla["foo"] == self.vanilla["otherbar"] def allows_default_specification(self): assert self.vanilla[""] == self.vanilla["biz"] def has_autoprint_option(self): ap = self._load("autoprint") assert ap["nope"].autoprint is False assert ap["yup"].autoprint is True def raises_ValueError_on_multiple_defaults(self): with raises(ValueError): self._load("decorator_multi_default") def sets_arg_kind(self): skip() def sets_which_args_are_optional(self): assert self.vanilla["optional_values"].optional == ("myopt",) def allows_annotating_args_as_positional(self): assert self.vanilla["one_positional"].positional == ["pos"] assert self.vanilla["two_positionals"].positional == ["pos1", "pos2"] def allows_annotating_args_as_iterable(self): assert self.vanilla["iterable_values"].iterable == ["mylist"] def allows_annotating_args_as_incrementable(self): arg = self.vanilla["incrementable_values"] assert arg.incrementable == ["verbose"] def when_positional_arg_missing_all_non_default_args_are_positional(self): arg = self.vanilla["implicit_positionals"] assert arg.positional == ["pos1", "pos2"] def context_arguments_should_not_appear_in_implicit_positional_list(self): @task def mytask(c): pass assert len(mytask.positional) == 0 def pre_tasks_stored_directly(self): @task def whatever(c): pass @task(pre=[whatever]) def func(c): pass assert func.pre == [whatever] def allows_star_args_as_shortcut_for_pre(self): @task def pre1(c): pass @task def pre2(c): pass @task(pre1, 
pre2) def func(c): pass assert func.pre == (pre1, pre2) def disallows_ambiguity_between_star_args_and_pre_kwarg(self): @task def pre1(c): pass @task def pre2(c): pass with raises(TypeError): @task(pre1, pre=[pre2]) def func(c): pass def sets_name(self): @task(name="foo") def bar(c): pass assert bar.name == "foo" def returns_Task_instances_by_default(self): @task def mytask(c): pass assert isinstance(mytask, Task) def klass_kwarg_allows_overriding_class_used(self): class MyTask(Task): pass @task(klass=MyTask) def mytask(c): pass assert isinstance(mytask, MyTask) def klass_kwarg_works_for_subclassers_without_kwargs(self): # I.e. the previous test doesn't catch this particular use case class MyTask(Task): pass def uses_MyTask(*args, **kwargs): kwargs.setdefault("klass", MyTask) return task(*args, **kwargs) @uses_MyTask def mytask(c): pass assert isinstance(mytask, MyTask) def unknown_kwargs_get_mad_at_Task_level(self): # NOTE: this was previously untested behavior. We actually just # modified HOW TypeError gets raised (Task constructor, implicitly, vs # explicitly in @task itself) but the end result is the same for anyone # not trying to be stringly typed based on exception message. with raises(TypeError): @task(whatever="man") def mytask(c): pass class Task_: def has_useful_repr(self): i = repr(Task(_func)) assert "_func" in i, "'func' not found in {!r}".format(i) e = repr(Task(_func, name="funky")) assert "funky" in e, "'funky' not found in {!r}".format(e) assert "_func" not in e, "'_func' unexpectedly seen in {!r}".format(e) def equality_testing(self): t1 = Task(_func, name="foo") t2 = Task(_func, name="foo") assert t1 == t2 t3 = Task(_func, name="bar") assert t1 != t3 def equality_testing_false_for_non_task_objects(self): t = Task(_func, name="foo") # No name attribute at all assert t != object() # Name attr, but not a Task class Named: name = "foo" assert t != Named() class function_like_behavior: # Things that help them eg show up in autodoc easier def inherits_module_from_body(self): mytask = Task(_func, name="funky") assert mytask.__module__ is _func.__module__ class attributes: def has_default_flag(self): assert Task(_func).is_default is False def name_defaults_to_body_name(self): assert Task(_func).name == "_func" def can_override_name(self): assert Task(_func, name="foo").name == "foo" class callability: def setup_method(self): @task def foo(c): "My docstring" return 5 self.task = foo def dunder_call_wraps_body_call(self): context = Context() assert self.task(context) == 5 def errors_if_first_arg_not_Context(self): @task def mytask(c): pass with raises(TypeError): mytask(5) def errors_if_no_first_arg_at_all(self): with raises(TypeError): @task def mytask(): pass def tracks_times_called(self): context = Context() assert self.task.called is False self.task(context) assert self.task.called is True assert self.task.times_called == 1 self.task(context) assert self.task.times_called == 2 def wraps_body_docstring(self): assert self.task.__doc__ == "My docstring" def wraps_body_name(self): assert self.task.__name__ == "foo" class get_arguments: def setup_method(self): @task(positional=["arg_3", "arg1"], optional=["arg1"]) def mytask(c, arg1, arg2=False, arg_3=5): pass self.task = mytask self.args = self.task.get_arguments() self.argdict = self._arglist_to_dict(self.args) def _arglist_to_dict(self, arglist): # This kinda duplicates Context.add_arg(x) for x in arglist :( ret = {} for arg in arglist: for name in arg.names: ret[name] = arg return ret def _task_to_dict(self, task): return 
            self._arglist_to_dict(task.get_arguments())

        def positional_args_come_first(self):
            assert self.args[0].name == "arg_3"
            assert self.args[1].name == "arg1"
            assert self.args[2].name == "arg2"

        def kinds_are_preserved(self):
            # Remember that the default 'kind' is a string.
            assert [x.kind for x in self.args] == [int, str, bool]

        def positional_flag_is_preserved(self):
            assert [x.positional for x in self.args] == [True, True, False]

        def optional_flag_is_preserved(self):
            assert [x.optional for x in self.args] == [False, True, False]

        def optional_prevents_bool_defaults_from_affecting_kind(self):
            # Re #416. See notes in the function under test for rationale.
            @task(optional=["myarg"])
            def mytask(c, myarg=False):
                pass

            arg = mytask.get_arguments()[0]
            assert arg.kind is str  # not bool!

        def optional_plus_nonbool_default_does_not_override_kind(self):
            @task(optional=["myarg"])
            def mytask(c, myarg=17):
                pass

            arg = mytask.get_arguments()[0]
            assert arg.kind is int  # not str!

        def turns_function_signature_into_Arguments(self):
            assert len(self.args) == 3, str(self.args)
            assert "arg2" in self.argdict

        def shortflags_created_by_default(self):
            assert "a" in self.argdict
            assert self.argdict["a"] is self.argdict["arg1"]

        def shortflags_dont_care_about_positionals(self):
            "Positionalness doesn't impact whether shortflags are made"
            for short, long_ in (("a", "arg1"), ("r", "arg2"), ("g", "arg-3")):
                assert self.argdict[short] is self.argdict[long_]

        def autocreated_short_flags_can_be_disabled(self):
            @task(auto_shortflags=False)
            def mytask(c, arg):
                pass

            args = self._task_to_dict(mytask)
            assert "a" not in args
            assert "arg" in args

        def autocreated_shortflags_dont_collide(self):
            "auto-created short flags don't collide"

            @task
            def mytask(c, arg1, arg2, barg):
                pass

            args = self._task_to_dict(mytask)
            assert "a" in args
            assert args["a"] is args["arg1"]
            assert "r" in args
            assert args["r"] is args["arg2"]
            assert "b" in args
            assert args["b"] is args["barg"]

        def early_auto_shortflags_shouldnt_lock_out_real_shortflags(self):
            # I.e. "task --foo -f" => --foo should NOT get to pick '-f' for its
            # shortflag or '-f' is totally fucked.
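            # A brief walk-through of the expectation below: `l` explicitly
            # claims '-l' for itself, so auto-generation for `longarg` must
            # skip that taken letter and fall through to the next free one
            # ('-o' here).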
@task def mytask(c, longarg, l): # noqa pass args = self._task_to_dict(mytask) assert "longarg" in args assert "o" in args assert args["o"] is args["longarg"] assert "l" in args def context_arguments_are_not_returned(self): @task def mytask(c): pass assert len(mytask.get_arguments()) == 0 def underscores_become_dashes(self): @task def mytask(c, longer_arg): pass arg = mytask.get_arguments()[0] assert arg.names == ("longer-arg", "l") assert arg.attr_name == "longer_arg" assert arg.name == "longer_arg" class help: def setup_method(self): @task( help={ "simple": "key", "with_underscores": "yup", "with-dashes": "also yup", } ) def mytask(c, simple, with_underscores, with_dashes): pass self.help = { arg.name: arg.help for arg in mytask.get_arguments() } def base_case(self): assert self.help["simple"] == "key" def underscored_name_via_underscores(self): assert self.help["with_underscores"] == "yup" def underscored_name_via_dashes(self): assert self.help["with_dashes"] == "also yup" def raises_ValueError_on_keys_not_found_in_task_args(self): @task(help={"non-existing-param": "Help text"}) def no_parameters(c): pass err = r"field was set.*don't exist:.*'non-existing-param'*" with raises(ValueError, match=err): no_parameters.get_arguments() def no_ValueError_on_unfound_keys_when_configured_otherwise(self): @task( help={"non-existing-param": "Help text", "param": "ganoes"} ) def malazan(c, param): pass arg_dict = { arg.name: arg.help for arg in malazan.get_arguments(ignore_unknown_help=True) } assert "non_existing_param" not in arg_dict assert arg_dict["param"] == "ganoes" def arg_value_is_copied_to_avoid_state_bleed_when_shared(self): # TODO: the 'real' solve here is to make sharing common # arguments between tasks a first class citizen; there's # tickets for that. shared_help = {"shared": "help"} @task(help=shared_help) def first(c, shared): pass @task(help=shared_help) def second(c, shared): pass # Running get_arguments() is required to trigger bug or lack # thereof, but also lets us do a nice safety check after. 
                helps = [x.get_arguments()[0].help for x in (first, second)]
                assert helps == ["help", "help"]


# Dummy task for Call tests
_ = object()


class Call_:
    def setup_method(self):
        self.task = Task(Mock(__name__="mytask"))

    class init:
        class task:
            def is_required(self):
                with raises(TypeError):
                    Call()

            def is_first_posarg(self):
                assert Call(_).task is _

        class called_as:
            def defaults_to_None(self):
                assert Call(_).called_as is None

            def may_be_given(self):
                assert Call(_, called_as="foo").called_as == "foo"

        class args:
            def defaults_to_empty_tuple(self):
                assert Call(_).args == tuple()

            def may_be_given(self):
                assert Call(_, args=(1, 2, 3)).args == (1, 2, 3)

        class kwargs:
            def defaults_to_empty_dict(self):
                assert Call(_).kwargs == dict()

            def may_be_given(self):
                assert Call(_, kwargs={"foo": "bar"}).kwargs == {"foo": "bar"}

    class stringrep:
        "__str__"

        def includes_task_name(self):
            call = Call(self.task)
            assert str(call) == "<Call 'mytask', args: (), kwargs: {}>"

        def works_for_subclasses(self):
            class MyCall(Call):
                pass

            call = MyCall(self.task)
            assert "<MyCall" in str(call)

    class make_context:
        def requires_config_argument(self):
            with raises(TypeError):
                Call(_).make_context()

        def creates_a_new_Context_from_given_config(self):
            conf = Config(defaults={"foo": "bar"})
            c = Call(_).make_context(conf)
            assert isinstance(c, Context)
            assert c.foo == "bar"

    class clone:
        def returns_new_but_equivalent_object(self):
            orig = Call(self.task)
            clone = orig.clone()
            assert clone is not orig
            assert clone == orig

        def can_clone_into_a_subclass(self):
            orig = Call(self.task)

            class MyCall(Call):
                pass

            clone = orig.clone(into=MyCall)
            assert clone == orig
            assert isinstance(clone, MyCall)

        def can_be_given_extra_kwargs_to_clone_with(self):
            orig = Call(self.task)

            class MyCall(Call):
                def __init__(self, *args, **kwargs):
                    self.hooray = kwargs.pop("hooray")
                    super().__init__(*args, **kwargs)

            clone = orig.clone(into=MyCall, with_={"hooray": "woo"})
            assert clone.hooray == "woo"
invoke-2.2.0/tests/terminals.py000066400000000000000000000056011445356551000165320ustar00rootroot00000000000000import fcntl
import termios
from unittest.mock import Mock, patch

from pytest import skip, mark

from invoke.terminals import pty_size, bytes_to_read, WINDOWS

# Skip on Windows CI, it may blow up on one of these tests
pytestmark = mark.skipif(
    WINDOWS, reason="Low level terminal tests only work well on POSIX"
)

# NOTE: 'with character_buffered()' tests are in runners.py as it's a lot
# easier to test some aspects in a non-unit sense (e.g. a keyboard-interrupting
# Runner subclass). MEH.


class terminals:
    class pty_size:
        @patch("fcntl.ioctl", wraps=fcntl.ioctl)
        def calls_fcntl_with_TIOCGWINSZ(self, ioctl):
            # Test the default (Unix) implementation because that's all we
            # can realistically do here.
            pty_size()
            assert ioctl.call_args_list[0][0][1] == termios.TIOCGWINSZ

        @patch("sys.stdout")
        @patch("fcntl.ioctl")
        def defaults_to_80x24_when_stdout_not_a_tty(self, ioctl, stdout):
            # Make sure stdout acts like a real stream (means failure is
            # more obvious)
            stdout.fileno.return_value = 1
            # Ensure it fails the isatty() test too
            stdout.isatty.return_value = False
            # Test
            assert pty_size() == (80, 24)

        @patch("sys.stdout")
        @patch("fcntl.ioctl")
        def uses_default_when_stdout_lacks_fileno(self, ioctl, stdout):
            # i.e.
when accessing it throws AttributeError stdout.fileno.side_effect = AttributeError assert pty_size() == (80, 24) @patch("sys.stdout") @patch("fcntl.ioctl") def uses_default_when_stdout_triggers_ioctl_error(self, ioctl, stdout): ioctl.side_effect = TypeError assert pty_size() == (80, 24) class bytes_to_read_: @patch("invoke.terminals.fcntl") def returns_1_when_stream_lacks_fileno(self, fcntl): # A fileno() that exists but returns a non-int is a quick way # to fail util.has_fileno(). assert bytes_to_read(Mock(fileno=lambda: None)) == 1 assert not fcntl.ioctl.called @patch("invoke.terminals.fcntl") def returns_1_when_stream_has_fileno_but_is_not_a_tty(self, fcntl): # It blows up otherwise anyways (struct.unpack gets mad because # result isn't a string of the right length) but let's make # ioctl die similarly to the real world case we're testing for # here (#425) fcntl.ioctl.side_effect = IOError( "Operation not supported by device" ) stream = Mock(isatty=lambda: False, fileno=lambda: 17) # arbitrary assert bytes_to_read(stream) == 1 assert not fcntl.ioctl.called def returns_FIONREAD_result_when_stream_is_a_tty(self): skip() def returns_1_on_windows(self): skip() invoke-2.2.0/tests/util.py000066400000000000000000000031751445356551000155150ustar00rootroot00000000000000from invoke.util import helpline class util: class helpline: def is_None_if_no_docstring(self): def foo(c): pass assert helpline(foo) is None def is_None_if_whitespace_only_docstring(self): def foo(c): """ """ pass assert helpline(foo) is None def is_entire_thing_if_docstring_one_liner(self): def foo(c): "foo!" pass assert helpline(foo) == "foo!" def left_strips_newline_bearing_one_liners(self): def foo(c): """ foo! """ pass assert helpline(foo) == "foo!" def is_first_line_in_multiline_docstrings(self): def foo(c): """ foo? foo! """ pass assert helpline(foo) == "foo?" def is_None_if_docstring_matches_object_type(self): # I.e. we don't want a docstring that is coming from the class # instead of the instance. class Foo: "I am Foo" pass foo = Foo() assert helpline(foo) is None def instance_attached_docstring_is_still_displayed(self): # This is actually a property of regular object semantics, but # whatever, why not have a test for it. class Foo: "I am Foo" pass foo = Foo() foo.__doc__ = "I am foo" assert helpline(foo) == "I am foo" invoke-2.2.0/tests/watchers.py000066400000000000000000000102171445356551000163530ustar00rootroot00000000000000from queue import Queue, Empty from threading import Thread, Event from invoke import Responder, FailingResponder, ResponseNotAccepted # NOTE: StreamWatcher is basically just an interface/protocol; no behavior to # test of its own. So this file tests Responder primarily, and some subclasses. class Responder_: def keeps_track_of_seen_index_per_thread(self): # Instantiate a single object which will be used in >1 thread r = Responder(pattern="foo", response="bar fight") # meh # Thread body func allowing us to mimic actual IO thread behavior, with # Queues used in place of actual pipes/files def body(responder, in_q, out_q, finished): while not finished.is_set(): try: # NOTE: use nowait() so our loop is hot & can shutdown ASAP # if finished gets set. 
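                    # (Hedged gloss: get_nowait() raises queue.Empty at once
                    # when nothing is pending; the `except Empty: pass` below
                    # swallows that, so the loop spins hot and re-checks the
                    # `finished` Event as quickly as possible.)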
                    stream = in_q.get_nowait()
                    for response in responder.submit(stream):
                        out_q.put_nowait(response)
                except Empty:
                    pass

        # Create two threads from that body func, and queues/etc for each
        t1_in, t1_out, t1_finished = Queue(), Queue(), Event()
        t2_in, t2_out, t2_finished = Queue(), Queue(), Event()
        t1 = Thread(target=body, args=(r, t1_in, t1_out, t1_finished))
        t2 = Thread(target=body, args=(r, t2_in, t2_out, t2_finished))
        # Start the threads
        t1.start()
        t2.start()
        try:
            stream = "foo fighters"
            # First thread will basically always work
            t1_in.put(stream)
            assert t1_out.get() == "bar fight"
            # Second thread get() will block/timeout if threadlocals aren't in
            # use, because the 2nd thread's copy of the responder will not have
            # its own index & will thus already be 'past' the `foo` in the
            # stream.
            t2_in.put(stream)
            assert t2_out.get(timeout=1) == "bar fight"
        except Empty:
            assert (
                False
            ), "Unable to read from thread 2 - implies threadlocal indices are broken!"  # noqa
        # Close up.
        finally:
            t1_finished.set()
            t2_finished.set()
            t1.join()
            t2.join()

    def yields_response_when_regular_string_pattern_seen(self):
        r = Responder(pattern="empty", response="handed")
        assert list(r.submit("the house was empty")) == ["handed"]

    def yields_response_when_regex_seen(self):
        r = Responder(pattern=r"tech.*debt", response="pay it down")
        response = r.submit("technically, it's still debt")
        assert list(response) == ["pay it down"]

    def multiple_hits_within_stream_yield_multiple_responses(self):
        r = Responder(pattern="jump", response="how high?")
        assert list(r.submit("jump, wait, jump, wait")) == ["how high?"] * 2

    def patterns_span_multiple_lines(self):
        r = Responder(pattern=r"call.*problem", response="So sorry")
        output = """
You only call me when you have a problem
You never call me
Just to say hi
"""
        assert list(r.submit(output)) == ["So sorry"]


class FailingResponder_:
    def behaves_like_regular_responder_by_default(self):
        r = FailingResponder(
            pattern="ju[^ ]{2}", response="how high?", sentinel="lolnope"
        )
        assert list(r.submit("jump, wait, jump, wait")) == ["how high?"] * 2

    def raises_failure_exception_when_sentinel_detected(self):
        r = FailingResponder(
            pattern="ju[^ ]{2}", response="how high?", sentinel="lolnope"
        )
        # Behaves normally initially
        assert list(r.submit("jump")) == ["how high?"]
        # But then!
        try:
            r.submit("lolnope")
        except ResponseNotAccepted as e:
            message = str(e)
            # Expect useful bits in exception text
            err = "Didn't see pattern in {!r}".format(message)
            assert "ju[^ ]{2}" in message, err
            err = "Didn't see failure sentinel in {!r}".format(message)
            assert "lolnope" in message, err
        else:
            assert False, "Did not raise ResponseNotAccepted!"
invoke-2.2.0/tox.ini000066400000000000000000000005611445356551000143330ustar00rootroot00000000000000# Tox (http://tox.testrun.org/) is a tool for running tests
# in multiple virtualenvs. This configuration file will run the
# test suite on all supported python versions. To use it, "pip install tox"
# and then run "tox" from this directory.

[tox]
envlist = py36, py37, py38, py39, py310, py311

[testenv]
commands =
    pip install -r dev-requirements.txt
    spec
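# Hedged usage note (not part of the original file): with tox installed, a
# single environment may be run via e.g. "tox -e py311", which executes the
# [testenv] commands above inside that interpreter's virtualenv.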